Diffstat (limited to 'venv/lib/python3.9/site-packages/numpy/distutils')
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/__config__.py | 115
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/__init__.py | 64
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi | 4
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py | 91
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py | 814
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py | 2659
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c | 27
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c | 24
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c | 32
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c | 14
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c | 18
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c | 36
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c | 1
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py | 148
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build.py | 62
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py | 468
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py | 740
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py | 31
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py | 49
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py | 773
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/config.py | 516
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py | 126
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install.py | 79
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py | 40
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py | 24
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py | 27
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py | 329
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/core.py | 215
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py | 683
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py | 315
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/extension.py | 107
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py | 1030
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py | 156
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py | 71
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py | 120
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py | 88
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py | 42
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py | 555
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py | 97
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py | 211
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py | 45
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py | 54
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py | 87
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py | 28
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py | 53
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py | 33
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py | 128
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py | 51
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py | 52
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/from_template.py | 261
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py | 111
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py | 116
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py | 77
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/log.py | 111
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py | 592
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py | 2493
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py | 63
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py | 76
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py | 437
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/setup.py | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/system_info.py | 3172
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py | 74
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py | 808
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py | 176
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py | 217
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py | 43
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py | 55
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py | 44
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py | 34
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py | 42
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py | 82
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py | 84
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py | 79
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py | 323
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py | 141
123 files changed, 22283 insertions, 0 deletions
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/__config__.py b/venv/lib/python3.9/site-packages/numpy/distutils/__config__.py
new file mode 100644
index 00000000..2590dbb9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/__config__.py
@@ -0,0 +1,115 @@
+# This file is generated by numpy's setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+
+import os
+import sys
+
+extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
+
+if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.add_dll_directory(extra_dll_dir)
+
+openblas64__info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+blas_ilp64_opt_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+openblas64__lapack_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+lapack_ilp64_opt_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+
+def get_info(name):
+ g = globals()
+ return g.get(name, g.get(name + "_info", {}))
+
+def show():
+ """
+ Show libraries in the system on which NumPy was built.
+
+ Print information about various resources (libraries, library
+ directories, include directories, etc.) in the system on which
+ NumPy was built.
+
+ See Also
+ --------
+ get_include : Returns the directory containing NumPy C
+ header files.
+
+ Notes
+ -----
+ 1. Classes specifying the information to be printed are defined
+ in the `numpy.distutils.system_info` module.
+
+ Information may include:
+
+ * ``language``: language used to write the libraries (mostly
+ C or f77)
+ * ``libraries``: names of libraries found in the system
+ * ``library_dirs``: directories containing the libraries
+ * ``include_dirs``: directories containing library header files
+ * ``src_dirs``: directories containing library source files
+ * ``define_macros``: preprocessor macros used by
+ ``distutils.setup``
+ * ``baseline``: minimum CPU features required
+ * ``found``: dispatched features supported in the system
+ * ``not found``: dispatched features that are not supported
+ in the system
+
+ 2. NumPy BLAS/LAPACK Installation Notes
+
+ Installing a numpy wheel (``pip install numpy`` or force it
+ via ``pip install numpy --only-binary :numpy: numpy``) includes
+ an OpenBLAS implementation of the BLAS and LAPACK linear algebra
+ APIs. In this case, ``library_dirs`` reports the original build
+ time configuration as compiled with gcc/gfortran; at run time
+ the OpenBLAS library is in
+ ``site-packages/numpy.libs/`` (linux), or
+ ``site-packages/numpy/.dylibs/`` (macOS), or
+ ``site-packages/numpy/.libs/`` (windows).
+
+ Installing numpy from source
+ (``pip install numpy --no-binary numpy``) searches for BLAS and
+ LAPACK dynamic link libraries at build time as influenced by
+ environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
+ NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
+ or the optional file ``~/.numpy-site.cfg``.
+ NumPy remembers those locations and expects to load the same
+ libraries at run-time.
+ In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
+ library) is in the default build-time search order after
+ 'openblas'.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.show_config()
+ blas_opt_info:
+ language = c
+ define_macros = [('HAVE_CBLAS', None)]
+ libraries = ['openblas', 'openblas']
+ library_dirs = ['/usr/local/lib']
+ """
+ from numpy.core._multiarray_umath import (
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+ )
+ for name,info_dict in globals().items():
+ if name[0] == "_" or type(info_dict) is not type({}): continue
+ print(name + ":")
+ if not info_dict:
+ print(" NOT AVAILABLE")
+ for k,v in info_dict.items():
+ v = str(v)
+ if k == "sources" and len(v) > 200:
+ v = v[:60] + " ...\n... " + v[-60:]
+ print(" %s = %s" % (k,v))
+
+ features_found, features_not_found = [], []
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ features_found.append(feature)
+ else:
+ features_not_found.append(feature)
+
+ print("Supported SIMD extensions in this NumPy install:")
+ print(" baseline = %s" % (','.join(__cpu_baseline__)))
+ print(" found = %s" % (','.join(features_found)))
+ print(" not found = %s" % (','.join(features_not_found)))
+
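As a quick illustration of the ``get_info`` helper above (a sketch; the exact
contents depend on how this particular NumPy build was configured):

>>> from numpy.distutils.__config__ import get_info
>>> get_info('blas_ilp64_opt')['libraries']
['openblas64_', 'openblas64_']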
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.py
new file mode 100644
index 00000000..f74ed4d3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.py
@@ -0,0 +1,64 @@
+"""
+An enhanced distutils, providing support for Fortran compilers, for BLAS,
+LAPACK and other common libraries for numerical computing, and more.
+
+Public submodules are::
+
+ misc_util
+ system_info
+ cpuinfo
+ log
+ exec_command
+
+For details, please see the *Packaging* and *NumPy Distutils User Guide*
+sections of the NumPy Reference Guide.
+
+For configuring the preference for and location of libraries like BLAS and
+LAPACK, and for setting include paths and similar build options, please see
+``site.cfg.example`` in the root of the NumPy repository or sdist.
+
+"""
+
+import warnings
+
+# Must import local ccompiler ASAP in order to get
+# customized CCompiler.spawn effective.
+from . import ccompiler
+from . import unixccompiler
+
+from .npy_pkg_config import *
+
+warnings.warn("\n\n"
+ " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
+ " of the deprecation of `distutils` itself. It will be removed for\n"
+ " Python >= 3.12. For older Python versions it will remain present.\n"
+ " It is recommended to use `setuptools < 60.0` for those Python versions.\n"
+ " For more details, see:\n"
+ " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
+ DeprecationWarning, stacklevel=2
+)
+del warnings
+
+# If numpy is installed, add distutils.test()
+try:
+ from . import __config__
+ # Normally numpy is installed if the above import works, but an interrupted
+ # in-place build could also have left a __config__.py. In that case the
+ # next import may still fail, so keep it inside the try block.
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+except ImportError:
+ pass
+
+
+def customized_fcompiler(plat=None, compiler=None):
+ from numpy.distutils.fcompiler import new_fcompiler
+ c = new_fcompiler(plat=plat, compiler=compiler)
+ c.customize()
+ return c
+
+def customized_ccompiler(plat=None, compiler=None, verbose=1):
+ c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
+ c.customize('')
+ return c
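For illustration, a hypothetical session using the two helpers above (the
reported compiler type varies by platform; 'unix' is typical on Linux):

>>> from numpy.distutils import customized_ccompiler
>>> cc = customized_ccompiler()
>>> cc.compiler_type
'unix'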
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi
new file mode 100644
index 00000000..3938d68d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi
@@ -0,0 +1,4 @@
+from typing import Any
+
+# TODO: remove when the full numpy namespace is defined
+def __getattr__(name: str) -> Any: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py b/venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py
new file mode 100644
index 00000000..82abd5f4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py
@@ -0,0 +1,91 @@
+"""
+Helper functions for interacting with the shell, and consuming shell-style
+parameters provided in config files.
+"""
+import os
+import shlex
+import subprocess
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
+
+
+class CommandLineParser:
+ """
+ An object that knows how to split and join command-line arguments.
+
+ It must be true that ``argv == split(join(argv))`` for all ``argv``.
+    The reverse need not be true - `join(split(cmd))` may result in the addition
+ or removal of unnecessary escaping.
+ """
+ @staticmethod
+ def join(argv):
+ """ Join a list of arguments into a command line string """
+ raise NotImplementedError
+
+ @staticmethod
+ def split(cmd):
+ """ Split a command line string into a list of arguments """
+ raise NotImplementedError
+
+
+class WindowsParser:
+ """
+ The parsing behavior used by `subprocess.call("string")` on Windows, which
+ matches the Microsoft C/C++ runtime.
+
+ Note that this is _not_ the behavior of cmd.
+ """
+ @staticmethod
+ def join(argv):
+ # note that list2cmdline is specific to the windows syntax
+ return subprocess.list2cmdline(argv)
+
+ @staticmethod
+ def split(cmd):
+ import ctypes # guarded import for systems without ctypes
+ try:
+ ctypes.windll
+ except AttributeError:
+ raise NotImplementedError
+
+ # Windows has special parsing rules for the executable (no quotes),
+ # that we do not care about - insert a dummy element
+ if not cmd:
+ return []
+ cmd = 'dummy ' + cmd
+
+ CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+ CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
+ CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
+
+ nargs = ctypes.c_int()
+ lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
+ args = [lpargs[i] for i in range(nargs.value)]
+ assert not ctypes.windll.kernel32.LocalFree(lpargs)
+
+ # strip the element we inserted
+ assert args[0] == "dummy"
+ return args[1:]
+
+
+class PosixParser:
+ """
+ The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
+ """
+ @staticmethod
+ def join(argv):
+ return ' '.join(quote(arg) for arg in argv)
+
+ @staticmethod
+ def split(cmd):
+ return shlex.split(cmd, posix=True)
+
+
+if os.name == 'nt':
+ NativeParser = WindowsParser
+elif os.name == 'posix':
+ NativeParser = PosixParser
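The invariant documented in ``CommandLineParser`` above, ``argv ==
split(join(argv))``, can be exercised with a small round-trip (illustrative,
on a POSIX system):

>>> from numpy.distutils._shell_utils import NativeParser
>>> argv = ['gcc', '-I', '/opt/my include']
>>> NativeParser.split(NativeParser.join(argv)) == argv
True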
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py
new file mode 100644
index 00000000..afba7eb3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py
@@ -0,0 +1,26 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class ArmCCompiler(UnixCCompiler):
+
+ """
+ Arm compiler.
+ """
+
+ compiler_type = 'arm'
+ cc_exe = 'armclang'
+ cxx_exe = 'armclang++'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+ cc_compiler = self.cc_exe
+ cxx_compiler = self.cxx_exe
+ self.set_executables(compiler=cc_compiler +
+ ' -O3 -fPIC',
+ compiler_so=cc_compiler +
+ ' -O3 -fPIC',
+ compiler_cxx=cxx_compiler +
+ ' -O3 -fPIC',
+ linker_exe=cc_compiler +
+ ' -lamath',
+ linker_so=cc_compiler +
+ ' -lamath -shared')
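Because ``'arm'`` is registered in ``compiler_class`` later in
``ccompiler.py``, this class is normally obtained through the factory
function rather than instantiated directly (a sketch; actual compilation
assumes ``armclang`` is available on the PATH):

>>> from numpy.distutils.ccompiler import new_compiler
>>> cc = new_compiler(compiler='arm')
>>> cc.cc_exe
'armclang'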
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py
new file mode 100644
index 00000000..f0487cb6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py
@@ -0,0 +1,814 @@
+import os
+import re
+import sys
+import shlex
+import time
+import subprocess
+from copy import copy
+from distutils import ccompiler
+from distutils.ccompiler import (
+ compiler_class, gen_lib_options, get_default_compiler, new_compiler,
+ CCompiler
+)
+from distutils.errors import (
+ DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
+ CompileError, UnknownFileError
+)
+from distutils.sysconfig import customize_compiler
+from distutils.version import LooseVersion
+
+from numpy.distutils import log
+from numpy.distutils.exec_command import (
+ filepath_from_subprocess_output, forward_bytes_to_stdout
+)
+from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
+ get_num_build_jobs, \
+ _commandline_dep_string, \
+ sanitize_cxx_flags
+
+# globals for parallel build management
+import threading
+
+_job_semaphore = None
+_global_lock = threading.Lock()
+_processing_files = set()
+
+
+def _needs_build(obj, cc_args, extra_postargs, pp_opts):
+ """
+    Check if an object needs to be rebuilt based on its dependencies
+
+ Parameters
+ ----------
+ obj : str
+ object file
+
+ Returns
+ -------
+ bool
+ """
+    # defined in unixccompiler.py
+ dep_file = obj + '.d'
+ if not os.path.exists(dep_file):
+ return True
+
+ # dep_file is a makefile containing 'object: dependencies'
+ # formatted like posix shell (spaces escaped, \ line continuations)
+ # the last line contains the compiler commandline arguments as some
+ # projects may compile an extension multiple times with different
+ # arguments
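+    # e.g. a generated dep file might look like (an illustrative sketch):
+    #   foo.o: foo.c \
+    #    foo.h /usr/include/stdio.h
+    #   # <last line: the string produced by _commandline_dep_string(...)>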
+ with open(dep_file, "r") as f:
+ lines = f.readlines()
+
+    cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
+ last_cmdline = lines[-1]
+ if last_cmdline != cmdline:
+ return True
+
+ contents = ''.join(lines[:-1])
+ deps = [x for x in shlex.split(contents, posix=True)
+ if x != "\n" and not x.endswith(":")]
+
+ try:
+ t_obj = os.stat(obj).st_mtime
+
+ # check if any of the dependencies is newer than the object
+ # the dependencies includes the source used to create the object
+ for f in deps:
+ if os.stat(f).st_mtime > t_obj:
+ return True
+ except OSError:
+ # no object counts as newer (shouldn't happen if dep_file exists)
+ return True
+
+ return False
+
+
+def replace_method(klass, method_name, func):
+ # Py3k does not have unbound method anymore, MethodType does not work
+ m = lambda self, *args, **kw: func(self, *args, **kw)
+ setattr(klass, method_name, m)
+
+
+######################################################################
+## Method that subclasses may redefine. But don't call this method,
+## it is private to the CCompiler class and may return unexpected
+## results if used elsewhere. So, you have been warned.
+
+def CCompiler_find_executables(self):
+ """
+ Does nothing here, but is called by the get_version method and can be
+ overridden by subclasses. In particular it is redefined in the `FCompiler`
+ class where more documentation can be found.
+
+ """
+ pass
+
+
+replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
+
+
+# Using customized CCompiler.spawn.
+def CCompiler_spawn(self, cmd, display=None, env=None):
+ """
+ Execute a command in a sub-process.
+
+ Parameters
+ ----------
+ cmd : str
+ The command to execute.
+ display : str or sequence of str, optional
+ The text to add to the log file kept by `numpy.distutils`.
+ If not given, `display` is equal to `cmd`.
+ env : a dictionary for environment variables, optional
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ DistutilsExecError
+ If the command failed, i.e. the exit status was not 0.
+
+ """
+ env = env if env is not None else dict(os.environ)
+ if display is None:
+ display = cmd
+ if is_sequence(display):
+ display = ' '.join(list(display))
+ log.info(display)
+ try:
+ if self.verbose:
+ subprocess.check_output(cmd, env=env)
+ else:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
+ except subprocess.CalledProcessError as exc:
+ o = exc.output
+ s = exc.returncode
+ except OSError as e:
+ # OSError doesn't have the same hooks for the exception
+ # output, but exec_command() historically would use an
+ # empty string for EnvironmentError (base class for
+ # OSError)
+ # o = b''
+ # still that would make the end-user lost in translation!
+ o = f"\n\n{e}\n\n\n"
+ try:
+ o = o.encode(sys.stdout.encoding)
+ except AttributeError:
+ o = o.encode('utf8')
+ # status previously used by exec_command() for parent
+ # of OSError
+ s = 127
+ else:
+ # use a convenience return here so that any kind of
+ # caught exception will execute the default code after the
+ # try / except block, which handles various exceptions
+ return None
+
+ if is_sequence(cmd):
+ cmd = ' '.join(list(cmd))
+
+ if self.verbose:
+ forward_bytes_to_stdout(o)
+
+ if re.search(b'Too many open files', o):
+ msg = '\nTry rerunning setup command until build succeeds.'
+ else:
+ msg = ''
+ raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
+ (cmd, s, msg))
+
+replace_method(CCompiler, 'spawn', CCompiler_spawn)
+
+def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+ """
+ Return the name of the object files for the given source files.
+
+ Parameters
+ ----------
+ source_filenames : list of str
+ The list of paths to source files. Paths can be either relative or
+ absolute, this is handled transparently.
+ strip_dir : bool, optional
+ Whether to strip the directory from the returned paths. If True,
+ the file name prepended by `output_dir` is returned. Default is False.
+ output_dir : str, optional
+ If given, this path is prepended to the returned paths to the
+ object files.
+
+ Returns
+ -------
+ obj_names : list of str
+ The list of paths to the object files corresponding to the source
+ files in `source_filenames`.
+
+ """
+ if output_dir is None:
+ output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ base, ext = os.path.splitext(os.path.normpath(src_name))
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if base.startswith('..'):
+ # Resolve starting relative path components, middle ones
+ # (if any) have been handled by os.path.normpath above.
+ i = base.rfind('..')+2
+ d = base[:i]
+ d = os.path.basename(os.path.abspath(d))
+ base = d + base[i:]
+ if ext not in self.src_extensions:
+ raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
+ if strip_dir:
+ base = os.path.basename(base)
+ obj_name = os.path.join(output_dir, base + self.obj_extension)
+ obj_names.append(obj_name)
+ return obj_names
+
+replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
+
+def CCompiler_compile(self, sources, output_dir=None, macros=None,
+ include_dirs=None, debug=0, extra_preargs=None,
+ extra_postargs=None, depends=None):
+ """
+ Compile one or more source files.
+
+ Please refer to the Python distutils API reference for more details.
+
+ Parameters
+ ----------
+ sources : list of str
+ A list of filenames
+ output_dir : str, optional
+ Path to the output directory.
+ macros : list of tuples
+ A list of macro definitions.
+ include_dirs : list of str, optional
+ The directories to add to the default include file search path for
+ this compilation only.
+ debug : bool, optional
+ Whether or not to output debug symbols in or alongside the object
+ file(s).
+    extra_preargs, extra_postargs : list of str, optional
+        Extra pre- and post-arguments passed to the compiler.
+ depends : list of str, optional
+ A list of file names that all targets depend on.
+
+ Returns
+ -------
+ objects : list of str
+ A list of object file names, one per source file `sources`.
+
+ Raises
+ ------
+ CompileError
+ If compilation fails.
+
+ """
+ global _job_semaphore
+
+ jobs = get_num_build_jobs()
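+    # jobs is typically derived from the ``-j``/``--parallel`` option of the
+    # build commands or from the NPY_NUM_BUILD_JOBS environment variable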
+
+ # setup semaphore to not exceed number of compile jobs when parallelized at
+ # extension level (python >= 3.5)
+ with _global_lock:
+ if _job_semaphore is None:
+ _job_semaphore = threading.Semaphore(jobs)
+
+ if not sources:
+ return []
+ from numpy.distutils.fcompiler import (FCompiler, is_f_file,
+ has_f90_header)
+ if isinstance(self, FCompiler):
+ display = []
+ for fc in ['f77', 'f90', 'fix']:
+ fcomp = getattr(self, 'compiler_'+fc)
+ if fcomp is None:
+ continue
+ display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
+ display = '\n'.join(display)
+ else:
+ ccomp = self.compiler_so
+ display = "C compiler: %s\n" % (' '.join(ccomp),)
+ log.info(display)
+ macros, objects, extra_postargs, pp_opts, build = \
+ self._setup_compile(output_dir, macros, include_dirs, sources,
+ depends, extra_postargs)
+ cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+ display = "compile options: '%s'" % (' '.join(cc_args))
+ if extra_postargs:
+ display += "\nextra options: '%s'" % (' '.join(extra_postargs))
+ log.info(display)
+
+ def single_compile(args):
+ obj, (src, ext) = args
+ if not _needs_build(obj, cc_args, extra_postargs, pp_opts):
+ return
+
+ # check if we are currently already processing the same object
+ # happens when using the same source in multiple extensions
+ while True:
+ # need explicit lock as there is no atomic check and add with GIL
+ with _global_lock:
+ # file not being worked on, start working
+ if obj not in _processing_files:
+ _processing_files.add(obj)
+ break
+ # wait for the processing to end
+ time.sleep(0.1)
+
+ try:
+ # retrieve slot from our #job semaphore and build
+ with _job_semaphore:
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+ finally:
+ # register being done processing
+ with _global_lock:
+ _processing_files.remove(obj)
+
+
+ if isinstance(self, FCompiler):
+ objects_to_build = list(build.keys())
+ f77_objects, other_objects = [], []
+ for obj in objects:
+ if obj in objects_to_build:
+ src, ext = build[obj]
+ if self.compiler_type=='absoft':
+ obj = cyg2win32(obj)
+ src = cyg2win32(src)
+ if is_f_file(src) and not has_f90_header(src):
+ f77_objects.append((obj, (src, ext)))
+ else:
+ other_objects.append((obj, (src, ext)))
+
+ # f77 objects can be built in parallel
+ build_items = f77_objects
+ # build f90 modules serial, module files are generated during
+ # compilation and may be used by files later in the list so the
+ # ordering is important
+ for o in other_objects:
+ single_compile(o)
+ else:
+ build_items = build.items()
+
+ if len(build) > 1 and jobs > 1:
+ # build parallel
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(jobs) as pool:
+ res = pool.map(single_compile, build_items)
+ list(res) # access result to raise errors
+ else:
+ # build serial
+ for o in build_items:
+ single_compile(o)
+
+ # Return *all* object filenames, not just the ones we just built.
+ return objects
+
+replace_method(CCompiler, 'compile', CCompiler_compile)
+
+def CCompiler_customize_cmd(self, cmd, ignore=()):
+ """
+ Customize compiler using distutils command.
+
+ Parameters
+ ----------
+ cmd : class instance
+ An instance inheriting from `distutils.cmd.Command`.
+ ignore : sequence of str, optional
+ List of `CCompiler` commands (without ``'set_'``) that should not be
+ altered. Strings that are checked for are:
+ ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
+ 'rpath', 'link_objects')``.
+
+ Returns
+ -------
+ None
+
+ """
+ log.info('customize %s using %s' % (self.__class__.__name__,
+ cmd.__class__.__name__))
+
+ if hasattr(self, 'compiler') and 'clang' in self.compiler[0]:
+        # clang defaults to a non-strict floating-point error model.
+ # Since NumPy and most Python libs give warnings for these, override:
+ self.compiler.append('-ftrapping-math')
+ self.compiler_so.append('-ftrapping-math')
+
+ def allow(attr):
+ return getattr(cmd, attr, None) is not None and attr not in ignore
+
+ if allow('include_dirs'):
+ self.set_include_dirs(cmd.include_dirs)
+ if allow('define'):
+ for (name, value) in cmd.define:
+ self.define_macro(name, value)
+ if allow('undef'):
+ for macro in cmd.undef:
+ self.undefine_macro(macro)
+ if allow('libraries'):
+ self.set_libraries(self.libraries + cmd.libraries)
+ if allow('library_dirs'):
+ self.set_library_dirs(self.library_dirs + cmd.library_dirs)
+ if allow('rpath'):
+ self.set_runtime_library_dirs(cmd.rpath)
+ if allow('link_objects'):
+ self.set_link_objects(cmd.link_objects)
+
+replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
+
+def _compiler_to_string(compiler):
+ props = []
+ mx = 0
+ keys = list(compiler.executables.keys())
+ for key in ['version', 'libraries', 'library_dirs',
+ 'object_switch', 'compile_switch',
+ 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
+ if key not in keys:
+ keys.append(key)
+ for key in keys:
+ if hasattr(compiler, key):
+ v = getattr(compiler, key)
+ mx = max(mx, len(key))
+ props.append((key, repr(v)))
+ fmt = '%-' + repr(mx+1) + 's = %s'
+ lines = [fmt % prop for prop in props]
+ return '\n'.join(lines)
+
+def CCompiler_show_customization(self):
+ """
+ Print the compiler customizations to stdout.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ Printing is only done if the distutils log threshold is < 2.
+
+ """
+ try:
+ self.get_version()
+ except Exception:
+ pass
+ if log._global_log.threshold<2:
+ print('*'*80)
+ print(self.__class__)
+ print(_compiler_to_string(self))
+ print('*'*80)
+
+replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
+
+def CCompiler_customize(self, dist, need_cxx=0):
+ """
+ Do any platform-specific customization of a compiler instance.
+
+ This method calls `distutils.sysconfig.customize_compiler` for
+ platform-specific customization, as well as optionally remove a flag
+ to suppress spurious warnings in case C++ code is being compiled.
+
+ Parameters
+ ----------
+ dist : object
+ This parameter is not used for anything.
+ need_cxx : bool, optional
+ Whether or not C++ has to be compiled. If so (True), the
+ ``"-Wstrict-prototypes"`` option is removed to prevent spurious
+ warnings. Default is False.
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ All the default options used by distutils can be extracted with::
+
+ from distutils import sysconfig
+ sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
+ 'CCSHARED', 'LDSHARED', 'SO')
+
+ """
+ # See FCompiler.customize for suggested usage.
+ log.info('customize %s' % (self.__class__.__name__))
+ customize_compiler(self)
+ if need_cxx:
+ # In general, distutils uses -Wstrict-prototypes, but this option is
+ # not valid for C++ code, only for C. Remove it if it's there to
+ # avoid a spurious warning on every compilation.
+ try:
+ self.compiler_so.remove('-Wstrict-prototypes')
+ except (AttributeError, ValueError):
+ pass
+
+ if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
+ if not self.compiler_cxx:
+ if self.compiler[0].startswith('gcc'):
+ a, b = 'gcc', 'g++'
+ else:
+ a, b = 'cc', 'c++'
+ self.compiler_cxx = [self.compiler[0].replace(a, b)]\
+ + self.compiler[1:]
+ else:
+ if hasattr(self, 'compiler'):
+ log.warn("#### %s #######" % (self.compiler,))
+ if not hasattr(self, 'compiler_cxx'):
+ log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
+
+
+ # check if compiler supports gcc style automatic dependencies
+ # run on every extension so skip for known good compilers
+ if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
+ 'g++' in self.compiler[0] or
+ 'clang' in self.compiler[0]):
+ self._auto_depends = True
+ elif os.name == 'posix':
+ import tempfile
+ import shutil
+ tmpdir = tempfile.mkdtemp()
+ try:
+ fn = os.path.join(tmpdir, "file.c")
+ with open(fn, "w") as f:
+ f.write("int a;\n")
+ self.compile([fn], output_dir=tmpdir,
+ extra_preargs=['-MMD', '-MF', fn + '.d'])
+ self._auto_depends = True
+ except CompileError:
+ self._auto_depends = False
+ finally:
+ shutil.rmtree(tmpdir)
+
+ return
+
+replace_method(CCompiler, 'customize', CCompiler_customize)
+
+def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
+ """
+ Simple matching of version numbers, for use in CCompiler and FCompiler.
+
+ Parameters
+ ----------
+ pat : str, optional
+ A regular expression matching version numbers.
+ Default is ``r'[-.\\d]+'``.
+ ignore : str, optional
+ A regular expression matching patterns to skip.
+ Default is ``''``, in which case nothing is skipped.
+ start : str, optional
+ A regular expression matching the start of where to start looking
+ for version numbers.
+ Default is ``''``, in which case searching is started at the
+ beginning of the version string given to `matcher`.
+
+ Returns
+ -------
+ matcher : callable
+ A function that is appropriate to use as the ``.version_match``
+ attribute of a `CCompiler` class. `matcher` takes a single parameter,
+ a version string.
+
+ """
+ def matcher(self, version_string):
+ # version string may appear in the second line, so getting rid
+ # of new lines:
+ version_string = version_string.replace('\n', ' ')
+ pos = 0
+ if start:
+ m = re.match(start, version_string)
+ if not m:
+ return None
+ pos = m.end()
+ while True:
+ m = re.search(pat, version_string[pos:])
+ if not m:
+ return None
+ if ignore and re.match(ignore, m.group(0)):
+ pos = m.end()
+ continue
+ break
+ return m.group(0)
+ return matcher
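+
+# For instance (an illustrative sketch, not part of the original module):
+#     matcher = simple_version_match(start=r'gcc')
+#     matcher(compiler, 'gcc (GCC) 10.3.0')   # -> '10.3.0'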
+
+def CCompiler_get_version(self, force=False, ok_status=[0]):
+ """
+ Return compiler version, or None if compiler is not available.
+
+ Parameters
+ ----------
+ force : bool, optional
+ If True, force a new determination of the version, even if the
+ compiler already has a version attribute. Default is False.
+ ok_status : list of int, optional
+ The list of status values returned by the version look-up process
+ for which a version string is returned. If the status value is not
+ in `ok_status`, None is returned. Default is ``[0]``.
+
+ Returns
+ -------
+ version : str or None
+ Version string, in the format of `distutils.version.LooseVersion`.
+
+ """
+ if not force and hasattr(self, 'version'):
+ return self.version
+ self.find_executables()
+ try:
+ version_cmd = self.version_cmd
+ except AttributeError:
+ return None
+ if not version_cmd or not version_cmd[0]:
+ return None
+ try:
+ matcher = self.version_match
+ except AttributeError:
+ try:
+ pat = self.version_pattern
+ except AttributeError:
+ return None
+ def matcher(version_string):
+ m = re.match(pat, version_string)
+ if not m:
+ return None
+ version = m.group('version')
+ return version
+
+ try:
+ output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ status = exc.returncode
+ except OSError:
+ # match the historical returns for a parent
+ # exception class caught by exec_command()
+ status = 127
+ output = b''
+ else:
+ # output isn't actually a filepath but we do this
+ # for now to match previous distutils behavior
+ output = filepath_from_subprocess_output(output)
+ status = 0
+
+ version = None
+ if status in ok_status:
+ version = matcher(output)
+ if version:
+ version = LooseVersion(version)
+ self.version = version
+ return version
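+
+# e.g. (illustrative): for a GCC 10.3 toolchain, ``compiler.get_version()``
+# returns ``LooseVersion('10.3.0')`` and caches it on ``compiler.version``.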
+
+replace_method(CCompiler, 'get_version', CCompiler_get_version)
+
+def CCompiler_cxx_compiler(self):
+ """
+ Return the C++ compiler.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ cxx : class instance
+ The C++ compiler, as a `CCompiler` instance.
+
+ """
+ if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
+ return self
+
+ cxx = copy(self)
+ cxx.compiler_cxx = cxx.compiler_cxx
+ cxx.compiler_so = [cxx.compiler_cxx[0]] + \
+ sanitize_cxx_flags(cxx.compiler_so[1:])
+ if (sys.platform.startswith(('aix', 'os400')) and
+ 'ld_so_aix' in cxx.linker_so[0]):
+ # AIX needs the ld_so_aix script included with Python
+ cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+ + cxx.linker_so[2:]
+ if sys.platform.startswith('os400'):
+            # This is required by IBM i 7.4 and previous releases for PRId64 in printf() calls.
+ cxx.compiler_so.append('-D__STDC_FORMAT_MACROS')
+            # This works around a bug in gcc 10.3, which fails to handle the TLS init.
+ cxx.compiler_so.append('-fno-extern-tls-init')
+ cxx.linker_so.append('-fno-extern-tls-init')
+ else:
+ cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
+ return cxx
+
+replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
+
+compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
+ "Intel C Compiler for 32-bit applications")
+compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
+ "Intel C Itanium Compiler for Itanium-based applications")
+compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
+ "Intel C Compiler for 64-bit applications")
+compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
+ "Intel C Compiler for 32-bit applications on Windows")
+compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
+ "Intel C Compiler for 64-bit applications on Windows")
+compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
+ "PathScale Compiler for SiCortex-based applications")
+compiler_class['arm'] = ('armccompiler', 'ArmCCompiler',
+ "Arm C Compiler")
+
+ccompiler._default_compilers += (('linux.*', 'intel'),
+ ('linux.*', 'intele'),
+ ('linux.*', 'intelem'),
+ ('linux.*', 'pathcc'),
+ ('nt', 'intelw'),
+ ('nt', 'intelemw'))
+
+if sys.platform == 'win32':
+ compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
+ "Mingw32 port of GNU C Compiler for Win32"\
+                                 " (for MSC built Python)")
+ if mingw32():
+ # On windows platforms, we want to default to mingw32 (gcc)
+ # because msvc can't build blitz stuff.
+ log.info('Setting mingw32 as default compiler for nt.')
+ ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ + ccompiler._default_compilers
+
+
+_distutils_new_compiler = new_compiler
+def new_compiler (plat=None,
+ compiler=None,
+ verbose=None,
+ dry_run=0,
+ force=0):
+ # Try first C compilers from numpy.distutils.
+ if verbose is None:
+ verbose = log.get_threshold() <= log.INFO
+ if plat is None:
+ plat = os.name
+ try:
+ if compiler is None:
+ compiler = get_default_compiler(plat)
+ (module_name, class_name, long_description) = compiler_class[compiler]
+ except KeyError:
+ msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+ if compiler is not None:
+ msg = msg + " with '%s' compiler" % compiler
+ raise DistutilsPlatformError(msg)
+ module_name = "numpy.distutils." + module_name
+ try:
+ __import__ (module_name)
+ except ImportError as e:
+ msg = str(e)
+ log.info('%s in numpy.distutils; trying from distutils',
+ str(msg))
+ module_name = module_name[6:]
+ try:
+ __import__(module_name)
+ except ImportError as e:
+ msg = str(e)
+ raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
+ module_name)
+ try:
+ module = sys.modules[module_name]
+ klass = vars(module)[class_name]
+ except KeyError:
+ raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
+ "in module '%s'") % (class_name, module_name))
+ compiler = klass(None, dry_run, force)
+ compiler.verbose = verbose
+ log.debug('new_compiler returns %s' % (klass))
+ return compiler
+
+ccompiler.new_compiler = new_compiler
+
+_distutils_gen_lib_options = gen_lib_options
+def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
+ # the version of this function provided by CPython allows the following
+ # to return lists, which are unpacked automatically:
+ # - compiler.runtime_library_dir_option
+ # our version extends the behavior to:
+ # - compiler.library_dir_option
+ # - compiler.library_option
+ # - compiler.find_library_file
+ r = _distutils_gen_lib_options(compiler, library_dirs,
+ runtime_library_dirs, libraries)
+ lib_opts = []
+ for i in r:
+ if is_sequence(i):
+ lib_opts.extend(list(i))
+ else:
+ lib_opts.append(i)
+ return lib_opts
+ccompiler.gen_lib_options = gen_lib_options
+
+# Also fix up the various compiler modules, which do
+# from distutils.ccompiler import gen_lib_options
+# Don't bother with mwerks, as we don't support Classic Mac.
+for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
+ _m = sys.modules.get('distutils.' + _cc + 'compiler')
+ if _m is not None:
+ setattr(_m, 'gen_lib_options', gen_lib_options)
+
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py
new file mode 100644
index 00000000..da550722
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py
@@ -0,0 +1,2659 @@
+"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware
+optimization, starting from parsing the command arguments, to managing the
+relation between the CPU baseline and dispatch-able features,
+also generating the required C headers and ending with compiling
+the sources with proper compiler's flags.
+
+`CCompilerOpt` doesn't provide runtime detection for the CPU features,
+instead only focuses on the compiler side, but it creates abstract C headers
+that can be used later for the final runtime dispatching process."""
+
+import atexit
+import inspect
+import os
+import pprint
+import re
+import subprocess
+import textwrap
+
+# These flags are used to compile any C++ source within Numpy.
+# They are chosen to have very few runtime dependencies.
+NPY_CXX_FLAGS = [
+ '-std=c++11', # Minimal standard version
+ '-D__STDC_VERSION__=0', # for compatibility with C headers
+ '-fno-exceptions', # no exception support
+ '-fno-rtti'] # no runtime type information
+
+
+class _Config:
+ """An abstract class holds all configurable attributes of `CCompilerOpt`,
+ these class attributes can be used to change the default behavior
+ of `CCompilerOpt` in order to fit other requirements.
+
+ Attributes
+ ----------
+ conf_nocache : bool
+ Set True to disable memory and file cache.
+ Default is False.
+
+    conf_noopt : bool
+        Set True to force optimization to be disabled; in this case
+        `CCompilerOpt` still generates all expected headers in order
+        'not' to break the build. Default is False.
+
+    conf_cache_factors : list
+        Add extra factors to the primary caching factors. The caching factors
+        are used to determine whether changes have happened that require
+        discarding the cache and rebuilding it. The primary factors are the
+        arguments of `CCompilerOpt` and `CCompiler`'s properties (type, flags, etc.).
+        Default is a list of two items: the time of last modification of
+        `ccompiler_opt` and the value of the attribute "conf_noopt".
+
+    conf_tmp_path : str
+        The path of the temporary directory. Default is an auto-created
+        temporary directory via ``tempfile.mkdtemp()``.
+
+    conf_check_path : str
+        The path of the testing files. Each added CPU feature must have a
+        **C** source file that contains at least one intrinsic or instruction
+        related to this feature, so it can be tested against the compiler.
+        Default is ``./distutils/checks``.
+
+ conf_target_groups : dict
+ Extra tokens that can be reached from dispatch-able sources through
+ the special mark ``@targets``. Default is an empty dictionary.
+
+ **Notes**:
+ - case-insensitive for tokens and group names
+            - the sign '#' must appear at the beginning of a group name and only within ``@targets``
+
+ **Example**:
+ .. code-block:: console
+
+ $ "@targets #avx_group other_tokens" > group_inside.c
+
+ >>> CCompilerOpt.conf_target_groups["avx_group"] = \\
+ "$werror $maxopt avx2 avx512f avx512_skx"
+ >>> cco = CCompilerOpt(cc_instance)
+ >>> cco.try_dispatch(["group_inside.c"])
+
+ conf_c_prefix : str
+ The prefix of public C definitions. Default is ``"NPY_"``.
+
+ conf_c_prefix_ : str
+ The prefix of internal C definitions. Default is ``"NPY__"``.
+
+    conf_cc_flags : dict
+        Nested dictionaries defining several compiler flags that are linked
+        to major functions; the main key represents the compiler name and
+        sub-keys represent flag names. The default already covers all
+        supported **C** compilers.
+
+ Sub-keys explained as follows:
+
+ "native": str or None
+ used by argument option `native`, to detect the current
+ machine support via the compiler.
+ "werror": str or None
+ utilized to treat warning as errors during testing CPU features
+ against the compiler and also for target's policy `$werror`
+ via dispatch-able sources.
+ "maxopt": str or None
+ utilized for target's policy '$maxopt' and the value should
+ contains the maximum acceptable optimization by the compiler.
+ e.g. in gcc `'-O3'`
+
+ **Notes**:
+ * case-sensitive for compiler names and flags
+ * use space to separate multiple flags
+ * any flag will tested against the compiler and it will skipped
+ if it's not applicable.
+
+    conf_min_features : dict
+        A dictionary defining the CPU features used for the
+        argument option `'min'`; the key represents the CPU architecture
+        name, e.g. `'x86'`. Default values provide the best effort
+        across a wide range of user platforms.
+
+        **Note**: case-sensitive for architecture names.
+
+    conf_features : dict
+        Nested dictionaries used for identifying the CPU features.
+        The primary key represents a feature name or a group name
+        that gathers several features. Default values cover all
+        supported features but leave out the major options like "flags";
+        these undefined options are handled by the method
+        `conf_features_partial()`. The default value covers almost all
+        CPU features for *X86*, *IBM/Power64* and *ARM 7/8*.
+
+ Sub-keys explained as follows:
+
+ "implies" : str or list, optional,
+ List of CPU feature names to be implied by it,
+ the feature name must be defined within `conf_features`.
+ Default is None.
+
+ "flags": str or list, optional
+ List of compiler flags. Default is None.
+
+ "detect": str or list, optional
+ List of CPU feature names that required to be detected
+ in runtime. By default, its the feature name or features
+ in "group" if its specified.
+
+ "implies_detect": bool, optional
+ If True, all "detect" of implied features will be combined.
+ Default is True. see `feature_detect()`.
+
+ "group": str or list, optional
+ Same as "implies" but doesn't require the feature name to be
+ defined within `conf_features`.
+
+ "interest": int, required
+ a key for sorting CPU features
+
+ "headers": str or list, optional
+ intrinsics C header file
+
+ "disable": str, optional
+ force disable feature, the string value should contains the
+ reason of disabling.
+
+ "autovec": bool or None, optional
+ True or False to declare that CPU feature can be auto-vectorized
+ by the compiler.
+ By default(None), treated as True if the feature contains at
+ least one applicable flag. see `feature_can_autovec()`
+
+ "extra_checks": str or list, optional
+ Extra test case names for the CPU feature that need to be tested
+ against the compiler.
+
+ Each test case must have a C file named ``extra_xxxx.c``, where
+ ``xxxx`` is the case name in lower case, under 'conf_check_path'.
+ It should contain at least one intrinsic or function related to the test case.
+
+            If the compiler is able to successfully compile the C file, then `CCompilerOpt`
+ will add a C ``#define`` for it into the main dispatch header, e.g.
+ ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case.
+
+        **NOTES**:
+        * a space can be used as a separator with options that support "str or list"
+        * case-sensitive for all values, and feature names must be in upper-case.
+        * if flags aren't applicable, they will be skipped rather than disabling
+          the CPU feature
+        * the CPU feature will be disabled if the compiler fails to compile
+          the test file
+ """
+ conf_nocache = False
+ conf_noopt = False
+ conf_cache_factors = None
+ conf_tmp_path = None
+ conf_check_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "checks"
+ )
+ conf_target_groups = {}
+ conf_c_prefix = 'NPY_'
+ conf_c_prefix_ = 'NPY__'
+ conf_cc_flags = dict(
+ gcc = dict(
+ # native should always fail on arm and ppc64,
+ # native usually works only with x86
+ native = '-march=native',
+ opt = '-O3',
+ werror = '-Werror',
+ ),
+ clang = dict(
+ native = '-march=native',
+ opt = "-O3",
+ # One of the following flags needs to be applicable for Clang to
+ # guarantee the sanity of the testing process, however in certain
+ # cases `-Werror` gets skipped during the availability test due to
+ # "unused arguments" warnings.
+ # see https://github.com/numpy/numpy/issues/19624
+ werror = '-Werror=switch -Werror',
+ ),
+ icc = dict(
+ native = '-xHost',
+ opt = '-O3',
+ werror = '-Werror',
+ ),
+ iccw = dict(
+ native = '/QxHost',
+ opt = '/O3',
+ werror = '/Werror',
+ ),
+ msvc = dict(
+ native = None,
+ opt = '/O2',
+ werror = '/WX',
+ )
+ )
+ conf_min_features = dict(
+ x86 = "SSE SSE2",
+ x64 = "SSE SSE2 SSE3",
+ ppc64 = '', # play it safe
+ ppc64le = "VSX VSX2",
+ s390x = '',
+ armhf = '', # play it safe
+ aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD"
+ )
+ conf_features = dict(
+ # X86
+ SSE = dict(
+ interest=1, headers="xmmintrin.h",
+ # enabling SSE without SSE2 is useless also
+ # it's non-optional for x86_64
+ implies="SSE2"
+ ),
+ SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"),
+ SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"),
+ SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"),
+ SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"),
+ POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"),
+ SSE42 = dict(interest=7, implies="POPCNT"),
+ AVX = dict(
+ interest=8, implies="SSE42", headers="immintrin.h",
+ implies_detect=False
+ ),
+ XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"),
+ FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"),
+ F16C = dict(interest=11, implies="AVX"),
+ FMA3 = dict(interest=12, implies="F16C"),
+ AVX2 = dict(interest=13, implies="F16C"),
+ AVX512F = dict(
+ interest=20, implies="FMA3 AVX2", implies_detect=False,
+ extra_checks="AVX512F_REDUCE"
+ ),
+ AVX512CD = dict(interest=21, implies="AVX512F"),
+ AVX512_KNL = dict(
+ interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
+ detect="AVX512_KNL", implies_detect=False
+ ),
+ AVX512_KNM = dict(
+ interest=41, implies="AVX512_KNL",
+ group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ",
+ detect="AVX512_KNM", implies_detect=False
+ ),
+ AVX512_SKX = dict(
+ interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
+ detect="AVX512_SKX", implies_detect=False,
+ extra_checks="AVX512BW_MASK AVX512DQ_MASK"
+ ),
+ AVX512_CLX = dict(
+ interest=43, implies="AVX512_SKX", group="AVX512VNNI",
+ detect="AVX512_CLX"
+ ),
+ AVX512_CNL = dict(
+ interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI",
+ detect="AVX512_CNL", implies_detect=False
+ ),
+ AVX512_ICL = dict(
+ interest=45, implies="AVX512_CLX AVX512_CNL",
+ group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ",
+ detect="AVX512_ICL", implies_detect=False
+ ),
+ # IBM/Power
+ ## Power7/ISA 2.06
+ VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"),
+ ## Power8/ISA 2.07
+ VSX2 = dict(interest=2, implies="VSX", implies_detect=False),
+ ## Power9/ISA 3.00
+ VSX3 = dict(interest=3, implies="VSX2", implies_detect=False),
+ ## Power10/ISA 3.1
+ VSX4 = dict(interest=4, implies="VSX3", implies_detect=False,
+ extra_checks="VSX4_MMA"),
+ # IBM/Z
+ ## VX(z13) support
+ VX = dict(interest=1, headers="vecintrin.h"),
+ ## Vector-Enhancements Facility
+ VXE = dict(interest=2, implies="VX", implies_detect=False),
+ ## Vector-Enhancements Facility 2
+ VXE2 = dict(interest=3, implies="VXE", implies_detect=False),
+ # ARM
+ NEON = dict(interest=1, headers="arm_neon.h"),
+ NEON_FP16 = dict(interest=2, implies="NEON"),
+ ## FMA
+ NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"),
+ ## Advanced SIMD
+ ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False),
+ ## ARMv8.2 half-precision & vector arithm
+ ASIMDHP = dict(interest=5, implies="ASIMD"),
+ ## ARMv8.2 dot product
+ ASIMDDP = dict(interest=6, implies="ASIMD"),
+ ## ARMv8.2 Single & half-precision Multiply
+ ASIMDFHM = dict(interest=7, implies="ASIMDHP"),
+ )
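+ # Illustrative note (not from the original source): the `implies` entries
+ # above resolve transitively, so requesting a single feature pulls in its
+ # whole chain. A hedged sketch, assuming an x86 target:
+ #
+ #   >>> self.feature_implies("SSE41")
+ #   {'SSE', 'SSE2', 'SSE3', 'SSSE3'}
+ #   >>> self.feature_implies_c("AVX")   # origin kept
+ #   {'SSE', 'SSE2', 'SSE3', 'SSSE3', 'SSE41', 'POPCNT', 'SSE42', 'AVX'}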
+ def conf_features_partial(self):
+ """Return a dictionary of supported CPU features by the platform,
+ and accumulate the rest of undefined options in `conf_features`,
+ the returned dict has same rules and notes in
+ class attribute `conf_features`, also its override
+ any options that been set in 'conf_features'.
+ """
+ if self.cc_noopt:
+ # optimization is disabled
+ return {}
+
+ on_x86 = self.cc_on_x86 or self.cc_on_x64
+ is_unix = self.cc_is_gcc or self.cc_is_clang
+
+ if on_x86 and is_unix: return dict(
+ SSE = dict(flags="-msse"),
+ SSE2 = dict(flags="-msse2"),
+ SSE3 = dict(flags="-msse3"),
+ SSSE3 = dict(flags="-mssse3"),
+ SSE41 = dict(flags="-msse4.1"),
+ POPCNT = dict(flags="-mpopcnt"),
+ SSE42 = dict(flags="-msse4.2"),
+ AVX = dict(flags="-mavx"),
+ F16C = dict(flags="-mf16c"),
+ XOP = dict(flags="-mxop"),
+ FMA4 = dict(flags="-mfma4"),
+ FMA3 = dict(flags="-mfma"),
+ AVX2 = dict(flags="-mavx2"),
+ AVX512F = dict(flags="-mavx512f -mno-mmx"),
+ AVX512CD = dict(flags="-mavx512cd"),
+ AVX512_KNL = dict(flags="-mavx512er -mavx512pf"),
+ AVX512_KNM = dict(
+ flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq"
+ ),
+ AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"),
+ AVX512_CLX = dict(flags="-mavx512vnni"),
+ AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"),
+ AVX512_ICL = dict(
+ flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq"
+ )
+ )
+ if on_x86 and self.cc_is_icc: return dict(
+ SSE = dict(flags="-msse"),
+ SSE2 = dict(flags="-msse2"),
+ SSE3 = dict(flags="-msse3"),
+ SSSE3 = dict(flags="-mssse3"),
+ SSE41 = dict(flags="-msse4.1"),
+ POPCNT = {},
+ SSE42 = dict(flags="-msse4.2"),
+ AVX = dict(flags="-mavx"),
+ F16C = {},
+ XOP = dict(disable="Intel Compiler doesn't support it"),
+ FMA4 = dict(disable="Intel Compiler doesn't support it"),
+ # Intel Compiler doesn't support AVX2 or FMA3 independently
+ FMA3 = dict(
+ implies="F16C AVX2", flags="-march=core-avx2"
+ ),
+ AVX2 = dict(implies="FMA3", flags="-march=core-avx2"),
+ # Intel Compiler doesn't support AVX512F or AVX512CD independently
+ AVX512F = dict(
+ implies="AVX2 AVX512CD", flags="-march=common-avx512"
+ ),
+ AVX512CD = dict(
+ implies="AVX2 AVX512F", flags="-march=common-avx512"
+ ),
+ AVX512_KNL = dict(flags="-xKNL"),
+ AVX512_KNM = dict(flags="-xKNM"),
+ AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"),
+ AVX512_CLX = dict(flags="-xCASCADELAKE"),
+ AVX512_CNL = dict(flags="-xCANNONLAKE"),
+ AVX512_ICL = dict(flags="-xICELAKE-CLIENT"),
+ )
+ if on_x86 and self.cc_is_iccw: return dict(
+ SSE = dict(flags="/arch:SSE"),
+ SSE2 = dict(flags="/arch:SSE2"),
+ SSE3 = dict(flags="/arch:SSE3"),
+ SSSE3 = dict(flags="/arch:SSSE3"),
+ SSE41 = dict(flags="/arch:SSE4.1"),
+ POPCNT = {},
+ SSE42 = dict(flags="/arch:SSE4.2"),
+ AVX = dict(flags="/arch:AVX"),
+ F16C = {},
+ XOP = dict(disable="Intel Compiler doesn't support it"),
+ FMA4 = dict(disable="Intel Compiler doesn't support it"),
+ # Intel Compiler doesn't support FMA3 or AVX2 independently
+ FMA3 = dict(
+ implies="F16C AVX2", flags="/arch:CORE-AVX2"
+ ),
+ AVX2 = dict(
+ implies="FMA3", flags="/arch:CORE-AVX2"
+ ),
+ # Intel Compiler doesn't support AVX512F or AVX512CD independently
+ AVX512F = dict(
+ implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512"
+ ),
+ AVX512CD = dict(
+ implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512"
+ ),
+ AVX512_KNL = dict(flags="/Qx:KNL"),
+ AVX512_KNM = dict(flags="/Qx:KNM"),
+ AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"),
+ AVX512_CLX = dict(flags="/Qx:CASCADELAKE"),
+ AVX512_CNL = dict(flags="/Qx:CANNONLAKE"),
+ AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT")
+ )
+ if on_x86 and self.cc_is_msvc: return dict(
+ SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {},
+ SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {},
+ SSE3 = {},
+ SSSE3 = {},
+ SSE41 = {},
+ POPCNT = dict(headers="nmmintrin.h"),
+ SSE42 = {},
+ AVX = dict(flags="/arch:AVX"),
+ F16C = {},
+ XOP = dict(headers="ammintrin.h"),
+ FMA4 = dict(headers="ammintrin.h"),
+ # MSVC doesn't support FMA3 or AVX2 independently
+ FMA3 = dict(
+ implies="F16C AVX2", flags="/arch:AVX2"
+ ),
+ AVX2 = dict(
+ implies="F16C FMA3", flags="/arch:AVX2"
+ ),
+ # MSVC doesn't support AVX512F or AVX512CD independently;
+ # it always generates instructions belonging to (VL/BW/DQ)
+ AVX512F = dict(
+ implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512"
+ ),
+ AVX512CD = dict(
+ implies="AVX512F AVX512_SKX", flags="/arch:AVX512"
+ ),
+ AVX512_KNL = dict(
+ disable="MSVC compiler doesn't support it"
+ ),
+ AVX512_KNM = dict(
+ disable="MSVC compiler doesn't support it"
+ ),
+ AVX512_SKX = dict(flags="/arch:AVX512"),
+ AVX512_CLX = {},
+ AVX512_CNL = {},
+ AVX512_ICL = {}
+ )
+
+ on_power = self.cc_on_ppc64le or self.cc_on_ppc64
+ if on_power:
+ partial = dict(
+ VSX = dict(
+ implies=("VSX2" if self.cc_on_ppc64le else ""),
+ flags="-mvsx"
+ ),
+ VSX2 = dict(
+ flags="-mcpu=power8", implies_detect=False
+ ),
+ VSX3 = dict(
+ flags="-mcpu=power9 -mtune=power9", implies_detect=False
+ ),
+ VSX4 = dict(
+ flags="-mcpu=power10 -mtune=power10", implies_detect=False
+ )
+ )
+ if self.cc_is_clang:
+ partial["VSX"]["flags"] = "-maltivec -mvsx"
+ partial["VSX2"]["flags"] = "-mpower8-vector"
+ partial["VSX3"]["flags"] = "-mpower9-vector"
+ partial["VSX4"]["flags"] = "-mpower10-vector"
+
+ return partial
+
+ on_zarch = self.cc_on_s390x
+ if on_zarch:
+ partial = dict(
+ VX = dict(
+ flags="-march=arch11 -mzvector"
+ ),
+ VXE = dict(
+ flags="-march=arch12", implies_detect=False
+ ),
+ VXE2 = dict(
+ flags="-march=arch13", implies_detect=False
+ )
+ )
+
+ return partial
+
+
+ if self.cc_on_aarch64 and is_unix: return dict(
+ NEON = dict(
+ implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True
+ ),
+ NEON_FP16 = dict(
+ implies="NEON NEON_VFPV4 ASIMD", autovec=True
+ ),
+ NEON_VFPV4 = dict(
+ implies="NEON NEON_FP16 ASIMD", autovec=True
+ ),
+ ASIMD = dict(
+ implies="NEON NEON_FP16 NEON_VFPV4", autovec=True
+ ),
+ ASIMDHP = dict(
+ flags="-march=armv8.2-a+fp16"
+ ),
+ ASIMDDP = dict(
+ flags="-march=armv8.2-a+dotprod"
+ ),
+ ASIMDFHM = dict(
+ flags="-march=armv8.2-a+fp16fml"
+ ),
+ )
+ if self.cc_on_armhf and is_unix: return dict(
+ NEON = dict(
+ flags="-mfpu=neon"
+ ),
+ NEON_FP16 = dict(
+ flags="-mfpu=neon-fp16 -mfp16-format=ieee"
+ ),
+ NEON_VFPV4 = dict(
+ flags="-mfpu=neon-vfpv4",
+ ),
+ ASIMD = dict(
+ flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd",
+ ),
+ ASIMDHP = dict(
+ flags="-march=armv8.2-a+fp16"
+ ),
+ ASIMDDP = dict(
+ flags="-march=armv8.2-a+dotprod",
+ ),
+ ASIMDFHM = dict(
+ flags="-march=armv8.2-a+fp16fml"
+ )
+ )
+ # TODO: ARM MSVC
+ return {}
+
+ def __init__(self):
+ if self.conf_tmp_path is None:
+ import shutil
+ import tempfile
+ tmp = tempfile.mkdtemp()
+ def rm_temp():
+ try:
+ shutil.rmtree(tmp)
+ except OSError:
+ pass
+ atexit.register(rm_temp)
+ self.conf_tmp_path = tmp
+
+ if self.conf_cache_factors is None:
+ self.conf_cache_factors = [
+ os.path.getmtime(__file__),
+ self.conf_nocache
+ ]
+
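+ # Note (illustrative, not part of the original module): the classes below
+ # are mixins that `CCompilerOpt` inherits at the bottom of this file; a
+ # hedged standalone sketch of how they come together, assuming a working
+ # distutils setup:
+ #
+ #   >>> from distutils.ccompiler import new_compiler
+ #   >>> opt = CCompilerOpt(new_compiler(), cpu_baseline="min")
+ #   >>> opt.cpu_baseline_names()   # e.g. ['SSE', 'SSE2'] on 32-bit x86
+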
+class _Distutils:
+ """A helper class that provides a collection of fundamental methods
+ implemented in a top of Python and NumPy Distutils.
+
+ The idea behind this class is to gather all methods that it may
+ need to override in case of reuse 'CCompilerOpt' in environment
+ different than of what NumPy has.
+
+ Parameters
+ ----------
+ ccompiler : `CCompiler`
+ The generate instance that returned from `distutils.ccompiler.new_compiler()`.
+ """
+ def __init__(self, ccompiler):
+ self._ccompiler = ccompiler
+
+ def dist_compile(self, sources, flags, ccompiler=None, **kwargs):
+ """Wrap CCompiler.compile()"""
+ assert(isinstance(sources, list))
+ assert(isinstance(flags, list))
+ flags = kwargs.pop("extra_postargs", []) + flags
+ if not ccompiler:
+ ccompiler = self._ccompiler
+
+ return ccompiler.compile(sources, extra_postargs=flags, **kwargs)
+
+ def dist_test(self, source, flags, macros=[]):
+ """Return True if 'CCompiler.compile()' able to compile
+ a source file with certain flags.
+ """
+ assert(isinstance(source, str))
+ from distutils.errors import CompileError
+ cc = self._ccompiler
+ bk_spawn = getattr(cc, 'spawn', None)
+ if bk_spawn:
+ cc_type = getattr(self._ccompiler, "compiler_type", "")
+ if cc_type in ("msvc",):
+ setattr(cc, 'spawn', self._dist_test_spawn_paths)
+ else:
+ setattr(cc, 'spawn', self._dist_test_spawn)
+ test = False
+ try:
+ self.dist_compile(
+ [source], flags, macros=macros, output_dir=self.conf_tmp_path
+ )
+ test = True
+ except CompileError as e:
+ self.dist_log(str(e), stderr=True)
+ if bk_spawn:
+ setattr(cc, 'spawn', bk_spawn)
+ return test
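+ # e.g., a hedged sketch: dist_test("checks/cpu_sse.c", ["-msse", "-Werror"])
+ # returns False when the compiler rejects the flags or the intrinsics,
+ # and True on a successful compile.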
+
+ def dist_info(self):
+ """
+ Return a tuple containing info about (platform, compiler, extra_args),
+ required by the abstract class '_CCompiler' for discovering the
+ platform environment. This is also used as a cache factor in order
+ to detect any changes happening from outside.
+ """
+ if hasattr(self, "_dist_info"):
+ return self._dist_info
+
+ cc_type = getattr(self._ccompiler, "compiler_type", '')
+ if cc_type in ("intelem", "intelemw"):
+ platform = "x86_64"
+ elif cc_type in ("intel", "intelw", "intele"):
+ platform = "x86"
+ else:
+ from distutils.util import get_platform
+ platform = get_platform()
+
+ cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", ''))
+ if not cc_type or cc_type == "unix":
+ if hasattr(cc_info, "__iter__"):
+ compiler = cc_info[0]
+ else:
+ compiler = str(cc_info)
+ else:
+ compiler = cc_type
+
+ if hasattr(cc_info, "__iter__") and len(cc_info) > 1:
+ extra_args = ' '.join(cc_info[1:])
+ else:
+ extra_args = os.environ.get("CFLAGS", "")
+ extra_args += os.environ.get("CPPFLAGS", "")
+
+ self._dist_info = (platform, compiler, extra_args)
+ return self._dist_info
+
+ @staticmethod
+ def dist_error(*args):
+ """Raise a compiler error"""
+ from distutils.errors import CompileError
+ raise CompileError(_Distutils._dist_str(*args))
+
+ @staticmethod
+ def dist_fatal(*args):
+ """Raise a distutils error"""
+ from distutils.errors import DistutilsError
+ raise DistutilsError(_Distutils._dist_str(*args))
+
+ @staticmethod
+ def dist_log(*args, stderr=False):
+ """Print a console message"""
+ from numpy.distutils import log
+ out = _Distutils._dist_str(*args)
+ if stderr:
+ log.warn(out)
+ else:
+ log.info(out)
+
+ @staticmethod
+ def dist_load_module(name, path):
+ """Load a module from file, required by the abstract class '_Cache'."""
+ from .misc_util import exec_mod_from_location
+ try:
+ return exec_mod_from_location(name, path)
+ except Exception as e:
+ _Distutils.dist_log(e, stderr=True)
+ return None
+
+ @staticmethod
+ def _dist_str(*args):
+ """Return a string to print by log and errors."""
+ def to_str(arg):
+ if not isinstance(arg, str) and hasattr(arg, '__iter__'):
+ ret = []
+ for a in arg:
+ ret.append(to_str(a))
+ return '('+ ' '.join(ret) + ')'
+ return str(arg)
+
+ stack = inspect.stack()[2]
+ start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno)
+ out = ' '.join([
+ to_str(a)
+ for a in (*args,)
+ ])
+ return start + out
+
+ def _dist_test_spawn_paths(self, cmd, display=None):
+ """
+ Fix the msvc SDK ENV path, the same way distutils does;
+ without it we get: c1: fatal error C1356: unable to find mspdbcore.dll
+ """
+ if not hasattr(self._ccompiler, "_paths"):
+ self._dist_test_spawn(cmd)
+ return
+ old_path = os.getenv("path")
+ try:
+ os.environ["path"] = self._ccompiler._paths
+ self._dist_test_spawn(cmd)
+ finally:
+ os.environ["path"] = old_path
+
+ _dist_warn_regex = re.compile(
+ # intel and msvc compilers don't raise
+ # fatal errors when flags are wrong or unsupported
+ ".*("
+ "warning D9002|" # msvc, it should be work with any language.
+ "invalid argument for option" # intel
+ ").*"
+ )
+ @staticmethod
+ def _dist_test_spawn(cmd, display=None):
+ try:
+ o = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
+ text=True)
+ if o and re.match(_Distutils._dist_warn_regex, o):
+ _Distutils.dist_error(
+ "Flags in command", cmd ,"aren't supported by the compiler"
+ ", output -> \n%s" % o
+ )
+ except subprocess.CalledProcessError as exc:
+ o = exc.output
+ s = exc.returncode
+ except OSError as e:
+ o = e
+ s = 127
+ else:
+ return None
+ _Distutils.dist_error(
+ "Command", cmd, "failed with exit status %d output -> \n%s" % (
+ s, o
+ ))
+
+_share_cache = {}
+class _Cache:
+ """An abstract class handles caching functionality, provides two
+ levels of caching, in-memory by share instances attributes among
+ each other and by store attributes into files.
+
+ **Note**:
+ any attributes that start with ``_`` or ``conf_`` will be ignored.
+
+ Parameters
+ ----------
+ cache_path : str or None
+ The path of cache file, if None then cache in file will disabled.
+
+ *factors :
+ The caching factors that need to utilize next to `conf_cache_factors`.
+
+ Attributes
+ ----------
+ cache_private : set
+ Hold the attributes that need be skipped from "in-memory cache".
+
+ cache_infile : bool
+ Utilized during initializing this class, to determine if the cache was able
+ to loaded from the specified cache path in 'cache_path'.
+ """
+
+ # skip attributes from cache
+ _cache_ignore = re.compile("^(_|conf_)")
+
+ def __init__(self, cache_path=None, *factors):
+ self.cache_me = {}
+ self.cache_private = set()
+ self.cache_infile = False
+ self._cache_path = None
+
+ if self.conf_nocache:
+ self.dist_log("cache is disabled by `Config`")
+ return
+
+ self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors)
+ self._cache_path = cache_path
+ if cache_path:
+ if os.path.exists(cache_path):
+ self.dist_log("load cache from file ->", cache_path)
+ cache_mod = self.dist_load_module("cache", cache_path)
+ if not cache_mod:
+ self.dist_log(
+ "unable to load the cache file as a module",
+ stderr=True
+ )
+ elif not hasattr(cache_mod, "hash") or \
+ not hasattr(cache_mod, "data"):
+ self.dist_log("invalid cache file", stderr=True)
+ elif self._cache_hash == cache_mod.hash:
+ self.dist_log("hit the file cache")
+ for attr, val in cache_mod.data.items():
+ setattr(self, attr, val)
+ self.cache_infile = True
+ else:
+ self.dist_log("miss the file cache")
+
+ if not self.cache_infile:
+ other_cache = _share_cache.get(self._cache_hash)
+ if other_cache:
+ self.dist_log("hit the memory cache")
+ for attr, val in other_cache.__dict__.items():
+ if attr in other_cache.cache_private or \
+ re.match(self._cache_ignore, attr):
+ continue
+ setattr(self, attr, val)
+
+ _share_cache[self._cache_hash] = self
+ atexit.register(self.cache_flush)
+
+ def __del__(self):
+ for h, o in _share_cache.items():
+ if o == self:
+ _share_cache.pop(h)
+ break
+
+ def cache_flush(self):
+ """
+ Force update the cache.
+ """
+ if not self._cache_path:
+ return
+ # TODO: don't write if the cache doesn't change
+ self.dist_log("write cache to path ->", self._cache_path)
+ cdict = self.__dict__.copy()
+ for attr in self.__dict__.keys():
+ if re.match(self._cache_ignore, attr):
+ cdict.pop(attr)
+
+ d = os.path.dirname(self._cache_path)
+ if not os.path.exists(d):
+ os.makedirs(d)
+
+ repr_dict = pprint.pformat(cdict, compact=True)
+ with open(self._cache_path, "w") as f:
+ f.write(textwrap.dedent("""\
+ # AUTOGENERATED DON'T EDIT
+ # Please make changes to the code generator \
+ (distutils/ccompiler_opt.py)
+ hash = {}
+ data = \\
+ """).format(self._cache_hash))
+ f.write(repr_dict)
+
+ def cache_hash(self, *factors):
+ # is there a built-in non-crypto hash?
+ # sdbm
+ chash = 0
+ for f in factors:
+ for char in str(f):
+ chash = ord(char) + (chash << 6) + (chash << 16) - chash
+ chash &= 0xFFFFFFFF
+ return chash
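+ # A hedged standalone rendering of the sdbm-style hash above, for
+ # reference only (the helper name is ours, not from the source):
+ #
+ #   >>> def sdbm32(*factors):
+ #   ...     h = 0
+ #   ...     for f in factors:
+ #   ...         for ch in str(f):
+ #   ...             h = (ord(ch) + (h << 6) + (h << 16) - h) & 0xFFFFFFFF
+ #   ...     return h
+ #   >>> sdbm32("gcc", "-O3") == sdbm32("gcc", "-O3")
+ #   True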
+
+ @staticmethod
+ def me(cb):
+ """
+ A static method that can be treated as a decorator to
+ dynamically cache certain methods.
+ """
+ def cache_wrap_me(self, *args, **kwargs):
+ # good for normal args
+ cache_key = str((
+ cb.__name__, *args, *kwargs.keys(), *kwargs.values()
+ ))
+ if cache_key in self.cache_me:
+ return self.cache_me[cache_key]
+ ccb = cb(self, *args, **kwargs)
+ self.cache_me[cache_key] = ccb
+ return ccb
+ return cache_wrap_me
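+ # Usage note (illustrative): methods in the sibling mixins opt into this
+ # memoization by decoration, e.g. `cc_test_flags` and `feature_flags`
+ # below; a repeated call with the same arguments returns the cached value
+ # without re-invoking the compiler.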
+
+class _CCompiler:
+ """A helper class for `CCompilerOpt` containing all utilities that
+ related to the fundamental compiler's functions.
+
+ Attributes
+ ----------
+ cc_on_x86 : bool
+ True when the target architecture is 32-bit x86
+ cc_on_x64 : bool
+ True when the target architecture is 64-bit x86
+ cc_on_ppc64 : bool
+ True when the target architecture is 64-bit big-endian powerpc
+ cc_on_ppc64le : bool
+ True when the target architecture is 64-bit little-endian powerpc
+ cc_on_s390x : bool
+ True when the target architecture is IBM/ZARCH on linux
+ cc_on_armhf : bool
+ True when the target architecture is 32-bit ARMv7+
+ cc_on_aarch64 : bool
+ True when the target architecture is 64-bit Armv8-a+
+ cc_on_noarch : bool
+ True when the target architecture is unknown or not supported
+ cc_is_gcc : bool
+ True if the compiler is GNU or
+ if the compiler is unknown
+ cc_is_clang : bool
+ True if the compiler is Clang
+ cc_is_icc : bool
+ True if the compiler is Intel compiler (unix like)
+ cc_is_iccw : bool
+ True if the compiler is Intel compiler (msvc like)
+ cc_is_nocc : bool
+ True if the compiler isn't supported directly.
+ Note: this causes a fallback to gcc.
+ cc_has_debug : bool
+ True if the compiler has debug flags
+ cc_has_native : bool
+ True if the compiler has native flags
+ cc_noopt : bool
+ True if the compiler has definition 'DISABLE_OPT*',
+ or 'cc_on_noarch' is True
+ cc_march : str
+ The target architecture name, or "unknown" if
+ the architecture isn't supported
+ cc_name : str
+ The compiler name, or "unknown" if the compiler isn't supported
+ cc_flags : dict
+ Dictionary containing the initialized flags of `_Config.conf_cc_flags`
+ """
+ def __init__(self):
+ if hasattr(self, "cc_is_cached"):
+ return
+ # attr regex compiler-expression
+ detect_arch = (
+ ("cc_on_x64", ".*(x|x86_|amd)64.*", ""),
+ ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""),
+ ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*",
+ "defined(__powerpc64__) && "
+ "defined(__LITTLE_ENDIAN__)"),
+ ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*",
+ "defined(__powerpc64__) && "
+ "defined(__BIG_ENDIAN__)"),
+ ("cc_on_aarch64", ".*(aarch64|arm64).*", ""),
+ ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || "
+ "defined(__ARM_ARCH_7A__)"),
+ ("cc_on_s390x", ".*s390x.*", ""),
+ # undefined platform
+ ("cc_on_noarch", "", ""),
+ )
+ detect_compiler = (
+ ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""),
+ ("cc_is_clang", ".*clang.*", ""),
+ # intel msvc like
+ ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""),
+ ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like
+ ("cc_is_msvc", ".*msvc.*", ""),
+ # an undefined compiler will be treated as gcc
+ ("cc_is_nocc", "", ""),
+ )
+ detect_args = (
+ ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""),
+ ("cc_has_native", ".*(-march=native|-xHost|/QxHost).*", ""),
+ # in case the build runs with -DNPY_DISABLE_OPTIMIZATION
+ ("cc_noopt", ".*DISABLE_OPT.*", ""),
+ )
+
+ dist_info = self.dist_info()
+ platform, compiler_info, extra_args = dist_info
+ # set False to all attrs
+ for section in (detect_arch, detect_compiler, detect_args):
+ for attr, rgex, cexpr in section:
+ setattr(self, attr, False)
+
+ for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)):
+ for attr, rgex, cexpr in detect:
+ if rgex and not re.match(rgex, searchin, re.IGNORECASE):
+ continue
+ if cexpr and not self.cc_test_cexpr(cexpr):
+ continue
+ setattr(self, attr, True)
+ break
+
+ for attr, rgex, cexpr in detect_args:
+ if rgex and not re.match(rgex, extra_args, re.IGNORECASE):
+ continue
+ if cexpr and not self.cc_test_cexpr(cexpr):
+ continue
+ setattr(self, attr, True)
+
+ if self.cc_on_noarch:
+ self.dist_log(
+ "unable to detect CPU architecture which lead to disable the optimization. "
+ f"check dist_info:<<\n{dist_info}\n>>",
+ stderr=True
+ )
+ self.cc_noopt = True
+
+ if self.conf_noopt:
+ self.dist_log("Optimization is disabled by the Config", stderr=True)
+ self.cc_noopt = True
+
+ if self.cc_is_nocc:
+ """
+ mingw can be treated as a gcc, and also xlc even if it based on clang,
+ but still has the same gcc optimization flags.
+ """
+ self.dist_log(
+ "unable to detect compiler type which leads to treating it as GCC. "
+ "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC."
+ f"check dist_info:<<\n{dist_info}\n>>",
+ stderr=True
+ )
+ self.cc_is_gcc = True
+
+ self.cc_march = "unknown"
+ for arch in ("x86", "x64", "ppc64", "ppc64le",
+ "armhf", "aarch64", "s390x"):
+ if getattr(self, "cc_on_" + arch):
+ self.cc_march = arch
+ break
+
+ self.cc_name = "unknown"
+ for name in ("gcc", "clang", "iccw", "icc", "msvc"):
+ if getattr(self, "cc_is_" + name):
+ self.cc_name = name
+ break
+
+ self.cc_flags = {}
+ compiler_flags = self.conf_cc_flags.get(self.cc_name)
+ if compiler_flags is None:
+ self.dist_fatal(
+ "undefined flag for compiler '%s', "
+ "leave an empty dict instead" % self.cc_name
+ )
+ for name, flags in compiler_flags.items():
+ self.cc_flags[name] = nflags = []
+ if flags:
+ assert(isinstance(flags, str))
+ flags = flags.split()
+ for f in flags:
+ if self.cc_test_flags([f]):
+ nflags.append(f)
+
+ self.cc_is_cached = True
+
+ @_Cache.me
+ def cc_test_flags(self, flags):
+ """
+ Returns True if the compiler supports 'flags'.
+ """
+ assert(isinstance(flags, list))
+ self.dist_log("testing flags", flags)
+ test_path = os.path.join(self.conf_check_path, "test_flags.c")
+ test = self.dist_test(test_path, flags)
+ if not test:
+ self.dist_log("testing failed", stderr=True)
+ return test
+
+ @_Cache.me
+ def cc_test_cexpr(self, cexpr, flags=[]):
+ """
+ Same as the above but supports compile-time expressions.
+ """
+ self.dist_log("testing compiler expression", cexpr)
+ test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c")
+ with open(test_path, "w") as fd:
+ fd.write(textwrap.dedent(f"""\
+ #if !({cexpr})
+ #error "unsupported expression"
+ #endif
+ int dummy;
+ """))
+ test = self.dist_test(test_path, flags)
+ if not test:
+ self.dist_log("testing failed", stderr=True)
+ return test
+
+ def cc_normalize_flags(self, flags):
+ """
+ Remove the conflicts caused by gathering the flags of implied features.
+
+ Parameters
+ ----------
+ 'flags' list, compiler flags
+ flags should be sorted from the lowest to the highest interest.
+
+ Returns
+ -------
+ list, filtered from any conflicts.
+
+ Examples
+ --------
+ >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod'])
+ ['-march=armv8.2-a+fp16+dotprod']
+
+ >>> self.cc_normalize_flags(
+ ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2']
+ )
+ ['-march=core-avx2']
+ """
+ assert(isinstance(flags, list))
+ if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc:
+ return self._cc_normalize_unix(flags)
+
+ if self.cc_is_msvc or self.cc_is_iccw:
+ return self._cc_normalize_win(flags)
+ return flags
+
+ _cc_normalize_unix_mrgx = re.compile(
+ # 1- to check the highest of
+ r"^(-mcpu=|-march=|-x[A-Z0-9\-])"
+ )
+ _cc_normalize_unix_frgx = re.compile(
+ # 2- to remove any flags starting with
+ # -march, -mcpu, -x(INTEL) and '-m' without '='
+ r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|"
+ # exclude:
+ r"(?:-mzvector)"
+ )
+ _cc_normalize_unix_krgx = re.compile(
+ # 3- keep only the highest of
+ r"^(-mfpu|-mtune)"
+ )
+ _cc_normalize_arch_ver = re.compile(
+ r"[0-9.]"
+ )
+ def _cc_normalize_unix(self, flags):
+ def ver_flags(f):
+ # arch ver subflag
+ # -march=armv8.2-a+fp16fml
+ tokens = f.split('+')
+ ver = float('0' + ''.join(
+ re.findall(self._cc_normalize_arch_ver, tokens[0])
+ ))
+ return ver, tokens[0], tokens[1:]
+
+ if len(flags) <= 1:
+ return flags
+ # get the highest matched flag
+ for i, cur_flag in enumerate(reversed(flags)):
+ if not re.match(self._cc_normalize_unix_mrgx, cur_flag):
+ continue
+ lower_flags = flags[:-(i+1)]
+ upper_flags = flags[-i:]
+ filtered = list(filter(
+ self._cc_normalize_unix_frgx.search, lower_flags
+ ))
+ # gather subflags
+ ver, arch, subflags = ver_flags(cur_flag)
+ if ver > 0 and len(subflags) > 0:
+ for xflag in lower_flags:
+ xver, _, xsubflags = ver_flags(xflag)
+ if ver == xver:
+ subflags = xsubflags + subflags
+ cur_flag = arch + '+' + '+'.join(subflags)
+
+ flags = filtered + [cur_flag]
+ if i > 0:
+ flags += upper_flags
+ break
+
+ # to remove overridable flags
+ final_flags = []
+ matched = set()
+ for f in reversed(flags):
+ match = re.match(self._cc_normalize_unix_krgx, f)
+ if not match:
+ pass
+ elif match[0] in matched:
+ continue
+ else:
+ matched.add(match[0])
+ final_flags.insert(0, f)
+ return final_flags
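+ # Hedged walk-through of the merge above: given the flags
+ #   ['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']
+ # the subflags of matching architecture versions are joined onto the
+ # highest (last) matched flag, yielding
+ #   ['-march=armv8.2-a+fp16+dotprod']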
+
+ _cc_normalize_win_frgx = re.compile(
+ r"^(?!(/arch\:|/Qx\:))"
+ )
+ _cc_normalize_win_mrgx = re.compile(
+ r"^(/arch|/Qx:)"
+ )
+ def _cc_normalize_win(self, flags):
+ for i, f in enumerate(reversed(flags)):
+ if not re.match(self._cc_normalize_win_mrgx, f):
+ continue
+ i += 1
+ return list(filter(
+ self._cc_normalize_win_frgx.search, flags[:-i]
+ )) + flags[-i:]
+ return flags
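+ # Hedged example of the filtering above: the last /arch or /Qx flag wins
+ # and earlier overridable ones are dropped, e.g.
+ #   ['/arch:SSE2', '/arch:AVX2', '/O2'] -> ['/arch:AVX2', '/O2']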
+
+class _Feature:
+ """A helper class for `CCompilerOpt` that managing CPU features.
+
+ Attributes
+ ----------
+ feature_supported : dict
+ Dictionary containing all CPU features that are supported by
+ the platform, according to the values specified in the attribute
+ `_Config.conf_features` and in `_Config.conf_features_partial()`
+
+ feature_min : set
+ The minimum support of CPU features, according to
+ the specified values in attribute `_Config.conf_min_features`.
+ """
+ def __init__(self):
+ if hasattr(self, "feature_is_cached"):
+ return
+ self.feature_supported = pfeatures = self.conf_features_partial()
+ for feature_name in list(pfeatures.keys()):
+ feature = pfeatures[feature_name]
+ cfeature = self.conf_features[feature_name]
+ feature.update({
+ k:v for k,v in cfeature.items() if k not in feature
+ })
+ disabled = feature.get("disable")
+ if disabled is not None:
+ pfeatures.pop(feature_name)
+ self.dist_log(
+ "feature '%s' is disabled," % feature_name,
+ disabled, stderr=True
+ )
+ continue
+ # list is used internally for these options
+ for option in (
+ "implies", "group", "detect", "headers", "flags", "extra_checks"
+ ) :
+ oval = feature.get(option)
+ if isinstance(oval, str):
+ feature[option] = oval.split()
+
+ self.feature_min = set()
+ min_f = self.conf_min_features.get(self.cc_march, "")
+ for F in min_f.upper().split():
+ if F in self.feature_supported:
+ self.feature_min.add(F)
+
+ self.feature_is_cached = True
+
+ def feature_names(self, names=None, force_flags=None, macros=[]):
+ """
+ Returns a set of CPU feature names supported by the platform and the **C** compiler.
+
+ Parameters
+ ----------
+ names : sequence or None, optional
+ Specify certain CPU features to test against the **C** compiler.
+ If None (default), all currently supported features are tested.
+ **Note**: feature names must be in upper-case.
+
+ force_flags : list or None, optional
+ If None (default), the default compiler flags of every CPU feature
+ will be used during the test.
+
+ macros : list of tuples, optional
+ A list of C macro definitions.
+ """
+ assert(
+ names is None or (
+ not isinstance(names, str) and
+ hasattr(names, "__iter__")
+ )
+ )
+ assert(force_flags is None or isinstance(force_flags, list))
+ if names is None:
+ names = self.feature_supported.keys()
+ supported_names = set()
+ for f in names:
+ if self.feature_is_supported(
+ f, force_flags=force_flags, macros=macros
+ ):
+ supported_names.add(f)
+ return supported_names
+
+ def feature_is_exist(self, name):
+ """
+ Returns True if a certain feature exists and is covered within
+ `_Config.conf_features`.
+
+ Parameters
+ ----------
+ 'name': str
+ feature name in uppercase.
+ """
+ assert(name.isupper())
+ return name in self.conf_features
+
+ def feature_sorted(self, names, reverse=False):
+ """
+ Sort a list of CPU features from the lowest to the highest interest.
+
+ Parameters
+ ----------
+ 'names': sequence
+ sequence of supported feature names in uppercase.
+ 'reverse': bool, optional
+ If true, the sort order is reversed (highest interest first).
+
+ Returns
+ -------
+ list, sorted CPU features
+ """
+ def sort_cb(k):
+ if isinstance(k, str):
+ return self.feature_supported[k]["interest"]
+ # multiple features
+ rank = max([self.feature_supported[f]["interest"] for f in k])
+ # FIXME: that's not a safe way to increase the rank for
+ # multi targets
+ rank += len(k) -1
+ return rank
+ return sorted(names, reverse=reverse, key=sort_cb)
+
+ def feature_implies(self, names, keep_origins=False):
+ """
+ Return a set of CPU features that are implied by 'names'.
+
+ Parameters
+ ----------
+ names : str or sequence of str
+ CPU feature name(s) in uppercase.
+
+ keep_origins : bool
+ if False (default), the returned set will not contain any
+ features from 'names'; origins can appear in the set only
+ when two features imply each other.
+
+ Examples
+ --------
+ >>> self.feature_implies("SSE3")
+ {'SSE', 'SSE2'}
+ >>> self.feature_implies("SSE2")
+ {'SSE'}
+ >>> self.feature_implies("SSE2", keep_origins=True)
+ # 'SSE2' found here since 'SSE' and 'SSE2' imply each other
+ {'SSE', 'SSE2'}
+ """
+ def get_implies(name, _caller=set()):
+ implies = set()
+ d = self.feature_supported[name]
+ for i in d.get("implies", []):
+ implies.add(i)
+ if i in _caller:
+ # infinite recursion guard, since
+ # features can imply each other
+ continue
+ _caller.add(name)
+ implies = implies.union(get_implies(i, _caller))
+ return implies
+
+ if isinstance(names, str):
+ implies = get_implies(names)
+ names = [names]
+ else:
+ assert(hasattr(names, "__iter__"))
+ implies = set()
+ for n in names:
+ implies = implies.union(get_implies(n))
+ if not keep_origins:
+ implies.difference_update(names)
+ return implies
+
+ def feature_implies_c(self, names):
+ """same as feature_implies() but combining 'names'"""
+ if isinstance(names, str):
+ names = set((names,))
+ else:
+ names = set(names)
+ return names.union(self.feature_implies(names))
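+ # e.g., a hedged sketch based on the doctest of `feature_implies` above:
+ #   >>> self.feature_implies_c("SSE3")
+ #   {'SSE', 'SSE2', 'SSE3'}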
+
+ def feature_ahead(self, names):
+ """
+ Return the list of features in 'names' after removing any
+ implied features, keeping the origins.
+
+ Parameters
+ ----------
+ 'names': sequence
+ sequence of CPU feature names in uppercase.
+
+ Returns
+ -------
+ list of CPU features in the same order as 'names'
+
+ Examples
+ --------
+ >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"])
+ ["SSE41"]
+ # assume AVX2 and FMA3 imply each other and AVX2
+ # is the highest interest
+ >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+ ["AVX2"]
+ # assume AVX2 and FMA3 don't imply each other
+ >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+ ["AVX2", "FMA3"]
+ """
+ assert(
+ not isinstance(names, str)
+ and hasattr(names, '__iter__')
+ )
+ implies = self.feature_implies(names, keep_origins=True)
+ ahead = [n for n in names if n not in implies]
+ if len(ahead) == 0:
+ # return the feature with the highest interest
+ # if all features imply each other
+ ahead = self.feature_sorted(names, reverse=True)[:1]
+ return ahead
+
+ def feature_untied(self, names):
+ """
+ Same as 'feature_ahead()', but when two features imply each
+ other, only the one with the highest interest is kept.
+
+ Parameters
+ ----------
+ 'names': sequence
+ sequence of CPU feature names in uppercase.
+
+ Returns
+ -------
+ list of CPU features in the same order as 'names'
+
+ Examples
+ --------
+ >>> self.feature_untied(["SSE2", "SSE3", "SSE41"])
+ ["SSE2", "SSE3", "SSE41"]
+ # assume AVX2 and FMA3 imply each other
+ >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"])
+ ["SSE2", "SSE3", "SSE41", "AVX2"]
+ """
+ assert(
+ not isinstance(names, str)
+ and hasattr(names, '__iter__')
+ )
+ final = []
+ for n in names:
+ implies = self.feature_implies(n)
+ tied = [
+ nn for nn in final
+ if nn in implies and n in self.feature_implies(nn)
+ ]
+ if tied:
+ tied = self.feature_sorted(tied + [n])
+ if n not in tied[1:]:
+ continue
+ final.remove(tied[:1][0])
+ final.append(n)
+ return final
+
+ def feature_get_til(self, names, keyisfalse):
+ """
+ Same as `feature_implies_c()` but stop collecting implied
+ features when the feature option provided through
+ parameter 'keyisfalse' is False; the returned features
+ are also sorted.
+ """
+ def til(tnames):
+ # sort from highest to lowest interest then cut if "key" is False
+ tnames = self.feature_implies_c(tnames)
+ tnames = self.feature_sorted(tnames, reverse=True)
+ for i, n in enumerate(tnames):
+ if not self.feature_supported[n].get(keyisfalse, True):
+ tnames = tnames[:i+1]
+ break
+ return tnames
+
+ if isinstance(names, str) or len(names) <= 1:
+ names = til(names)
+ # normalize the sort
+ names.reverse()
+ return names
+
+ names = self.feature_ahead(names)
+ names = {t for n in names for t in til(n)}
+ return self.feature_sorted(names)
+
+ def feature_detect(self, names):
+ """
+ Return a list of CPU features that are required to be detected,
+ sorted from the lowest to the highest interest.
+ """
+ names = self.feature_get_til(names, "implies_detect")
+ detect = []
+ for n in names:
+ d = self.feature_supported[n]
+ detect += d.get("detect", d.get("group", [n]))
+ return detect
+
+ @_Cache.me
+ def feature_flags(self, names):
+ """
+ Return a list of CPU features flags sorted from the lowest
+ to highest interest.
+ """
+ names = self.feature_sorted(self.feature_implies_c(names))
+ flags = []
+ for n in names:
+ d = self.feature_supported[n]
+ f = d.get("flags", [])
+ if not f or not self.cc_test_flags(f):
+ continue
+ flags += f
+ return self.cc_normalize_flags(flags)
+
+ @_Cache.me
+ def feature_test(self, name, force_flags=None, macros=[]):
+ """
+ Test a certain CPU feature against the compiler through its own
+ check file.
+
+ Parameters
+ ----------
+ name : str
+ Supported CPU feature name.
+
+ force_flags : list or None, optional
+ If None (default), the flags returned by `feature_flags()`
+ will be used.
+
+ macros : list of tuples, optional
+ A list of C macro definitions.
+ """
+ if force_flags is None:
+ force_flags = self.feature_flags(name)
+
+ self.dist_log(
+ "testing feature '%s' with flags (%s)" % (
+ name, ' '.join(force_flags)
+ ))
+ # Each CPU feature must have C source code that contains at
+ # least one intrinsic or instruction related to the feature.
+ test_path = os.path.join(
+ self.conf_check_path, "cpu_%s.c" % name.lower()
+ )
+ if not os.path.exists(test_path):
+ self.dist_fatal("feature test file does not exist", test_path)
+
+ test = self.dist_test(
+ test_path, force_flags + self.cc_flags["werror"], macros=macros
+ )
+ if not test:
+ self.dist_log("testing failed", stderr=True)
+ return test
+
+ @_Cache.me
+ def feature_is_supported(self, name, force_flags=None, macros=[]):
+ """
+ Check if a certain CPU feature is supported by the platform and compiler.
+
+ Parameters
+ ----------
+ name : str
+ CPU feature name in uppercase.
+
+ force_flags : list or None, optional
+ If None (default), the default compiler flags of every CPU feature
+ will be used during the test.
+
+ macros : list of tuples, optional
+ A list of C macro definitions.
+ """
+ assert(name.isupper())
+ assert(force_flags is None or isinstance(force_flags, list))
+
+ supported = name in self.feature_supported
+ if supported:
+ for impl in self.feature_implies(name):
+ if not self.feature_test(impl, force_flags, macros=macros):
+ return False
+ if not self.feature_test(name, force_flags, macros=macros):
+ return False
+ return supported
+
+ @_Cache.me
+ def feature_can_autovec(self, name):
+ """
+ Check whether the feature can be auto-vectorized by the compiler.
+ """
+ assert(isinstance(name, str))
+ d = self.feature_supported[name]
+ can = d.get("autovec", None)
+ if can is None:
+ valid_flags = [
+ self.cc_test_flags([f]) for f in d.get("flags", [])
+ ]
+ can = valid_flags and any(valid_flags)
+ return can
+
+ @_Cache.me
+ def feature_extra_checks(self, name):
+ """
+ Return a list of supported extra checks after testing them against
+ the compiler.
+
+ Parameters
+ ----------
+ name : str
+ CPU feature name in uppercase.
+ """
+ assert isinstance(name, str)
+ d = self.feature_supported[name]
+ extra_checks = d.get("extra_checks", [])
+ if not extra_checks:
+ return []
+
+ self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
+ flags = self.feature_flags(name)
+ available = []
+ not_available = []
+ for chk in extra_checks:
+ test_path = os.path.join(
+ self.conf_check_path, "extra_%s.c" % chk.lower()
+ )
+ if not os.path.exists(test_path):
+ self.dist_fatal("extra check file does not exist", test_path)
+
+ is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
+ if is_supported:
+ available.append(chk)
+ else:
+ not_available.append(chk)
+
+ if not_available:
+ self.dist_log("testing failed for checks", not_available, stderr=True)
+ return available
+
+
+ def feature_c_preprocessor(self, feature_name, tabs=0):
+ """
+ Generate C preprocessor definitions and include headers of a CPU feature.
+
+ Parameters
+ ----------
+ 'feature_name': str
+ CPU feature name in uppercase.
+ 'tabs': int
+ if > 0, align the generated strings to the right, depending on the number of tabs.
+
+ Returns
+ -------
+ str, generated C preprocessor
+
+ Examples
+ --------
+ >>> self.feature_c_preprocessor("SSE3")
+ /** SSE3 **/
+ #define NPY_HAVE_SSE3 1
+ #include <pmmintrin.h>
+ """
+ assert(feature_name.isupper())
+ feature = self.feature_supported.get(feature_name)
+ assert(feature is not None)
+
+ prepr = [
+ "/** %s **/" % feature_name,
+ "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
+ ]
+ prepr += [
+ "#include <%s>" % h for h in feature.get("headers", [])
+ ]
+
+ extra_defs = feature.get("group", [])
+ extra_defs += self.feature_extra_checks(feature_name)
+ for edef in extra_defs:
+ # Guard extra definitions in case they duplicate definitions
+ # of another feature
+ prepr += [
+ "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
+ "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
+ "#endif",
+ ]
+
+ if tabs > 0:
+ prepr = [('\t'*tabs) + l for l in prepr]
+ return '\n'.join(prepr)
+
+class _Parse:
+ """A helper class that parsing main arguments of `CCompilerOpt`,
+ also parsing configuration statements in dispatch-able sources.
+
+ Parameters
+ ----------
+ cpu_baseline : str or None
+ minimal set of required CPU features or special options.
+
+ cpu_dispatch : str or None
+ dispatched set of additional CPU features or special options.
+
+ Special options can be:
+ - **MIN**: Enables the minimum CPU features defined via `_Config.conf_min_features`
+ - **MAX**: Enables all CPU features supported by the compiler and platform.
+ - **NATIVE**: Enables all CPU features supported by the current machine.
+ - **NONE**: Enables nothing
+ - **Operand +/-**: add or remove features, useful with options **MAX**, **MIN** and **NATIVE**.
+ NOTE: operand + is only added for nominal reasons.
+
+ NOTES:
+ - Case-insensitive among all CPU features and special options.
+ - Comma or space can be used as a separator.
+ - If the CPU feature is not supported by the user platform or compiler,
+ it will be skipped rather than raising a fatal error.
+ - Any CPU features specified in 'cpu_dispatch' will be skipped if they are part of the CPU baseline features.
+ - 'cpu_baseline' force-enables implied features.
+
+ Attributes
+ ----------
+ parse_baseline_names : list
+ Final CPU baseline feature names (sorted from low to high)
+ parse_baseline_flags : list
+ Compiler flags of baseline features
+ parse_dispatch_names : list
+ Final CPU dispatch-able feature names (sorted from low to high)
+ parse_target_groups : dict
+ Dictionary containing initialized target groups that configured
+ through class attribute `conf_target_groups`.
+
+ The key represents the group name and the value is a tuple
+ containing three items:
+ - bool, True if group has the 'baseline' option.
+ - list, list of CPU features.
+ - list, list of extra compiler flags.
+
+ """
+ def __init__(self, cpu_baseline, cpu_dispatch):
+ self._parse_policies = dict(
+ # POLICY NAME, (HAVE, NOT HAVE, [DEPS])
+ KEEP_BASELINE = (
+ None, self._parse_policy_not_keepbase,
+ []
+ ),
+ KEEP_SORT = (
+ self._parse_policy_keepsort,
+ self._parse_policy_not_keepsort,
+ []
+ ),
+ MAXOPT = (
+ self._parse_policy_maxopt, None,
+ []
+ ),
+ WERROR = (
+ self._parse_policy_werror, None,
+ []
+ ),
+ AUTOVEC = (
+ self._parse_policy_autovec, None,
+ ["MAXOPT"]
+ )
+ )
+ if hasattr(self, "parse_is_cached"):
+ return
+
+ self.parse_baseline_names = []
+ self.parse_baseline_flags = []
+ self.parse_dispatch_names = []
+ self.parse_target_groups = {}
+
+ if self.cc_noopt:
+ # skip parsing baseline and dispatch args and keep parsing target groups
+ cpu_baseline = cpu_dispatch = None
+
+ self.dist_log("check requested baseline")
+ if cpu_baseline is not None:
+ cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline)
+ baseline_names = self.feature_names(cpu_baseline)
+ self.parse_baseline_flags = self.feature_flags(baseline_names)
+ self.parse_baseline_names = self.feature_sorted(
+ self.feature_implies_c(baseline_names)
+ )
+
+ self.dist_log("check requested dispatch-able features")
+ if cpu_dispatch is not None:
+ cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch)
+ cpu_dispatch = {
+ f for f in cpu_dispatch_
+ if f not in self.parse_baseline_names
+ }
+ conflict_baseline = cpu_dispatch_.difference(cpu_dispatch)
+ self.parse_dispatch_names = self.feature_sorted(
+ self.feature_names(cpu_dispatch)
+ )
+ if len(conflict_baseline) > 0:
+ self.dist_log(
+ "skip features", conflict_baseline, "since its part of baseline"
+ )
+
+ self.dist_log("initialize targets groups")
+ for group_name, tokens in self.conf_target_groups.items():
+ self.dist_log("parse target group", group_name)
+ GROUP_NAME = group_name.upper()
+ if not tokens or not tokens.strip():
+ # allow empty groups, useful when there's a need
+ # to disable a certain group since '_parse_target_tokens()'
+ # requires at least one valid target
+ self.parse_target_groups[GROUP_NAME] = (
+ False, [], []
+ )
+ continue
+ has_baseline, features, extra_flags = \
+ self._parse_target_tokens(tokens)
+ self.parse_target_groups[GROUP_NAME] = (
+ has_baseline, features, extra_flags
+ )
+
+ self.parse_is_cached = True
+
+ def parse_targets(self, source):
+ """
+ Fetch and parse the configuration statements required for
+ defining the targeted CPU features. Statements should be declared
+ at the top of the source, within a **C** comment, and start
+ with the special mark **@targets**.
+
+ Configuration statements are a sort of keywords representing
+ CPU feature names, groups of statements and policies, combined
+ together to determine the required optimization.
+
+ Parameters
+ ----------
+ source : str
+ the path of **C** source file.
+
+ Returns
+ -------
+ - bool, True if group has the 'baseline' option
+ - list, list of CPU features
+ - list, list of extra compiler flags
+ """
+ self.dist_log("looking for '@targets' inside -> ", source)
+ # get lines between /*@targets and */
+ with open(source) as fd:
+ tokens = ""
+ max_to_reach = 1000 # good enough, isn't it?
+ start_with = "@targets"
+ start_pos = -1
+ end_with = "*/"
+ end_pos = -1
+ for current_line, line in enumerate(fd):
+ if current_line == max_to_reach:
+ self.dist_fatal("reached the maximum number of lines")
+ break
+ if start_pos == -1:
+ start_pos = line.find(start_with)
+ if start_pos == -1:
+ continue
+ start_pos += len(start_with)
+ tokens += line
+ end_pos = line.find(end_with)
+ if end_pos != -1:
+ end_pos += len(tokens) - len(line)
+ break
+
+ if start_pos == -1:
+ self.dist_fatal("expected to find '%s' within a C comment" % start_with)
+ if end_pos == -1:
+ self.dist_fatal("expected to end with '%s'" % end_with)
+
+ tokens = tokens[start_pos:end_pos]
+ return self._parse_target_tokens(tokens)
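+ # Illustrative sketch of a dispatch-able source header that this parser
+ # consumes (feature names and policies are examples, not a prescription):
+ #
+ #   /*@targets
+ #    * $maxopt baseline
+ #    * SSE42 AVX2 (FMA3 AVX2) AVX512_SKX
+ #    */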
+
+ _parse_regex_arg = re.compile(r'\s|,|([+-])')
+ def _parse_arg_features(self, arg_name, req_features):
+ if not isinstance(req_features, str):
+ self.dist_fatal("expected a string in '%s'" % arg_name)
+
+ final_features = set()
+ # space and comma can be used as a separator
+ tokens = list(filter(None, re.split(self._parse_regex_arg, req_features)))
+ append = True # append is the default
+ for tok in tokens:
+ if tok[0] in ("#", "$"):
+ self.dist_fatal(
+ arg_name, "target groups and policies "
+ "aren't allowed from arguments, "
+ "only from dispatch-able sources"
+ )
+ if tok == '+':
+ append = True
+ continue
+ if tok == '-':
+ append = False
+ continue
+
+ TOK = tok.upper() # we use upper-case internally
+ features_to = set()
+ if TOK == "NONE":
+ pass
+ elif TOK == "NATIVE":
+ native = self.cc_flags["native"]
+ if not native:
+ self.dist_fatal(arg_name,
+ "native option isn't supported by the compiler"
+ )
+ features_to = self.feature_names(
+ force_flags=native, macros=[("DETECT_FEATURES", 1)]
+ )
+ elif TOK == "MAX":
+ features_to = self.feature_supported.keys()
+ elif TOK == "MIN":
+ features_to = self.feature_min
+ else:
+ if TOK in self.feature_supported:
+ features_to.add(TOK)
+ else:
+ if not self.feature_is_exist(TOK):
+ self.dist_fatal(arg_name,
+ ", '%s' isn't a known feature or option" % tok
+ )
+ if append:
+ final_features = final_features.union(features_to)
+ else:
+ final_features = final_features.difference(features_to)
+
+ append = True # back to default
+
+ return final_features
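+ # e.g., a hedged sketch: _parse_arg_features("cpu_baseline", "min avx2 -sse41")
+ # starts from the minimum feature set, adds AVX2 (case-insensitive),
+ # then removes SSE41 via the '-' operand.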
+
+ _parse_regex_target = re.compile(r'\s|[*,/]|([()])')
+ def _parse_target_tokens(self, tokens):
+ assert(isinstance(tokens, str))
+ final_targets = [] # to keep it sorted as specified
+ extra_flags = []
+ has_baseline = False
+
+ skipped = set()
+ policies = set()
+ multi_target = None
+
+ tokens = list(filter(None, re.split(self._parse_regex_target, tokens)))
+ if not tokens:
+ self.dist_fatal("expected at least one token")
+
+ for tok in tokens:
+ TOK = tok.upper()
+ ch = tok[0]
+ if ch in ('+', '-'):
+ self.dist_fatal(
+ "+/- are 'not' allowed from target's groups or @targets, "
+ "only from cpu_baseline and cpu_dispatch parms"
+ )
+ elif ch == '$':
+ if multi_target is not None:
+ self.dist_fatal(
+ "policies aren't allowed inside multi-target '()'"
+ ", only CPU features"
+ )
+ policies.add(self._parse_token_policy(TOK))
+ elif ch == '#':
+ if multi_target is not None:
+ self.dist_fatal(
+ "target groups aren't allowed inside multi-target '()'"
+ ", only CPU features"
+ )
+ has_baseline, final_targets, extra_flags = \
+ self._parse_token_group(TOK, has_baseline, final_targets, extra_flags)
+ elif ch == '(':
+ if multi_target is not None:
+ self.dist_fatal("unclosed multi-target, missing ')'")
+ multi_target = set()
+ elif ch == ')':
+ if multi_target is None:
+ self.dist_fatal("multi-target opener '(' wasn't found")
+ targets = self._parse_multi_target(multi_target)
+ if targets is None:
+ skipped.add(tuple(multi_target))
+ else:
+ if len(targets) == 1:
+ targets = targets[0]
+ if targets and targets not in final_targets:
+ final_targets.append(targets)
+ multi_target = None # back to default
+ else:
+ if TOK == "BASELINE":
+ if multi_target is not None:
+ self.dist_fatal("baseline isn't allowed inside multi-target '()'")
+ has_baseline = True
+ continue
+
+ if multi_target is not None:
+ multi_target.add(TOK)
+ continue
+
+ if not self.feature_is_exist(TOK):
+ self.dist_fatal("invalid target name '%s'" % TOK)
+
+ is_enabled = (
+ TOK in self.parse_baseline_names or
+ TOK in self.parse_dispatch_names
+ )
+ if is_enabled:
+ if TOK not in final_targets:
+ final_targets.append(TOK)
+ continue
+
+ skipped.add(TOK)
+
+ if multi_target is not None:
+ self.dist_fatal("unclosed multi-target, missing ')'")
+ if skipped:
+ self.dist_log(
+ "skip targets", skipped,
+ "not part of baseline or dispatch-able features"
+ )
+
+ final_targets = self.feature_untied(final_targets)
+
+ # add policy dependencies
+ for p in list(policies):
+ _, _, deps = self._parse_policies[p]
+ for d in deps:
+ if d in policies:
+ continue
+ self.dist_log(
+ "policy '%s' force enables '%s'" % (
+ p, d
+ ))
+ policies.add(d)
+
+ # apply the policies' filtrations
+ for p, (have, nhave, _) in self._parse_policies.items():
+ func = None
+ if p in policies:
+ func = have
+ self.dist_log("policy '%s' is ON" % p)
+ else:
+ func = nhave
+ if not func:
+ continue
+ has_baseline, final_targets, extra_flags = func(
+ has_baseline, final_targets, extra_flags
+ )
+
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_token_policy(self, token):
+ """validate policy token"""
+ if len(token) <= 1 or token[-1:] == token[0]:
+ self.dist_fatal("'$' must be at the beginning of the policy name")
+ token = token[1:]
+ if token not in self._parse_policies:
+ self.dist_fatal(
+ "'%s' is an invalid policy name, available policies are" % token,
+ self._parse_policies.keys()
+ )
+ return token
+
+ def _parse_token_group(self, token, has_baseline, final_targets, extra_flags):
+ """validate group token"""
+ if len(token) <= 1 or token[-1:] == token[0]:
+ self.dist_fatal("'#' must be at the beginning of the group name")
+
+ token = token[1:]
+ ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get(
+ token, (False, None, [])
+ )
+ if gtargets is None:
+ self.dist_fatal(
+ "'%s' is an invalid target group name, " % token + \
+ "available target groups are",
+ self.parse_target_groups.keys()
+ )
+ if ghas_baseline:
+ has_baseline = True
+ # always keep sorting as specified
+ final_targets += [f for f in gtargets if f not in final_targets]
+ extra_flags += [f for f in gextra_flags if f not in extra_flags]
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_multi_target(self, targets):
+ """validate multi targets that defined between parentheses()"""
+ # remove any implied features and keep the origins
+ if not targets:
+ self.dist_fatal("empty multi-target '()'")
+ if not all([
+ self.feature_is_exist(tar) for tar in targets
+ ]) :
+ self.dist_fatal("invalid target name in multi-target", targets)
+ if not all([
+ (
+ tar in self.parse_baseline_names or
+ tar in self.parse_dispatch_names
+ )
+ for tar in targets
+ ]) :
+ return None
+ targets = self.feature_ahead(targets)
+ if not targets:
+ return None
+ # force sort multi targets, so it can be comparable
+ targets = self.feature_sorted(targets)
+ targets = tuple(targets) # hashable
+ return targets
+
+ def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags):
+ """skip all baseline features"""
+ skipped = []
+ for tar in final_targets[:]:
+ is_base = False
+ if isinstance(tar, str):
+ is_base = tar in self.parse_baseline_names
+ else:
+ # multi targets
+ is_base = all([
+ f in self.parse_baseline_names
+ for f in tar
+ ])
+ if is_base:
+ skipped.append(tar)
+ final_targets.remove(tar)
+
+ if skipped:
+ self.dist_log("skip baseline features", skipped)
+
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags):
+ """leave a notice that $keep_sort is on"""
+ self.dist_log(
+ "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n"
+ "are 'not' sorted depend on the highest interest but"
+ "as specified in the dispatch-able source or the extra group"
+ )
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags):
+ """sorted depend on the highest interest"""
+ final_targets = self.feature_sorted(final_targets, reverse=True)
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags):
+ """append the compiler optimization flags"""
+ if self.cc_has_debug:
+ self.dist_log("debug mode is detected, policy 'maxopt' is skipped.")
+ elif self.cc_noopt:
+ self.dist_log("optimization is disabled, policy 'maxopt' is skipped.")
+ else:
+ flags = self.cc_flags["opt"]
+ if not flags:
+ self.dist_log(
+ "current compiler doesn't support optimization flags, "
+ "policy 'maxopt' is skipped", stderr=True
+ )
+ else:
+ extra_flags += flags
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_werror(self, has_baseline, final_targets, extra_flags):
+ """force warnings to treated as errors"""
+ flags = self.cc_flags["werror"]
+ if not flags:
+ self.dist_log(
+ "current compiler doesn't support werror flags, "
+ "warnings will 'not' treated as errors", stderr=True
+ )
+ else:
+ self.dist_log("compiler warnings are treated as errors")
+ extra_flags += flags
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags):
+ """skip features that has no auto-vectorized support by compiler"""
+ skipped = []
+ for tar in final_targets[:]:
+ if isinstance(tar, str):
+ can = self.feature_can_autovec(tar)
+ else: # multiple target
+ can = all([
+ self.feature_can_autovec(t)
+ for t in tar
+ ])
+ if not can:
+ final_targets.remove(tar)
+ skipped.append(tar)
+
+ if skipped:
+ self.dist_log("skip non auto-vectorized features", skipped)
+
+ return has_baseline, final_targets, extra_flags
+
+class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
+ """
+ A helper class for `CCompiler` that aims to provide extra build options
+ to effectively control compiler optimizations that are directly
+ related to CPU features.
+ """
+ def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None):
+ _Config.__init__(self)
+ _Distutils.__init__(self, ccompiler)
+ _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch)
+ _CCompiler.__init__(self)
+ _Feature.__init__(self)
+ if not self.cc_noopt and self.cc_has_native:
+ self.dist_log(
+ "native flag is specified through environment variables. "
+ "force cpu-baseline='native'"
+ )
+ cpu_baseline = "native"
+ _Parse.__init__(self, cpu_baseline, cpu_dispatch)
+ # keep the requested features untouched, they are needed later for
+ # report and trace purposes
+ self._requested_baseline = cpu_baseline
+ self._requested_dispatch = cpu_dispatch
+ # the key is the dispatch-able source and the value is a tuple
+ # containing two items (has_baseline[boolean], dispatched-features[list])
+ self.sources_status = getattr(self, "sources_status", {})
+ # every instance should have a separate one
+ self.cache_private.add("sources_status")
+ # set it at the end to make sure the cache writing is done after
+ # initializing this class
+ self.hit_cache = hasattr(self, "hit_cache")
+
+ def is_cached(self):
+ """
+ Returns True if the class was loaded from the cache file
+ """
+ return self.cache_infile and self.hit_cache
+
+ def cpu_baseline_flags(self):
+ """
+ Returns a list of final CPU baseline compiler flags
+ """
+ return self.parse_baseline_flags
+
+ def cpu_baseline_names(self):
+ """
+ return a list of final CPU baseline feature names
+ """
+ return self.parse_baseline_names
+
+ def cpu_dispatch_names(self):
+ """
+ return a list of final CPU dispatch feature names
+ """
+ return self.parse_dispatch_names
+
+ def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs):
+ """
+ Compile one or more dispatch-able sources and generate object files,
+ also generate abstract C config headers and macros that are
+ used later for the final runtime dispatching process.
+
+ The mechanism behind it is to take each source file specified
+ in 'sources' and branch it into several files depending on the
+ special configuration statements that must be declared at the
+ top of each source, which contain the targeted CPU features;
+ then every branched source is compiled with the proper compiler flags.
+
+ Parameters
+ ----------
+ sources : list
+ Must be a list of dispatch-able sources file paths,
+ and configuration statements must be declared inside
+ each file.
+
+ src_dir : str
+ Path of parent directory for the generated headers and wrapped sources.
+ If None (default), the files will be generated in place.
+
+ ccompiler : CCompiler
+ Distutils `CCompiler` instance to be used for compilation.
+ If None (default), the provided instance during the initialization
+ will be used instead.
+
+ **kwargs : any
+ Arguments to pass on to `CCompiler.compile()`.
+
+ Returns
+ -------
+ list : generated object files
+
+ Raises
+ ------
+ CompileError
+ Raised by `CCompiler.compile()` on compilation failure.
+ DistutilsError
+ Raised on errors while validating the configuration statements.
+
+ See Also
+ --------
+ parse_targets :
+ Parsing the configuration statements of dispatch-able sources.
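+
+ Examples
+ --------
+ A rough sketch; the source path below is hypothetical and `opt` is an
+ initialized `CCompilerOpt` instance::
+
+ objects = opt.try_dispatch(["umath/_simd.dispatch.c"])
+ # 'objects' can then be passed on to the linking step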
+ """
+ to_compile = {}
+ baseline_flags = self.cpu_baseline_flags()
+ include_dirs = kwargs.setdefault("include_dirs", [])
+
+ for src in sources:
+ output_dir = os.path.dirname(src)
+ if src_dir:
+ if not output_dir.startswith(src_dir):
+ output_dir = os.path.join(src_dir, output_dir)
+ if output_dir not in include_dirs:
+ # Allow the dispatch-able sources to include the generated
+ # config header (*.dispatch.h)
+ include_dirs.append(output_dir)
+
+ has_baseline, targets, extra_flags = self.parse_targets(src)
+ nochange = self._generate_config(output_dir, src, targets, has_baseline)
+ for tar in targets:
+ tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange)
+ flags = tuple(extra_flags + self.feature_flags(tar))
+ to_compile.setdefault(flags, []).append(tar_src)
+
+ if has_baseline:
+ flags = tuple(extra_flags + baseline_flags)
+ to_compile.setdefault(flags, []).append(src)
+
+ self.sources_status[src] = (has_baseline, targets)
+
+ # The sources are compiled in a separate loop for these reasons:
+ # - Gathering all sources with the same flags benefits from
+ # parallel compilation as much as possible.
+ # - All config headers of the dispatch-able sources are generated
+ # before the compilation, in case there are dependency
+ # relationships among them.
+ objects = []
+ for flags, srcs in to_compile.items():
+ objects += self.dist_compile(
+ srcs, list(flags), ccompiler=ccompiler, **kwargs
+ )
+ return objects
+
+ def generate_dispatch_header(self, header_path):
+ """
+ Generate the dispatch header, which contains the #definitions and headers
+ of the platform-specific instruction sets for the enabled CPU baseline and
+ dispatch-able features.
+
+ It's highly recommended to take a look at the generated header,
+ as well as the source files generated via `try_dispatch()`,
+ in order to get the full picture.
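+
+ A rough call sketch (the header path below is a hypothetical example)::
+
+ opt.generate_dispatch_header("build/src/_cpu_dispatch.h")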
+ """
+ self.dist_log("generate CPU dispatch header: (%s)" % header_path)
+
+ baseline_names = self.cpu_baseline_names()
+ dispatch_names = self.cpu_dispatch_names()
+ baseline_len = len(baseline_names)
+ dispatch_len = len(dispatch_names)
+
+ header_dir = os.path.dirname(header_path)
+ if not os.path.exists(header_dir):
+ self.dist_log(
+ f"dispatch header dir {header_dir} does not exist, creating it",
+ stderr=True
+ )
+ os.makedirs(header_dir)
+
+ with open(header_path, 'w') as f:
+ baseline_calls = ' \\\n'.join([
+ (
+ "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+ ) % (self.conf_c_prefix, f)
+ for f in baseline_names
+ ])
+ dispatch_calls = ' \\\n'.join([
+ (
+ "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+ ) % (self.conf_c_prefix, f)
+ for f in dispatch_names
+ ])
+ f.write(textwrap.dedent("""\
+ /*
+ * AUTOGENERATED DON'T EDIT
+ * Please make changes to the code generator (distutils/ccompiler_opt.py)
+ */
+ #define {pfx}WITH_CPU_BASELINE "{baseline_str}"
+ #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}"
+ #define {pfx}WITH_CPU_BASELINE_N {baseline_len}
+ #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len}
+ #define {pfx}WITH_CPU_EXPAND_(X) X
+ #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\
+ {baseline_calls}
+ #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\
+ {dispatch_calls}
+ """).format(
+ pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names),
+ dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len,
+ dispatch_len=dispatch_len, baseline_calls=baseline_calls,
+ dispatch_calls=dispatch_calls
+ ))
+ baseline_pre = ''
+ for name in baseline_names:
+ baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n'
+
+ dispatch_pre = ''
+ for name in dispatch_names:
+ dispatch_pre += textwrap.dedent("""\
+ #ifdef {pfx}CPU_TARGET_{name}
+ {pre}
+ #endif /*{pfx}CPU_TARGET_{name}*/
+ """).format(
+ pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor(
+ name, tabs=1
+ ))
+
+ f.write(textwrap.dedent("""\
+ /******* baseline features *******/
+ {baseline_pre}
+ /******* dispatch features *******/
+ {dispatch_pre}
+ """).format(
+ pfx=self.conf_c_prefix_, baseline_pre=baseline_pre,
+ dispatch_pre=dispatch_pre
+ ))
+
+ def report(self, full=False):
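+ """
+ Return a plain-text report that summarizes the platform, the final
+ CPU baseline and the dispatched features. When `full` is True, every
+ dispatched target is detailed with its flags, implied features,
+ detection names, extra checks and the sources that requested it.
+ """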
+ report = []
+ platform_rows = []
+ baseline_rows = []
+ dispatch_rows = []
+ report.append(("Platform", platform_rows))
+ report.append(("", ""))
+ report.append(("CPU baseline", baseline_rows))
+ report.append(("", ""))
+ report.append(("CPU dispatch", dispatch_rows))
+
+ ########## platform ##########
+ platform_rows.append(("Architecture", (
+ "unsupported" if self.cc_on_noarch else self.cc_march)
+ ))
+ platform_rows.append(("Compiler", (
+ "unix-like" if self.cc_is_nocc else self.cc_name)
+ ))
+ ########## baseline ##########
+ if self.cc_noopt:
+ baseline_rows.append(("Requested", "optimization disabled"))
+ else:
+ baseline_rows.append(("Requested", repr(self._requested_baseline)))
+
+ baseline_names = self.cpu_baseline_names()
+ baseline_rows.append((
+ "Enabled", (' '.join(baseline_names) if baseline_names else "none")
+ ))
+ baseline_flags = self.cpu_baseline_flags()
+ baseline_rows.append((
+ "Flags", (' '.join(baseline_flags) if baseline_flags else "none")
+ ))
+ extra_checks = []
+ for name in baseline_names:
+ extra_checks += self.feature_extra_checks(name)
+ baseline_rows.append((
+ "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
+ ))
+
+ ########## dispatch ##########
+ if self.cc_noopt:
+ baseline_rows.append(("Requested", "optimization disabled"))
+ else:
+ dispatch_rows.append(("Requested", repr(self._requested_dispatch)))
+
+ dispatch_names = self.cpu_dispatch_names()
+ dispatch_rows.append((
+ "Enabled", (' '.join(dispatch_names) if dispatch_names else "none")
+ ))
+ ########## Generated ##########
+ # TODO:
+ # - collect object names from 'try_dispatch()',
+ # then get the size of each object and print it
+ # - give more details about the features that were not
+ # generated due to lack of compiler support
+ # - find a better output design.
+ #
+ target_sources = {}
+ for source, (_, targets) in self.sources_status.items():
+ for tar in targets:
+ target_sources.setdefault(tar, []).append(source)
+
+ if not full or not target_sources:
+ generated = ""
+ for tar in self.feature_sorted(target_sources):
+ sources = target_sources[tar]
+ name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+ generated += name + "[%d] " % len(sources)
+ dispatch_rows.append(("Generated", generated[:-1] if generated else "none"))
+ else:
+ dispatch_rows.append(("Generated", ''))
+ for tar in self.feature_sorted(target_sources):
+ sources = target_sources[tar]
+ pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+ flags = ' '.join(self.feature_flags(tar))
+ implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
+ detect = ' '.join(self.feature_detect(tar))
+ extra_checks = []
+ for name in ((tar,) if isinstance(tar, str) else tar):
+ extra_checks += self.feature_extra_checks(name)
+ extra_checks = (' '.join(extra_checks) if extra_checks else "none")
+
+ dispatch_rows.append(('', ''))
+ dispatch_rows.append((pretty_name, implies))
+ dispatch_rows.append(("Flags", flags))
+ dispatch_rows.append(("Extra checks", extra_checks))
+ dispatch_rows.append(("Detect", detect))
+ for src in sources:
+ dispatch_rows.append(("", src))
+
+ ###############################
+ # TODO: add support for 'markdown' format
+ text = []
+ secs_len = [len(secs) for secs, _ in report]
+ cols_len = [len(col) for _, rows in report for col, _ in rows]
+ tab = ' ' * 2
+ pad = max(max(secs_len), max(cols_len))
+ for sec, rows in report:
+ if not sec:
+ text.append("") # empty line
+ continue
+ sec += ' ' * (pad - len(sec))
+ text.append(sec + tab + ': ')
+ for col, val in rows:
+ col += ' ' * (pad - len(col))
+ text.append(tab + col + ': ' + val)
+
+ return '\n'.join(text)
+
+ def _wrap_target(self, output_dir, dispatch_src, target, nochange=False):
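+ """
+ Write (or reuse, when `nochange` allows it) the wrapper source of a
+ single target: it defines the per-target macros, #includes the
+ original dispatch-able source and returns the wrapper path.
+ """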
+ assert isinstance(target, (str, tuple))
+ if isinstance(target, str):
+ ext_name = target_name = target
+ else:
+ # multi-target
+ ext_name = '.'.join(target)
+ target_name = '__'.join(target)
+
+ wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src))
+ wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower())
+ if nochange and os.path.exists(wrap_path):
+ return wrap_path
+
+ self.dist_log("wrap dispatch-able target -> ", wrap_path)
+ # sorting for readability
+ features = self.feature_sorted(self.feature_implies_c(target))
+ target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_
+ target_defs = [target_join + f for f in features]
+ target_defs = '\n'.join(target_defs)
+
+ with open(wrap_path, "w") as fd:
+ fd.write(textwrap.dedent("""\
+ /**
+ * AUTOGENERATED DON'T EDIT
+ * Please make changes to the code generator \
+ (distutils/ccompiler_opt.py)
+ */
+ #define {pfx}CPU_TARGET_MODE
+ #define {pfx}CPU_TARGET_CURRENT {target_name}
+ {target_defs}
+ #include "{path}"
+ """).format(
+ pfx=self.conf_c_prefix_, target_name=target_name,
+ path=os.path.abspath(dispatch_src), target_defs=target_defs
+ ))
+ return wrap_path
+
+ def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False):
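+ """
+ Write the per-source config header that defines the dispatch-call
+ macros for 'targets'. Returns True when the existing header already
+ matches the cached hash (nothing to regenerate), otherwise writes a
+ fresh header and returns False.
+ """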
+ config_path = os.path.basename(dispatch_src)
+ config_path = os.path.splitext(config_path)[0] + '.h'
+ config_path = os.path.join(output_dir, config_path)
+ # check if targets didn't change to avoid recompiling
+ cache_hash = self.cache_hash(targets, has_baseline)
+ try:
+ with open(config_path) as f:
+ last_hash = f.readline().split("cache_hash:")
+ if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
+ return True
+ except OSError:
+ pass
+
+ os.makedirs(os.path.dirname(config_path), exist_ok=True)
+
+ self.dist_log("generate dispatched config -> ", config_path)
+ dispatch_calls = []
+ for tar in targets:
+ if isinstance(tar, str):
+ target_name = tar
+ else: # multi target
+ target_name = '__'.join(tar)
+ req_detect = self.feature_detect(tar)
+ req_detect = '&&'.join([
+ "CHK(%s)" % f for f in req_detect
+ ])
+ dispatch_calls.append(
+ "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
+ self.conf_c_prefix_, req_detect, target_name
+ ))
+ dispatch_calls = ' \\\n'.join(dispatch_calls)
+
+ if has_baseline:
+ baseline_calls = (
+ "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
+ ) % self.conf_c_prefix_
+ else:
+ baseline_calls = ''
+
+ with open(config_path, "w") as fd:
+ fd.write(textwrap.dedent("""\
+ // cache_hash:{cache_hash}
+ /**
+ * AUTOGENERATED DON'T EDIT
+ * Please make changes to the code generator (distutils/ccompiler_opt.py)
+ */
+ #ifndef {pfx}CPU_DISPATCH_EXPAND_
+ #define {pfx}CPU_DISPATCH_EXPAND_(X) X
+ #endif
+ #undef {pfx}CPU_DISPATCH_BASELINE_CALL
+ #undef {pfx}CPU_DISPATCH_CALL
+ #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
+ {baseline_calls}
+ #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
+ {dispatch_calls}
+ """).format(
+ pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
+ dispatch_calls=dispatch_calls, cache_hash=cache_hash
+ ))
+ return False
+
+def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
+ """
+ Create a new instance of 'CCompilerOpt' and generate the dispatch header,
+ which contains the #definitions and headers of the platform-specific
+ instruction sets for the enabled CPU baseline and dispatch-able features.
+
+ Parameters
+ ----------
+ compiler : CCompiler instance
+ dispatch_hpath : str
+ path of the dispatch header
+
+ **kwargs : any
+ Passed as-is to `CCompilerOpt(...)`.
+
+ Returns
+ -------
+ new instance of CCompilerOpt
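+
+ A rough usage sketch (paths and file names here are hypothetical)::
+
+ opt = new_ccompiler_opt(compiler, "build/src/_cpu_dispatch.h")
+ objects = opt.try_dispatch(["umath/_simd.dispatch.c"])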
+ """
+ opt = CCompilerOpt(compiler, **kwargs)
+ if not os.path.exists(dispatch_hpath) or not opt.is_cached():
+ opt.generate_dispatch_header(dispatch_hpath)
+ return opt
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c
new file mode 100644
index 00000000..6bc9022a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c
@@ -0,0 +1,27 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float *src = (float*)argv[argc-1];
+ float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+ /* MAXMIN */
+ int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
+ ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
+ /* ROUNDING */
+ ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
+#ifdef __aarch64__
+ {
+ double *src2 = (double*)argv[argc-1];
+ float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+ /* MAXMIN */
+ ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
+ ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
+ /* ROUNDING */
+ ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
+ }
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c
new file mode 100644
index 00000000..e7068ce0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c
@@ -0,0 +1,16 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ unsigned char *src = (unsigned char*)argv[argc-1];
+ uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]);
+ uint32x4_t va = vdupq_n_u32(3);
+ int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
+#ifdef __aarch64__
+ ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
new file mode 100644
index 00000000..54e32809
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float16_t *src = (float16_t*)argv[argc-1];
+ float *src2 = (float*)argv[argc-2];
+ float16x8_t vhp = vdupq_n_f16(src[0]);
+ float16x4_t vlhp = vdup_n_f16(src[1]);
+ float32x4_t vf = vdupq_n_f32(src2[0]);
+ float32x2_t vlf = vdup_n_f32(src2[1]);
+
+ int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
+ ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
+
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c
new file mode 100644
index 00000000..e2de0306
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c
@@ -0,0 +1,15 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float16_t *src = (float16_t*)argv[argc-1];
+ float16x8_t vhp = vdupq_n_f16(src[0]);
+ float16x4_t vlhp = vdup_n_f16(src[1]);
+
+ int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
+ ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c
new file mode 100644
index 00000000..26ae1846
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __AVX__
+ #error "HOST/ARCH doesn't support AVX"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
+ return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c
new file mode 100644
index 00000000..ddde868f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __AVX2__
+ #error "HOST/ARCH doesn't support AVX2"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
+ return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
new file mode 100644
index 00000000..81edcd06
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __AVX512VNNI__
+ #error "HOST/ARCH doesn't support CascadeLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ /* VNNI */
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
new file mode 100644
index 00000000..5799f122
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
@@ -0,0 +1,24 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
+ #error "HOST/ARCH doesn't support CannonLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ /* IFMA */
+ a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
+ /* VBMI */
+ a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
new file mode 100644
index 00000000..3cf44d73
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
+ #error "HOST/ARCH doesn't support IceLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ /* VBMI2 */
+ a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
+ /* BITALG */
+ a = _mm512_popcnt_epi8(a);
+ /* VPOPCNTDQ */
+ a = _mm512_popcnt_epi64(a);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
new file mode 100644
index 00000000..b3f4f697
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
@@ -0,0 +1,25 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #if !defined(__AVX512ER__) || !defined(__AVX512PF__)
+ #error "HOST/ARCH doesn't support Knights Landing AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ int base[128];
+ __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
+ /* ER */
+ __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
+ /* PF */
+ _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
+ return base[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
new file mode 100644
index 00000000..2c426462
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
@@ -0,0 +1,30 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
+ #error "HOST/ARCH doesn't support Knights Mill AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
+
+ /* 4FMAPS */
+ b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
+ /* 4VNNIW */
+ a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
+ /* VPOPCNTDQ */
+ a = _mm512_popcnt_epi64(a);
+
+ a = _mm512_add_epi32(a, _mm512_castps_si512(b));
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
new file mode 100644
index 00000000..8840efb7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
+ #error "HOST/ARCH doesn't support SkyLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+ /* VL */
+ __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
+ /* DQ */
+ __m512i b = _mm512_broadcast_i32x8(a);
+ /* BW */
+ b = _mm512_abs_epi16(b);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c
new file mode 100644
index 00000000..5e29c79e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __AVX512CD__
+ #error "HOST/ARCH doesn't support AVX512CD"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c
new file mode 100644
index 00000000..d0eb7b1a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __AVX512F__
+ #error "HOST/ARCH doesn't support AVX512F"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c
new file mode 100644
index 00000000..fdf36cec
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __F16C__
+ #error "HOST/ARCH doesn't support F16C"
+ #endif
+#endif
+
+#include <emmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
+ __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
+ return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c
new file mode 100644
index 00000000..bfeef22b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #if !defined(__FMA__) && !defined(__AVX2__)
+ #error "HOST/ARCH doesn't support FMA3"
+ #endif
+#endif
+
+#include <xmmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+ a = _mm256_fmadd_ps(a, a, a);
+ return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c
new file mode 100644
index 00000000..0ff17a48
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c
@@ -0,0 +1,13 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+ #include <ammintrin.h>
+#else
+ #include <x86intrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+ __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+ a = _mm256_macc_ps(a, a, a);
+ return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c
new file mode 100644
index 00000000..8c64f864
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ // pass data through untraceable pointers so the compiler cannot
+ // optimize out any constants, and the instructions are actually
+ // tested against the linker.
+ float *src = (float*)argv[argc-1];
+ float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+ int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
+#ifdef __aarch64__
+ double *src2 = (double*)argv[argc-2];
+ float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+ ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
new file mode 100644
index 00000000..f3b94977
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
@@ -0,0 +1,11 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ short *src = (short*)argv[argc-1];
+ float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
+ return (int)vgetq_lane_f32(v_z4, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
new file mode 100644
index 00000000..a039159d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
@@ -0,0 +1,21 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float *src = (float*)argv[argc-1];
+ float32x4_t v1 = vdupq_n_f32(src[0]);
+ float32x4_t v2 = vdupq_n_f32(src[1]);
+ float32x4_t v3 = vdupq_n_f32(src[2]);
+ int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
+#ifdef __aarch64__
+ double *src2 = (double*)argv[argc-2];
+ float64x2_t vd1 = vdupq_n_f64(src2[0]);
+ float64x2_t vd2 = vdupq_n_f64(src2[1]);
+ float64x2_t vd3 = vdupq_n_f64(src2[2]);
+ ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c
new file mode 100644
index 00000000..813c461f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c
@@ -0,0 +1,32 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #if !defined(__SSE4_2__) && !defined(__POPCNT__)
+ #error "HOST/ARCH doesn't support POPCNT"
+ #endif
+#endif
+
+#ifdef _MSC_VER
+ #include <nmmintrin.h>
+#else
+ #include <popcntintrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+ // To make sure popcnt instructions are generated
+ // and tested against the assembler
+ unsigned long long a = *((unsigned long long*)argv[argc-1]);
+ unsigned int b = *((unsigned int*)argv[argc-2]);
+
+#if defined(_M_X64) || defined(__x86_64__)
+ a = _mm_popcnt_u64(a);
+#endif
+ b = _mm_popcnt_u32(b);
+ return (int)a + b;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c
new file mode 100644
index 00000000..602b74e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __SSE__
+ #error "HOST/ARCH doesn't support SSE"
+ #endif
+#endif
+
+#include <xmmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c
new file mode 100644
index 00000000..33826a9e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __SSE2__
+ #error "HOST/ARCH doesn't support SSE2"
+ #endif
+#endif
+
+#include <emmintrin.h>
+
+int main(void)
+{
+ __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+ return _mm_cvtsi128_si32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c
new file mode 100644
index 00000000..d47c20f7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __SSE3__
+ #error "HOST/ARCH doesn't support SSE3"
+ #endif
+#endif
+
+#include <pmmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c
new file mode 100644
index 00000000..7c80238a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __SSE4_1__
+ #error "HOST/ARCH doesn't support SSE41"
+ #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_floor_ps(_mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c
new file mode 100644
index 00000000..f60e18f3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __SSE4_2__
+ #error "HOST/ARCH doesn't support SSE42"
+ #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c
new file mode 100644
index 00000000..fde390d6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host
+ * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+ * the test will be broken and leads to enable all possible features.
+ */
+ #ifndef __SSSE3__
+ #error "HOST/ARCH doesn't support SSSE3"
+ #endif
+#endif
+
+#include <tmmintrin.h>
+
+int main(void)
+{
+ __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+ return (int)_mm_cvtsi128_si32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c
new file mode 100644
index 00000000..0b3f30d6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+ #define vsx_ld vec_vsx_ld
+ #define vsx_st vec_vsx_st
+#else
+ #define vsx_ld vec_xl
+ #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+ unsigned int zout[4];
+ unsigned int z4[] = {0, 0, 0, 0};
+ __vector unsigned int v_z4 = vsx_ld(0, z4);
+ vsx_st(v_z4, 0, zout);
+ return zout[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c
new file mode 100644
index 00000000..410fb29d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned long long v_uint64x2;
+
+int main(void)
+{
+ v_uint64x2 z2 = (v_uint64x2){0, 0};
+ z2 = (v_uint64x2)vec_cmpeq(z2, z2);
+ return (int)vec_extract(z2, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c
new file mode 100644
index 00000000..85752653
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+ v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
+ z4 = vec_absd(z4, z4);
+ return (int)vec_extract(z4, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c
new file mode 100644
index 00000000..a6acc738
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c
@@ -0,0 +1,14 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+ v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
+ v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
+ v_uint32x4 v3 = vec_mod(v1, v2);
+ return (int)vec_extractm(v3);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c
new file mode 100644
index 00000000..18fb7ef9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c
@@ -0,0 +1,16 @@
+#if (__VEC__ < 10301) || (__ARCH__ < 11)
+ #error VX not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+ __vector double x = vec_abs(vec_xl(argc, (double*)argv));
+ __vector double y = vec_load_len((double*)argv, (unsigned int)argc);
+
+ x = vec_round(vec_ceil(x) + vec_floor(y));
+ __vector bool long long m = vec_cmpge(x, y);
+ __vector long long i = vec_signed(vec_sel(x, y, m));
+
+ return (int)vec_extract(i, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c
new file mode 100644
index 00000000..e6933adc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c
@@ -0,0 +1,25 @@
+#if (__VEC__ < 10302) || (__ARCH__ < 12)
+ #error VXE not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+ __vector float x = vec_nabs(vec_xl(argc, (float*)argv));
+ __vector float y = vec_load_len((float*)argv, (unsigned int)argc);
+
+ x = vec_round(vec_ceil(x) + vec_floor(y));
+ __vector bool int m = vec_cmpge(x, y);
+ x = vec_sel(x, y, m);
+
+ // need to test for the existence of the intrinsic "vflls", since
+ // vec_doublee maps to the wrong intrinsic "vfll".
+ // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871
+#if defined(__GNUC__) && !defined(__clang__)
+ __vector long long i = vec_signed(__builtin_s390_vflls(x));
+#else
+ __vector long long i = vec_signed(vec_doublee(x));
+#endif
+
+ return (int)vec_extract(i, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c
new file mode 100644
index 00000000..f36d5712
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c
@@ -0,0 +1,21 @@
+#if (__VEC__ < 10303) || (__ARCH__ < 13)
+ #error VXE2 not supported
+#endif
+
+#include <vecintrin.h>
+
+int main(int argc, char **argv)
+{
+ int val;
+ __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' };
+ __vector signed short search = { 'g', 'h', 'g', 'o' };
+ __vector unsigned char len = { 0 };
+ __vector unsigned char res = vec_search_string_cc(large, search, len, &val);
+ __vector float x = vec_xl(argc, (float*)argv);
+ __vector int i = vec_signed(x);
+
+ i = vec_srdb(vec_sldb(i, i, 2), i, 3);
+ val += (int)vec_extract(res, 1);
+ val += vec_extract(i, 0);
+ return val;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c
new file mode 100644
index 00000000..51d70cf2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c
@@ -0,0 +1,12 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+ #include <ammintrin.h>
+#else
+ #include <x86intrin.h>
+#endif
+
+int main(void)
+{
+ __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
+ return _mm_cvtsi128_si32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
new file mode 100644
index 00000000..9cfd0c2a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations due to:
+ * - MSVC has supported it since vs2019 see,
+ * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+ __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+ m64 = _kor_mask64(m64, m64);
+ m64 = _kxor_mask64(m64, m64);
+ m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+ m64 = _mm512_kunpackd(m64, m64);
+ m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+ return (int)_cvtmask64_u64(m64);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
new file mode 100644
index 00000000..f0dc88bd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
@@ -0,0 +1,16 @@
+#include <immintrin.h>
+/**
+ * Test DQ mask operations due to:
+ * - MSVC has supported it since vs2019 see,
+ * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+ __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
+ m8 = _kor_mask8(m8, m8);
+ m8 = _kxor_mask8(m8, m8);
+ m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
+ return (int)_cvtmask8_u32(m8);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
new file mode 100644
index 00000000..db01aaee
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+ __m512 one_ps = _mm512_set1_ps(1.0f);
+ __m512d one_pd = _mm512_set1_pd(1.0);
+ __m512i one_i64 = _mm512_set1_epi64(1);
+ // add
+ float sum_ps = _mm512_reduce_add_ps(one_ps);
+ double sum_pd = _mm512_reduce_add_pd(one_pd);
+ int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_add_epi32(one_i64);
+ // mul
+ sum_ps += _mm512_reduce_mul_ps(one_ps);
+ sum_pd += _mm512_reduce_mul_pd(one_pd);
+ sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+ // min
+ sum_ps += _mm512_reduce_min_ps(one_ps);
+ sum_pd += _mm512_reduce_min_pd(one_pd);
+ sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+ // max
+ sum_ps += _mm512_reduce_max_ps(one_ps);
+ sum_pd += _mm512_reduce_max_pd(one_pd);
+ sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+ // and
+ sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+ // or
+ sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+ return (int)sum_ps + (int)sum_pd + sum_int;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c
new file mode 100644
index 00000000..a70b2a9f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector float fv4sf_t;
+typedef __vector unsigned char vec_t;
+
+int main(void)
+{
+ __vector_quad acc0;
+ float a[4] = {0,1,2,3};
+ float b[4] = {0,1,2,3};
+ vec_t *va = (vec_t *) a;
+ vec_t *vb = (vec_t *) b;
+ __builtin_mma_xvf32ger(&acc0, va[0], vb[0]);
+ fv4sf_t result[4];
+ __builtin_mma_disassemble_acc((void *)result, &acc0);
+ fv4sf_t c0 = result[0];
+ return (int)((float*)&c0)[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c
new file mode 100644
index 00000000..b73a6f43
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c
@@ -0,0 +1,36 @@
+/**
+ * Testing ASM VSX register number fixer '%x<n>'
+ *
+ * old versions of CLANG don't support %x<n> in the inline asm template,
+ * which fixes the register number when using any of the register constraints wa, wd, wf.
+ *
+ * xref:
+ * - https://bugs.llvm.org/show_bug.cgi?id=31837
+ * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+ */
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+ #define vsx_ld vec_vsx_ld
+ #define vsx_st vec_vsx_st
+#else
+ #define vsx_ld vec_xl
+ #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+ float z4[] = {0, 0, 0, 0};
+ signed int zout[] = {0, 0, 0, 0};
+
+ __vector float vz4 = vsx_ld(0, z4);
+ __vector signed int asm_ret = vsx_ld(0, zout);
+
+ __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
+
+ vsx_st(asm_ret, 0, zout);
+ return zout[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c
new file mode 100644
index 00000000..4cd09d42
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c
@@ -0,0 +1 @@
+int test_flags;
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py
new file mode 100644
index 00000000..3ba501de
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py
@@ -0,0 +1,41 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands.
+
+"""
+
+__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
+
+distutils_all = [ #'build_py',
+ 'clean',
+ 'install_clib',
+ 'install_scripts',
+ 'bdist',
+ 'bdist_dumb',
+ 'bdist_wininst',
+ ]
+
+__import__('distutils.command', globals(), locals(), distutils_all)
+
+__all__ = ['build',
+ 'config_compiler',
+ 'config',
+ 'build_src',
+ 'build_py',
+ 'build_ext',
+ 'build_clib',
+ 'build_scripts',
+ 'install',
+ 'install_data',
+ 'install_headers',
+ 'install_lib',
+ 'bdist_rpm',
+ 'sdist',
+ ] + distutils_all
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py
new file mode 100644
index 00000000..b72d0cab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py
@@ -0,0 +1,148 @@
+"""This module implements additional tests ala autoconf which can be useful.
+
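+A rough usage sketch; `config_cmd` is assumed to be the 'config' command
+instance available during a numpy.distutils build::
+
+ inline_kw = check_inline(config_cmd)  # '' when no inline keyword works
+ restrict_kw = check_restrict(config_cmd)
+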
+"""
+import textwrap
+
+# We put them here since they could be easily reused outside numpy.distutils
+
+def check_inline(cmd):
+ """Return the inline identifier (may be empty)."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #ifndef __cplusplus
+ static %(inline)s int static_func (void)
+ {
+ return 0;
+ }
+ %(inline)s int nostatic_func (void)
+ {
+ return 0;
+ }
+ #endif""")
+
+ for kw in ['inline', '__inline__', '__inline']:
+ st = cmd.try_compile(body % {'inline': kw}, None, None)
+ if st:
+ return kw
+
+ return ''
+
+
+def check_restrict(cmd):
+ """Return the restrict identifier (may be empty)."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ static int static_func (char * %(restrict)s a)
+ {
+ return 0;
+ }
+ """)
+
+ for kw in ['restrict', '__restrict__', '__restrict']:
+ st = cmd.try_compile(body % {'restrict': kw}, None, None)
+ if st:
+ return kw
+
+ return ''
+
+
+def check_compiler_gcc(cmd):
+ """Check if the compiler is GCC."""
+
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ int
+ main()
+ {
+ #if (! defined __GNUC__)
+ #error gcc required
+ #endif
+ return 0;
+ }
+ """)
+ return cmd.try_compile(body, None, None)
+
+
+def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
+ """
+ Check that the gcc version is at least the specified version."""
+
+ cmd._check_compiler()
+ version = '.'.join([str(major), str(minor), str(patchlevel)])
+ body = textwrap.dedent("""
+ int
+ main()
+ {
+ #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
+ (__GNUC_MINOR__ < %(minor)d) || \\
+ (__GNUC_PATCHLEVEL__ < %(patchlevel)d)
+ #error gcc >= %(version)s required
+ #endif
+ return 0;
+ }
+ """)
+ kw = {'version': version, 'major': major, 'minor': minor,
+ 'patchlevel': patchlevel}
+
+ return cmd.try_compile(body % kw, None, None)
+
+
+def check_gcc_function_attribute(cmd, attribute, name):
+ """Return True if the given function attribute is supported."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #pragma GCC diagnostic error "-Wattributes"
+ #pragma clang diagnostic error "-Wattributes"
+
+ int %s %s(void* unused)
+ {
+ return 0;
+ }
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (attribute, name)
+ return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
+ include):
+ """Return True if the given function attribute is supported with
+ intrinsics."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #include<%s>
+ int %s %s(void)
+ {
+ %s;
+ return 0;
+ }
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (include, attribute, name, code)
+ return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_variable_attribute(cmd, attribute):
+ """Return True if the given variable attribute is supported."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #pragma GCC diagnostic error "-Wattributes"
+ #pragma clang diagnostic error "-Wattributes"
+
+ int %s foo;
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (attribute, )
+ return cmd.try_compile(body, None, None) != 0
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py
new file mode 100644
index 00000000..682e7a8e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py
@@ -0,0 +1,22 @@
+import os
+import sys
+if 'setuptools' in sys.modules:
+ from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+else:
+ from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+
+class bdist_rpm(old_bdist_rpm):
+
+ def _make_spec_file(self):
+ spec_file = old_bdist_rpm._make_spec_file(self)
+
+ # Replace hardcoded setup.py script name
+ # with the real setup script name.
+ setup_py = os.path.basename(sys.argv[0])
+ if setup_py == 'setup.py':
+ return spec_file
+ new_spec_file = []
+ for line in spec_file:
+ line = line.replace('setup.py', setup_py)
+ new_spec_file.append(line)
+ return new_spec_file
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build.py
new file mode 100644
index 00000000..80830d55
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build.py
@@ -0,0 +1,62 @@
+import os
+import sys
+from distutils.command.build import build as old_build
+from distutils.util import get_platform
+from numpy.distutils.command.config_compiler import show_fortran_compilers
+
+class build(old_build):
+
+ sub_commands = [('config_cc', lambda *args: True),
+ ('config_fc', lambda *args: True),
+ ('build_src', old_build.has_ext_modules),
+ ] + old_build.sub_commands
+
+ user_options = old_build.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
+ ('cpu-baseline=', None,
+ "specify a list of enabled baseline CPU optimizations"),
+ ('cpu-dispatch=', None,
+ "specify a list of dispatched CPU optimizations"),
+ ('disable-optimization', None,
+ "disable CPU optimized code (dispatch, simd, fast...)"),
+ ('simd-test=', None,
+ "specify a list of CPU optimizations to be tested against the NumPy SIMD interface"),
+ ]
+
+ help_options = old_build.help_options + [
+ ('help-fcompiler', None, "list available Fortran compilers",
+ show_fortran_compilers),
+ ]
+
+ def initialize_options(self):
+ old_build.initialize_options(self)
+ self.fcompiler = None
+ self.warn_error = False
+ self.cpu_baseline = "min"
+ self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default
+ self.disable_optimization = False
+ """
+ the '_simd' module is a very large. Adding more dispatched features
+ will increase binary size and compile time. By default we minimize
+ the targeted features to those most commonly used by the NumPy SIMD interface(NPYV),
+ NOTE: any specified features will be ignored if they're:
+ - part of the baseline(--cpu-baseline)
+ - not part of dispatch-able features(--cpu-dispatch)
+ - not supported by compiler or platform
+ """
+ self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \
+ "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2"
+
+ def finalize_options(self):
+ build_scripts = self.build_scripts
+ old_build.finalize_options(self)
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
+ if build_scripts is None:
+ self.build_scripts = os.path.join(self.build_base,
+ 'scripts' + plat_specifier)
+
+ def run(self):
+ old_build.run(self)
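
The extra options declared above can be given on the command line or programmatically. A hypothetical invocation through numpy.distutils.core.setup, using the standard distutils script_args keyword to mimic the command line:

    from numpy.distutils.core import setup

    setup(
        name="demo",
        script_args=[
            "build",
            "--fcompiler=gnu95",
            "--cpu-baseline=min",
            "--cpu-dispatch=max -xop -fma4",
            "--warn-error",
        ],
    )
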
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py
new file mode 100644
index 00000000..45201f98
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py
@@ -0,0 +1,468 @@
+""" Modified version of build_clib that handles fortran source files.
+"""
+import os
+from glob import glob
+import shutil
+from distutils.command.build_clib import build_clib as old_build_clib
+from distutils.errors import DistutilsSetupError, DistutilsError, \
+ DistutilsFileError
+
+from numpy.distutils import log
+from distutils.dep_util import newer_group
+from numpy.distutils.misc_util import (
+ filter_sources, get_lib_source_files, get_numpy_include_dirs,
+ has_cxx_sources, has_f_sources, is_sequence
+)
+from numpy.distutils.ccompiler_opt import new_ccompiler_opt
+
+# Fix Python distutils bug sf #1718574:
+_l = old_build_clib.user_options
+for _i in range(len(_l)):
+ if _l[_i][0] in ['build-clib', 'build-temp']:
+ _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
+#
+
+
+class build_clib(old_build_clib):
+
+ description = "build C/C++/F libraries used by Python extensions"
+
+ user_options = old_build_clib.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ('inplace', 'i', 'Build in-place'),
+ ('parallel=', 'j',
+ "number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
+ ('cpu-baseline=', None,
+ "specify a list of enabled baseline CPU optimizations"),
+ ('cpu-dispatch=', None,
+ "specify a list of dispatched CPU optimizations"),
+ ('disable-optimization', None,
+ "disable CPU optimized code(dispatch,simd,fast...)"),
+ ]
+
+ boolean_options = old_build_clib.boolean_options + \
+ ['inplace', 'warn-error', 'disable-optimization']
+
+ def initialize_options(self):
+ old_build_clib.initialize_options(self)
+ self.fcompiler = None
+ self.inplace = 0
+ self.parallel = None
+ self.warn_error = None
+ self.cpu_baseline = None
+ self.cpu_dispatch = None
+ self.disable_optimization = None
+
+
+ def finalize_options(self):
+ if self.parallel:
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError as e:
+ raise ValueError("--parallel/-j argument must be an integer") from e
+ old_build_clib.finalize_options(self)
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ ('cpu_baseline', 'cpu_baseline'),
+ ('cpu_dispatch', 'cpu_dispatch'),
+ ('disable_optimization', 'disable_optimization')
+ )
+
+ def have_f_sources(self):
+ for (lib_name, build_info) in self.libraries:
+ if has_f_sources(build_info.get('sources', [])):
+ return True
+ return False
+
+ def have_cxx_sources(self):
+ for (lib_name, build_info) in self.libraries:
+ if has_cxx_sources(build_info.get('sources', [])):
+ return True
+ return False
+
+ def run(self):
+ if not self.libraries:
+ return
+
+ # Collect the languages used by the libraries.
+ languages = []
+
+ # Make sure that extension sources are complete.
+ self.run_command('build_src')
+
+ for (lib_name, build_info) in self.libraries:
+ l = build_info.get('language', None)
+ if l and l not in languages:
+ languages.append(l)
+
+ from distutils.ccompiler import new_compiler
+ self.compiler = new_compiler(compiler=self.compiler,
+ dry_run=self.dry_run,
+ force=self.force)
+ self.compiler.customize(self.distribution,
+ need_cxx=self.have_cxx_sources())
+
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
+ libraries = self.libraries
+ self.libraries = None
+ self.compiler.customize_cmd(self)
+ self.libraries = libraries
+
+ self.compiler.show_customization()
+
+ if not self.disable_optimization:
+ dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
+ dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
+ opt_cache_path = os.path.abspath(
+ os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py')
+ )
+ if hasattr(self, "compiler_opt"):
+ # By default `CCompilerOpt` updates the cache at process exit,
+ # which may lead to duplicate builds
+ # (see build_extension()/force_rebuild) if run() is called
+ # multiple times within the same OS process/thread without
+ # giving previous instances of `CCompilerOpt` a chance
+ # to update the cache.
+ self.compiler_opt.cache_flush()
+
+ self.compiler_opt = new_ccompiler_opt(
+ compiler=self.compiler, dispatch_hpath=dispatch_hpath,
+ cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
+ cache_path=opt_cache_path
+ )
+ def report(copt):
+ log.info("\n########### CLIB COMPILER OPTIMIZATION ###########")
+ log.info(copt.report(full=True))
+
+ import atexit
+ atexit.register(report, self.compiler_opt)
+
+ if self.have_f_sources():
+ from numpy.distutils.fcompiler import new_fcompiler
+ self._f_compiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90='f90' in languages,
+ c_compiler=self.compiler)
+ if self._f_compiler is not None:
+ self._f_compiler.customize(self.distribution)
+
+ libraries = self.libraries
+ self.libraries = None
+ self._f_compiler.customize_cmd(self)
+ self.libraries = libraries
+
+ self._f_compiler.show_customization()
+ else:
+ self._f_compiler = None
+
+ self.build_libraries(self.libraries)
+
+ if self.inplace:
+ for l in self.distribution.installed_libraries:
+ libname = self.compiler.library_filename(l.name)
+ source = os.path.join(self.build_clib, libname)
+ target = os.path.join(l.target_dir, libname)
+ self.mkpath(l.target_dir)
+ shutil.copy(source, target)
+
+ def get_source_files(self):
+ self.check_library_list(self.libraries)
+ filenames = []
+ for lib in self.libraries:
+ filenames.extend(get_lib_source_files(lib))
+ return filenames
+
+ def build_libraries(self, libraries):
+ for (lib_name, build_info) in libraries:
+ self.build_a_library(build_info, lib_name, libraries)
+
+ def assemble_flags(self, in_flags):
+ """ Assemble flags from flag list
+
+ Parameters
+ ----------
+ in_flags : None or sequence
+ None corresponds to an empty list. Sequence elements can be strings
+ or callables that return lists of strings. A callable takes `self`
+ as its single parameter.
+
+ Returns
+ -------
+ out_flags : list
+ """
+ if in_flags is None:
+ return []
+ out_flags = []
+ for in_flag in in_flags:
+ if callable(in_flag):
+ out_flags += in_flag(self)
+ else:
+ out_flags.append(in_flag)
+ return out_flags
+
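
As documented in assemble_flags() above, flag entries may be plain strings or callables that receive the command instance, letting a setup script compute flags at build time. A sketch with hypothetical names:

    def _debug_flags(cmd):
        # hypothetical: disable optimization only for debug builds
        return ["-O0"] if cmd.debug else []

    build_info = {
        "sources": ["foo.c"],
        "extra_compiler_args": ["-fno-strict-aliasing", _debug_flags],
    }
    # assemble_flags(build_info["extra_compiler_args"]) yields
    # ["-fno-strict-aliasing"] plus whatever _debug_flags(cmd) returns.
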
+ def build_a_library(self, build_info, lib_name, libraries):
+ # default compilers
+ compiler = self.compiler
+ fcompiler = self._f_compiler
+
+ sources = build_info.get('sources')
+ if sources is None or not is_sequence(sources):
+ raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
+ "'sources' must be present and must be " +
+ "a list of source filenames") % lib_name)
+ sources = list(sources)
+
+ c_sources, cxx_sources, f_sources, fmodule_sources \
+ = filter_sources(sources)
+ requiref90 = not not fmodule_sources or \
+ build_info.get('language', 'c') == 'f90'
+
+ # save source type information so that build_ext can use it.
+ source_languages = []
+ if c_sources:
+ source_languages.append('c')
+ if cxx_sources:
+ source_languages.append('c++')
+ if requiref90:
+ source_languages.append('f90')
+ elif f_sources:
+ source_languages.append('f77')
+ build_info['source_languages'] = source_languages
+
+ lib_file = compiler.library_filename(lib_name,
+ output_dir=self.build_clib)
+ depends = sources + build_info.get('depends', [])
+
+ force_rebuild = self.force
+ if not self.disable_optimization and not self.compiler_opt.is_cached():
+ log.debug("Detected changes on compiler optimizations")
+ force_rebuild = True
+ if not (force_rebuild or newer_group(depends, lib_file, 'newer')):
+ log.debug("skipping '%s' library (up-to-date)", lib_name)
+ return
+ else:
+ log.info("building '%s' library", lib_name)
+
+ config_fc = build_info.get('config_fc', {})
+ if fcompiler is not None and config_fc:
+ log.info('using additional config_fc from setup script '
+ 'for fortran compiler: %s'
+ % (config_fc,))
+ from numpy.distutils.fcompiler import new_fcompiler
+ fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90=requiref90,
+ c_compiler=self.compiler)
+ if fcompiler is not None:
+ dist = self.distribution
+ base_config_fc = dist.get_option_dict('config_fc').copy()
+ base_config_fc.update(config_fc)
+ fcompiler.customize(base_config_fc)
+
+ # check availability of Fortran compilers
+ if (f_sources or fmodule_sources) and fcompiler is None:
+ raise DistutilsError("library %s has Fortran sources"
+ " but no Fortran compiler found" % (lib_name))
+
+ if fcompiler is not None:
+ fcompiler.extra_f77_compile_args = build_info.get(
+ 'extra_f77_compile_args') or []
+ fcompiler.extra_f90_compile_args = build_info.get(
+ 'extra_f90_compile_args') or []
+
+ macros = build_info.get('macros')
+ if macros is None:
+ macros = []
+ include_dirs = build_info.get('include_dirs')
+ if include_dirs is None:
+ include_dirs = []
+ # Flags can be strings, or callables that return a list of strings.
+ extra_postargs = self.assemble_flags(
+ build_info.get('extra_compiler_args'))
+ extra_cflags = self.assemble_flags(
+ build_info.get('extra_cflags'))
+ extra_cxxflags = self.assemble_flags(
+ build_info.get('extra_cxxflags'))
+
+ include_dirs.extend(get_numpy_include_dirs())
+ # where compiled F90 module files are:
+ module_dirs = build_info.get('module_dirs') or []
+ module_build_dir = os.path.dirname(lib_file)
+ if requiref90:
+ self.mkpath(module_build_dir)
+
+ if compiler.compiler_type == 'msvc':
+ # this hack works around the msvc compiler attributes
+ # problem, msvc uses its own convention :(
+ c_sources += cxx_sources
+ cxx_sources = []
+
+ # Filter out C dispatch-table sources when optimization is enabled;
+ # otherwise they are treated as normal sources.
+ copt_c_sources = []
+ copt_cxx_sources = []
+ copt_baseline_flags = []
+ copt_macros = []
+ if not self.disable_optimization:
+ bsrc_dir = self.get_finalized_command("build_src").build_src
+ dispatch_hpath = os.path.join("numpy", "distutils", "include")
+ dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+ include_dirs.append(dispatch_hpath)
+
+ copt_build_src = None if self.inplace else bsrc_dir
+ for _srcs, _dst, _ext in (
+ ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+ ((c_sources, cxx_sources), copt_cxx_sources,
+ ('.dispatch.cpp', '.dispatch.cxx'))
+ ):
+ for _src in _srcs:
+ _dst += [
+ _src.pop(_src.index(s))
+ for s in _src[:] if s.endswith(_ext)
+ ]
+ copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+ else:
+ copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+ objects = []
+ if copt_cxx_sources:
+ log.info("compiling C++ dispatch-able sources")
+ # use the C++ sources and compiler here; cxx_compiler must be
+ # obtained before its first use
+ cxx_compiler = compiler.cxx_compiler()
+ objects += self.compiler_opt.try_dispatch(
+ copt_cxx_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + extra_cxxflags,
+ ccompiler=cxx_compiler
+ )
+
+ if copt_c_sources:
+ log.info("compiling C dispatch-able sources")
+ objects += self.compiler_opt.try_dispatch(
+ copt_c_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + extra_cflags)
+
+ if c_sources:
+ log.info("compiling C sources")
+ objects += compiler.compile(
+ c_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs +
+ copt_baseline_flags +
+ extra_cflags))
+
+ if cxx_sources:
+ log.info("compiling C++ sources")
+ cxx_compiler = compiler.cxx_compiler()
+ cxx_objects = cxx_compiler.compile(
+ cxx_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs +
+ copt_baseline_flags +
+ extra_cxxflags))
+ objects.extend(cxx_objects)
+
+ if f_sources or fmodule_sources:
+ extra_postargs = []
+ f_objects = []
+
+ if requiref90:
+ if fcompiler.module_dir_switch is None:
+ existing_modules = glob('*.mod')
+ extra_postargs += fcompiler.module_options(
+ module_dirs, module_build_dir)
+
+ if fmodule_sources:
+ log.info("compiling Fortran 90 module sources")
+ f_objects += fcompiler.compile(fmodule_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+
+ if requiref90 and self._f_compiler.module_dir_switch is None:
+ # move new compiled F90 module files to module_build_dir
+ for f in glob('*.mod'):
+ if f in existing_modules:
+ continue
+ t = os.path.join(module_build_dir, f)
+ if os.path.abspath(f) == os.path.abspath(t):
+ continue
+ if os.path.isfile(t):
+ os.remove(t)
+ try:
+ self.move_file(f, module_build_dir)
+ except DistutilsFileError:
+ log.warn('failed to move %r to %r'
+ % (f, module_build_dir))
+
+ if f_sources:
+ log.info("compiling Fortran sources")
+ f_objects += fcompiler.compile(f_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+ else:
+ f_objects = []
+
+ if f_objects and not fcompiler.can_ccompiler_link(compiler):
+ # Default linker cannot link Fortran object files, and results
+ # need to be wrapped later. Instead of creating a real static
+ # library, just keep track of the object files.
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.fobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
+
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.cobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in objects))
+
+ # create empty "library" file for dependency tracking
+ lib_fname = os.path.join(self.build_clib,
+ lib_name + compiler.static_lib_extension)
+ with open(lib_fname, 'wb') as f:
+ pass
+ else:
+ # assume that default linker is suitable for
+ # linking Fortran object files
+ objects.extend(f_objects)
+ compiler.create_static_lib(objects, lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug)
+
+ # fix library dependencies
+ clib_libraries = build_info.get('libraries', [])
+ for lname, binfo in libraries:
+ if lname in clib_libraries:
+ clib_libraries.extend(binfo.get('libraries', []))
+ if clib_libraries:
+ build_info['libraries'] = clib_libraries
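
When the default linker cannot link the Fortran objects, build_a_library() above records them in '<name>.fobjects' and '<name>.cobjects' list files next to an empty archive kept only for dependency tracking. A sketch of the consumer side, mirroring _process_unlinkable_fobjects() in build_ext below (paths are hypothetical):

    import os

    def expand_fake_lib(libdir, lib):
        """Return (fortran_objects, c_objects) recorded for a fake library."""
        fobjects, cobjects = [], []
        fake = os.path.join(libdir, lib + ".fobjects")
        if os.path.isfile(fake):
            with open(fake) as f:
                fobjects = f.read().splitlines()
            with open(os.path.join(libdir, lib + ".cobjects")) as f:
                cobjects = f.read().splitlines()
        return fobjects, cobjects
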
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py
new file mode 100644
index 00000000..6dc6b426
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py
@@ -0,0 +1,740 @@
+""" Modified version of build_ext that handles fortran source files.
+
+"""
+import os
+import subprocess
+from glob import glob
+
+from distutils.dep_util import newer_group
+from distutils.command.build_ext import build_ext as old_build_ext
+from distutils.errors import DistutilsFileError, DistutilsSetupError,\
+ DistutilsError
+from distutils.file_util import copy_file
+
+from numpy.distutils import log
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.system_info import combine_paths
+from numpy.distutils.misc_util import (
+ filter_sources, get_ext_source_files, get_numpy_include_dirs,
+ has_cxx_sources, has_f_sources, is_sequence
+)
+from numpy.distutils.command.config_compiler import show_fortran_compilers
+from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt
+
+class build_ext (old_build_ext):
+
+ description = "build C/C++/F extensions (compile/link to build directory)"
+
+ user_options = old_build_ext.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ('parallel=', 'j',
+ "number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
+ ('cpu-baseline=', None,
+ "specify a list of enabled baseline CPU optimizations"),
+ ('cpu-dispatch=', None,
+ "specify a list of dispatched CPU optimizations"),
+ ('disable-optimization', None,
+ "disable CPU optimized code (dispatch, simd, fast...)"),
+ ('simd-test=', None,
+ "specify a list of CPU optimizations to be tested against the NumPy SIMD interface"),
+ ]
+
+ help_options = old_build_ext.help_options + [
+ ('help-fcompiler', None, "list available Fortran compilers",
+ show_fortran_compilers),
+ ]
+
+ boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization']
+
+ def initialize_options(self):
+ old_build_ext.initialize_options(self)
+ self.fcompiler = None
+ self.parallel = None
+ self.warn_error = None
+ self.cpu_baseline = None
+ self.cpu_dispatch = None
+ self.disable_optimization = None
+ self.simd_test = None
+
+ def finalize_options(self):
+ if self.parallel:
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError as e:
+ raise ValueError("--parallel/-j argument must be an integer") from e
+
+ # Ensure that self.include_dirs and self.distribution.include_dirs
+ # refer to the same list object. finalize_options will modify
+ # self.include_dirs, but self.distribution.include_dirs is used
+ # during the actual build.
+ # self.include_dirs is None unless paths are specified with
+ # --include-dirs.
+ # The include paths will be passed to the compiler in the order:
+ # numpy paths, --include-dirs paths, Python include path.
+ if isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+ incl_dirs = self.include_dirs or []
+ if self.distribution.include_dirs is None:
+ self.distribution.include_dirs = []
+ self.include_dirs = self.distribution.include_dirs
+ self.include_dirs.extend(incl_dirs)
+
+ old_build_ext.finalize_options(self)
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ ('cpu_baseline', 'cpu_baseline'),
+ ('cpu_dispatch', 'cpu_dispatch'),
+ ('disable_optimization', 'disable_optimization'),
+ ('simd_test', 'simd_test')
+ )
+ CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
+
+ def run(self):
+ if not self.extensions:
+ return
+
+ # Make sure that extension sources are complete.
+ self.run_command('build_src')
+
+ if self.distribution.has_c_libraries():
+ if self.inplace:
+ if self.distribution.have_run.get('build_clib'):
+ log.warn('build_clib already run, it is too late to '
+ 'ensure in-place build of build_clib')
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
+ else:
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
+ build_clib.inplace = 1
+ build_clib.ensure_finalized()
+ build_clib.run()
+ self.distribution.have_run['build_clib'] = 1
+
+ else:
+ self.run_command('build_clib')
+ build_clib = self.get_finalized_command('build_clib')
+ self.library_dirs.append(build_clib.build_clib)
+ else:
+ build_clib = None
+
+ # C libraries are not automatically added to the list of
+ # extension libraries, to prevent
+ # bogus linking commands. Extensions must
+ # explicitly specify the C libraries that they use.
+
+ from distutils.ccompiler import new_compiler
+ from numpy.distutils.fcompiler import new_fcompiler
+
+ compiler_type = self.compiler
+ # Initialize C compiler:
+ self.compiler = new_compiler(compiler=compiler_type,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ self.compiler.customize(self.distribution)
+ self.compiler.customize_cmd(self)
+
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
+ self.compiler.show_customization()
+
+ if not self.disable_optimization:
+ dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
+ dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
+ opt_cache_path = os.path.abspath(
+ os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')
+ )
+ if hasattr(self, "compiler_opt"):
+ # By default `CCompilerOpt` updates the cache at process exit,
+ # which may lead to duplicate builds
+ # (see build_extension()/force_rebuild) if run() is called
+ # multiple times within the same OS process/thread without
+ # giving previous instances of `CCompilerOpt` a chance
+ # to update the cache.
+ self.compiler_opt.cache_flush()
+
+ self.compiler_opt = new_ccompiler_opt(
+ compiler=self.compiler, dispatch_hpath=dispatch_hpath,
+ cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
+ cache_path=opt_cache_path
+ )
+ def report(copt):
+ log.info("\n########### EXT COMPILER OPTIMIZATION ###########")
+ log.info(copt.report(full=True))
+
+ import atexit
+ atexit.register(report, self.compiler_opt)
+
+ # Setup directory for storing generated extra DLL files on Windows
+ self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
+ if not os.path.isdir(self.extra_dll_dir):
+ os.makedirs(self.extra_dll_dir)
+
+ # Create mapping of libraries built by build_clib:
+ clibs = {}
+ if build_clib is not None:
+ for libname, build_info in build_clib.libraries or []:
+ if libname in clibs and clibs[libname] != build_info:
+ log.warn('library %r defined more than once,'
+ ' overwriting build_info\n%s... \nwith\n%s...'
+ % (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
+ clibs[libname] = build_info
+ # .. and distribution libraries:
+ for libname, build_info in self.distribution.libraries or []:
+ if libname in clibs:
+ # build_clib libraries take precedence over distribution ones
+ continue
+ clibs[libname] = build_info
+
+ # Determine if C++/Fortran 77/Fortran 90 compilers are needed.
+ # Update extension libraries, library_dirs, and macros.
+ all_languages = set()
+ for ext in self.extensions:
+ ext_languages = set()
+ c_libs = []
+ c_lib_dirs = []
+ macros = []
+ for libname in ext.libraries:
+ if libname in clibs:
+ binfo = clibs[libname]
+ c_libs += binfo.get('libraries', [])
+ c_lib_dirs += binfo.get('library_dirs', [])
+ for m in binfo.get('macros', []):
+ if m not in macros:
+ macros.append(m)
+
+ for l in clibs.get(libname, {}).get('source_languages', []):
+ ext_languages.add(l)
+ if c_libs:
+ new_c_libs = ext.libraries + c_libs
+ log.info('updating extension %r libraries from %r to %r'
+ % (ext.name, ext.libraries, new_c_libs))
+ ext.libraries = new_c_libs
+ ext.library_dirs = ext.library_dirs + c_lib_dirs
+ if macros:
+ log.info('extending extension %r defined_macros with %r'
+ % (ext.name, macros))
+ ext.define_macros = ext.define_macros + macros
+
+ # determine extension languages
+ if has_f_sources(ext.sources):
+ ext_languages.add('f77')
+ if has_cxx_sources(ext.sources):
+ ext_languages.add('c++')
+ l = ext.language or self.compiler.detect_language(ext.sources)
+ if l:
+ ext_languages.add(l)
+
+ # reset language attribute for choosing proper linker
+ #
+ # When we build extensions with multiple languages, we have to
+ # choose a linker. The rules here are:
+ # 1. if there is Fortran code, always prefer the Fortran linker,
+ # 2. otherwise prefer C++ over C,
+ # 3. users can force a particular linker by passing
+ # `language='c'` (or 'c++', 'f90', 'f77')
+ # in their config.add_extension() calls.
+ if 'c++' in ext_languages:
+ ext_language = 'c++'
+ else:
+ ext_language = 'c' # default
+
+ has_fortran = False
+ if 'f90' in ext_languages:
+ ext_language = 'f90'
+ has_fortran = True
+ elif 'f77' in ext_languages:
+ ext_language = 'f77'
+ has_fortran = True
+
+ if not ext.language or has_fortran:
+ if l and l != ext_language and ext.language:
+ log.warn('resetting extension %r language from %r to %r.' %
+ (ext.name, l, ext_language))
+
+ ext.language = ext_language
+
+ # global language
+ all_languages.update(ext_languages)
+
+ need_f90_compiler = 'f90' in all_languages
+ need_f77_compiler = 'f77' in all_languages
+ need_cxx_compiler = 'c++' in all_languages
+
+ # Initialize C++ compiler:
+ if need_cxx_compiler:
+ self._cxx_compiler = new_compiler(compiler=compiler_type,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ compiler = self._cxx_compiler
+ compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
+ compiler.customize_cmd(self)
+ compiler.show_customization()
+ self._cxx_compiler = compiler.cxx_compiler()
+ else:
+ self._cxx_compiler = None
+
+ # Initialize Fortran 77 compiler:
+ if need_f77_compiler:
+ ctype = self.fcompiler
+ self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90=False,
+ c_compiler=self.compiler)
+ fcompiler = self._f77_compiler
+ if fcompiler:
+ ctype = fcompiler.compiler_type
+ fcompiler.customize(self.distribution)
+ if fcompiler and fcompiler.get_version():
+ fcompiler.customize_cmd(self)
+ fcompiler.show_customization()
+ else:
+ self.warn('f77_compiler=%s is not available.' %
+ (ctype))
+ self._f77_compiler = None
+ else:
+ self._f77_compiler = None
+
+ # Initialize Fortran 90 compiler:
+ if need_f90_compiler:
+ ctype = self.fcompiler
+ self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90=True,
+ c_compiler=self.compiler)
+ fcompiler = self._f90_compiler
+ if fcompiler:
+ ctype = fcompiler.compiler_type
+ fcompiler.customize(self.distribution)
+ if fcompiler and fcompiler.get_version():
+ fcompiler.customize_cmd(self)
+ fcompiler.show_customization()
+ else:
+ self.warn('f90_compiler=%s is not available.' %
+ (ctype))
+ self._f90_compiler = None
+ else:
+ self._f90_compiler = None
+
+ # Build extensions
+ self.build_extensions()
+
+ # Copy over any extra DLL files
+ # FIXME: In the case where there is more than one package,
+ # we blindly assume that every package needs all of the libraries,
+ # resulting in a larger wheel than is required. This should be fixed,
+ # but it's so rare that I won't bother to handle it.
+ pkg_roots = {
+ self.get_ext_fullname(ext.name).split('.')[0]
+ for ext in self.extensions
+ }
+ for pkg_root in pkg_roots:
+ shared_lib_dir = os.path.join(pkg_root, '.libs')
+ if not self.inplace:
+ shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
+ for fn in os.listdir(self.extra_dll_dir):
+ if not os.path.isdir(shared_lib_dir):
+ os.makedirs(shared_lib_dir)
+ if not fn.lower().endswith('.dll'):
+ continue
+ runtime_lib = os.path.join(self.extra_dll_dir, fn)
+ copy_file(runtime_lib, shared_lib_dir)
+
+ def swig_sources(self, sources, extensions=None):
+ # Do nothing. SWIG sources have already been handled by the build_src command.
+ return sources
+
+ def build_extension(self, ext):
+ sources = ext.sources
+ if sources is None or not is_sequence(sources):
+ raise DistutilsSetupError(
+ ("in 'ext_modules' option (extension '%s'), " +
+ "'sources' must be present and must be " +
+ "a list of source filenames") % ext.name)
+ sources = list(sources)
+
+ if not sources:
+ return
+
+ fullname = self.get_ext_fullname(ext.name)
+ if self.inplace:
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[0:-1])
+ base = modpath[-1]
+ build_py = self.get_finalized_command('build_py')
+ package_dir = build_py.get_package_dir(package)
+ ext_filename = os.path.join(package_dir,
+ self.get_ext_filename(base))
+ else:
+ ext_filename = os.path.join(self.build_lib,
+ self.get_ext_filename(fullname))
+ depends = sources + ext.depends
+
+ force_rebuild = self.force
+ if not self.disable_optimization and not self.compiler_opt.is_cached():
+ log.debug("Detected changes on compiler optimizations")
+ force_rebuild = True
+ if not (force_rebuild or newer_group(depends, ext_filename, 'newer')):
+ log.debug("skipping '%s' extension (up-to-date)", ext.name)
+ return
+ else:
+ log.info("building '%s' extension", ext.name)
+
+ extra_args = ext.extra_compile_args or []
+ extra_cflags = getattr(ext, 'extra_c_compile_args', None) or []
+ extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or []
+
+ macros = ext.define_macros[:]
+ for undef in ext.undef_macros:
+ macros.append((undef,))
+
+ c_sources, cxx_sources, f_sources, fmodule_sources = \
+ filter_sources(ext.sources)
+
+ if self.compiler.compiler_type == 'msvc':
+ if cxx_sources:
+ # Needed to compile kiva.agg._agg extension.
+ extra_args.append('/Zm1000')
+ # this hack works around the msvc compiler attributes
+ # problem, msvc uses its own convention :(
+ c_sources += cxx_sources
+ cxx_sources = []
+
+ # Set Fortran/C++ compilers for compilation and linking.
+ if ext.language == 'f90':
+ fcompiler = self._f90_compiler
+ elif ext.language == 'f77':
+ fcompiler = self._f77_compiler
+ else: # in case ext.language is c++, for instance
+ fcompiler = self._f90_compiler or self._f77_compiler
+ if fcompiler is not None:
+ fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
+ ext, 'extra_f77_compile_args') else []
+ fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
+ ext, 'extra_f90_compile_args') else []
+ cxx_compiler = self._cxx_compiler
+
+ # check for the availability of required compilers
+ if cxx_sources and cxx_compiler is None:
+ raise DistutilsError("extension %r has C++ sources"
+ "but no C++ compiler found" % (ext.name))
+ if (f_sources or fmodule_sources) and fcompiler is None:
+ raise DistutilsError("extension %r has Fortran sources "
+ "but no Fortran compiler found" % (ext.name))
+ if ext.language in ['f77', 'f90'] and fcompiler is None:
+ self.warn("extension %r has Fortran libraries "
+ "but no Fortran linker found, using default linker" % (ext.name))
+ if ext.language == 'c++' and cxx_compiler is None:
+ self.warn("extension %r has C++ libraries "
+ "but no C++ linker found, using default linker" % (ext.name))
+
+ kws = {'depends': ext.depends}
+ output_dir = self.build_temp
+
+ include_dirs = ext.include_dirs + get_numpy_include_dirs()
+
+ # Filter out C dispatch-table sources when optimization is enabled;
+ # otherwise they are treated as normal sources.
+ copt_c_sources = []
+ copt_cxx_sources = []
+ copt_baseline_flags = []
+ copt_macros = []
+ if not self.disable_optimization:
+ bsrc_dir = self.get_finalized_command("build_src").build_src
+ dispatch_hpath = os.path.join("numpy", "distutils", "include")
+ dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+ include_dirs.append(dispatch_hpath)
+
+ copt_build_src = None if self.inplace else bsrc_dir
+ for _srcs, _dst, _ext in (
+ ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+ ((c_sources, cxx_sources), copt_cxx_sources,
+ ('.dispatch.cpp', '.dispatch.cxx'))
+ ):
+ for _src in _srcs:
+ _dst += [
+ _src.pop(_src.index(s))
+ for s in _src[:] if s.endswith(_ext)
+ ]
+ copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+ else:
+ copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+ c_objects = []
+ if copt_cxx_sources:
+ log.info("compiling C++ dispatch-able sources")
+ c_objects += self.compiler_opt.try_dispatch(
+ copt_cxx_sources,
+ output_dir=output_dir,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args + extra_cxxflags,
+ ccompiler=cxx_compiler,
+ **kws
+ )
+ if copt_c_sources:
+ log.info("compiling C dispatch-able sources")
+ c_objects += self.compiler_opt.try_dispatch(
+ copt_c_sources,
+ output_dir=output_dir,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args + extra_cflags,
+ **kws)
+ if c_sources:
+ log.info("compiling C sources")
+ c_objects += self.compiler.compile(
+ c_sources,
+ output_dir=output_dir,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_args + copt_baseline_flags +
+ extra_cflags),
+ **kws)
+ if cxx_sources:
+ log.info("compiling C++ sources")
+ c_objects += cxx_compiler.compile(
+ cxx_sources,
+ output_dir=output_dir,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_args + copt_baseline_flags +
+ extra_cxxflags),
+ **kws)
+
+ extra_postargs = []
+ f_objects = []
+ if fmodule_sources:
+ log.info("compiling Fortran 90 module sources")
+ module_dirs = ext.module_dirs[:]
+ module_build_dir = os.path.join(
+ self.build_temp, os.path.dirname(
+ self.get_ext_filename(fullname)))
+
+ self.mkpath(module_build_dir)
+ if fcompiler.module_dir_switch is None:
+ existing_modules = glob('*.mod')
+ extra_postargs += fcompiler.module_options(
+ module_dirs, module_build_dir)
+ f_objects += fcompiler.compile(fmodule_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs,
+ depends=ext.depends)
+
+ if fcompiler.module_dir_switch is None:
+ for f in glob('*.mod'):
+ if f in existing_modules:
+ continue
+ t = os.path.join(module_build_dir, f)
+ if os.path.abspath(f) == os.path.abspath(t):
+ continue
+ if os.path.isfile(t):
+ os.remove(t)
+ try:
+ self.move_file(f, module_build_dir)
+ except DistutilsFileError:
+ log.warn('failed to move %r to %r' %
+ (f, module_build_dir))
+ if f_sources:
+ log.info("compiling Fortran sources")
+ f_objects += fcompiler.compile(f_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs,
+ depends=ext.depends)
+
+ if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
+ unlinkable_fobjects = f_objects
+ objects = c_objects
+ else:
+ unlinkable_fobjects = []
+ objects = c_objects + f_objects
+
+ if ext.extra_objects:
+ objects.extend(ext.extra_objects)
+ extra_args = ext.extra_link_args or []
+ libraries = self.get_libraries(ext)[:]
+ library_dirs = ext.library_dirs[:]
+
+ linker = self.compiler.link_shared_object
+ # Always use system linker when using MSVC compiler.
+ if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
+ # expand libraries with fcompiler libraries as we are
+ # not using fcompiler linker
+ self._libs_with_msvc_and_fortran(
+ fcompiler, libraries, library_dirs)
+ if ext.runtime_library_dirs:
+ # gcc adds RPATH to the link. On windows, copy the dll into
+ # self.extra_dll_dir instead.
+ for d in ext.runtime_library_dirs:
+ for f in glob(d + '/*.dll'):
+ copy_file(f, self.extra_dll_dir)
+ ext.runtime_library_dirs = []
+
+ elif ext.language in ['f77', 'f90'] and fcompiler is not None:
+ linker = fcompiler.link_shared_object
+ if ext.language == 'c++' and cxx_compiler is not None:
+ linker = cxx_compiler.link_shared_object
+
+ if fcompiler is not None:
+ objects, libraries = self._process_unlinkable_fobjects(
+ objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects)
+
+ linker(objects, ext_filename,
+ libraries=libraries,
+ library_dirs=library_dirs,
+ runtime_library_dirs=ext.runtime_library_dirs,
+ extra_postargs=extra_args,
+ export_symbols=self.get_export_symbols(ext),
+ debug=self.debug,
+ build_temp=self.build_temp,
+ target_lang=ext.language)
+
+ def _add_dummy_mingwex_sym(self, c_sources):
+ build_src = self.get_finalized_command("build_src").build_src
+ build_clib = self.get_finalized_command("build_clib").build_clib
+ objects = self.compiler.compile([os.path.join(build_src,
+ "gfortran_vs2003_hack.c")],
+ output_dir=self.build_temp)
+ self.compiler.create_static_lib(
+ objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
+
+ def _process_unlinkable_fobjects(self, objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects):
+ libraries = list(libraries)
+ objects = list(objects)
+ unlinkable_fobjects = list(unlinkable_fobjects)
+
+ # Expand possible fake static libraries to objects;
+ # make sure to iterate over a copy of the list as
+ # "fake" libraries will be removed as they are
+ # encountered
+ for lib in libraries[:]:
+ for libdir in library_dirs:
+ fake_lib = os.path.join(libdir, lib + '.fobjects')
+ if os.path.isfile(fake_lib):
+ # Replace fake static library
+ libraries.remove(lib)
+ with open(fake_lib, 'r') as f:
+ unlinkable_fobjects.extend(f.read().splitlines())
+
+ # Expand C objects
+ c_lib = os.path.join(libdir, lib + '.cobjects')
+ with open(c_lib, 'r') as f:
+ objects.extend(f.read().splitlines())
+
+ # Wrap unlinkable objects to a linkable one
+ if unlinkable_fobjects:
+ fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects]
+ wrapped = fcompiler.wrap_unlinkable_objects(
+ fobjects, output_dir=self.build_temp,
+ extra_dll_dir=self.extra_dll_dir)
+ objects.extend(wrapped)
+
+ return objects, libraries
+
+ def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
+ c_library_dirs):
+ if fcompiler is None:
+ return
+
+ for libname in c_libraries:
+ if libname.startswith('msvc'):
+ continue
+ fileexists = False
+ for libdir in c_library_dirs or []:
+ libfile = os.path.join(libdir, '%s.lib' % (libname))
+ if os.path.isfile(libfile):
+ fileexists = True
+ break
+ if fileexists:
+ continue
+ # make g77-compiled static libs available to MSVC
+ fileexists = False
+ for libdir in c_library_dirs:
+ libfile = os.path.join(libdir, 'lib%s.a' % (libname))
+ if os.path.isfile(libfile):
+ # copy libname.a file to name.lib so that MSVC linker
+ # can find it
+ libfile2 = os.path.join(self.build_temp, libname + '.lib')
+ copy_file(libfile, libfile2)
+ if self.build_temp not in c_library_dirs:
+ c_library_dirs.append(self.build_temp)
+ fileexists = True
+ break
+ if fileexists:
+ continue
+ log.warn('could not find library %r in directories %s'
+ % (libname, c_library_dirs))
+
+ # Always use system linker when using MSVC compiler.
+ f_lib_dirs = []
+ for dir in fcompiler.library_dirs:
+ # correct path when compiling in Cygwin but with normal Win
+ # Python
+ if dir.startswith('/usr/lib'):
+ try:
+ dir = subprocess.check_output(['cygpath', '-w', dir])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ dir = filepath_from_subprocess_output(dir)
+ f_lib_dirs.append(dir)
+ c_library_dirs.extend(f_lib_dirs)
+
+ # make g77-compiled static libs available to MSVC
+ for lib in fcompiler.libraries:
+ if not lib.startswith('msvc'):
+ c_libraries.append(lib)
+ p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
+ if p:
+ dst_name = os.path.join(self.build_temp, lib + '.lib')
+ if not os.path.isfile(dst_name):
+ copy_file(p[0], dst_name)
+ if self.build_temp not in c_library_dirs:
+ c_library_dirs.append(self.build_temp)
+
+ def get_source_files(self):
+ self.check_extensions_list(self.extensions)
+ filenames = []
+ for ext in self.extensions:
+ filenames.extend(get_ext_source_files(ext))
+ return filenames
+
+ def get_outputs(self):
+ self.check_extensions_list(self.extensions)
+
+ outputs = []
+ for ext in self.extensions:
+ if not ext.sources:
+ continue
+ fullname = self.get_ext_fullname(ext.name)
+ outputs.append(os.path.join(self.build_lib,
+ self.get_ext_filename(fullname)))
+ return outputs
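
The linker-selection logic in run() above reduces to a small set of rules: Fortran beats C++, C++ beats C, and an explicit Extension language wins unless Fortran sources force the issue. A condensed restatement as a hypothetical helper:

    def pick_linker_language(ext_languages):
        # 1. Fortran code always prefers the Fortran linker,
        # 2. otherwise C++ is preferred over C.
        if "f90" in ext_languages:
            return "f90"
        if "f77" in ext_languages:
            return "f77"
        if "c++" in ext_languages:
            return "c++"
        return "c"

    assert pick_linker_language({"c", "c++", "f77"}) == "f77"
    assert pick_linker_language({"c", "c++"}) == "c++"
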
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py
new file mode 100644
index 00000000..d30dc5bf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py
@@ -0,0 +1,31 @@
+from distutils.command.build_py import build_py as old_build_py
+from numpy.distutils.misc_util import is_string
+
+class build_py(old_build_py):
+
+ def run(self):
+ build_src = self.get_finalized_command('build_src')
+ if build_src.py_modules_dict and self.packages is None:
+ self.packages = list(build_src.py_modules_dict.keys())
+ old_build_py.run(self)
+
+ def find_package_modules(self, package, package_dir):
+ modules = old_build_py.find_package_modules(self, package, package_dir)
+
+ # Find build_src generated *.py files.
+ build_src = self.get_finalized_command('build_src')
+ modules += build_src.py_modules_dict.get(package, [])
+
+ return modules
+
+ def find_modules(self):
+ old_py_modules = self.py_modules[:]
+ new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
+ self.py_modules[:] = new_py_modules
+ modules = old_build_py.find_modules(self)
+ self.py_modules[:] = old_py_modules
+
+ return modules
+
+ # XXX: Fix find_source_files so that items in py_modules may be
+ # 3-tuples whose item[2] is the source file.
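
find_package_modules() above merges on-disk modules with entries that build_src registered in py_modules_dict, keyed by package and stored as (package, module, source) 3-tuples. A hypothetical entry:

    py_modules_dict = {
        "mypkg": [
            ("mypkg", "version",
             "build/src.linux-x86_64-3.9/mypkg/version.py"),
        ],
    }
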
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py
new file mode 100644
index 00000000..d5cadb27
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py
@@ -0,0 +1,49 @@
+""" Modified version of build_scripts that handles building scripts from functions.
+
+"""
+from distutils.command.build_scripts import build_scripts as old_build_scripts
+from numpy.distutils import log
+from numpy.distutils.misc_util import is_string
+
+class build_scripts(old_build_scripts):
+
+ def generate_scripts(self, scripts):
+ new_scripts = []
+ func_scripts = []
+ for script in scripts:
+ if is_string(script):
+ new_scripts.append(script)
+ else:
+ func_scripts.append(script)
+ if not func_scripts:
+ return new_scripts
+
+ build_dir = self.build_dir
+ self.mkpath(build_dir)
+ for func in func_scripts:
+ script = func(build_dir)
+ if not script:
+ continue
+ if is_string(script):
+ log.info(" adding '%s' to scripts" % (script,))
+ new_scripts.append(script)
+ else:
+ [log.info(" adding '%s' to scripts" % (s,)) for s in script]
+ new_scripts.extend(list(script))
+ return new_scripts
+
+ def run(self):
+ if not self.scripts:
+ return
+
+ self.scripts = self.generate_scripts(self.scripts)
+ # Now make sure that the distribution object has this list of scripts.
+ # setuptools' develop command requires that this be a list of filenames,
+ # not functions.
+ self.distribution.scripts = self.scripts
+
+ return old_build_scripts.run(self)
+
+ def get_source_files(self):
+ from numpy.distutils.misc_util import get_script_files
+ return get_script_files(self.scripts)
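
generate_scripts() above accepts callables alongside plain filenames: a callable receives the build directory, writes the script there, and returns its path (or a list of paths). A sketch with hypothetical names:

    import os

    def generate_cli(build_dir):
        path = os.path.join(build_dir, "mytool")
        with open(path, "w") as f:
            f.write("#!/usr/bin/env python\nprint('hello')\n")
        return path

    # setup(..., scripts=[generate_cli, "bin/other_tool"])
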
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py
new file mode 100644
index 00000000..5581011f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py
@@ -0,0 +1,773 @@
+""" Build swig and f2py sources.
+"""
+import os
+import re
+import sys
+import shlex
+import copy
+
+from distutils.command import build_ext
+from distutils.dep_util import newer_group, newer
+from distutils.util import get_platform
+from distutils.errors import DistutilsError, DistutilsSetupError
+
+
+# this import can't be done here, as it uses numpy stuff only available
+# after it's installed
+#import numpy.f2py
+from numpy.distutils import log
+from numpy.distutils.misc_util import (
+ fortran_ext_match, appendpath, is_string, is_sequence, get_cmd
+ )
+from numpy.distutils.from_template import process_file as process_f_file
+from numpy.distutils.conv_template import process_file as process_c_file
+
+def subst_vars(target, source, d):
+ """Substitute any occurrence of @foo@ by d['foo'] from source file into
+ target."""
+ var = re.compile('@([a-zA-Z_]+)@')
+ with open(source, 'r') as fs:
+ with open(target, 'w') as ft:
+ for l in fs:
+ m = var.search(l)
+ if m:
+ ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
+ else:
+ ft.write(l)
+
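
subst_vars() above performs the @var@ substitution that build_npy_pkg_config() further down uses to instantiate npy-pkg-config templates. A small sketch (file names are hypothetical):

    with open("npymath.ini.in", "w") as f:
        f.write("[default]\nprefix=@prefix@\nlibs=-lnpymath\n")

    subst_vars("npymath.ini", "npymath.ini.in", {"prefix": "/usr/local"})
    # npymath.ini now contains: prefix=/usr/local
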
+class build_src(build_ext.build_ext):
+
+ description = "build sources from SWIG, F2PY files or a function"
+
+ user_options = [
+ ('build-src=', 'd', "directory to \"build\" sources to"),
+ ('f2py-opts=', None, "list of f2py command line options"),
+ ('swig=', None, "path to the SWIG executable"),
+ ('swig-opts=', None, "list of SWIG command line options"),
+ ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
+ ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
+ ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
+ ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+ ('inplace', 'i',
+ "ignore build-lib and put compiled extensions into the source " +
+ "directory alongside your pure Python modules"),
+ ('verbose-cfg', None,
+ "change logging level from WARN to INFO which will show all " +
+ "compiler output")
+ ]
+
+ boolean_options = ['force', 'inplace', 'verbose-cfg']
+
+ help_options = []
+
+ def initialize_options(self):
+ self.extensions = None
+ self.package = None
+ self.py_modules = None
+ self.py_modules_dict = None
+ self.build_src = None
+ self.build_lib = None
+ self.build_base = None
+ self.force = None
+ self.inplace = None
+ self.package_dir = None
+ self.f2pyflags = None # obsolete
+ self.f2py_opts = None
+ self.swigflags = None # obsolete
+ self.swig_opts = None
+ self.swig_cpp = None
+ self.swig = None
+ self.verbose_cfg = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_base', 'build_base'),
+ ('build_lib', 'build_lib'),
+ ('force', 'force'))
+ if self.package is None:
+ self.package = self.distribution.ext_package
+ self.extensions = self.distribution.ext_modules
+ self.libraries = self.distribution.libraries or []
+ self.py_modules = self.distribution.py_modules or []
+ self.data_files = self.distribution.data_files or []
+
+ if self.build_src is None:
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
+ self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
+
+ # py_modules_dict is used in build_py.find_package_modules
+ self.py_modules_dict = {}
+
+ if self.f2pyflags:
+ if self.f2py_opts:
+ log.warn('ignoring --f2pyflags as --f2py-opts already used')
+ else:
+ self.f2py_opts = self.f2pyflags
+ self.f2pyflags = None
+ if self.f2py_opts is None:
+ self.f2py_opts = []
+ else:
+ self.f2py_opts = shlex.split(self.f2py_opts)
+
+ if self.swigflags:
+ if self.swig_opts:
+ log.warn('ignoring --swigflags as --swig-opts already used')
+ else:
+ self.swig_opts = self.swigflags
+ self.swigflags = None
+
+ if self.swig_opts is None:
+ self.swig_opts = []
+ else:
+ self.swig_opts = shlex.split(self.swig_opts)
+
+ # use options from build_ext command
+ build_ext = self.get_finalized_command('build_ext')
+ if self.inplace is None:
+ self.inplace = build_ext.inplace
+ if self.swig_cpp is None:
+ self.swig_cpp = build_ext.swig_cpp
+ for c in ['swig', 'swig_opt']:
+ o = '--'+c.replace('_', '-')
+ v = getattr(build_ext, c, None)
+ if v:
+ if getattr(self, c):
+ log.warn('both build_src and build_ext define %s option' % (o))
+ else:
+ log.info('using "%s=%s" option from build_ext command' % (o, v))
+ setattr(self, c, v)
+
+ def run(self):
+ log.info("build_src")
+ if not (self.extensions or self.libraries):
+ return
+ self.build_sources()
+
+ def build_sources(self):
+
+ if self.inplace:
+ self.get_package_dir = \
+ self.get_finalized_command('build_py').get_package_dir
+
+ self.build_py_modules_sources()
+
+ for libname_info in self.libraries:
+ self.build_library_sources(*libname_info)
+
+ if self.extensions:
+ self.check_extensions_list(self.extensions)
+
+ for ext in self.extensions:
+ self.build_extension_sources(ext)
+
+ self.build_data_files_sources()
+ self.build_npy_pkg_config()
+
+ def build_data_files_sources(self):
+ if not self.data_files:
+ return
+ log.info('building data_files sources')
+ from numpy.distutils.misc_util import get_data_files
+ new_data_files = []
+ for data in self.data_files:
+ if isinstance(data, str):
+ new_data_files.append(data)
+ elif isinstance(data, tuple):
+ d, files = data
+ if self.inplace:
+ build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
+ else:
+ build_dir = os.path.join(self.build_src, d)
+ funcs = [f for f in files if hasattr(f, '__call__')]
+ files = [f for f in files if not hasattr(f, '__call__')]
+ for f in funcs:
+ if f.__code__.co_argcount==1:
+ s = f(build_dir)
+ else:
+ s = f()
+ if s is not None:
+ if isinstance(s, list):
+ files.extend(s)
+ elif isinstance(s, str):
+ files.append(s)
+ else:
+ raise TypeError(repr(s))
+ filenames = get_data_files((d, files))
+ new_data_files.append((d, filenames))
+ else:
+ raise TypeError(repr(data))
+ self.data_files[:] = new_data_files
+
+
+ def _build_npy_pkg_config(self, info, gd):
+ template, install_dir, subst_dict = info
+ template_dir = os.path.dirname(template)
+ for k, v in gd.items():
+ subst_dict[k] = v
+
+ if self.inplace == 1:
+ generated_dir = os.path.join(template_dir, install_dir)
+ else:
+ generated_dir = os.path.join(self.build_src, template_dir,
+ install_dir)
+ generated = os.path.basename(os.path.splitext(template)[0])
+ generated_path = os.path.join(generated_dir, generated)
+ if not os.path.exists(generated_dir):
+ os.makedirs(generated_dir)
+
+ subst_vars(generated_path, template, subst_dict)
+
+ # Where to install relatively to install prefix
+ full_install_dir = os.path.join(template_dir, install_dir)
+ return full_install_dir, generated_path
+
+ def build_npy_pkg_config(self):
+ log.info('build_src: building npy-pkg config files')
+
+ # XXX: another ugly workaround to circumvent distutils brain damage. We
+ # need the install prefix here, but finalizing the options of the
+ # install command when only building sources causes an error. Instead, we
+ # copy the install command instance, and finalize the copy so that it
+ # does not disrupt how distutils wants to do things with the
+ # original install command instance.
+ install_cmd = copy.copy(get_cmd('install'))
+ if install_cmd.finalized != 1:
+ install_cmd.finalize_options()
+ build_npkg = False
+ if self.inplace == 1:
+ top_prefix = '.'
+ build_npkg = True
+ elif hasattr(install_cmd, 'install_libbase'):
+ top_prefix = install_cmd.install_libbase
+ build_npkg = True
+
+ if build_npkg:
+ for pkg, infos in self.distribution.installed_pkg_config.items():
+ pkg_path = self.distribution.package_dir[pkg]
+ prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
+ d = {'prefix': prefix}
+ for info in infos:
+ install_dir, generated = self._build_npy_pkg_config(info, d)
+ self.distribution.data_files.append((install_dir,
+ [generated]))
+
+ def build_py_modules_sources(self):
+ if not self.py_modules:
+ return
+ log.info('building py_modules sources')
+ new_py_modules = []
+ for source in self.py_modules:
+ if is_sequence(source) and len(source)==3:
+ package, module_base, source = source
+ if self.inplace:
+ build_dir = self.get_package_dir(package)
+ else:
+ build_dir = os.path.join(self.build_src,
+ os.path.join(*package.split('.')))
+ if hasattr(source, '__call__'):
+ target = os.path.join(build_dir, module_base + '.py')
+ source = source(target)
+ if source is None:
+ continue
+ modules = [(package, module_base, source)]
+ if package not in self.py_modules_dict:
+ self.py_modules_dict[package] = []
+ self.py_modules_dict[package] += modules
+ else:
+ new_py_modules.append(source)
+ self.py_modules[:] = new_py_modules
+
+ def build_library_sources(self, lib_name, build_info):
+ sources = list(build_info.get('sources', []))
+
+ if not sources:
+ return
+
+ log.info('building library "%s" sources' % (lib_name))
+
+ sources = self.generate_sources(sources, (lib_name, build_info))
+
+ sources = self.template_sources(sources, (lib_name, build_info))
+
+ sources, h_files = self.filter_h_files(sources)
+
+ if h_files:
+ log.info('%s - nothing done with h_files = %s',
+ self.package, h_files)
+
+ #for f in h_files:
+ # self.distribution.headers.append((lib_name,f))
+
+ build_info['sources'] = sources
+ return
+
+ def build_extension_sources(self, ext):
+
+ sources = list(ext.sources)
+
+ log.info('building extension "%s" sources' % (ext.name))
+
+ fullname = self.get_ext_fullname(ext.name)
+
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[0:-1])
+
+ if self.inplace:
+ self.ext_target_dir = self.get_package_dir(package)
+
+ sources = self.generate_sources(sources, ext)
+ sources = self.template_sources(sources, ext)
+ sources = self.swig_sources(sources, ext)
+ sources = self.f2py_sources(sources, ext)
+ sources = self.pyrex_sources(sources, ext)
+
+ sources, py_files = self.filter_py_files(sources)
+
+ if package not in self.py_modules_dict:
+ self.py_modules_dict[package] = []
+ modules = []
+ for f in py_files:
+ module = os.path.splitext(os.path.basename(f))[0]
+ modules.append((package, module, f))
+ self.py_modules_dict[package] += modules
+
+ sources, h_files = self.filter_h_files(sources)
+
+ if h_files:
+ log.info('%s - nothing done with h_files = %s',
+ package, h_files)
+ #for f in h_files:
+ # self.distribution.headers.append((package,f))
+
+ ext.sources = sources
+
+ def generate_sources(self, sources, extension):
+ new_sources = []
+ func_sources = []
+ for source in sources:
+ if is_string(source):
+ new_sources.append(source)
+ else:
+ func_sources.append(source)
+ if not func_sources:
+ return new_sources
+ if self.inplace and not is_sequence(extension):
+ build_dir = self.ext_target_dir
+ else:
+ if is_sequence(extension):
+ name = extension[0]
+ # if 'include_dirs' not in extension[1]:
+ # extension[1]['include_dirs'] = []
+ # incl_dirs = extension[1]['include_dirs']
+ else:
+ name = extension.name
+ # incl_dirs = extension.include_dirs
+ #if self.build_src not in incl_dirs:
+ # incl_dirs.append(self.build_src)
+ build_dir = os.path.join(*([self.build_src]
+ +name.split('.')[:-1]))
+ self.mkpath(build_dir)
+
+ if self.verbose_cfg:
+ new_level = log.INFO
+ else:
+ new_level = log.WARN
+ old_level = log.set_threshold(new_level)
+
+ for func in func_sources:
+ source = func(extension, build_dir)
+ if not source:
+ continue
+ if is_sequence(source):
+ [log.info(" adding '%s' to sources." % (s,)) for s in source]
+ new_sources.extend(source)
+ else:
+ log.info(" adding '%s' to sources." % (source,))
+ new_sources.append(source)
+ log.set_threshold(old_level)
+ return new_sources
+
+ def filter_py_files(self, sources):
+ return self.filter_files(sources, ['.py'])
+
+ def filter_h_files(self, sources):
+ return self.filter_files(sources, ['.h', '.hpp', '.inc'])
+
+ def filter_files(self, sources, exts = []):
+ new_sources = []
+ files = []
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext in exts:
+ files.append(source)
+ else:
+ new_sources.append(source)
+ return new_sources, files
+
+ def template_sources(self, sources, extension):
+ new_sources = []
+ if is_sequence(extension):
+ depends = extension[1].get('depends')
+ include_dirs = extension[1].get('include_dirs')
+ else:
+ depends = extension.depends
+ include_dirs = extension.include_dirs
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.src': # Template file
+ if self.inplace:
+ target_dir = os.path.dirname(base)
+ else:
+ target_dir = appendpath(self.build_src, os.path.dirname(base))
+ self.mkpath(target_dir)
+ target_file = os.path.join(target_dir, os.path.basename(base))
+ if (self.force or newer_group([source] + depends, target_file)):
+ if _f_pyf_ext_match(base):
+ log.info("from_template:> %s" % (target_file))
+ outstr = process_f_file(source)
+ else:
+ log.info("conv_template:> %s" % (target_file))
+ outstr = process_c_file(source)
+ with open(target_file, 'w') as fid:
+ fid.write(outstr)
+ if _header_ext_match(target_file):
+ d = os.path.dirname(target_file)
+ if d not in include_dirs:
+ log.info(" adding '%s' to include_dirs." % (d))
+ include_dirs.append(d)
+ new_sources.append(target_file)
+ else:
+ new_sources.append(source)
+ return new_sources
+
+ def pyrex_sources(self, sources, extension):
+ """Pyrex not supported; this remains for Cython support (see below)"""
+ new_sources = []
+ ext_name = extension.name.split('.')[-1]
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.pyx':
+ target_file = self.generate_a_pyrex_source(base, ext_name,
+ source,
+ extension)
+ new_sources.append(target_file)
+ else:
+ new_sources.append(source)
+ return new_sources
+
+ def generate_a_pyrex_source(self, base, ext_name, source, extension):
+ """Pyrex is not supported, but some projects monkeypatch this method.
+
+ That allows compiling Cython code, see gh-6955.
+ This method will remain here for compatibility reasons.
+ """
+ return []
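+
+ # A hedged sketch of the monkeypatch mentioned above, not numpy's own
+ # code: a project wanting Cython support could override the method
+ # roughly like this (the Cython call and the returned path are
+ # assumptions on the caller's side; see gh-6955 for real usage):
+ #
+ # from numpy.distutils.command import build_src
+ #
+ # def generate_a_cython_source(self, base, ext_name, source, extension):
+ # from Cython.Build import cythonize
+ # cythonize([source]) # writes <base>.c next to the .pyx file
+ # return base + '.c'
+ #
+ # build_src.build_src.generate_a_pyrex_source = generate_a_cython_source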
+
+ def f2py_sources(self, sources, extension):
+ new_sources = []
+ f2py_sources = []
+ f_sources = []
+ f2py_targets = {}
+ target_dirs = []
+ ext_name = extension.name.split('.')[-1]
+ skip_f2py = 0
+
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.pyf': # F2PY interface file
+ if self.inplace:
+ target_dir = os.path.dirname(base)
+ else:
+ target_dir = appendpath(self.build_src, os.path.dirname(base))
+ if os.path.isfile(source):
+ name = get_f2py_modulename(source)
+ if name != ext_name:
+ raise DistutilsSetupError('mismatch of extension names: %s '
+ 'provides %r but expected %r' % (
+ source, name, ext_name))
+ target_file = os.path.join(target_dir, name+'module.c')
+ else:
+ log.debug(' source %s does not exist: skipping f2py\'ing.' \
+ % (source))
+ name = ext_name
+ skip_f2py = 1
+ target_file = os.path.join(target_dir, name+'module.c')
+ if not os.path.isfile(target_file):
+ log.warn(' target %s does not exist:\n '\
+ 'Assuming %smodule.c was generated with '\
+ '"build_src --inplace" command.' \
+ % (target_file, name))
+ target_dir = os.path.dirname(base)
+ target_file = os.path.join(target_dir, name+'module.c')
+ if not os.path.isfile(target_file):
+ raise DistutilsSetupError("%r missing" % (target_file,))
+ log.info(' Yes! Using %r as up-to-date target.' \
+ % (target_file))
+ target_dirs.append(target_dir)
+ f2py_sources.append(source)
+ f2py_targets[source] = target_file
+ new_sources.append(target_file)
+ elif fortran_ext_match(ext):
+ f_sources.append(source)
+ else:
+ new_sources.append(source)
+
+ if not (f2py_sources or f_sources):
+ return new_sources
+
+ for d in target_dirs:
+ self.mkpath(d)
+
+ f2py_options = extension.f2py_options + self.f2py_opts
+
+ if self.distribution.libraries:
+ for name, build_info in self.distribution.libraries:
+ if name in extension.libraries:
+ f2py_options.extend(build_info.get('f2py_options', []))
+
+ log.info("f2py options: %s" % (f2py_options))
+
+ if f2py_sources:
+ if len(f2py_sources) != 1:
+ raise DistutilsSetupError(
+ 'only one .pyf file is allowed per extension module but got'\
+ ' more: %r' % (f2py_sources,))
+ source = f2py_sources[0]
+ target_file = f2py_targets[source]
+ target_dir = os.path.dirname(target_file) or '.'
+ depends = [source] + extension.depends
+ if (self.force or newer_group(depends, target_file, 'newer')) \
+ and not skip_f2py:
+ log.info("f2py: %s" % (source))
+ import numpy.f2py
+ numpy.f2py.run_main(f2py_options
+ + ['--build-dir', target_dir, source])
+ else:
+ log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
+ else:
+ #XXX TODO: --inplace support for sdist command
+ if is_sequence(extension):
+ name = extension[0]
+ else: name = extension.name
+ target_dir = os.path.join(*([self.build_src]
+ +name.split('.')[:-1]))
+ target_file = os.path.join(target_dir, ext_name + 'module.c')
+ new_sources.append(target_file)
+ depends = f_sources + extension.depends
+ if (self.force or newer_group(depends, target_file, 'newer')) \
+ and not skip_f2py:
+ log.info("f2py:> %s" % (target_file))
+ self.mkpath(target_dir)
+ import numpy.f2py
+ numpy.f2py.run_main(f2py_options + ['--lower',
+ '--build-dir', target_dir]+\
+ ['-m', ext_name]+f_sources)
+ else:
+ log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
+ % (target_file))
+
+ if not os.path.isfile(target_file):
+ raise DistutilsError("f2py target file %r not generated" % (target_file,))
+
+ build_dir = os.path.join(self.build_src, target_dir)
+ target_c = os.path.join(build_dir, 'fortranobject.c')
+ target_h = os.path.join(build_dir, 'fortranobject.h')
+ log.info(" adding '%s' to sources." % (target_c))
+ new_sources.append(target_c)
+ if build_dir not in extension.include_dirs:
+ log.info(" adding '%s' to include_dirs." % (build_dir))
+ extension.include_dirs.append(build_dir)
+
+ if not skip_f2py:
+ import numpy.f2py
+ d = os.path.dirname(numpy.f2py.__file__)
+ source_c = os.path.join(d, 'src', 'fortranobject.c')
+ source_h = os.path.join(d, 'src', 'fortranobject.h')
+ if newer(source_c, target_c) or newer(source_h, target_h):
+ self.mkpath(os.path.dirname(target_c))
+ self.copy_file(source_c, target_c)
+ self.copy_file(source_h, target_h)
+ else:
+ if not os.path.isfile(target_c):
+ raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
+ if not os.path.isfile(target_h):
+ raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
+
+ for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
+ filename = os.path.join(target_dir, ext_name + name_ext)
+ if os.path.isfile(filename):
+ log.info(" adding '%s' to sources." % (filename))
+ f_sources.append(filename)
+
+ return new_sources + f_sources
+
+ def swig_sources(self, sources, extension):
+ # Assuming SWIG 1.3.14 or later. See compatibility note in
+ # http://www.swig.org/Doc1.3/Python.html#Python_nn6
+
+ new_sources = []
+ swig_sources = []
+ swig_targets = {}
+ target_dirs = []
+ py_files = [] # swig generated .py files
+ target_ext = '.c'
+ if '-c++' in extension.swig_opts:
+ typ = 'c++'
+ is_cpp = True
+ extension.swig_opts.remove('-c++')
+ elif self.swig_cpp:
+ typ = 'c++'
+ is_cpp = True
+ else:
+ typ = None
+ is_cpp = False
+ skip_swig = 0
+ ext_name = extension.name.split('.')[-1]
+
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.i': # SWIG interface file
+ # the code below assumes that the sources list
+ # contains not more than one .i SWIG interface file
+ if self.inplace:
+ target_dir = os.path.dirname(base)
+ py_target_dir = self.ext_target_dir
+ else:
+ target_dir = appendpath(self.build_src, os.path.dirname(base))
+ py_target_dir = target_dir
+ if os.path.isfile(source):
+ name = get_swig_modulename(source)
+ if name != ext_name[1:]:
+ raise DistutilsSetupError(
+ 'mismatch of extension names: %s provides %r'
+ ' but expected %r' % (source, name, ext_name[1:]))
+ if typ is None:
+ typ = get_swig_target(source)
+ is_cpp = typ=='c++'
+ else:
+ typ2 = get_swig_target(source)
+ if typ2 is None:
+ log.warn('source %r does not define swig target, assuming %s swig target' \
+ % (source, typ))
+ elif typ!=typ2:
+ log.warn('expected %r but source %r defines %r swig target' \
+ % (typ, source, typ2))
+ if typ2=='c++':
+ log.warn('resetting swig target to c++ (some targets may have .c extension)')
+ is_cpp = True
+ else:
+ log.warn('assuming that %r has c++ swig target' % (source))
+ if is_cpp:
+ target_ext = '.cpp'
+ target_file = os.path.join(target_dir, '%s_wrap%s' \
+ % (name, target_ext))
+ else:
+ log.warn(' source %s does not exist: skipping swig\'ing.' \
+ % (source))
+ name = ext_name[1:]
+ skip_swig = 1
+ target_file = _find_swig_target(target_dir, name)
+ if not os.path.isfile(target_file):
+ log.warn(' target %s does not exist:\n '\
+ 'Assuming %s_wrap.{c,cpp} was generated with '\
+ '"build_src --inplace" command.' \
+ % (target_file, name))
+ target_dir = os.path.dirname(base)
+ target_file = _find_swig_target(target_dir, name)
+ if not os.path.isfile(target_file):
+ raise DistutilsSetupError("%r missing" % (target_file,))
+ log.warn(' Yes! Using %r as up-to-date target.' \
+ % (target_file))
+ target_dirs.append(target_dir)
+ new_sources.append(target_file)
+ py_files.append(os.path.join(py_target_dir, name+'.py'))
+ swig_sources.append(source)
+ swig_targets[source] = new_sources[-1]
+ else:
+ new_sources.append(source)
+
+ if not swig_sources:
+ return new_sources
+
+ if skip_swig:
+ return new_sources + py_files
+
+ for d in target_dirs:
+ self.mkpath(d)
+
+ swig = self.swig or self.find_swig()
+ swig_cmd = [swig, "-python"] + extension.swig_opts
+ if is_cpp:
+ swig_cmd.append('-c++')
+ for d in extension.include_dirs:
+ swig_cmd.append('-I'+d)
+ for source in swig_sources:
+ target = swig_targets[source]
+ depends = [source] + extension.depends
+ if self.force or newer_group(depends, target, 'newer'):
+ log.info("%s: %s" % (os.path.basename(swig) \
+ + (is_cpp and '++' or ''), source))
+ self.spawn(swig_cmd + self.swig_opts \
+ + ["-o", target, '-outdir', py_target_dir, source])
+ else:
+ log.debug(" skipping '%s' swig interface (up-to-date)" \
+ % (source))
+
+ return new_sources + py_files
+
+_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match
+
+#### SWIG related auxiliary functions ####
+_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
+ re.I).match
+_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search
+_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search
+
+def get_swig_target(source):
+ with open(source, 'r') as f:
+ result = None
+ line = f.readline()
+ if _has_cpp_header(line):
+ result = 'c++'
+ if _has_c_header(line):
+ result = 'c'
+ return result
+
+def get_swig_modulename(source):
+ with open(source, 'r') as f:
+ name = None
+ for line in f:
+ m = _swig_module_name_match(line)
+ if m:
+ name = m.group('name')
+ break
+ return name
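+
+# For reference: given an interface file whose first lines are, e.g.,
+#
+# /* -*- c++ -*- */
+# %module example
+#
+# get_swig_target() reports 'c++' (only the first line is inspected) and
+# get_swig_modulename() returns 'example'. The file contents above are
+# illustrative only.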
+
+def _find_swig_target(target_dir, name):
+ for ext in ['.cpp', '.c']:
+ target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
+ if os.path.isfile(target):
+ break
+ return target
+
+#### F2PY related auxiliary functions ####
+
+_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
+ re.I).match
+_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
+ r'__user__[\w_]*)', re.I).match
+
+def get_f2py_modulename(source):
+ name = None
+ with open(source) as f:
+ for line in f:
+ m = _f2py_module_name_match(line)
+ if m:
+ if _f2py_user_module_name_match(line): # skip *__user__* names
+ continue
+ name = m.group('name')
+ break
+ return name
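+
+# For reference: for a .pyf file beginning with, e.g.,
+#
+# python module example
+# interface
+# ...
+# end python module example
+#
+# get_f2py_modulename() returns 'example', while any
+# 'python module foo__user__routines' block is skipped.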
+
+##########################################
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/config.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/config.py
new file mode 100644
index 00000000..fdb650d3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/config.py
@@ -0,0 +1,516 @@
+# Added Fortran compiler support to config. Currently useful only for
+# try_compile call. try_run works but is untested for most of Fortran
+# compilers (they must define linker_exe first).
+# Pearu Peterson
+import os
+import signal
+import subprocess
+import sys
+import textwrap
+import warnings
+
+from distutils.command.config import config as old_config
+from distutils.command.config import LANG_EXT
+from distutils import log
+from distutils.file_util import copy_file
+from distutils.ccompiler import CompileError, LinkError
+import distutils
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.mingw32ccompiler import generate_manifest
+from numpy.distutils.command.autodist import (check_gcc_function_attribute,
+ check_gcc_function_attribute_with_intrinsics,
+ check_gcc_variable_attribute,
+ check_gcc_version_at_least,
+ check_inline,
+ check_restrict,
+ check_compiler_gcc)
+
+LANG_EXT['f77'] = '.f'
+LANG_EXT['f90'] = '.f90'
+
+class config(old_config):
+ old_config.user_options += [
+ ('fcompiler=', None, "specify the Fortran compiler type"),
+ ]
+
+ def initialize_options(self):
+ self.fcompiler = None
+ old_config.initialize_options(self)
+
+ def _check_compiler (self):
+ old_config._check_compiler(self)
+ from numpy.distutils.fcompiler import FCompiler, new_fcompiler
+
+ if sys.platform == 'win32' and (self.compiler.compiler_type in
+ ('msvc', 'intelw', 'intelemw')):
+ # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
+ # initialize call query_vcvarsall, which throws an IOError, and
+ # causes an error along the way without much information. We try to
+ # catch it here, hoping it is early enough, and print a helpful
+ # message instead of Error: None.
+ if not self.compiler.initialized:
+ try:
+ self.compiler.initialize()
+ except IOError as e:
+ msg = textwrap.dedent("""\
+ Could not initialize compiler instance: do you have Visual Studio
+ installed? If you are trying to build with MinGW, please use "python setup.py
+ build -c mingw32" instead. If you have Visual Studio installed, check it is
+ correctly installed, and the right version (VS 2015 as of this writing).
+
+ Original exception was: %s, and the Compiler class was %s
+ ============================================================================""") \
+ % (e, self.compiler.__class__.__name__)
+ print(textwrap.dedent("""\
+ ============================================================================"""))
+ raise distutils.errors.DistutilsPlatformError(msg) from e
+
+ # After MSVC is initialized, add an explicit /MANIFEST to linker
+ # flags. See issues gh-4245 and gh-4101 for details. Also
+ # relevant are issues 4431 and 16296 on the Python bug tracker.
+ from distutils import msvc9compiler
+ if msvc9compiler.get_build_version() >= 10:
+ for ldflags in [self.compiler.ldflags_shared,
+ self.compiler.ldflags_shared_debug]:
+ if '/MANIFEST' not in ldflags:
+ ldflags.append('/MANIFEST')
+
+ if not isinstance(self.fcompiler, FCompiler):
+ self.fcompiler = new_fcompiler(compiler=self.fcompiler,
+ dry_run=self.dry_run, force=1,
+ c_compiler=self.compiler)
+ if self.fcompiler is not None:
+ self.fcompiler.customize(self.distribution)
+ if self.fcompiler.get_version():
+ self.fcompiler.customize_cmd(self)
+ self.fcompiler.show_customization()
+
+ def _wrap_method(self, mth, lang, args):
+ from distutils.ccompiler import CompileError
+ from distutils.errors import DistutilsExecError
+ save_compiler = self.compiler
+ if lang in ['f77', 'f90']:
+ self.compiler = self.fcompiler
+ if self.compiler is None:
+ raise CompileError('%s compiler is not set' % (lang,))
+ try:
+ ret = mth(*((self,)+args))
+ except (DistutilsExecError, CompileError) as e:
+ self.compiler = save_compiler
+ raise CompileError from e
+ self.compiler = save_compiler
+ return ret
+
+ def _compile (self, body, headers, include_dirs, lang):
+ src, obj = self._wrap_method(old_config._compile, lang,
+ (body, headers, include_dirs, lang))
+ # _compile in unixcompiler.py sometimes creates .d dependency files.
+ # Clean them up.
+ self.temp_files.append(obj + '.d')
+ return src, obj
+
+ def _link (self, body,
+ headers, include_dirs,
+ libraries, library_dirs, lang):
+ if self.compiler.compiler_type=='msvc':
+ libraries = (libraries or [])[:]
+ library_dirs = (library_dirs or [])[:]
+ if lang in ['f77', 'f90']:
+ lang = 'c' # always use system linker when using MSVC compiler
+ if self.fcompiler:
+ for d in self.fcompiler.library_dirs or []:
+ # correct path when compiling in Cygwin but with
+ # normal Win Python
+ if d.startswith('/usr/lib'):
+ try:
+ d = subprocess.check_output(['cygpath',
+ '-w', d])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ d = filepath_from_subprocess_output(d)
+ library_dirs.append(d)
+ for libname in self.fcompiler.libraries or []:
+ if libname not in libraries:
+ libraries.append(libname)
+ for libname in libraries:
+ if libname.startswith('msvc'): continue
+ fileexists = False
+ for libdir in library_dirs or []:
+ libfile = os.path.join(libdir, '%s.lib' % (libname))
+ if os.path.isfile(libfile):
+ fileexists = True
+ break
+ if fileexists: continue
+ # make g77-compiled static libs available to MSVC
+ fileexists = False
+ for libdir in library_dirs:
+ libfile = os.path.join(libdir, 'lib%s.a' % (libname))
+ if os.path.isfile(libfile):
+ # copy libname.a file to name.lib so that MSVC linker
+ # can find it
+ libfile2 = os.path.join(libdir, '%s.lib' % (libname))
+ copy_file(libfile, libfile2)
+ self.temp_files.append(libfile2)
+ fileexists = True
+ break
+ if fileexists: continue
+ log.warn('could not find library %r in directories %s' \
+ % (libname, library_dirs))
+ elif self.compiler.compiler_type == 'mingw32':
+ generate_manifest(self)
+ return self._wrap_method(old_config._link, lang,
+ (body, headers, include_dirs,
+ libraries, library_dirs, lang))
+
+ def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
+ self._check_compiler()
+ return self.try_compile(
+ "/* we need a dummy line to make distutils happy */",
+ [header], include_dirs)
+
+ def check_decl(self, symbol,
+ headers=None, include_dirs=None):
+ self._check_compiler()
+ body = textwrap.dedent("""
+ int main(void)
+ {
+ #ifndef %s
+ (void) %s;
+ #endif
+ ;
+ return 0;
+ }""") % (symbol, symbol)
+
+ return self.try_compile(body, headers, include_dirs)
+
+ def check_macro_true(self, symbol,
+ headers=None, include_dirs=None):
+ self._check_compiler()
+ body = textwrap.dedent("""
+ int main(void)
+ {
+ #if %s
+ #else
+ #error false or undefined macro
+ #endif
+ ;
+ return 0;
+ }""") % (symbol,)
+
+ return self.try_compile(body, headers, include_dirs)
+
+ def check_type(self, type_name, headers=None, include_dirs=None,
+ library_dirs=None):
+ """Check type availability. Return True if the type can be compiled,
+ False otherwise"""
+ self._check_compiler()
+
+ # First check the type can be compiled
+ body = textwrap.dedent(r"""
+ int main(void) {
+ if ((%(name)s *) 0)
+ return 0;
+ if (sizeof (%(name)s))
+ return 0;
+ }
+ """) % {'name': type_name}
+
+ st = False
+ try:
+ try:
+ # 'body' was already %-formatted above; pass it through as-is
+ self._compile(body, headers, include_dirs, 'c')
+ st = True
+ except distutils.errors.CompileError:
+ st = False
+ finally:
+ self._clean()
+
+ return st
+
+ def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
+ """Check size of a given type."""
+ self._check_compiler()
+
+ # First check the type can be compiled
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
+ self._compile(body % {'type': type_name},
+ headers, include_dirs, 'c')
+ self._clean()
+
+ if expected:
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
+ for size in expected:
+ try:
+ self._compile(body % {'type': type_name, 'size': size},
+ headers, include_dirs, 'c')
+ self._clean()
+ return size
+ except CompileError:
+ pass
+
+ # this fails to *compile* if size > sizeof(type)
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
+
+ # The principle is simple: we first find low and high bounds of size
+ # for the type, where low/high are looked up on a log scale. Then, we
+ # do a binary search to find the exact size between low and high
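+ # Worked example (illustrative): for a 4-byte type the upper-bound
+ # probes test sizes 0, 1 and 3 (too small) and then 7 (compiles),
+ # leaving low=4, high=7; the binary search then tries 5 and 4 and
+ # returns 4.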
+ low = 0
+ mid = 0
+ while True:
+ try:
+ self._compile(body % {'type': type_name, 'size': mid},
+ headers, include_dirs, 'c')
+ self._clean()
+ break
+ except CompileError:
+ #log.info("failure to test for bound %d" % mid)
+ low = mid + 1
+ mid = 2 * mid + 1
+
+ high = mid
+ # Binary search:
+ while low != high:
+ mid = (high - low) // 2 + low
+ try:
+ self._compile(body % {'type': type_name, 'size': mid},
+ headers, include_dirs, 'c')
+ self._clean()
+ high = mid
+ except CompileError:
+ low = mid + 1
+ return low
+
+ def check_func(self, func,
+ headers=None, include_dirs=None,
+ libraries=None, library_dirs=None,
+ decl=False, call=False, call_args=None):
+ # clean up distutils's config a bit: add void to main(), and
+ # return a value.
+ self._check_compiler()
+ body = []
+ if decl:
+ if isinstance(decl, str):
+ body.append(decl)
+ else:
+ body.append("int %s (void);" % func)
+ # Handle MSVC intrinsics: force MS compiler to make a function call.
+ # Useful to test for some functions when built with optimization on, to
+ # avoid build error because the intrinsic and our 'fake' test
+ # declaration do not match.
+ body.append("#ifdef _MSC_VER")
+ body.append("#pragma function(%s)" % func)
+ body.append("#endif")
+ body.append("int main (void) {")
+ if call:
+ if call_args is None:
+ call_args = ''
+ body.append(" %s(%s);" % (func, call_args))
+ else:
+ body.append(" %s;" % func)
+ body.append(" return 0;")
+ body.append("}")
+ body = '\n'.join(body) + "\n"
+
+ return self.try_link(body, headers, include_dirs,
+ libraries, library_dirs)
+
+ def check_funcs_once(self, funcs,
+ headers=None, include_dirs=None,
+ libraries=None, library_dirs=None,
+ decl=False, call=False, call_args=None):
+ """Check a list of functions at once.
+
+ This is useful to speed things up, since all the functions in the funcs
+ list will be put in one compilation unit.
+
+ Arguments
+ ---------
+ funcs : seq
+ list of functions to test
+ headers : seq
+ list of header names to include in the code snippet
+ include_dirs : seq
+ list of header paths
+ libraries : seq
+ list of libraries to link the code snippet to
+ library_dirs : seq
+ list of library paths
+ decl : dict
+ for every (key, value), the declaration in the value will be
+ used for function in key. If a function is not in the
+ dictionary, no declaration will be used.
+ call : dict
+ for every item (f, value), if the value is True, a call will be
+ done to the function f.
+ """
+ self._check_compiler()
+ body = []
+ if decl:
+ for f, v in decl.items():
+ if v:
+ body.append("int %s (void);" % f)
+
+ # Handle MS intrinsics. See check_func for more info.
+ body.append("#ifdef _MSC_VER")
+ for func in funcs:
+ body.append("#pragma function(%s)" % func)
+ body.append("#endif")
+
+ body.append("int main (void) {")
+ if call:
+ for f in funcs:
+ if f in call and call[f]:
+ if not (call_args and f in call_args and call_args[f]):
+ args = ''
+ else:
+ args = call_args[f]
+ body.append(" %s(%s);" % (f, args))
+ else:
+ body.append(" %s;" % f)
+ else:
+ for f in funcs:
+ body.append(" %s;" % f)
+ body.append(" return 0;")
+ body.append("}")
+ body = '\n'.join(body) + "\n"
+
+ return self.try_link(body, headers, include_dirs,
+ libraries, library_dirs)
+
+ def check_inline(self):
+ """Return the inline keyword recognized by the compiler, empty string
+ otherwise."""
+ return check_inline(self)
+
+ def check_restrict(self):
+ """Return the restrict keyword recognized by the compiler, empty string
+ otherwise."""
+ return check_restrict(self)
+
+ def check_compiler_gcc(self):
+ """Return True if the C compiler is gcc"""
+ return check_compiler_gcc(self)
+
+ def check_gcc_function_attribute(self, attribute, name):
+ return check_gcc_function_attribute(self, attribute, name)
+
+ def check_gcc_function_attribute_with_intrinsics(self, attribute, name,
+ code, include):
+ return check_gcc_function_attribute_with_intrinsics(self, attribute,
+ name, code, include)
+
+ def check_gcc_variable_attribute(self, attribute):
+ return check_gcc_variable_attribute(self, attribute)
+
+ def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):
+ """Return True if the GCC version is greater than or equal to the
+ specified version."""
+ return check_gcc_version_at_least(self, major, minor, patchlevel)
+
+ def get_output(self, body, headers=None, include_dirs=None,
+ libraries=None, library_dirs=None,
+ lang="c", use_tee=None):
+ """Try to compile, link to an executable, and run a program
+ built from 'body' and 'headers'. Returns the exit status code
+ of the program and its output.
+ """
+ # 2008-11-16, RemoveMe
+ warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "Usage of get_output is deprecated: please do not \n"
+ "use it anymore, and avoid configuration checks \n"
+ "involving running executable on the target machine.\n"
+ "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
+ DeprecationWarning, stacklevel=2)
+ self._check_compiler()
+ exitcode, output = 255, ''
+ try:
+ grabber = GrabStdout()
+ try:
+ src, obj, exe = self._link(body, headers, include_dirs,
+ libraries, library_dirs, lang)
+ grabber.restore()
+ except Exception:
+ output = grabber.data
+ grabber.restore()
+ raise
+ exe = os.path.join('.', exe)
+ try:
+ # specify cwd arg for consistency with
+ # historic usage pattern of exec_command()
+ # also, note that exe appears to be a string,
+ # which exec_command() handled, but we now
+ # use a list for check_output() -- this assumes
+ # that exe is always a single command
+ output = subprocess.check_output([exe], cwd='.')
+ except subprocess.CalledProcessError as exc:
+ exitstatus = exc.returncode
+ output = ''
+ except OSError:
+ # preserve the EnvironmentError exit status
+ # used historically in exec_command()
+ exitstatus = 127
+ output = ''
+ else:
+ # success; record a zero exit status so the
+ # WEXITSTATUS handling below has a defined value
+ exitstatus = 0
+ output = filepath_from_subprocess_output(output)
+ if hasattr(os, 'WEXITSTATUS'):
+ exitcode = os.WEXITSTATUS(exitstatus)
+ if os.WIFSIGNALED(exitstatus):
+ sig = os.WTERMSIG(exitstatus)
+ log.error('subprocess exited with signal %d' % (sig,))
+ if sig == signal.SIGINT:
+ # control-C
+ raise KeyboardInterrupt
+ else:
+ exitcode = exitstatus
+ log.info("success!")
+ except (CompileError, LinkError):
+ log.info("failure.")
+ self._clean()
+ return exitcode, output
+
+class GrabStdout:
+
+ def __init__(self):
+ self.sys_stdout = sys.stdout
+ self.data = ''
+ sys.stdout = self
+
+ def write (self, data):
+ self.sys_stdout.write(data)
+ self.data += data
+
+ def flush (self):
+ self.sys_stdout.flush()
+
+ def restore(self):
+ sys.stdout = self.sys_stdout
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py
new file mode 100644
index 00000000..44265bfc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py
@@ -0,0 +1,126 @@
+from distutils.core import Command
+from numpy.distutils import log
+
+#XXX: Linker flags
+
+def show_fortran_compilers(_cache=None):
+ # Using cache to prevent infinite recursion.
+ if _cache:
+ return
+ elif _cache is None:
+ _cache = []
+ _cache.append(1)
+ from numpy.distutils.fcompiler import show_fcompilers
+ import distutils.core
+ dist = distutils.core._setup_distribution
+ show_fcompilers(dist)
+
+class config_fc(Command):
+ """ Distutils command to hold user specified options
+ to Fortran compilers.
+
+ config_fc command is used by the FCompiler.customize() method.
+ """
+
+ description = "specify Fortran 77/Fortran 90 compiler information"
+
+ user_options = [
+ ('fcompiler=', None, "specify Fortran compiler type"),
+ ('f77exec=', None, "specify F77 compiler command"),
+ ('f90exec=', None, "specify F90 compiler command"),
+ ('f77flags=', None, "specify F77 compiler flags"),
+ ('f90flags=', None, "specify F90 compiler flags"),
+ ('opt=', None, "specify optimization flags"),
+ ('arch=', None, "specify architecture specific optimization flags"),
+ ('debug', 'g', "compile with debugging information"),
+ ('noopt', None, "compile without optimization"),
+ ('noarch', None, "compile without arch-dependent optimization"),
+ ]
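+
+ # A typical invocation passing these options on the command line
+ # (compiler name and flags are illustrative):
+ #
+ # python setup.py config_fc --fcompiler=gnu95 --opt='-O3' build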
+
+ help_options = [
+ ('help-fcompiler', None, "list available Fortran compilers",
+ show_fortran_compilers),
+ ]
+
+ boolean_options = ['debug', 'noopt', 'noarch']
+
+ def initialize_options(self):
+ self.fcompiler = None
+ self.f77exec = None
+ self.f90exec = None
+ self.f77flags = None
+ self.f90flags = None
+ self.opt = None
+ self.arch = None
+ self.debug = None
+ self.noopt = None
+ self.noarch = None
+
+ def finalize_options(self):
+ log.info('unifying config_fc, config, build_clib, build_ext, build commands --fcompiler options')
+ build_clib = self.get_finalized_command('build_clib')
+ build_ext = self.get_finalized_command('build_ext')
+ config = self.get_finalized_command('config')
+ build = self.get_finalized_command('build')
+ cmd_list = [self, config, build_clib, build_ext, build]
+ for a in ['fcompiler']:
+ l = []
+ for c in cmd_list:
+ v = getattr(c, a)
+ if v is not None:
+ if not isinstance(v, str): v = v.compiler_type
+ if v not in l: l.append(v)
+ if not l: v1 = None
+ else: v1 = l[0]
+ if len(l)>1:
+ log.warn(' commands have different --%s options: %s'\
+ ', using first in list as default' % (a, l))
+ if v1:
+ for c in cmd_list:
+ if getattr(c, a) is None: setattr(c, a, v1)
+
+ def run(self):
+ # Do nothing.
+ return
+
+class config_cc(Command):
+ """ Distutils command to hold user specified options
+ to C/C++ compilers.
+ """
+
+ description = "specify C/C++ compiler information"
+
+ user_options = [
+ ('compiler=', None, "specify C/C++ compiler type"),
+ ]
+
+ def initialize_options(self):
+ self.compiler = None
+
+ def finalize_options(self):
+ log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options')
+ build_clib = self.get_finalized_command('build_clib')
+ build_ext = self.get_finalized_command('build_ext')
+ config = self.get_finalized_command('config')
+ build = self.get_finalized_command('build')
+ cmd_list = [self, config, build_clib, build_ext, build]
+ for a in ['compiler']:
+ l = []
+ for c in cmd_list:
+ v = getattr(c, a)
+ if v is not None:
+ if not isinstance(v, str): v = v.compiler_type
+ if v not in l: l.append(v)
+ if not l: v1 = None
+ else: v1 = l[0]
+ if len(l)>1:
+ log.warn(' commands have different --%s options: %s'\
+ ', using first in list as default' % (a, l))
+ if v1:
+ for c in cmd_list:
+ if getattr(c, a) is None: setattr(c, a, v1)
+ return
+
+ def run(self):
+ # Do nothing.
+ return
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py
new file mode 100644
index 00000000..af24baf2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py
@@ -0,0 +1,15 @@
+""" Override the develop command from setuptools so we can ensure that our
+generated files (from build_src or build_scripts) are properly converted to real
+files with filenames.
+
+"""
+from setuptools.command.develop import develop as old_develop
+
+class develop(old_develop):
+ __doc__ = old_develop.__doc__
+ def install_for_development(self):
+ # Build sources in-place, too.
+ self.reinitialize_command('build_src', inplace=1)
+ # Make sure scripts are built.
+ self.run_command('build_scripts')
+ old_develop.install_for_development(self)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py
new file mode 100644
index 00000000..14c62b4d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py
@@ -0,0 +1,25 @@
+import sys
+
+from setuptools.command.egg_info import egg_info as _egg_info
+
+class egg_info(_egg_info):
+ def run(self):
+ if 'sdist' in sys.argv:
+ import warnings
+ import textwrap
+ msg = textwrap.dedent("""
+ `build_src` is being run; this may lead to missing
+ files in your sdist! You want to use distutils.sdist
+ instead of the setuptools version:
+
+ from distutils.command.sdist import sdist
+ cmdclass={'sdist': sdist}
+
+ See numpy's setup.py or gh-7131 for details.""")
+ warnings.warn(msg, UserWarning, stacklevel=2)
+
+ # We need to ensure that build_src has been executed in order to give
+ # setuptools' egg_info command real filenames instead of functions which
+ # generate files.
+ self.run_command("build_src")
+ _egg_info.run(self)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install.py
new file mode 100644
index 00000000..2eff2d14
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install.py
@@ -0,0 +1,79 @@
+import sys
+if 'setuptools' in sys.modules:
+ import setuptools.command.install as old_install_mod
+ have_setuptools = True
+else:
+ import distutils.command.install as old_install_mod
+ have_setuptools = False
+from distutils.file_util import write_file
+
+old_install = old_install_mod.install
+
+class install(old_install):
+
+ # Always run install_clib - the command is cheap, so no need to bypass it;
+ # but it's not run by setuptools -- so it's run again in install_data
+ sub_commands = old_install.sub_commands + [
+ ('install_clib', lambda x: True)
+ ]
+
+ def finalize_options (self):
+ old_install.finalize_options(self)
+ self.install_lib = self.install_libbase
+
+ def setuptools_run(self):
+ """ The setuptools version of the .run() method.
+
+ We must pull in the entire code so we can override the level used in the
+ _getframe() call since we wrap this call by one more level.
+ """
+ from distutils.command.install import install as distutils_install
+
+ # Explicit request for old-style install? Just do it
+ if self.old_and_unmanageable or self.single_version_externally_managed:
+ return distutils_install.run(self)
+
+ # Attempt to detect whether we were called from setup() or by another
+ # command. If we were called by setup(), our caller will be the
+ # 'run_command' method in 'distutils.dist', and *its* caller will be
+ # the 'run_commands' method. If we were called any other way, our
+ # immediate caller *might* be 'run_command', but it won't have been
+ # called by 'run_commands'. This is slightly kludgy, but seems to
+ # work.
+ #
+ caller = sys._getframe(3)
+ caller_module = caller.f_globals.get('__name__', '')
+ caller_name = caller.f_code.co_name
+
+ if caller_module != 'distutils.dist' or caller_name!='run_commands':
+ # We weren't called from the command line or setup(), so we
+ # should run in backward-compatibility mode to support bdist_*
+ # commands.
+ distutils_install.run(self)
+ else:
+ self.do_egg_install()
+
+ def run(self):
+ if not have_setuptools:
+ r = old_install.run(self)
+ else:
+ r = self.setuptools_run()
+ if self.record:
+ # bdist_rpm fails when INSTALLED_FILES contains
+ # paths with spaces. Such paths must be enclosed
+ # with double-quotes.
+ with open(self.record, 'r') as f:
+ lines = []
+ need_rewrite = False
+ for l in f:
+ l = l.rstrip()
+ if ' ' in l:
+ need_rewrite = True
+ l = '"%s"' % (l)
+ lines.append(l)
+ if need_rewrite:
+ self.execute(write_file,
+ (self.record, lines),
+ "re-writing list of installed files to '%s'" %
+ self.record)
+ return r
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py
new file mode 100644
index 00000000..aa2e5594
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py
@@ -0,0 +1,40 @@
+import os
+from distutils.core import Command
+from distutils.ccompiler import new_compiler
+from numpy.distutils.misc_util import get_cmd
+
+class install_clib(Command):
+ description = "Command to install installable C libraries"
+
+ user_options = []
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.outfiles = []
+
+ def finalize_options(self):
+ self.set_undefined_options('install', ('install_lib', 'install_dir'))
+
+ def run (self):
+ build_clib_cmd = get_cmd("build_clib")
+ if not build_clib_cmd.build_clib:
+ # can happen if the user specified `--skip-build`
+ build_clib_cmd.finalize_options()
+ build_dir = build_clib_cmd.build_clib
+
+ # We need the compiler to get the library name -> filename association
+ if not build_clib_cmd.compiler:
+ compiler = new_compiler(compiler=None)
+ compiler.customize(self.distribution)
+ else:
+ compiler = build_clib_cmd.compiler
+
+ for l in self.distribution.installed_libraries:
+ target_dir = os.path.join(self.install_dir, l.target_dir)
+ name = compiler.library_filename(l.name)
+ source = os.path.join(build_dir, name)
+ self.mkpath(target_dir)
+ self.outfiles.append(self.copy_file(source, target_dir)[0])
+
+ def get_outputs(self):
+ return self.outfiles
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py
new file mode 100644
index 00000000..0a2e68ae
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py
@@ -0,0 +1,24 @@
+import sys
+have_setuptools = ('setuptools' in sys.modules)
+
+from distutils.command.install_data import install_data as old_install_data
+
+# Data installer with improved intelligence over distutils:
+# data files are copied into the project directory instead
+# of being scattered willy-nilly.
+class install_data (old_install_data):
+
+ def run(self):
+ old_install_data.run(self)
+
+ if have_setuptools:
+ # Run install_clib again, since setuptools does not run sub-commands
+ # of install automatically
+ self.run_command('install_clib')
+
+ def finalize_options (self):
+ self.set_undefined_options('install',
+ ('install_lib', 'install_dir'),
+ ('root', 'root'),
+ ('force', 'force'),
+ )
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py
new file mode 100644
index 00000000..bb4ad563
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py
@@ -0,0 +1,25 @@
+import os
+from distutils.command.install_headers import install_headers as old_install_headers
+
+class install_headers (old_install_headers):
+
+ def run (self):
+ headers = self.distribution.headers
+ if not headers:
+ return
+
+ prefix = os.path.dirname(self.install_dir)
+ for header in headers:
+ if isinstance(header, tuple):
+ # Kind of a hack, but I don't know where else to change this...
+ if header[0] == 'numpy.core':
+ header = ('numpy', header[1])
+ if os.path.splitext(header[1])[1] == '.inc':
+ continue
+ d = os.path.join(*([prefix]+header[0].split('.')))
+ header = header[1]
+ else:
+ d = self.install_dir
+ self.mkpath(d)
+ (out, _) = self.copy_file(header, d)
+ self.outfiles.append(out)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py
new file mode 100644
index 00000000..e3419388
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py
@@ -0,0 +1,27 @@
+import sys
+if 'setuptools' in sys.modules:
+ from setuptools.command.sdist import sdist as old_sdist
+else:
+ from distutils.command.sdist import sdist as old_sdist
+
+from numpy.distutils.misc_util import get_data_files
+
+class sdist(old_sdist):
+
+ def add_defaults (self):
+ old_sdist.add_defaults(self)
+
+ dist = self.distribution
+
+ if dist.has_data_files():
+ for data in dist.data_files:
+ self.filelist.extend(get_data_files(data))
+
+ if dist.has_headers():
+ headers = []
+ for h in dist.headers:
+ if isinstance(h, str): headers.append(h)
+ else: headers.append(h[1])
+ self.filelist.extend(headers)
+
+ return
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py b/venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py
new file mode 100644
index 00000000..c8933d1d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+"""
+takes a templated file .xxx.src and produces the .xxx file, where .xxx is
+.i, .c, or .h, using the following template rules
+
+/**begin repeat -- on a line by itself marks the start of a repeated code
+ segment
+/**end repeat**/ -- on a line by itself marks its end
+
+After the /**begin repeat and before the */, all the named templates are placed;
+these should all have the same number of replacements.
+
+Repeat blocks can be nested, with each nested block labeled with its depth,
+i.e.
+/**begin repeat1
+ *....
+ */
+/**end repeat1**/
+
+When using nested loops, you can optionally exclude particular
+combinations of the variables using (inside the comment portion of the inner loop):
+
+ :exclude: var1=value1, var2=value2, ...
+
+This will exclude the pattern where var1 is value1 and var2 is value2 when
+the result is being generated.
+
+
+In the main body, each replacement uses one entry from the list of named replacements.
+
+ Note that all #..# forms in a block must have the same number of
+ comma-separated entries.
+
+Example:
+
+ An input file containing
+
+ /**begin repeat
+ * #a = 1,2,3#
+ * #b = 1,2,3#
+ */
+
+ /**begin repeat1
+ * #c = ted, jim#
+ */
+ @a@, @b@, @c@
+ /**end repeat1**/
+
+ /**end repeat**/
+
+ produces
+
+ line 1 "template.c.src"
+
+ /*
+ *********************************************************************
+ ** This file was autogenerated from a template DO NOT EDIT!!**
+ ** Changes should be made to the original source (.src) file **
+ *********************************************************************
+ */
+
+ #line 9
+ 1, 1, ted
+
+ #line 9
+ 1, 1, jim
+
+ #line 9
+ 2, 2, ted
+
+ #line 9
+ 2, 2, jim
+
+ #line 9
+ 3, 3, ted
+
+ #line 9
+ 3, 3, jim
+
+"""
+
+__all__ = ['process_str', 'process_file']
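+
+# A minimal usage sketch (the file name is hypothetical):
+#
+# from numpy.distutils.conv_template import process_file
+#
+# with open('template.c', 'w') as f:
+# f.write(process_file('template.c.src'))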
+
+import os
+import sys
+import re
+
+# names for replacement that are already global.
+global_names = {}
+
+# header placed at the front of each processed file
+header =\
+"""
+/*
+ *****************************************************************************
+ ** This file was autogenerated from a template DO NOT EDIT!!!! **
+ ** Changes should be made to the original source (.src) file **
+ *****************************************************************************
+ */
+
+"""
+# Parse string for repeat loops
+def parse_structure(astr, level):
+ """
+ The returned line number is from the beginning of the string, starting
+ at zero. Returns an empty list if no loops found.
+
+ """
+ if level == 0 :
+ loopbeg = "/**begin repeat"
+ loopend = "/**end repeat**/"
+ else :
+ loopbeg = "/**begin repeat%d" % level
+ loopend = "/**end repeat%d**/" % level
+
+ ind = 0
+ line = 0
+ spanlist = []
+ while True:
+ start = astr.find(loopbeg, ind)
+ if start == -1:
+ break
+ start2 = astr.find("*/", start)
+ start2 = astr.find("\n", start2)
+ fini1 = astr.find(loopend, start2)
+ fini2 = astr.find("\n", fini1)
+ line += astr.count("\n", ind, start2+1)
+ spanlist.append((start, start2+1, fini1, fini2+1, line))
+ line += astr.count("\n", start2+1, fini2)
+ ind = fini2
+ spanlist.sort()
+ return spanlist
+
+
+def paren_repl(obj):
+ torep = obj.group(1)
+ numrep = obj.group(2)
+ return ','.join([torep]*int(numrep))
+
+parenrep = re.compile(r"\(([^)]*)\)\*(\d+)")
+plainrep = re.compile(r"([^*]+)\*(\d+)")
+def parse_values(astr):
+ # replaces all occurrences of '(a,b,c)*4' in astr
+ # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
+ # empty values, i.e., ()*4 yields ',,,'. The result is
+ # split at ',' and a list of values returned.
+ astr = parenrep.sub(paren_repl, astr)
+ # replaces occurrences of xxx*3 with xxx, xxx, xxx
+ astr = ','.join([plainrep.sub(paren_repl, x.strip())
+ for x in astr.split(',')])
+ return astr.split(',')
+
+
+stripast = re.compile(r"\n\s*\*?")
+named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
+exclude_vars_re = re.compile(r"(\w*)=(\w*)")
+exclude_re = re.compile(":exclude:")
+def parse_loop_header(loophead) :
+ """Find all named replacements in the header
+
+ Returns a list of dictionaries, one for each loop iteration,
+ where each key is a name to be substituted and the corresponding
+ value is the replacement string.
+
+ Also return a list of exclusions. The exclusions are dictionaries
+ of key value pairs. There can be more than one exclusion.
+ [{'var1': 'value1', 'var2': 'value2', ...}, ...]
+
+ """
+ # Strip out '\n' and leading '*', if any, in continuation lines.
+ # This should not affect code previous to this change as
+ # continuation lines were not allowed.
+ loophead = stripast.sub("", loophead)
+ # parse out the names and lists of values
+ names = []
+ reps = named_re.findall(loophead)
+ nsub = None
+ for rep in reps:
+ name = rep[0]
+ vals = parse_values(rep[1])
+ size = len(vals)
+ if nsub is None :
+ nsub = size
+ elif nsub != size :
+ msg = "Mismatch in number of values, %d != %d\n%s = %s"
+ raise ValueError(msg % (nsub, size, name, vals))
+ names.append((name, vals))
+
+
+ # Find any exclude variables
+ excludes = []
+
+ for obj in exclude_re.finditer(loophead):
+ span = obj.span()
+ # find next newline
+ endline = loophead.find('\n', span[1])
+ substr = loophead[span[1]:endline]
+ ex_names = exclude_vars_re.findall(substr)
+ excludes.append(dict(ex_names))
+
+ # generate list of dictionaries, one for each template iteration
+ dlist = []
+ if nsub is None :
+ raise ValueError("No substitution variables found")
+ for i in range(nsub):
+ tmp = {name: vals[i] for name, vals in names}
+ dlist.append(tmp)
+ return dlist
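+
+# For example, a loop header containing '#a = 1, 2# #b = x, y#' yields
+# [{'a': '1', 'b': 'x'}, {'a': '2', 'b': 'y'}].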
+
+replace_re = re.compile(r"@(\w+)@")
+def parse_string(astr, env, level, line) :
+ lineno = "#line %d\n" % line
+
+ # local function for string replacement, uses env
+ def replace(match):
+ name = match.group(1)
+ try :
+ val = env[name]
+ except KeyError:
+ msg = 'line %d: no definition of key "%s"'%(line, name)
+ raise ValueError(msg) from None
+ return val
+
+ code = [lineno]
+ struct = parse_structure(astr, level)
+ if struct :
+ # recurse over inner loops
+ oldend = 0
+ newlevel = level + 1
+ for sub in struct:
+ pref = astr[oldend:sub[0]]
+ head = astr[sub[0]:sub[1]]
+ text = astr[sub[1]:sub[2]]
+ oldend = sub[3]
+ newline = line + sub[4]
+ code.append(replace_re.sub(replace, pref))
+ try :
+ envlist = parse_loop_header(head)
+ except ValueError as e:
+ msg = "line %d: %s" % (newline, e)
+ raise ValueError(msg)
+ for newenv in envlist :
+ newenv.update(env)
+ newcode = parse_string(text, newenv, newlevel, newline)
+ code.extend(newcode)
+ suff = astr[oldend:]
+ code.append(replace_re.sub(replace, suff))
+ else :
+ # replace keys
+ code.append(replace_re.sub(replace, astr))
+ code.append('\n')
+ return ''.join(code)
+
+def process_str(astr):
+ code = [header]
+ code.extend(parse_string(astr, global_names, 0, 1))
+ return ''.join(code)
+
+
+include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
+ r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
+
+def resolve_includes(source):
+ d = os.path.dirname(source)
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
+ else:
+ lines.append(line)
+ return lines
+
+def process_file(source):
+ lines = resolve_includes(source)
+ sourcefile = os.path.normcase(source).replace("\\", "\\\\")
+ try:
+ code = process_str(''.join(lines))
+ except ValueError as e:
+ raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None
+ return '#line 1 "%s"\n%s' % (sourcefile, code)
+
+
+def unique_key(adict):
+ # this obtains a unique key given a dictionary
+ # currently it works by appending together n of the letters of the
+ # current keys and increasing n until a unique key is found
+ # -- not particularly quick
+ allkeys = list(adict.keys())
+ done = False
+ n = 1
+ while not done:
+ newkey = "".join([x[:n] for x in allkeys])
+ if newkey in allkeys:
+ n += 1
+ else:
+ done = True
+ return newkey
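+
+# e.g. unique_key({'alpha': 1, 'beta': 2}) returns 'ab' (one letter from
+# each key); n grows until the concatenation no longer collides.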
+
+
+def main():
+ try:
+ file = sys.argv[1]
+ except IndexError:
+ # no file argument: filter stdin to stdout
+ file = '<stdin>'
+ fid = sys.stdin
+ outfile = sys.stdout
+ else:
+ fid = open(file, 'r')
+ (base, ext) = os.path.splitext(file)
+ newname = base
+ outfile = open(newname, 'w')
+
+ allstr = fid.read()
+ try:
+ writestr = process_str(allstr)
+ except ValueError as e:
+ raise ValueError("In %s loop at %s" % (file, e)) from None
+
+ outfile.write(writestr)
+
+if __name__ == "__main__":
+ main()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/core.py b/venv/lib/python3.9/site-packages/numpy/distutils/core.py
new file mode 100644
index 00000000..c4a14e59
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/core.py
@@ -0,0 +1,215 @@
+import sys
+from distutils.core import Distribution
+
+if 'setuptools' in sys.modules:
+ have_setuptools = True
+ from setuptools import setup as old_setup
+ # easy_install imports math; it may be picked up from the cwd
+ from setuptools.command import easy_install
+ try:
+ # very old versions of setuptools don't have this
+ from setuptools.command import bdist_egg
+ except ImportError:
+ have_setuptools = False
+else:
+ from distutils.core import setup as old_setup
+ have_setuptools = False
+
+import warnings
+import distutils.core
+import distutils.dist
+
+from numpy.distutils.extension import Extension # noqa: F401
+from numpy.distutils.numpy_distribution import NumpyDistribution
+from numpy.distutils.command import config, config_compiler, \
+ build, build_py, build_ext, build_clib, build_src, build_scripts, \
+ sdist, install_data, install_headers, install, bdist_rpm, \
+ install_clib
+from numpy.distutils.misc_util import is_sequence, is_string
+
+numpy_cmdclass = {'build': build.build,
+ 'build_src': build_src.build_src,
+ 'build_scripts': build_scripts.build_scripts,
+ 'config_cc': config_compiler.config_cc,
+ 'config_fc': config_compiler.config_fc,
+ 'config': config.config,
+ 'build_ext': build_ext.build_ext,
+ 'build_py': build_py.build_py,
+ 'build_clib': build_clib.build_clib,
+ 'sdist': sdist.sdist,
+ 'install_data': install_data.install_data,
+ 'install_headers': install_headers.install_headers,
+ 'install_clib': install_clib.install_clib,
+ 'install': install.install,
+ 'bdist_rpm': bdist_rpm.bdist_rpm,
+ }
+if have_setuptools:
+ # Use our own versions of develop and egg_info to ensure that build_src is
+ # handled appropriately.
+ from numpy.distutils.command import develop, egg_info
+ numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
+ numpy_cmdclass['develop'] = develop.develop
+ numpy_cmdclass['easy_install'] = easy_install.easy_install
+ numpy_cmdclass['egg_info'] = egg_info.egg_info
+
+def _dict_append(d, **kws):
+ for k, v in kws.items():
+ if k not in d:
+ d[k] = v
+ continue
+ dv = d[k]
+ if isinstance(dv, tuple):
+ d[k] = dv + tuple(v)
+ elif isinstance(dv, list):
+ d[k] = dv + list(v)
+ elif isinstance(dv, dict):
+ _dict_append(dv, **v)
+ elif is_string(dv):
+ d[k] = dv + v
+ else:
+ raise TypeError(repr(type(dv)))
+
+def _command_line_ok(_cache=None):
+ """ Return True if command line does not contain any
+ help or display requests.
+ """
+ if _cache:
+ return _cache[0]
+ elif _cache is None:
+ _cache = []
+ ok = True
+ display_opts = ['--'+n for n in Distribution.display_option_names]
+ for o in Distribution.display_options:
+ if o[1]:
+ display_opts.append('-'+o[1])
+ for arg in sys.argv:
+ if arg.startswith('--help') or arg=='-h' or arg in display_opts:
+ ok = False
+ break
+ _cache.append(ok)
+ return ok
+
+def get_distribution(always=False):
+ dist = distutils.core._setup_distribution
+ # XXX Hack to get numpy installable with easy_install.
+ # The problem is easy_install runs its own setup(), which
+ # sets up distutils.core._setup_distribution. However,
+ # when our setup() runs, that gets overwritten and lost.
+ # We can't use isinstance, as the DistributionWithoutHelpCommands
+ # class is local to a function in setuptools.command.easy_install
+ if dist is not None and \
+ 'DistributionWithoutHelpCommands' in repr(dist):
+ dist = None
+ if always and dist is None:
+ dist = NumpyDistribution()
+ return dist
+
+def setup(**attr):
+
+ cmdclass = numpy_cmdclass.copy()
+
+ new_attr = attr.copy()
+ if 'cmdclass' in new_attr:
+ cmdclass.update(new_attr['cmdclass'])
+ new_attr['cmdclass'] = cmdclass
+
+ if 'configuration' in new_attr:
+ # To avoid calling configuration if there are any errors
+ # or help requests on the command line.
+ configuration = new_attr.pop('configuration')
+
+ old_dist = distutils.core._setup_distribution
+ old_stop = distutils.core._setup_stop_after
+ distutils.core._setup_distribution = None
+ distutils.core._setup_stop_after = "commandline"
+ try:
+ dist = setup(**new_attr)
+ finally:
+ distutils.core._setup_distribution = old_dist
+ distutils.core._setup_stop_after = old_stop
+ if dist.help or not _command_line_ok():
+ # probably displayed help, skip running any commands
+ return dist
+
+ # create setup dictionary and append to new_attr
+ config = configuration()
+ if hasattr(config, 'todict'):
+ config = config.todict()
+ _dict_append(new_attr, **config)
+
+ # Move extension source libraries to libraries
+ libraries = []
+ for ext in new_attr.get('ext_modules', []):
+ new_libraries = []
+ for item in ext.libraries:
+ if is_sequence(item):
+ lib_name, build_info = item
+ _check_append_ext_library(libraries, lib_name, build_info)
+ new_libraries.append(lib_name)
+ elif is_string(item):
+ new_libraries.append(item)
+ else:
+ raise TypeError("invalid description of extension module "
+ "library %r" % (item,))
+ ext.libraries = new_libraries
+ if libraries:
+ if 'libraries' not in new_attr:
+ new_attr['libraries'] = []
+ for item in libraries:
+ _check_append_library(new_attr['libraries'], item)
+
+ # sources in ext_modules or libraries may contain header files
+ if ('ext_modules' in new_attr or 'libraries' in new_attr) \
+ and 'headers' not in new_attr:
+ new_attr['headers'] = []
+
+ # Use our custom NumpyDistribution class instead of distutils' one
+ new_attr['distclass'] = NumpyDistribution
+
+ return old_setup(**new_attr)
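+
+# A minimal setup.py sketch using the 'configuration' hook handled above
+# (package and source names are hypothetical):
+#
+# from numpy.distutils.core import setup
+#
+# def configuration(parent_package='', top_path=None):
+# from numpy.distutils.misc_util import Configuration
+# config = Configuration('mypkg', parent_package, top_path)
+# config.add_extension('spam', sources=['spam.c'])
+# return config
+#
+# setup(configuration=configuration)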
+
+def _check_append_library(libraries, item):
+ for libitem in libraries:
+ if is_sequence(libitem):
+ if is_sequence(item):
+ if item[0]==libitem[0]:
+ if item[1] is libitem[1]:
+ return
+ warnings.warn("[0] libraries list contains %r with"
+ " different build_info" % (item[0],),
+ stacklevel=2)
+ break
+ else:
+ if item==libitem[0]:
+ warnings.warn("[1] libraries list contains %r with"
+ " no build_info" % (item[0],),
+ stacklevel=2)
+ break
+ else:
+ if is_sequence(item):
+ if item[0]==libitem:
+ warnings.warn("[2] libraries list contains %r with"
+ " no build_info" % (item[0],),
+ stacklevel=2)
+ break
+ else:
+ if item==libitem:
+ return
+ libraries.append(item)
+
+def _check_append_ext_library(libraries, lib_name, build_info):
+ for item in libraries:
+ if is_sequence(item):
+ if item[0]==lib_name:
+ if item[1] is build_info:
+ return
+ warnings.warn("[3] libraries list contains %r with"
+ " different build_info" % (lib_name,),
+ stacklevel=2)
+ break
+ elif item==lib_name:
+ warnings.warn("[4] libraries list contains %r with"
+ " no build_info" % (lib_name,),
+ stacklevel=2)
+ break
+ libraries.append((lib_name, build_info))
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py b/venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py
new file mode 100644
index 00000000..77620210
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py
@@ -0,0 +1,683 @@
+#!/usr/bin/env python3
+"""
+cpuinfo
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+Pearu Peterson
+
+"""
+__all__ = ['cpu']
+
+import os
+import platform
+import re
+import sys
+import types
+import warnings
+
+from subprocess import getstatusoutput
+
+
+def getoutput(cmd, successful_status=(0,), stacklevel=1):
+ try:
+ status, output = getstatusoutput(cmd)
+ except OSError as e:
+ warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
+ return False, ""
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
+ return True, output
+ return False, output
+
+def command_info(successful_status=(0,), stacklevel=1, **kw):
+ info = {}
+ for key in kw:
+ ok, output = getoutput(kw[key], successful_status=successful_status,
+ stacklevel=stacklevel+1)
+ if ok:
+ info[key] = output.strip()
+ return info
+
+def command_by_line(cmd, successful_status=(0,), stacklevel=1):
+ ok, output = getoutput(cmd, successful_status=successful_status,
+ stacklevel=stacklevel+1)
+ if not ok:
+ return
+ for line in output.splitlines():
+ yield line.strip()
+
+def key_value_from_command(cmd, sep, successful_status=(0,),
+ stacklevel=1):
+ d = {}
+ for line in command_by_line(cmd, successful_status=successful_status,
+ stacklevel=stacklevel+1):
+ l = [s.strip() for s in line.split(sep, 1)]
+ if len(l) == 2:
+ d[l[0]] = l[1]
+ return d
+
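+# Usage sketch (editorial): key_value_from_command('sysctl hw', sep='=') on
+# an older macOS would turn output lines such as "hw.ncpu = 8" into
+# {'hw.ncpu': '8'}; only the first `sep` on each line splits, so values may
+# themselves contain '='.
+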
+class CPUInfoBase:
+ """Holds CPU information and provides methods for requiring
+ the availability of various CPU features.
+ """
+
+ def _try_call(self, func):
+ try:
+ return func()
+ except Exception:
+ pass
+
+ def __getattr__(self, name):
+ if not name.startswith('_'):
+ if hasattr(self, '_'+name):
+ attr = getattr(self, '_'+name)
+ if isinstance(attr, types.MethodType):
+                    return lambda func=self._try_call, attr=attr: func(attr)
+ else:
+ return lambda : None
+ raise AttributeError(name)
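+
+    # Editorial sketch: through the indirection above, ``cpu.has_sse3()``
+    # resolves to ``_try_call(self._has_sse3)`` and yields None instead of
+    # raising if the probe fails; an unknown public name such as
+    # ``cpu.has_frobnicate()`` (hypothetical) also quietly returns None.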
+
+ def _getNCPUs(self):
+ return 1
+
+ def __get_nbits(self):
+ abits = platform.architecture()[0]
+ nbits = re.compile(r'(\d+)bit').search(abits).group(1)
+ return nbits
+
+ def _is_32bit(self):
+ return self.__get_nbits() == '32'
+
+ def _is_64bit(self):
+ return self.__get_nbits() == '64'
+
+class LinuxCPUInfo(CPUInfoBase):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = [ {} ]
+ ok, output = getoutput('uname -m')
+ if ok:
+ info[0]['uname_m'] = output.strip()
+ try:
+ fo = open('/proc/cpuinfo')
+ except OSError as e:
+ warnings.warn(str(e), UserWarning, stacklevel=2)
+ else:
+ for line in fo:
+ name_value = [s.strip() for s in line.split(':', 1)]
+ if len(name_value) != 2:
+ continue
+ name, value = name_value
+ if not info or name in info[-1]: # next processor
+ info.append({})
+ info[-1][name] = value
+ fo.close()
+ self.__class__.info = info
+
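+    # Editorial sketch of the input parsed above: /proc/cpuinfo is a series
+    # of "name : value" lines, one block per logical CPU, e.g.
+    #
+    #     vendor_id   : GenuineIntel
+    #     model name  : Intel(R) Xeon(R) CPU ...
+    #     flags       : fpu vme ... sse sse2
+    #
+    # so self.info ends up as a list of dicts, one per processor block.
+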
+ def _not_impl(self): pass
+
+ # Athlon
+
+ def _is_AMD(self):
+ return self.info[0]['vendor_id']=='AuthenticAMD'
+
+ def _is_AthlonK6_2(self):
+ return self._is_AMD() and self.info[0]['model'] == '2'
+
+ def _is_AthlonK6_3(self):
+ return self._is_AMD() and self.info[0]['model'] == '3'
+
+ def _is_AthlonK6(self):
+ return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None
+
+ def _is_AthlonK7(self):
+ return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None
+
+ def _is_AthlonMP(self):
+ return re.match(r'.*?Athlon\(tm\) MP\b',
+ self.info[0]['model name']) is not None
+
+ def _is_AMD64(self):
+ return self.is_AMD() and self.info[0]['family'] == '15'
+
+ def _is_Athlon64(self):
+ return re.match(r'.*?Athlon\(tm\) 64\b',
+ self.info[0]['model name']) is not None
+
+ def _is_AthlonHX(self):
+ return re.match(r'.*?Athlon HX\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Opteron(self):
+ return re.match(r'.*?Opteron\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Hammer(self):
+ return re.match(r'.*?Hammer\b',
+ self.info[0]['model name']) is not None
+
+ # Alpha
+
+ def _is_Alpha(self):
+ return self.info[0]['cpu']=='Alpha'
+
+ def _is_EV4(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
+
+ def _is_EV5(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
+
+ def _is_EV56(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
+
+ def _is_PCA56(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
+
+ # Intel
+
+ #XXX
+ _is_i386 = _not_impl
+
+ def _is_Intel(self):
+ return self.info[0]['vendor_id']=='GenuineIntel'
+
+ def _is_i486(self):
+ return self.info[0]['cpu']=='i486'
+
+ def _is_i586(self):
+ return self.is_Intel() and self.info[0]['cpu family'] == '5'
+
+ def _is_i686(self):
+ return self.is_Intel() and self.info[0]['cpu family'] == '6'
+
+ def _is_Celeron(self):
+ return re.match(r'.*?Celeron',
+ self.info[0]['model name']) is not None
+
+ def _is_Pentium(self):
+ return re.match(r'.*?Pentium',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumII(self):
+ return re.match(r'.*?Pentium.*?II\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumPro(self):
+ return re.match(r'.*?PentiumPro\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumMMX(self):
+ return re.match(r'.*?Pentium.*?MMX\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumIII(self):
+ return re.match(r'.*?Pentium.*?III\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumIV(self):
+ return re.match(r'.*?Pentium.*?(IV|4)\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumM(self):
+ return re.match(r'.*?Pentium.*?M\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Prescott(self):
+ return self.is_PentiumIV() and self.has_sse3()
+
+ def _is_Nocona(self):
+ return (self.is_Intel()
+ and (self.info[0]['cpu family'] == '6'
+ or self.info[0]['cpu family'] == '15')
+ and (self.has_sse3() and not self.has_ssse3())
+ and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None)
+
+ def _is_Core2(self):
+ return (self.is_64bit() and self.is_Intel() and
+ re.match(r'.*?Core\(TM\)2\b',
+ self.info[0]['model name']) is not None)
+
+ def _is_Itanium(self):
+ return re.match(r'.*?Itanium\b',
+ self.info[0]['family']) is not None
+
+ def _is_XEON(self):
+ return re.match(r'.*?XEON\b',
+ self.info[0]['model name'], re.IGNORECASE) is not None
+
+ _is_Xeon = _is_XEON
+
+ # Varia
+
+ def _is_singleCPU(self):
+ return len(self.info) == 1
+
+ def _getNCPUs(self):
+ return len(self.info)
+
+ def _has_fdiv_bug(self):
+ return self.info[0]['fdiv_bug']=='yes'
+
+ def _has_f00f_bug(self):
+ return self.info[0]['f00f_bug']=='yes'
+
+ def _has_mmx(self):
+ return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None
+
+ def _has_sse(self):
+ return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None
+
+ def _has_sse2(self):
+ return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None
+
+ def _has_sse3(self):
+ return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None
+
+ def _has_ssse3(self):
+ return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None
+
+ def _has_3dnow(self):
+ return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None
+
+ def _has_3dnowext(self):
+ return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
+
+class IRIXCPUInfo(CPUInfoBase):
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = key_value_from_command('sysconf', sep=' ',
+ successful_status=(0, 1))
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _is_singleCPU(self):
+ return self.info.get('NUM_PROCESSORS') == '1'
+
+ def _getNCPUs(self):
+ return int(self.info.get('NUM_PROCESSORS', 1))
+
+ def __cputype(self, n):
+ return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
+ def _is_r2000(self): return self.__cputype(2000)
+ def _is_r3000(self): return self.__cputype(3000)
+ def _is_r3900(self): return self.__cputype(3900)
+ def _is_r4000(self): return self.__cputype(4000)
+ def _is_r4100(self): return self.__cputype(4100)
+ def _is_r4300(self): return self.__cputype(4300)
+ def _is_r4400(self): return self.__cputype(4400)
+ def _is_r4600(self): return self.__cputype(4600)
+ def _is_r4650(self): return self.__cputype(4650)
+ def _is_r5000(self): return self.__cputype(5000)
+ def _is_r6000(self): return self.__cputype(6000)
+ def _is_r8000(self): return self.__cputype(8000)
+ def _is_r10000(self): return self.__cputype(10000)
+ def _is_r12000(self): return self.__cputype(12000)
+ def _is_rorion(self): return self.__cputype('orion')
+
+ def get_ip(self):
+ try: return self.info.get('MACHINE')
+ except Exception: pass
+ def __machine(self, n):
+ return self.info.get('MACHINE').lower() == 'ip%s' % (n)
+ def _is_IP19(self): return self.__machine(19)
+ def _is_IP20(self): return self.__machine(20)
+ def _is_IP21(self): return self.__machine(21)
+ def _is_IP22(self): return self.__machine(22)
+ def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
+ def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000()
+ def _is_IP24(self): return self.__machine(24)
+ def _is_IP25(self): return self.__machine(25)
+ def _is_IP26(self): return self.__machine(26)
+ def _is_IP27(self): return self.__machine(27)
+ def _is_IP28(self): return self.__machine(28)
+ def _is_IP30(self): return self.__machine(30)
+ def _is_IP32(self): return self.__machine(32)
+ def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
+ def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
+
+
+class DarwinCPUInfo(CPUInfoBase):
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = command_info(arch='arch',
+ machine='machine')
+ info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _getNCPUs(self):
+ return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
+
+ def _is_Power_Macintosh(self):
+ return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'
+
+ def _is_i386(self):
+ return self.info['arch']=='i386'
+ def _is_ppc(self):
+ return self.info['arch']=='ppc'
+
+ def __machine(self, n):
+ return self.info['machine'] == 'ppc%s'%n
+ def _is_ppc601(self): return self.__machine(601)
+ def _is_ppc602(self): return self.__machine(602)
+ def _is_ppc603(self): return self.__machine(603)
+ def _is_ppc603e(self): return self.__machine('603e')
+ def _is_ppc604(self): return self.__machine(604)
+ def _is_ppc604e(self): return self.__machine('604e')
+ def _is_ppc620(self): return self.__machine(620)
+ def _is_ppc630(self): return self.__machine(630)
+ def _is_ppc740(self): return self.__machine(740)
+ def _is_ppc7400(self): return self.__machine(7400)
+ def _is_ppc7450(self): return self.__machine(7450)
+ def _is_ppc750(self): return self.__machine(750)
+ def _is_ppc403(self): return self.__machine(403)
+ def _is_ppc505(self): return self.__machine(505)
+ def _is_ppc801(self): return self.__machine(801)
+ def _is_ppc821(self): return self.__machine(821)
+ def _is_ppc823(self): return self.__machine(823)
+ def _is_ppc860(self): return self.__machine(860)
+
+
+class SunOSCPUInfo(CPUInfoBase):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+        info = command_info(arch='arch',
+                            mach='mach',
+                            uname_i='uname -i',
+                            isainfo_b='isainfo -b',
+                            isainfo_n='isainfo -n',
+                            )
+ info['uname_X'] = key_value_from_command('uname -X', sep='=')
+ for line in command_by_line('psrinfo -v 0'):
+ m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
+ if m:
+ info['processor'] = m.group('p')
+ break
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _is_i386(self):
+ return self.info['isainfo_n']=='i386'
+ def _is_sparc(self):
+ return self.info['isainfo_n']=='sparc'
+ def _is_sparcv9(self):
+ return self.info['isainfo_n']=='sparcv9'
+
+ def _getNCPUs(self):
+ return int(self.info['uname_X'].get('NumCPU', 1))
+
+ def _is_sun4(self):
+ return self.info['arch']=='sun4'
+
+ def _is_SUNW(self):
+ return re.match(r'SUNW', self.info['uname_i']) is not None
+ def _is_sparcstation5(self):
+ return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
+ def _is_ultra1(self):
+ return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
+ def _is_ultra250(self):
+ return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
+ def _is_ultra2(self):
+ return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
+ def _is_ultra30(self):
+ return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
+ def _is_ultra4(self):
+ return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
+ def _is_ultra5_10(self):
+ return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
+ def _is_ultra5(self):
+ return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
+ def _is_ultra60(self):
+ return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
+ def _is_ultra80(self):
+ return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
+    def _is_ultraenterprise(self):
+        return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
+    def _is_ultraenterprise10k(self):
+        return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
+ def _is_sunfire(self):
+ return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
+ def _is_ultra(self):
+ return re.match(r'.*Ultra', self.info['uname_i']) is not None
+
+ def _is_cpusparcv7(self):
+ return self.info['processor']=='sparcv7'
+ def _is_cpusparcv8(self):
+ return self.info['processor']=='sparcv8'
+ def _is_cpusparcv9(self):
+ return self.info['processor']=='sparcv9'
+
+class Win32CPUInfo(CPUInfoBase):
+
+ info = None
+ pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
+ # XXX: what does the value of
+ # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
+ # mean?
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = []
+ try:
+ #XXX: Bad style to use so long `try:...except:...`. Fix it!
+ import winreg
+
+ prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
+ r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
+ chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
+ pnum=0
+ while True:
+ try:
+ proc=winreg.EnumKey(chnd, pnum)
+ except winreg.error:
+ break
+ else:
+ pnum+=1
+ info.append({"Processor":proc})
+ phnd=winreg.OpenKey(chnd, proc)
+ pidx=0
+ while True:
+ try:
+ name, value, vtpe=winreg.EnumValue(phnd, pidx)
+ except winreg.error:
+ break
+ else:
+ pidx=pidx+1
+ info[-1][name]=value
+ if name=="Identifier":
+ srch=prgx.search(value)
+ if srch:
+ info[-1]["Family"]=int(srch.group("FML"))
+ info[-1]["Model"]=int(srch.group("MDL"))
+ info[-1]["Stepping"]=int(srch.group("STP"))
+ except Exception as e:
+ print(e, '(ignoring)')
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ # Athlon
+
+ def _is_AMD(self):
+ return self.info[0]['VendorIdentifier']=='AuthenticAMD'
+
+ def _is_Am486(self):
+ return self.is_AMD() and self.info[0]['Family']==4
+
+ def _is_Am5x86(self):
+ return self.is_AMD() and self.info[0]['Family']==4
+
+ def _is_AMDK5(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model'] in [0, 1, 2, 3]
+
+ def _is_AMDK6(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model'] in [6, 7]
+
+ def _is_AMDK6_2(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==8
+
+ def _is_AMDK6_3(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==9
+
+ def _is_AMDK7(self):
+ return self.is_AMD() and self.info[0]['Family'] == 6
+
+    # To reliably distinguish between the different types of AMD64 chips
+    # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.) would
+    # require looking at the 'brand' from cpuid
+
+ def _is_AMD64(self):
+ return self.is_AMD() and self.info[0]['Family'] == 15
+
+ # Intel
+
+ def _is_Intel(self):
+ return self.info[0]['VendorIdentifier']=='GenuineIntel'
+
+ def _is_i386(self):
+ return self.info[0]['Family']==3
+
+ def _is_i486(self):
+ return self.info[0]['Family']==4
+
+ def _is_i586(self):
+ return self.is_Intel() and self.info[0]['Family']==5
+
+ def _is_i686(self):
+ return self.is_Intel() and self.info[0]['Family']==6
+
+ def _is_Pentium(self):
+ return self.is_Intel() and self.info[0]['Family']==5
+
+ def _is_PentiumMMX(self):
+ return self.is_Intel() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==4
+
+ def _is_PentiumPro(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model']==1
+
+ def _is_PentiumII(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model'] in [3, 5, 6]
+
+ def _is_PentiumIII(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model'] in [7, 8, 9, 10, 11]
+
+ def _is_PentiumIV(self):
+ return self.is_Intel() and self.info[0]['Family']==15
+
+ def _is_PentiumM(self):
+ return self.is_Intel() and self.info[0]['Family'] == 6 \
+ and self.info[0]['Model'] in [9, 13, 14]
+
+ def _is_Core2(self):
+ return self.is_Intel() and self.info[0]['Family'] == 6 \
+ and self.info[0]['Model'] in [15, 16, 17]
+
+ # Varia
+
+ def _is_singleCPU(self):
+ return len(self.info) == 1
+
+ def _getNCPUs(self):
+ return len(self.info)
+
+ def _has_mmx(self):
+ if self.is_Intel():
+ return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
+ or (self.info[0]['Family'] in [6, 15])
+ elif self.is_AMD():
+ return self.info[0]['Family'] in [5, 6, 15]
+ else:
+ return False
+
+ def _has_sse(self):
+ if self.is_Intel():
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [7, 8, 9, 10, 11])
+ or self.info[0]['Family']==15)
+ elif self.is_AMD():
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [6, 7, 8, 10])
+ or self.info[0]['Family']==15)
+ else:
+ return False
+
+ def _has_sse2(self):
+ if self.is_Intel():
+            # is_PentiumIV is the probe defined above; the old spelling
+            # is_Pentium4 had no _is_Pentium4 behind it and silently
+            # returned None via __getattr__.
+            return self.is_PentiumIV() or self.is_PentiumM() \
+                   or self.is_Core2()
+ elif self.is_AMD():
+ return self.is_AMD64()
+ else:
+ return False
+
+ def _has_3dnow(self):
+ return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
+
+ def _has_3dnowext(self):
+ return self.is_AMD() and self.info[0]['Family'] in [6, 15]
+
+if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
+ cpuinfo = LinuxCPUInfo
+elif sys.platform.startswith('irix'):
+ cpuinfo = IRIXCPUInfo
+elif sys.platform == 'darwin':
+ cpuinfo = DarwinCPUInfo
+elif sys.platform.startswith('sunos'):
+ cpuinfo = SunOSCPUInfo
+elif sys.platform.startswith('win32'):
+ cpuinfo = Win32CPUInfo
+elif sys.platform.startswith('cygwin'):
+ cpuinfo = LinuxCPUInfo
+#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
+else:
+ cpuinfo = CPUInfoBase
+
+cpu = cpuinfo()
+
+#if __name__ == "__main__":
+#
+# cpu.is_blaa()
+# cpu.is_Intel()
+# cpu.is_Alpha()
+#
+# print('CPU information:'),
+# for name in dir(cpuinfo):
+# if name[0]=='_' and name[1]!='_':
+# r = getattr(cpu,name[1:])()
+# if r:
+# if r!=1:
+# print('%s=%s' %(name[1:],r))
+# else:
+# print(name[1:]),
+# print()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py b/venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py
new file mode 100644
index 00000000..a67453ab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py
@@ -0,0 +1,315 @@
+"""
+exec_command
+
+Implements the exec_command function, which is (almost) equivalent to
+subprocess.getstatusoutput, except that on NT and DOS systems the
+returned status is actually correct (though the returned status
+values may differ by a factor). In addition, exec_command
+takes keyword arguments for (re-)defining environment variables.
+
+Provides functions:
+
+ exec_command --- execute command in a specified directory and
+ in the modified environment.
+ find_executable --- locate a command using info from environment
+ variable PATH. Equivalent to posix `which`
+ command.
+
+Author: Pearu Peterson <pearu@cens.ioc.ee>
+Created: 11 January 2003
+
+Requires: Python 3.x (the platform notes below are historical, from the
+module's original Python 2 era)
+
+Successfully tested on:
+
+======== ============ =================================================
+os.name sys.platform comments
+======== ============ =================================================
+posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
+ PyCrust 0.9.3, Idle 1.0.2
+posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
+posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
+posix darwin Darwin 7.2.0, Python 2.3
+nt win32 Windows Me
+ Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
+ Python 2.1.1 Idle 0.8
+nt win32 Windows 98, Python 2.1.1. Idle 0.8
+nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
+ fail i.e. redefining environment variables may
+ not work. FIXED: don't use cygwin echo!
+ Comment: also `cmd /c echo` will not work
+                       but redefining environment variables does work.
+posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
+nt win32 Windows XP, Python 2.3.3
+======== ============ =================================================
+
+Known bugs:
+
+* Tests that send messages to stderr fail when executed from an MSYS prompt
+  because the messages are lost at some point.
+
+"""
+__all__ = ['exec_command', 'find_executable']
+
+import os
+import sys
+import subprocess
+import locale
+import warnings
+
+from numpy.distutils.misc_util import is_sequence, make_temp_file
+from numpy.distutils import log
+
+def filepath_from_subprocess_output(output):
+ """
+ Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
+
+ Inherited from `exec_command`, and possibly incorrect.
+ """
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ output = output.decode(mylocale, errors='replace')
+ output = output.replace('\r\n', '\n')
+ # Another historical oddity
+ if output[-1:] == '\n':
+ output = output[:-1]
+ return output
+
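+# Usage sketch (editorial): decoding raw subprocess output, e.g.
+#
+#     raw = subprocess.check_output(['ls'])         # bytes
+#     path = filepath_from_subprocess_output(raw)   # str, no trailing \n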
+
+def forward_bytes_to_stdout(val):
+ """
+ Forward bytes from a subprocess call to the console, without attempting to
+ decode them.
+
+ The assumption is that the subprocess call already returned bytes in
+ a suitable encoding.
+ """
+ if hasattr(sys.stdout, 'buffer'):
+ # use the underlying binary output if there is one
+ sys.stdout.buffer.write(val)
+ elif hasattr(sys.stdout, 'encoding'):
+ # round-trip the encoding if necessary
+ sys.stdout.write(val.decode(sys.stdout.encoding))
+ else:
+ # make a best-guess at the encoding
+ sys.stdout.write(val.decode('utf8', errors='replace'))
+
+
+def temp_file_name():
+ # 2019-01-30, 1.17
+ warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
+ 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
+ fo, name = make_temp_file()
+ fo.close()
+ return name
+
+def get_pythonexe():
+ pythonexe = sys.executable
+ if os.name in ['nt', 'dos']:
+ fdir, fn = os.path.split(pythonexe)
+ fn = fn.upper().replace('PYTHONW', 'PYTHON')
+ pythonexe = os.path.join(fdir, fn)
+ assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
+ return pythonexe
+
+def find_executable(exe, path=None, _cache={}):
+ """Return full path of a executable or None.
+
+ Symbolic links are not followed.
+ """
+ key = exe, path
+ try:
+ return _cache[key]
+ except KeyError:
+ pass
+ log.debug('find_executable(%r)' % exe)
+ orig_exe = exe
+
+ if path is None:
+ path = os.environ.get('PATH', os.defpath)
+ if os.name=='posix':
+ realpath = os.path.realpath
+ else:
+ realpath = lambda a:a
+
+ if exe.startswith('"'):
+ exe = exe[1:-1]
+
+ suffixes = ['']
+ if os.name in ['nt', 'dos', 'os2']:
+ fn, ext = os.path.splitext(exe)
+ extra_suffixes = ['.exe', '.com', '.bat']
+ if ext.lower() not in extra_suffixes:
+ suffixes = extra_suffixes
+
+ if os.path.isabs(exe):
+ paths = ['']
+ else:
+ paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
+
+ for path in paths:
+ fn = os.path.join(path, exe)
+ for s in suffixes:
+ f_ext = fn+s
+ if not os.path.islink(f_ext):
+ f_ext = realpath(f_ext)
+ if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
+ log.info('Found executable %s' % f_ext)
+ _cache[key] = f_ext
+ return f_ext
+
+ log.warn('Could not locate executable %s' % orig_exe)
+ return None
+
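+# Usage sketch (editorial): results are memoized in the `_cache` default
+# argument, so repeated lookups are cheap.
+#
+#     find_executable('gcc')          # -> '/usr/bin/gcc' (path may vary)
+#     find_executable('no-such-exe')  # -> None, after a log warning
+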
+############################################################
+
+def _preserve_environment( names ):
+ log.debug('_preserve_environment(%r)' % (names))
+ env = {name: os.environ.get(name) for name in names}
+ return env
+
+def _update_environment( **env ):
+ log.debug('_update_environment(...)')
+ for name, value in env.items():
+ os.environ[name] = value or ''
+
+def exec_command(command, execute_in='', use_shell=None, use_tee=None,
+ _with_python = 1, **env ):
+ """
+ Return (status,output) of executed command.
+
+ .. deprecated:: 1.17
+ Use subprocess.Popen instead
+
+ Parameters
+ ----------
+ command : str
+ A concatenated string of executable and arguments.
+    execute_in : str
+        Change to this directory before running the command; the previous
+        working directory is restored afterwards.
+    use_shell : {bool, None}, optional
+        If True, execute ``sh -c command``. Default None, which resolves
+        to True on posix systems.
+    use_tee : {bool, None}, optional
+        If True use tee. Default None, which resolves to True on posix
+        systems.
+
+
+    Returns
+    -------
+    status : int
+        Exit status of the executed command.
+    output : str
+        Combined stdout and stderr messages.
+
+ Notes
+ -----
+    On NT and DOS systems the returned status is correct for external commands.
+    Wildcards will not work on non-posix systems or when use_shell=0.
+
+ """
+ # 2019-01-30, 1.17
+ warnings.warn('exec_command is deprecated since NumPy v1.17, use '
+ 'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
+ log.debug('exec_command(%r,%s)' % (command,
+ ','.join(['%s=%r'%kv for kv in env.items()])))
+
+ if use_tee is None:
+ use_tee = os.name=='posix'
+ if use_shell is None:
+ use_shell = os.name=='posix'
+ execute_in = os.path.abspath(execute_in)
+ oldcwd = os.path.abspath(os.getcwd())
+
+ if __name__[-12:] == 'exec_command':
+ exec_dir = os.path.dirname(os.path.abspath(__file__))
+ elif os.path.isfile('exec_command.py'):
+ exec_dir = os.path.abspath('.')
+ else:
+ exec_dir = os.path.abspath(sys.argv[0])
+ if os.path.isfile(exec_dir):
+ exec_dir = os.path.dirname(exec_dir)
+
+ if oldcwd!=execute_in:
+ os.chdir(execute_in)
+ log.debug('New cwd: %s' % execute_in)
+ else:
+ log.debug('Retaining cwd: %s' % oldcwd)
+
+ oldenv = _preserve_environment( list(env.keys()) )
+ _update_environment( **env )
+
+ try:
+ st = _exec_command(command,
+ use_shell=use_shell,
+ use_tee=use_tee,
+ **env)
+ finally:
+ if oldcwd!=execute_in:
+ os.chdir(oldcwd)
+ log.debug('Restored cwd to %s' % oldcwd)
+ _update_environment(**oldenv)
+
+ return st
+
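+# Editorial sketch of the replacement suggested by the deprecation warning
+# above (the command shown is illustrative):
+#
+#     import subprocess
+#     proc = subprocess.Popen(['gfortran', '--version'],
+#                             stdout=subprocess.PIPE,
+#                             stderr=subprocess.STDOUT)
+#     output, _ = proc.communicate()
+#     status = proc.returncode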
+
+def _exec_command(command, use_shell=None, use_tee = None, **env):
+ """
+ Internal workhorse for exec_command().
+ """
+ if use_shell is None:
+ use_shell = os.name=='posix'
+ if use_tee is None:
+ use_tee = os.name=='posix'
+
+ if os.name == 'posix' and use_shell:
+ # On POSIX, subprocess always uses /bin/sh, override
+ sh = os.environ.get('SHELL', '/bin/sh')
+ if is_sequence(command):
+ command = [sh, '-c', ' '.join(command)]
+ else:
+ command = [sh, '-c', command]
+ use_shell = False
+
+ elif os.name == 'nt' and is_sequence(command):
+ # On Windows, join the string for CreateProcess() ourselves as
+ # subprocess does it a bit differently
+ command = ' '.join(_quote_arg(arg) for arg in command)
+
+ # Inherit environment by default
+ env = env or None
+ try:
+ # text is set to False so that communicate()
+ # will return bytes. We need to decode the output ourselves
+ # so that Python will not raise a UnicodeDecodeError when
+ # it encounters an invalid character; rather, we simply replace it
+ proc = subprocess.Popen(command, shell=use_shell, env=env, text=False,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ except OSError:
+ # Return 127, as os.spawn*() and /bin/sh do
+ return 127, ''
+
+ text, err = proc.communicate()
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ text = text.decode(mylocale, errors='replace')
+ text = text.replace('\r\n', '\n')
+ # Another historical oddity
+ if text[-1:] == '\n':
+ text = text[:-1]
+
+ if use_tee and text:
+ print(text)
+ return proc.returncode, text
+
+
+def _quote_arg(arg):
+ """
+ Quote the argument for safe use in a shell command line.
+ """
+    # If there is a quote in the string, assume relevant parts of the
+ # string are already quoted (e.g. '-I"C:\\Program Files\\..."')
+ if '"' not in arg and ' ' in arg:
+ return '"%s"' % arg
+ return arg
+
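+# Behaviour sketch (editorial; paths illustrative):
+#
+#     _quote_arg('C:\\Program Files\\f2py')  # -> '"C:\\Program Files\\f2py"'
+#     _quote_arg('-I"C:\\Program Files"')    # unchanged: already has a quote
+#     _quote_arg('plain')                    # unchanged: no spaces
+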
+############################################################
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/extension.py b/venv/lib/python3.9/site-packages/numpy/distutils/extension.py
new file mode 100644
index 00000000..3ede013e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/extension.py
@@ -0,0 +1,107 @@
+"""distutils.extension
+
+Provides the Extension class, used to describe C/C++ extension
+modules in setup scripts.
+
+Overridden to support f2py.
+
+"""
+import re
+from distutils.extension import Extension as old_Extension
+
+
+cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
+fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+
+
+class Extension(old_Extension):
+ """
+ Parameters
+ ----------
+ name : str
+ Extension name.
+ sources : list of str
+ List of source file locations relative to the top directory of
+ the package.
+ extra_compile_args : list of str
+ Extra command line arguments to pass to the compiler.
+ extra_f77_compile_args : list of str
+ Extra command line arguments to pass to the fortran77 compiler.
+ extra_f90_compile_args : list of str
+ Extra command line arguments to pass to the fortran90 compiler.
+ """
+ def __init__(
+ self, name, sources,
+ include_dirs=None,
+ define_macros=None,
+ undef_macros=None,
+ library_dirs=None,
+ libraries=None,
+ runtime_library_dirs=None,
+ extra_objects=None,
+ extra_compile_args=None,
+ extra_link_args=None,
+ export_symbols=None,
+ swig_opts=None,
+ depends=None,
+ language=None,
+ f2py_options=None,
+ module_dirs=None,
+ extra_c_compile_args=None,
+ extra_cxx_compile_args=None,
+ extra_f77_compile_args=None,
+ extra_f90_compile_args=None,):
+
+ old_Extension.__init__(
+ self, name, [],
+ include_dirs=include_dirs,
+ define_macros=define_macros,
+ undef_macros=undef_macros,
+ library_dirs=library_dirs,
+ libraries=libraries,
+ runtime_library_dirs=runtime_library_dirs,
+ extra_objects=extra_objects,
+ extra_compile_args=extra_compile_args,
+ extra_link_args=extra_link_args,
+ export_symbols=export_symbols)
+
+ # Avoid assert statements checking that sources contains strings:
+ self.sources = sources
+
+ # Python 2.4 distutils new features
+ self.swig_opts = swig_opts or []
+ # swig_opts is assumed to be a list. Here we handle the case where it
+ # is specified as a string instead.
+ if isinstance(self.swig_opts, str):
+ import warnings
+ msg = "swig_opts is specified as a string instead of a list"
+ warnings.warn(msg, SyntaxWarning, stacklevel=2)
+ self.swig_opts = self.swig_opts.split()
+
+ # Python 2.3 distutils new features
+ self.depends = depends or []
+ self.language = language
+
+ # numpy_distutils features
+ self.f2py_options = f2py_options or []
+ self.module_dirs = module_dirs or []
+ self.extra_c_compile_args = extra_c_compile_args or []
+ self.extra_cxx_compile_args = extra_cxx_compile_args or []
+ self.extra_f77_compile_args = extra_f77_compile_args or []
+ self.extra_f90_compile_args = extra_f90_compile_args or []
+
+ return
+
+ def has_cxx_sources(self):
+ for source in self.sources:
+ if cxx_ext_re(str(source)):
+ return True
+ return False
+
+ def has_f2py_sources(self):
+ for source in self.sources:
+ if fortran_pyf_ext_re(source):
+ return True
+ return False
+
+# class Extension
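+
+# Usage sketch (editorial; package and file names hypothetical):
+#
+#     ext = Extension('mypkg.fib',
+#                     sources=['mypkg/fib.pyf', 'mypkg/fib.f'],
+#                     extra_f77_compile_args=['-O2'])
+#     ext.has_f2py_sources()   # True: '.pyf' matches fortran_pyf_ext_re
+#     ext.has_cxx_sources()    # False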
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py
new file mode 100644
index 00000000..ecba3e5d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py
@@ -0,0 +1,1030 @@
+"""numpy.distutils.fcompiler
+
+Contains FCompiler, an abstract base class that defines the interface
+for the numpy.distutils Fortran compiler abstraction model.
+
+Terminology:
+
+To be consistent, where the term 'executable' is used, it means the single
+file, like 'gcc', that is executed, and should be a string. In contrast,
+'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
+should be a list.
+
+But note that FCompiler.executables is actually a dictionary of commands.
+
+"""
+__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
+ 'dummy_fortran_file']
+
+import os
+import sys
+import re
+
+from distutils.sysconfig import get_python_lib
+from distutils.fancy_getopt import FancyGetopt
+from distutils.errors import DistutilsModuleError, \
+ DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
+from distutils.util import split_quoted, strtobool
+
+from numpy.distutils.ccompiler import CCompiler, gen_lib_options
+from numpy.distutils import log
+from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
+ make_temp_file, get_shared_lib_extension
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils import _shell_utils
+
+from .environment import EnvironmentConfig
+
+__metaclass__ = type
+
+class CompilerNotFound(Exception):
+ pass
+
+def flaglist(s):
+ if is_string(s):
+ return split_quoted(s)
+ else:
+ return s
+
+def str2bool(s):
+ if is_string(s):
+ return strtobool(s)
+ return bool(s)
+
+def is_sequence_of_strings(seq):
+ return is_sequence(seq) and all_strings(seq)
+
+class FCompiler(CCompiler):
+ """Abstract base class to define the interface that must be implemented
+ by real Fortran compiler classes.
+
+ Methods that subclasses may redefine:
+
+ update_executables(), find_executables(), get_version()
+ get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
+ get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
+ get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
+ get_flags_arch_f90(), get_flags_debug_f90(),
+ get_flags_fix(), get_flags_linker_so()
+
+ DON'T call these methods (except get_version) after
+ constructing a compiler instance or inside any other method.
+ All methods, except update_executables() and find_executables(),
+ may call the get_version() method.
+
+ After constructing a compiler instance, always call customize(dist=None)
+ method that finalizes compiler construction and makes the following
+ attributes available:
+ compiler_f77
+ compiler_f90
+ compiler_fix
+ linker_so
+ archiver
+ ranlib
+ libraries
+ library_dirs
+ """
+
+ # These are the environment variables and distutils keys used.
+ # Each configuration description is
+ # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>)
+ # The hook names are handled by the self._environment_hook method.
+ # - names starting with 'self.' call methods in this class
+ # - names starting with 'exe.' return the key in the executables dict
+ # - names like 'flags.YYY' return self.get_flag_YYY()
+ # convert is either None or a function to convert a string to the
+ # appropriate type used.
+
+ distutils_vars = EnvironmentConfig(
+ distutils_section='config_fc',
+ noopt = (None, None, 'noopt', str2bool, False),
+ noarch = (None, None, 'noarch', str2bool, False),
+ debug = (None, None, 'debug', str2bool, False),
+ verbose = (None, None, 'verbose', str2bool, False),
+ )
+
+ command_vars = EnvironmentConfig(
+ distutils_section='config_fc',
+ compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
+ compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
+ compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
+ version_cmd = ('exe.version_cmd', None, None, None, False),
+ linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
+ linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
+ archiver = (None, 'AR', 'ar', None, False),
+ ranlib = (None, 'RANLIB', 'ranlib', None, False),
+ )
+
+ flag_vars = EnvironmentConfig(
+ distutils_section='config_fc',
+ f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
+ f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
+ free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
+ fix = ('flags.fix', None, None, flaglist, False),
+ opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
+ opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
+ opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
+ arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
+ arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
+ arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
+ debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
+ debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
+ debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
+ flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
+ linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
+ linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
+ ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
+ )
+
+ language_map = {'.f': 'f77',
+ '.for': 'f77',
+ '.F': 'f77', # XXX: needs preprocessor
+ '.ftn': 'f77',
+ '.f77': 'f77',
+ '.f90': 'f90',
+ '.F90': 'f90', # XXX: needs preprocessor
+ '.f95': 'f90',
+ }
+ language_order = ['f90', 'f77']
+
+
+ # These will be set by the subclass
+
+ compiler_type = None
+ compiler_aliases = ()
+ version_pattern = None
+
+ possible_executables = []
+ executables = {
+ 'version_cmd': ["f77", "-v"],
+ 'compiler_f77': ["f77"],
+ 'compiler_f90': ["f90"],
+ 'compiler_fix': ["f90", "-fixed"],
+ 'linker_so': ["f90", "-shared"],
+ 'linker_exe': ["f90"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': None,
+ }
+
+ # If compiler does not support compiling Fortran 90 then it can
+ # suggest using another compiler. For example, gnu would suggest
+ # gnu95 compiler type when there are F90 sources.
+ suggested_f90_compiler = None
+
+ compile_switch = "-c"
+ object_switch = "-o " # Ending space matters! It will be stripped
+ # but if it is missing then object_switch
+ # will be prefixed to object file name by
+ # string concatenation.
+ library_switch = "-o " # Ditto!
+
+ # Switch to specify where module files are created and searched
+ # for USE statement. Normally it is a string and also here ending
+ # space matters. See above.
+ module_dir_switch = None
+
+ # Switch to specify where module files are searched for USE statement.
+ module_include_switch = '-I'
+
+ pic_flags = [] # Flags to create position-independent code
+
+ src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']
+ obj_extension = ".o"
+
+ shared_lib_extension = get_shared_lib_extension()
+ static_lib_extension = ".a" # or .lib
+ static_lib_format = "lib%s%s" # or %s%s
+ shared_lib_format = "%s%s"
+ exe_extension = ""
+
+ _exe_cache = {}
+
+ _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
+ 'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
+ 'ranlib']
+
+ # This will be set by new_fcompiler when called in
+ # command/{build_ext.py, build_clib.py, config.py} files.
+ c_compiler = None
+
+ # extra_{f77,f90}_compile_args are set by build_ext.build_extension method
+ extra_f77_compile_args = []
+ extra_f90_compile_args = []
+
+ def __init__(self, *args, **kw):
+ CCompiler.__init__(self, *args, **kw)
+ self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
+ self.command_vars = self.command_vars.clone(self._environment_hook)
+ self.flag_vars = self.flag_vars.clone(self._environment_hook)
+ self.executables = self.executables.copy()
+ for e in self._executable_keys:
+ if e not in self.executables:
+ self.executables[e] = None
+
+ # Some methods depend on .customize() being called first, so
+ # this keeps track of whether that's happened yet.
+ self._is_customised = False
+
+ def __copy__(self):
+ obj = self.__new__(self.__class__)
+ obj.__dict__.update(self.__dict__)
+ obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
+ obj.command_vars = obj.command_vars.clone(obj._environment_hook)
+ obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
+ obj.executables = obj.executables.copy()
+ return obj
+
+ def copy(self):
+ return self.__copy__()
+
+ # Use properties for the attributes used by CCompiler. Setting them
+ # as attributes from the self.executables dictionary is error-prone,
+ # so we get them from there each time.
+ def _command_property(key):
+ def fget(self):
+ assert self._is_customised
+ return self.executables[key]
+ return property(fget=fget)
+ version_cmd = _command_property('version_cmd')
+ compiler_f77 = _command_property('compiler_f77')
+ compiler_f90 = _command_property('compiler_f90')
+ compiler_fix = _command_property('compiler_fix')
+ linker_so = _command_property('linker_so')
+ linker_exe = _command_property('linker_exe')
+ archiver = _command_property('archiver')
+ ranlib = _command_property('ranlib')
+
+ # Make our terminology consistent.
+ def set_executable(self, key, value):
+ self.set_command(key, value)
+
+ def set_commands(self, **kw):
+ for k, v in kw.items():
+ self.set_command(k, v)
+
+ def set_command(self, key, value):
+ if not key in self._executable_keys:
+ raise ValueError(
+ "unknown executable '%s' for class %s" %
+ (key, self.__class__.__name__))
+ if is_string(value):
+ value = split_quoted(value)
+ assert value is None or is_sequence_of_strings(value[1:]), (key, value)
+ self.executables[key] = value
+
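+    # Usage sketch (editorial; compiler name hypothetical): string values
+    # are split shell-style, so these two calls are equivalent:
+    #
+    #     fc.set_command('compiler_f77', 'gfortran -g -Wall')
+    #     fc.set_command('compiler_f77', ['gfortran', '-g', '-Wall'])
+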
+ ######################################################################
+ ## Methods that subclasses may redefine. But don't call these methods!
+ ## They are private to FCompiler class and may return unexpected
+ ## results if used elsewhere. So, you have been warned..
+
+ def find_executables(self):
+ """Go through the self.executables dictionary, and attempt to
+ find and assign appropriate executables.
+
+ Executable names are looked for in the environment (environment
+ variables, the distutils.cfg, and command line), the 0th-element of
+ the command list, and the self.possible_executables list.
+
+ Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
+ or the Fortran 90 compiler executable is used, unless overridden
+ by an environment setting.
+
+ Subclasses should call this if overridden.
+ """
+ assert self._is_customised
+ exe_cache = self._exe_cache
+ def cached_find_executable(exe):
+ if exe in exe_cache:
+ return exe_cache[exe]
+ fc_exe = find_executable(exe)
+ exe_cache[exe] = exe_cache[fc_exe] = fc_exe
+ return fc_exe
+ def verify_command_form(name, value):
+ if value is not None and not is_sequence_of_strings(value):
+ raise ValueError(
+ "%s value %r is invalid in class %s" %
+ (name, value, self.__class__.__name__))
+ def set_exe(exe_key, f77=None, f90=None):
+ cmd = self.executables.get(exe_key, None)
+ if not cmd:
+ return None
+ # Note that we get cmd[0] here if the environment doesn't
+ # have anything set
+ exe_from_environ = getattr(self.command_vars, exe_key)
+ if not exe_from_environ:
+ possibles = [f90, f77] + self.possible_executables
+ else:
+ possibles = [exe_from_environ] + self.possible_executables
+
+ seen = set()
+ unique_possibles = []
+ for e in possibles:
+ if e == '<F77>':
+ e = f77
+ elif e == '<F90>':
+ e = f90
+ if not e or e in seen:
+ continue
+ seen.add(e)
+ unique_possibles.append(e)
+
+ for exe in unique_possibles:
+ fc_exe = cached_find_executable(exe)
+ if fc_exe:
+ cmd[0] = fc_exe
+ return fc_exe
+ self.set_command(exe_key, None)
+ return None
+
+ ctype = self.compiler_type
+ f90 = set_exe('compiler_f90')
+ if not f90:
+ f77 = set_exe('compiler_f77')
+ if f77:
+ log.warn('%s: no Fortran 90 compiler found' % ctype)
+ else:
+                raise CompilerNotFound(
+                    '%s: no Fortran 90 or Fortran 77 compiler found' % ctype)
+ else:
+ f77 = set_exe('compiler_f77', f90=f90)
+ if not f77:
+ log.warn('%s: no Fortran 77 compiler found' % ctype)
+ set_exe('compiler_fix', f90=f90)
+
+ set_exe('linker_so', f77=f77, f90=f90)
+ set_exe('linker_exe', f77=f77, f90=f90)
+ set_exe('version_cmd', f77=f77, f90=f90)
+ set_exe('archiver')
+ set_exe('ranlib')
+
+ def update_executables(self):
+ """Called at the beginning of customisation. Subclasses should
+ override this if they need to set up the executables dictionary.
+
+ Note that self.find_executables() is run afterwards, so the
+ self.executables dictionary values can contain <F77> or <F90> as
+ the command, which will be replaced by the found F77 or F90
+ compiler.
+ """
+ pass
+
+ def get_flags(self):
+ """List of flags common to all compiler types."""
+ return [] + self.pic_flags
+
+ def _get_command_flags(self, key):
+ cmd = self.executables.get(key, None)
+ if cmd is None:
+ return []
+ return cmd[1:]
+
+ def get_flags_f77(self):
+ """List of Fortran 77 specific flags."""
+ return self._get_command_flags('compiler_f77')
+ def get_flags_f90(self):
+ """List of Fortran 90 specific flags."""
+ return self._get_command_flags('compiler_f90')
+ def get_flags_free(self):
+ """List of Fortran 90 free format specific flags."""
+ return []
+ def get_flags_fix(self):
+ """List of Fortran 90 fixed format specific flags."""
+ return self._get_command_flags('compiler_fix')
+ def get_flags_linker_so(self):
+ """List of linker flags to build a shared library."""
+ return self._get_command_flags('linker_so')
+ def get_flags_linker_exe(self):
+ """List of linker flags to build an executable."""
+ return self._get_command_flags('linker_exe')
+ def get_flags_ar(self):
+ """List of archiver flags. """
+ return self._get_command_flags('archiver')
+ def get_flags_opt(self):
+ """List of architecture independent compiler flags."""
+ return []
+ def get_flags_arch(self):
+ """List of architecture dependent compiler flags."""
+ return []
+ def get_flags_debug(self):
+ """List of compiler flags to compile with debugging information."""
+ return []
+
+ get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
+ get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
+ get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug
+
+ def get_libraries(self):
+ """List of compiler libraries."""
+ return self.libraries[:]
+ def get_library_dirs(self):
+ """List of compiler library directories."""
+ return self.library_dirs[:]
+
+ def get_version(self, force=False, ok_status=[0]):
+ assert self._is_customised
+ version = CCompiler.get_version(self, force=force, ok_status=ok_status)
+ if version is None:
+ raise CompilerNotFound()
+ return version
+
+
+ ############################################################
+
+ ## Public methods:
+
+ def customize(self, dist = None):
+ """Customize Fortran compiler.
+
+ This method gets Fortran compiler specific information from
+ (i) class definition, (ii) environment, (iii) distutils config
+ files, and (iv) command line (later overrides earlier).
+
+ This method should be always called after constructing a
+ compiler instance. But not in __init__ because Distribution
+ instance is needed for (iii) and (iv).
+ """
+ log.info('customize %s' % (self.__class__.__name__))
+
+ self._is_customised = True
+
+ self.distutils_vars.use_distribution(dist)
+ self.command_vars.use_distribution(dist)
+ self.flag_vars.use_distribution(dist)
+
+ self.update_executables()
+
+ # find_executables takes care of setting the compiler commands,
+ # version_cmd, linker_so, linker_exe, ar, and ranlib
+ self.find_executables()
+
+ noopt = self.distutils_vars.get('noopt', False)
+ noarch = self.distutils_vars.get('noarch', noopt)
+ debug = self.distutils_vars.get('debug', False)
+
+ f77 = self.command_vars.compiler_f77
+ f90 = self.command_vars.compiler_f90
+
+ f77flags = []
+ f90flags = []
+ freeflags = []
+ fixflags = []
+
+ if f77:
+ f77 = _shell_utils.NativeParser.split(f77)
+ f77flags = self.flag_vars.f77
+ if f90:
+ f90 = _shell_utils.NativeParser.split(f90)
+ f90flags = self.flag_vars.f90
+ freeflags = self.flag_vars.free
+ # XXX Assuming that free format is default for f90 compiler.
+ fix = self.command_vars.compiler_fix
+            # NOTE: cases like this are probably just working around a
+            # --coverage flag leaking in when e.g. F90='gfortran --coverage'
+            # is set, instead of putting that flag somewhere more
+            # appropriate.  Cases where a Fortran compiler environment
+            # variable has been customized by CI or a user should perhaps
+            # eventually be more thoroughly tested and more robustly
+            # handled.
+ if fix:
+ fix = _shell_utils.NativeParser.split(fix)
+ fixflags = self.flag_vars.fix + f90flags
+
+ oflags, aflags, dflags = [], [], []
+ # examine get_flags_<tag>_<compiler> for extra flags
+ # only add them if the method is different from get_flags_<tag>
+ def get_flags(tag, flags):
+ # note that self.flag_vars.<tag> calls self.get_flags_<tag>()
+ flags.extend(getattr(self.flag_vars, tag))
+ this_get = getattr(self, 'get_flags_' + tag)
+ for name, c, flagvar in [('f77', f77, f77flags),
+ ('f90', f90, f90flags),
+ ('f90', fix, fixflags)]:
+ t = '%s_%s' % (tag, name)
+ if c and this_get is not getattr(self, 'get_flags_' + t):
+ flagvar.extend(getattr(self.flag_vars, t))
+ if not noopt:
+ get_flags('opt', oflags)
+ if not noarch:
+ get_flags('arch', aflags)
+ if debug:
+ get_flags('debug', dflags)
+
+ fflags = self.flag_vars.flags + dflags + oflags + aflags
+
+ if f77:
+ self.set_commands(compiler_f77=f77+f77flags+fflags)
+ if f90:
+ self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
+ if fix:
+ self.set_commands(compiler_fix=fix+fixflags+fflags)
+
+
+ #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
+ linker_so = self.linker_so
+ if linker_so:
+ linker_so_flags = self.flag_vars.linker_so
+ if sys.platform.startswith('aix'):
+ python_lib = get_python_lib(standard_lib=1)
+ ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
+ python_exp = os.path.join(python_lib, 'config', 'python.exp')
+ linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
+ if sys.platform.startswith('os400'):
+ from distutils.sysconfig import get_config_var
+ python_config = get_config_var('LIBPL')
+ ld_so_aix = os.path.join(python_config, 'ld_so_aix')
+ python_exp = os.path.join(python_config, 'python.exp')
+ linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
+ self.set_commands(linker_so=linker_so+linker_so_flags)
+
+ linker_exe = self.linker_exe
+ if linker_exe:
+ linker_exe_flags = self.flag_vars.linker_exe
+ self.set_commands(linker_exe=linker_exe+linker_exe_flags)
+
+ ar = self.command_vars.archiver
+ if ar:
+ arflags = self.flag_vars.ar
+ self.set_commands(archiver=[ar]+arflags)
+
+ self.set_library_dirs(self.get_library_dirs())
+ self.set_libraries(self.get_libraries())
+
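+    # Editorial sketch of the intended call sequence (new_fcompiler is
+    # defined at module level below):
+    #
+    #     fc = new_fcompiler(compiler='gnu95')
+    #     fc.customize()        # required before reading the properties
+    #     fc.compiler_f90       # e.g. ['gfortran', ...]
+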
+ def dump_properties(self):
+ """Print out the attributes of a compiler instance."""
+ props = []
+ for key in list(self.executables.keys()) + \
+ ['version', 'libraries', 'library_dirs',
+ 'object_switch', 'compile_switch']:
+ if hasattr(self, key):
+ v = getattr(self, key)
+ props.append((key, None, '= '+repr(v)))
+ props.sort()
+
+ pretty_printer = FancyGetopt(props)
+ for l in pretty_printer.generate_help("%s instance properties:" \
+ % (self.__class__.__name__)):
+ if l[:4]==' --':
+ l = ' ' + l[4:]
+ print(l)
+
+ ###################
+
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ """Compile 'src' to product 'obj'."""
+ src_flags = {}
+ if is_f_file(src) and not has_f90_header(src):
+ flavor = ':f77'
+ compiler = self.compiler_f77
+ src_flags = get_f77flags(src)
+ extra_compile_args = self.extra_f77_compile_args or []
+ elif is_free_format(src):
+ flavor = ':f90'
+ compiler = self.compiler_f90
+ if compiler is None:
+                raise DistutilsExecError(
+                    'f90 not supported by %s (needed for %s)'
+                    % (self.__class__.__name__, src))
+ extra_compile_args = self.extra_f90_compile_args or []
+ else:
+ flavor = ':fix'
+ compiler = self.compiler_fix
+ if compiler is None:
+                raise DistutilsExecError(
+                    'f90 (fixed) not supported by %s (needed for %s)'
+                    % (self.__class__.__name__, src))
+ extra_compile_args = self.extra_f90_compile_args or []
+ if self.object_switch[-1]==' ':
+ o_args = [self.object_switch.strip(), obj]
+ else:
+ o_args = [self.object_switch.strip()+obj]
+
+ assert self.compile_switch.strip()
+ s_args = [self.compile_switch, src]
+
+ if extra_compile_args:
+ log.info('extra %s options: %r' \
+ % (flavor[1:], ' '.join(extra_compile_args)))
+
+ extra_flags = src_flags.get(self.compiler_type, [])
+ if extra_flags:
+ log.info('using compile options from source: %r' \
+ % ' '.join(extra_flags))
+
+ command = compiler + cc_args + extra_flags + s_args + o_args \
+ + extra_postargs + extra_compile_args
+
+ display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
+ src)
+ try:
+ self.spawn(command, display=display)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise CompileError(msg) from None
+
+ def module_options(self, module_dirs, module_build_dir):
+ options = []
+ if self.module_dir_switch is not None:
+ if self.module_dir_switch[-1]==' ':
+ options.extend([self.module_dir_switch.strip(), module_build_dir])
+ else:
+ options.append(self.module_dir_switch.strip()+module_build_dir)
+ else:
+ print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
+ print('XXX: Fix module_dir_switch for ', self.__class__.__name__)
+ if self.module_include_switch is not None:
+ for d in [module_build_dir]+module_dirs:
+ options.append('%s%s' % (self.module_include_switch, d))
+ else:
+ print('XXX: module_dirs=%r option ignored' % (module_dirs))
+ print('XXX: Fix module_include_switch for ', self.__class__.__name__)
+ return options
+
+ def library_option(self, lib):
+ return "-l" + lib
+ def library_dir_option(self, dir):
+ return "-L" + dir
+
+ def link(self, target_desc, objects,
+ output_filename, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None):
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ libraries, library_dirs, runtime_library_dirs = \
+ self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
+
+ lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
+ libraries)
+ if is_string(output_dir):
+ output_filename = os.path.join(output_dir, output_filename)
+ elif output_dir is not None:
+ raise TypeError("'output_dir' must be a string or None")
+
+ if self._need_link(objects, output_filename):
+ if self.library_switch[-1]==' ':
+ o_args = [self.library_switch.strip(), output_filename]
+ else:
+ o_args = [self.library_switch.strip()+output_filename]
+
+ if is_string(self.objects):
+ ld_args = objects + [self.objects]
+ else:
+ ld_args = objects + self.objects
+ ld_args = ld_args + lib_opts + o_args
+ if debug:
+ ld_args[:0] = ['-g']
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+ self.mkpath(os.path.dirname(output_filename))
+ if target_desc == CCompiler.EXECUTABLE:
+ linker = self.linker_exe[:]
+ else:
+ linker = self.linker_so[:]
+ command = linker + ld_args
+ try:
+ self.spawn(command)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise LinkError(msg) from None
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def _environment_hook(self, name, hook_name):
+ if hook_name is None:
+ return None
+ if is_string(hook_name):
+ if hook_name.startswith('self.'):
+ hook_name = hook_name[5:]
+ hook = getattr(self, hook_name)
+ return hook()
+ elif hook_name.startswith('exe.'):
+ hook_name = hook_name[4:]
+ var = self.executables[hook_name]
+ if var:
+ return var[0]
+ else:
+ return None
+ elif hook_name.startswith('flags.'):
+ hook_name = hook_name[6:]
+ hook = getattr(self, 'get_flags_' + hook_name)
+ return hook()
+ else:
+ return hook_name()
+
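+    # Editorial examples of the hook-name grammar described in the class
+    # configuration tables above:
+    #
+    #     'self.get_flags'   -> calls self.get_flags()
+    #     'exe.compiler_f77' -> self.executables['compiler_f77'][0]
+    #     'flags.opt'        -> calls self.get_flags_opt()
+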
+ def can_ccompiler_link(self, ccompiler):
+ """
+ Check if the given C compiler can link objects produced by
+ this compiler.
+ """
+ return True
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+ linker, to a file that is compatible.
+
+ Parameters
+ ----------
+ objects : list
+ List of object files to include.
+ output_dir : str
+ Output directory to place generated object files.
+ extra_dll_dir : str
+ Output directory to place extra DLL files that need to be
+ included on Windows.
+
+ Returns
+ -------
+ converted_objects : list of str
+ List of converted object files.
+ Note that the number of output files is not necessarily
+ the same as inputs.
+
+ """
+ raise NotImplementedError()
+
+ ## class FCompiler
+
+_default_compilers = (
+ # sys.platform mappings
+ ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
+ 'intelvem', 'intelem', 'flang')),
+ ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
+ ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag',
+ 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95',
+ 'pathf95', 'nagfor', 'fujitsu')),
+ ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu',
+ 'g95', 'pg')),
+ ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
+ ('irix.*', ('mips', 'gnu', 'gnu95',)),
+ ('aix.*', ('ibm', 'gnu', 'gnu95',)),
+ # os.name mappings
+ ('posix', ('gnu', 'gnu95',)),
+ ('nt', ('gnu', 'gnu95',)),
+ ('mac', ('gnu95', 'gnu', 'pg')),
+ )
+
+fcompiler_class = None
+fcompiler_aliases = None
+
+def load_all_fcompiler_classes():
+ """Cache all the FCompiler classes found in modules in the
+ numpy.distutils.fcompiler package.
+ """
+ from glob import glob
+ global fcompiler_class, fcompiler_aliases
+ if fcompiler_class is not None:
+ return
+ pys = os.path.join(os.path.dirname(__file__), '*.py')
+ fcompiler_class = {}
+ fcompiler_aliases = {}
+ for fname in glob(pys):
+ module_name, ext = os.path.splitext(os.path.basename(fname))
+ module_name = 'numpy.distutils.fcompiler.' + module_name
+        __import__(module_name)
+ module = sys.modules[module_name]
+ if hasattr(module, 'compilers'):
+ for cname in module.compilers:
+ klass = getattr(module, cname)
+ desc = (klass.compiler_type, klass, klass.description)
+ fcompiler_class[klass.compiler_type] = desc
+ for alias in klass.compiler_aliases:
+ if alias in fcompiler_aliases:
+ raise ValueError("alias %r defined for both %s and %s"
+ % (alias, klass.__name__,
+ fcompiler_aliases[alias][1].__name__))
+ fcompiler_aliases[alias] = desc
+
+def _find_existing_fcompiler(compiler_types,
+ osname=None, platform=None,
+ requiref90=False,
+ c_compiler=None):
+ from numpy.distutils.core import get_distribution
+ dist = get_distribution(always=True)
+ for compiler_type in compiler_types:
+ v = None
+ try:
+ c = new_fcompiler(plat=platform, compiler=compiler_type,
+ c_compiler=c_compiler)
+ c.customize(dist)
+ v = c.get_version()
+ if requiref90 and c.compiler_f90 is None:
+ v = None
+ new_compiler = c.suggested_f90_compiler
+ if new_compiler:
+                    log.warn('Trying %r compiler as suggested by %r '
+                             'compiler for f90 support.' % (new_compiler,
+                                                            compiler_type))
+ c = new_fcompiler(plat=platform, compiler=new_compiler,
+ c_compiler=c_compiler)
+ c.customize(dist)
+ v = c.get_version()
+ if v is not None:
+ compiler_type = new_compiler
+ if requiref90 and c.compiler_f90 is None:
+ raise ValueError('%s does not support compiling f90 codes, '
+ 'skipping.' % (c.__class__.__name__))
+ except DistutilsModuleError:
+ log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
+ except CompilerNotFound:
+ log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
+ if v is not None:
+ return compiler_type
+ return None
+
+def available_fcompilers_for_platform(osname=None, platform=None):
+ if osname is None:
+ osname = os.name
+ if platform is None:
+ platform = sys.platform
+ matching_compiler_types = []
+ for pattern, compiler_type in _default_compilers:
+ if re.match(pattern, platform) or re.match(pattern, osname):
+ for ct in compiler_type:
+ if ct not in matching_compiler_types:
+ matching_compiler_types.append(ct)
+ if not matching_compiler_types:
+ matching_compiler_types.append('gnu')
+ return matching_compiler_types
+
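+# Illustrative result (a sketch): on a typical Linux host this returns the
+# 'linux.*' tuple from _default_compilers, e.g. ['arm', 'gnu95', 'intel',
+# ...], in the priority order that get_default_fcompiler() probes them.
+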
+def get_default_fcompiler(osname=None, platform=None, requiref90=False,
+ c_compiler=None):
+ """Determine the default Fortran compiler to use for the given
+ platform."""
+ matching_compiler_types = available_fcompilers_for_platform(osname,
+ platform)
+ log.info("get_default_fcompiler: matching types: '%s'",
+ matching_compiler_types)
+ compiler_type = _find_existing_fcompiler(matching_compiler_types,
+ osname=osname,
+ platform=platform,
+ requiref90=requiref90,
+ c_compiler=c_compiler)
+ return compiler_type
+
+# Cache of (plat, compiler) combinations that already failed, so we avoid
+# re-probing for a Fortran compiler every time
+failed_fcompilers = set()
+
+def new_fcompiler(plat=None,
+ compiler=None,
+ verbose=0,
+ dry_run=0,
+ force=0,
+ requiref90=False,
+ c_compiler = None):
+ """Generate an instance of some FCompiler subclass for the supplied
+ platform/compiler combination.
+ """
+ global failed_fcompilers
+ fcompiler_key = (plat, compiler)
+ if fcompiler_key in failed_fcompilers:
+ return None
+
+ load_all_fcompiler_classes()
+ if plat is None:
+ plat = os.name
+ if compiler is None:
+ compiler = get_default_fcompiler(plat, requiref90=requiref90,
+ c_compiler=c_compiler)
+ if compiler in fcompiler_class:
+ module_name, klass, long_description = fcompiler_class[compiler]
+ elif compiler in fcompiler_aliases:
+ module_name, klass, long_description = fcompiler_aliases[compiler]
+ else:
+ msg = "don't know how to compile Fortran code on platform '%s'" % plat
+ if compiler is not None:
+ msg = msg + " with '%s' compiler." % compiler
+ msg = msg + " Supported compilers are: %s)" \
+ % (','.join(fcompiler_class.keys()))
+ log.warn(msg)
+ failed_fcompilers.add(fcompiler_key)
+ return None
+
+ compiler = klass(verbose=verbose, dry_run=dry_run, force=force)
+ compiler.c_compiler = c_compiler
+ return compiler
+
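+# A minimal usage sketch of the factory above (illustrative helper, not part
+# of the numpy.distutils API; assumes a compiler such as gfortran is
+# installed, otherwise None is returned):
+def _example_new_fcompiler():  # pragma: no cover
+    fc = new_fcompiler(compiler='gnu95')
+    if fc is None:
+        return None
+    fc.customize()
+    try:
+        return fc.get_version()
+    except CompilerNotFound:
+        return None
+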
+def show_fcompilers(dist=None):
+ """Print list of available compilers (used by the "--help-fcompiler"
+ option to "config_fc").
+ """
+ if dist is None:
+ from distutils.dist import Distribution
+ from numpy.distutils.command.config_compiler import config_fc
+ dist = Distribution()
+ dist.script_name = os.path.basename(sys.argv[0])
+ dist.script_args = ['config_fc'] + sys.argv[1:]
+ try:
+ dist.script_args.remove('--help-fcompiler')
+ except ValueError:
+ pass
+ dist.cmdclass['config_fc'] = config_fc
+ dist.parse_config_files()
+ dist.parse_command_line()
+ compilers = []
+ compilers_na = []
+ compilers_ni = []
+ if not fcompiler_class:
+ load_all_fcompiler_classes()
+ platform_compilers = available_fcompilers_for_platform()
+ for compiler in platform_compilers:
+ v = None
+ log.set_verbosity(-2)
+ try:
+ c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
+ c.customize(dist)
+ v = c.get_version()
+ except (DistutilsModuleError, CompilerNotFound) as e:
+ log.debug("show_fcompilers: %s not found" % (compiler,))
+ log.debug(repr(e))
+
+ if v is None:
+ compilers_na.append(("fcompiler="+compiler, None,
+ fcompiler_class[compiler][2]))
+ else:
+ c.dump_properties()
+ compilers.append(("fcompiler="+compiler, None,
+ fcompiler_class[compiler][2] + ' (%s)' % v))
+
+ compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
+ compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
+ for fc in compilers_ni]
+
+ compilers.sort()
+ compilers_na.sort()
+ compilers_ni.sort()
+ pretty_printer = FancyGetopt(compilers)
+ pretty_printer.print_help("Fortran compilers found:")
+ pretty_printer = FancyGetopt(compilers_na)
+ pretty_printer.print_help("Compilers available for this "
+ "platform, but not found:")
+ if compilers_ni:
+ pretty_printer = FancyGetopt(compilers_ni)
+ pretty_printer.print_help("Compilers not available on this platform:")
+ print("For compiler details, run 'config_fc --verbose' setup command.")
+
+
+def dummy_fortran_file():
+ fo, name = make_temp_file(suffix='.f')
+ fo.write(" subroutine dummy()\n end\n")
+ fo.close()
+ return name[:-2]
+
+
+is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match
+_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search
+_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search
+_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search
+_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match
+
+def is_free_format(file):
+ """Check if file is in free format Fortran."""
+ # f90 allows both fixed and free format, assuming fixed unless
+ # signs of free format are detected.
+ result = 0
+ with open(file, encoding='latin1') as f:
+ line = f.readline()
+ n = 10000 # the number of non-comment lines to scan for hints
+ if _has_f_header(line) or _has_fix_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n>0 and line:
+ line = line.rstrip()
+ if line and line[0]!='!':
+ n -= 1
+ if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+ result = 1
+ break
+ line = f.readline()
+ return result
+
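+# Classification sketch (illustrative): a first line containing '-*- f90 -*-'
+# marks the file free format (returns 1), as does a statement opening in
+# column 1 (e.g. 'subroutine f()') or a line ending in '&'; classic
+# column-7 fixed-form sources return 0.
+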
+def has_f90_header(src):
+ with open(src, encoding='latin1') as f:
+ line = f.readline()
+ return _has_f90_header(line) or _has_fix_header(line)
+
+_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
+def get_f77flags(src):
+ """
+    Search the first 20 lines of Fortran 77 code for lines matching the
+    pattern `CF77FLAGS(<fcompiler type>) = <f77 flags>`.
+    Return a dictionary {<fcompiler type>: <f77 flags>}.
+ """
+ flags = {}
+ with open(src, encoding='latin1') as f:
+ i = 0
+ for line in f:
+ i += 1
+ if i>20: break
+ m = _f77flags_re.match(line)
+ if not m: continue
+ fcname = m.group('fcname').strip()
+ fflags = m.group('fflags').strip()
+ flags[fcname] = split_quoted(fflags)
+ return flags
+
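+# Illustrative helper (not used by the build machinery): demonstrate the
+# CF77FLAGS(...) directive that get_f77flags() parses; the flags shown are
+# examples only.
+def _example_get_f77flags():  # pragma: no cover
+    fo, fname = make_temp_file(suffix='.f')
+    fo.write("CF77FLAGS(gnu) = -fno-f2c -O2\n"
+             "      subroutine dummy()\n"
+             "      end\n")
+    fo.close()
+    try:
+        # -> {'gnu': ['-fno-f2c', '-O2']}
+        return get_f77flags(fname)
+    finally:
+        os.remove(fname)
+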
+# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
+
+if __name__ == '__main__':
+ show_fcompilers()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py
new file mode 100644
index 00000000..efe3a4cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py
@@ -0,0 +1,156 @@
+
+# http://www.absoft.com/literature/osxuserguide.pdf
+# http://www.absoft.com/documentation.html
+
+# Notes:
+# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
+# generated extension modules (works for f2py v2.45.241_1936 and up)
+import os
+
+from numpy.distutils.cpuinfo import cpu
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from numpy.distutils.misc_util import cyg2win32
+
+compilers = ['AbsoftFCompiler']
+
+class AbsoftFCompiler(FCompiler):
+
+ compiler_type = 'absoft'
+ description = 'Absoft Corp Fortran Compiler'
+ #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
+ version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
+ r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
+
+ # on windows: f90 -V -c dummy.f
+ # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16
+
+ # samt5735(8)$ f90 -V -c dummy.f
+ # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
+ # Note that fink installs g77 as f77, so need to use f90 for detection.
+
+ executables = {
+ 'version_cmd' : None, # set by update_executables
+ 'compiler_f77' : ["f77"],
+ 'compiler_fix' : ["f90"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["<F90>"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ if os.name=='nt':
+ library_switch = '/out:' #No space after /out:!
+
+ module_dir_switch = None
+ module_include_switch = '-p'
+
+ def update_executables(self):
+ f = cyg2win32(dummy_fortran_file())
+ self.executables['version_cmd'] = ['<F90>', '-V', '-c',
+ f+'.f', '-o', f+'.o']
+
+ def get_flags_linker_so(self):
+ if os.name=='nt':
+ opt = ['/dll']
+ # The "-K shared" switches are being left in for pre-9.0 versions
+ # of Absoft though I don't think versions earlier than 9 can
+ # actually be used to build shared libraries. In fact, version
+ # 8 of Absoft doesn't recognize "-K shared" and will fail.
+ elif self.get_version() >= '9.0':
+ opt = ['-shared']
+ else:
+ opt = ["-K", "shared"]
+ return opt
+
+ def library_dir_option(self, dir):
+ if os.name=='nt':
+ return ['-link', '/PATH:%s' % (dir)]
+ return "-L" + dir
+
+ def library_option(self, lib):
+ if os.name=='nt':
+ return '%s.lib' % (lib)
+ return "-l" + lib
+
+ def get_library_dirs(self):
+ opt = FCompiler.get_library_dirs(self)
+ d = os.environ.get('ABSOFT')
+ if d:
+ if self.get_version() >= '10.0':
+ # use shared libraries, the static libraries were not compiled -fPIC
+ prefix = 'sh'
+ else:
+ prefix = ''
+ if cpu.is_64bit():
+ suffix = '64'
+ else:
+ suffix = ''
+ opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
+ return opt
+
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ if self.get_version() >= '11.0':
+ opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
+ elif self.get_version() >= '10.0':
+ opt.extend(['af90math', 'afio', 'af77math', 'U77'])
+ elif self.get_version() >= '8.0':
+ opt.extend(['f90math', 'fio', 'f77math', 'U77'])
+ else:
+ opt.extend(['fio', 'f90math', 'fmath', 'U77'])
+ if os.name =='nt':
+ opt.append('COMDLG32')
+ return opt
+
+ def get_flags(self):
+ opt = FCompiler.get_flags(self)
+ if os.name != 'nt':
+ opt.extend(['-s'])
+ if self.get_version():
+ if self.get_version()>='8.2':
+ opt.append('-fpic')
+ return opt
+
+ def get_flags_f77(self):
+ opt = FCompiler.get_flags_f77(self)
+ opt.extend(['-N22', '-N90', '-N110'])
+ v = self.get_version()
+ if os.name == 'nt':
+ if v and v>='8.0':
+ opt.extend(['-f', '-N15'])
+ else:
+ opt.append('-f')
+ if v:
+ if v<='4.6':
+ opt.append('-B108')
+ else:
+ # Though -N15 is undocumented, it works with
+ # Absoft 8.0 on Linux
+ opt.append('-N15')
+ return opt
+
+ def get_flags_f90(self):
+ opt = FCompiler.get_flags_f90(self)
+ opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
+ "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
+ if self.get_version():
+ if self.get_version()>'4.6':
+ opt.extend(["-YDEALLOC=ALL"])
+ return opt
+
+ def get_flags_fix(self):
+ opt = FCompiler.get_flags_fix(self)
+ opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
+ "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
+ opt.extend(["-f", "fixed"])
+ return opt
+
+ def get_flags_opt(self):
+ opt = ['-O']
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='absoft').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py
new file mode 100644
index 00000000..3eb7e9af
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py
@@ -0,0 +1,71 @@
+import sys
+
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from sys import platform
+from os.path import join, dirname, normpath
+
+compilers = ['ArmFlangCompiler']
+
+import functools
+
+class ArmFlangCompiler(FCompiler):
+ compiler_type = 'arm'
+ description = 'Arm Compiler'
+ version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*'
+
+ ar_exe = 'lib.exe'
+ possible_executables = ['armflang']
+
+ executables = {
+ 'version_cmd': ["", "--version"],
+ 'compiler_f77': ["armflang", "-fPIC"],
+ 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"],
+ 'compiler_f90': ["armflang", "-fPIC"],
+ 'linker_so': ["armflang", "-fPIC", "-shared"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': None
+ }
+
+ pic_flags = ["-fPIC", "-DPIC"]
+ c_compiler = 'arm'
+ module_dir_switch = '-module ' # Don't remove ending space!
+
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ opt.extend(['flang', 'flangrti', 'ompstub'])
+ return opt
+
+ @functools.lru_cache(maxsize=128)
+ def get_library_dirs(self):
+ """List of compiler library directories."""
+ opt = FCompiler.get_library_dirs(self)
+ flang_dir = dirname(self.executables['compiler_f77'][0])
+ opt.append(normpath(join(flang_dir, '..', 'lib')))
+
+ return opt
+
+ def get_flags(self):
+ return []
+
+ def get_flags_free(self):
+ return []
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_opt(self):
+ return ['-O3']
+
+ def get_flags_arch(self):
+ return []
+
+ def runtime_library_dir_option(self, dir):
+ return '-Wl,-rpath=%s' % dir
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+    print(customized_fcompiler(compiler='arm').get_version())
+
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py
new file mode 100644
index 00000000..01314c13
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py
@@ -0,0 +1,120 @@
+
+#http://www.compaq.com/fortran/docs/
+import os
+import sys
+
+from numpy.distutils.fcompiler import FCompiler
+from distutils.errors import DistutilsPlatformError
+
+compilers = ['CompaqFCompiler']
+if os.name != 'posix' or sys.platform[:6] == 'cygwin':
+ # Otherwise we'd get a false positive on posix systems with
+ # case-insensitive filesystems (like darwin), because we'll pick
+ # up /bin/df
+ compilers.append('CompaqVisualFCompiler')
+
+class CompaqFCompiler(FCompiler):
+
+ compiler_type = 'compaq'
+ description = 'Compaq Fortran Compiler'
+ version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
+
+ if sys.platform[:5]=='linux':
+ fc_exe = 'fort'
+ else:
+ fc_exe = 'f90'
+
+ executables = {
+ 'version_cmd' : ['<F90>', "-version"],
+ 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
+ 'compiler_fix' : [fc_exe, "-fixed"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : ['<F90>'],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ module_dir_switch = '-module ' # not tested
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ return ['-assume no2underscore', '-nomixed_str_len_arg']
+ def get_flags_debug(self):
+ return ['-g', '-check bounds']
+ def get_flags_opt(self):
+ return ['-O4', '-align dcommons', '-assume bigarrays',
+ '-assume nozsize', '-math_library fast']
+ def get_flags_arch(self):
+ return ['-arch host', '-tune host']
+ def get_flags_linker_so(self):
+ if sys.platform[:5]=='linux':
+ return ['-shared']
+ return ['-shared', '-Wl,-expect_unresolved,*']
+
+class CompaqVisualFCompiler(FCompiler):
+
+ compiler_type = 'compaqv'
+ description = 'DIGITAL or Compaq Visual Fortran Compiler'
+ version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
+ r' Version (?P<version>[^\s]*).*')
+
+ compile_switch = '/compile_only'
+ object_switch = '/object:'
+ library_switch = '/OUT:' #No space after /OUT:!
+
+ static_lib_extension = ".lib"
+ static_lib_format = "%s%s"
+ module_dir_switch = '/module:'
+ module_include_switch = '/I'
+
+ ar_exe = 'lib.exe'
+ fc_exe = 'DF'
+
+ if sys.platform=='win32':
+ from numpy.distutils.msvccompiler import MSVCCompiler
+
+ try:
+ m = MSVCCompiler()
+ m.initialize()
+ ar_exe = m.lib
+ except DistutilsPlatformError:
+ pass
+ except AttributeError as e:
+ if '_MSVCCompiler__root' in str(e):
+ print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
+ else:
+ raise
+ except OSError as e:
+ if not "vcvarsall.bat" in str(e):
+ print("Unexpected OSError in", __file__)
+ raise
+ except ValueError as e:
+ if not "'path'" in str(e):
+ print("Unexpected ValueError in", __file__)
+ raise
+
+ executables = {
+ 'version_cmd' : ['<F90>', "/what"],
+ 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
+ 'compiler_fix' : [fc_exe, "/fixed"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : ['<F90>'],
+ 'archiver' : [ar_exe, "/OUT:"],
+ 'ranlib' : None
+ }
+
+ def get_flags(self):
+ return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
+ '/names:lowercase', '/assume:underscore']
+ def get_flags_opt(self):
+ return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
+ def get_flags_arch(self):
+ return ['/threads']
+ def get_flags_debug(self):
+ return ['/debug']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='compaq').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py
new file mode 100644
index 00000000..ecd4d998
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py
@@ -0,0 +1,88 @@
+import os
+from distutils.dist import Distribution
+
+__metaclass__ = type
+
+class EnvironmentConfig:
+ def __init__(self, distutils_section='ALL', **kw):
+ self._distutils_section = distutils_section
+ self._conf_keys = kw
+ self._conf = None
+ self._hook_handler = None
+
+ def dump_variable(self, name):
+ conf_desc = self._conf_keys[name]
+ hook, envvar, confvar, convert, append = conf_desc
+ if not convert:
+            convert = lambda x: x
+ print('%s.%s:' % (self._distutils_section, name))
+ v = self._hook_handler(name, hook)
+ print(' hook : %s' % (convert(v),))
+ if envvar:
+ v = os.environ.get(envvar, None)
+ print(' environ: %s' % (convert(v),))
+ if confvar and self._conf:
+ v = self._conf.get(confvar, (None, None))[1]
+ print(' config : %s' % (convert(v),))
+
+ def dump_variables(self):
+ for name in self._conf_keys:
+ self.dump_variable(name)
+
+ def __getattr__(self, name):
+ try:
+ conf_desc = self._conf_keys[name]
+ except KeyError:
+ raise AttributeError(
+ f"'EnvironmentConfig' object has no attribute '{name}'"
+ ) from None
+
+ return self._get_var(name, conf_desc)
+
+ def get(self, name, default=None):
+ try:
+ conf_desc = self._conf_keys[name]
+ except KeyError:
+ return default
+ var = self._get_var(name, conf_desc)
+ if var is None:
+ var = default
+ return var
+
+ def _get_var(self, name, conf_desc):
+ hook, envvar, confvar, convert, append = conf_desc
+ if convert is None:
+ convert = lambda x: x
+ var = self._hook_handler(name, hook)
+ if envvar is not None:
+ envvar_contents = os.environ.get(envvar)
+ if envvar_contents is not None:
+ envvar_contents = convert(envvar_contents)
+ if var and append:
+ if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
+ var.extend(envvar_contents)
+ else:
+ # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0
+ # to keep old (overwrite flags rather than append to
+ # them) behavior
+ var = envvar_contents
+ else:
+ var = envvar_contents
+ if confvar is not None and self._conf:
+ if confvar in self._conf:
+ source, confvar_contents = self._conf[confvar]
+ var = convert(confvar_contents)
+ return var
+
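+    # Resolution sketch (illustrative): the hook supplies the base value; a
+    # set environment variable replaces it, or extends it when the key's
+    # ``append`` flag is true, the current value is non-empty, and
+    # NPY_DISTUTILS_APPEND_FLAGS != '0'; a matching option from the parsed
+    # config files then overrides both.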
+
+ def clone(self, hook_handler):
+ ec = self.__class__(distutils_section=self._distutils_section,
+ **self._conf_keys)
+ ec._hook_handler = hook_handler
+ return ec
+
+ def use_distribution(self, dist):
+ if isinstance(dist, Distribution):
+ self._conf = dist.get_option_dict(self._distutils_section)
+ else:
+ self._conf = dist
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py
new file mode 100644
index 00000000..ddce6745
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py
@@ -0,0 +1,46 @@
+"""
+fujitsu
+
+Supports Fujitsu compiler function.
+This compiler is developed by Fujitsu and is used in A64FX on Fugaku.
+"""
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['FujitsuFCompiler']
+
+class FujitsuFCompiler(FCompiler):
+ compiler_type = 'fujitsu'
+ description = 'Fujitsu Fortran Compiler'
+
+ possible_executables = ['frt']
+ version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)'
+ # $ frt --version
+ # frt (FRT) x.x.x yyyymmdd
+
+ executables = {
+ 'version_cmd' : ["<F77>", "--version"],
+ 'compiler_f77' : ["frt", "-Fixed"],
+ 'compiler_fix' : ["frt", "-Fixed"],
+ 'compiler_f90' : ["frt"],
+ 'linker_so' : ["frt", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-KPIC']
+ module_dir_switch = '-M'
+ module_include_switch = '-I'
+
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_debug(self):
+ return ['-g']
+ def runtime_library_dir_option(self, dir):
+ return f'-Wl,-rpath={dir}'
+ def get_libraries(self):
+ return ['fj90f', 'fj90i', 'fjsrcinfo']
+
+if __name__ == '__main__':
+ from distutils import log
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+    print(customized_fcompiler(compiler='fujitsu').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py
new file mode 100644
index 00000000..e109a972
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py
@@ -0,0 +1,42 @@
+# http://g95.sourceforge.net/
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['G95FCompiler']
+
+class G95FCompiler(FCompiler):
+ compiler_type = 'g95'
+ description = 'G95 Fortran Compiler'
+
+# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
+ # $ g95 --version
+ # G95 (GCC 4.0.3 (g95!) May 22 2006)
+
+ version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
+ # $ g95 --version
+ # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
+
+ executables = {
+ 'version_cmd' : ["<F90>", "--version"],
+ 'compiler_f77' : ["g95", "-ffixed-form"],
+ 'compiler_fix' : ["g95", "-ffixed-form"],
+ 'compiler_f90' : ["g95"],
+ 'linker_so' : ["<F90>", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+ module_dir_switch = '-fmod='
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ return ['-fno-second-underscore']
+ def get_flags_opt(self):
+ return ['-O']
+ def get_flags_debug(self):
+ return ['-g']
+
+if __name__ == '__main__':
+ from distutils import log
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+    print(customized_fcompiler(compiler='g95').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py
new file mode 100644
index 00000000..3472b5d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py
@@ -0,0 +1,555 @@
+import re
+import os
+import sys
+import warnings
+import platform
+import tempfile
+import hashlib
+import base64
+import subprocess
+from subprocess import Popen, PIPE, STDOUT
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.fcompiler import FCompiler
+from distutils.version import LooseVersion
+
+compilers = ['GnuFCompiler', 'Gnu95FCompiler']
+
+TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
+
+# XXX: handle cross compilation
+
+
+def is_win64():
+ return sys.platform == "win32" and platform.architecture()[0] == "64bit"
+
+
+class GnuFCompiler(FCompiler):
+ compiler_type = 'gnu'
+ compiler_aliases = ('g77', )
+ description = 'GNU Fortran 77 compiler'
+
+ def gnu_version_match(self, version_string):
+ """Handle the different versions of GNU fortran compilers"""
+ # Strip warning(s) that may be emitted by gfortran
+ while version_string.startswith('gfortran: warning'):
+ version_string =\
+ version_string[version_string.find('\n') + 1:].strip()
+
+ # Gfortran versions from after 2010 will output a simple string
+ # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
+ # gfortrans may still return long version strings (``-dumpversion`` was
+ # an alias for ``--version``)
+ if len(version_string) <= 20:
+ # Try to find a valid version string
+ m = re.search(r'([0-9.]+)', version_string)
+ if m:
+ # g77 provides a longer version string that starts with GNU
+ # Fortran
+ if version_string.startswith('GNU Fortran'):
+ return ('g77', m.group(1))
+
+ # gfortran only outputs a version string such as #.#.#, so check
+ # if the match is at the start of the string
+ elif m.start() == 0:
+ return ('gfortran', m.group(1))
+ else:
+ # Output probably from --version, try harder:
+ m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
+ if m:
+ return ('gfortran', m.group(1))
+ m = re.search(
+ r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
+ if m:
+ v = m.group(1)
+ if v.startswith('0') or v.startswith('2') or v.startswith('3'):
+ # the '0' is for early g77's
+ return ('g77', v)
+ else:
+ # at some point in the 4.x series, the ' 95' was dropped
+ # from the version string
+ return ('gfortran', v)
+
+ # If still nothing, raise an error to make the problem easy to find.
+ err = 'A valid Fortran version was not found in this string:\n'
+ raise ValueError(err + version_string)
+
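+    # Classification sketch (illustrative version strings):
+    #   '4.8.5'                          -> ('gfortran', '4.8.5')
+    #   'GNU Fortran (GCC) 3.4.4 ...'    -> ('g77', '3.4.4')
+    #   'GNU Fortran 95 (GCC) 4.0.3 ...' -> ('gfortran', '4.0.3')
+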
+ def version_match(self, version_string):
+ v = self.gnu_version_match(version_string)
+ if not v or v[0] != 'g77':
+ return None
+ return v[1]
+
+ possible_executables = ['g77', 'f77']
+ executables = {
+ 'version_cmd' : [None, "-dumpversion"],
+ 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
+ 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
+ 'compiler_fix' : None,
+ 'linker_so' : [None, "-g", "-Wall"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"],
+ 'linker_exe' : [None, "-g", "-Wall"]
+ }
+ module_dir_switch = None
+ module_include_switch = None
+
+ # Cygwin: f771: warning: -fPIC ignored for target (all code is
+ # position independent)
+ if os.name != 'nt' and sys.platform != 'cygwin':
+ pic_flags = ['-fPIC']
+
+ # use -mno-cygwin for g77 when Python is not Cygwin-Python
+ if sys.platform == 'win32':
+ for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
+ executables[key].append('-mno-cygwin')
+
+ g2c = 'g2c'
+ suggested_f90_compiler = 'gnu95'
+
+ def get_flags_linker_so(self):
+ opt = self.linker_so[1:]
+ if sys.platform == 'darwin':
+ target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
+ # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
+ # and leave it alone. But, distutils will complain if the
+ # environment's value is different from the one in the Python
+ # Makefile used to build Python. We let distutils handle this
+ # error checking.
+ if not target:
+ # If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
+ # we try to get it first from sysconfig and then
+                # fall back to setting it to 10.9. This is a reasonable default
+ # even when using the official Python dist and those derived
+ # from it.
+ import sysconfig
+ target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+ if not target:
+ target = '10.9'
+ s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
+ warnings.warn(s, stacklevel=2)
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target)
+ opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
+ else:
+ opt.append("-shared")
+ if sys.platform.startswith('sunos'):
+ # SunOS often has dynamically loaded symbols defined in the
+ # static library libg2c.a The linker doesn't like this. To
+ # ignore the problem, use the -mimpure-text flag. It isn't
+ # the safest thing, but seems to work. 'man gcc' says:
+ # ".. Instead of using -mimpure-text, you should compile all
+ # source code with -fpic or -fPIC."
+ opt.append('-mimpure-text')
+ return opt
+
+ def get_libgcc_dir(self):
+ try:
+ output = subprocess.check_output(self.compiler_f77 +
+ ['-print-libgcc-file-name'])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
+ return os.path.dirname(output)
+ return None
+
+ def get_libgfortran_dir(self):
+ if sys.platform[:5] == 'linux':
+ libgfortran_name = 'libgfortran.so'
+ elif sys.platform == 'darwin':
+ libgfortran_name = 'libgfortran.dylib'
+ else:
+ libgfortran_name = None
+
+ libgfortran_dir = None
+ if libgfortran_name:
+ find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
+ try:
+ output = subprocess.check_output(
+ self.compiler_f77 + find_lib_arg)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
+ libgfortran_dir = os.path.dirname(output)
+ return libgfortran_dir
+
+ def get_library_dirs(self):
+ opt = []
+ if sys.platform[:5] != 'linux':
+ d = self.get_libgcc_dir()
+ if d:
+ # if windows and not cygwin, libg2c lies in a different folder
+ if sys.platform == 'win32' and not d.startswith('/usr/lib'):
+ d = os.path.normpath(d)
+ path = os.path.join(d, "lib%s.a" % self.g2c)
+ if not os.path.exists(path):
+ root = os.path.join(d, *((os.pardir, ) * 4))
+ d2 = os.path.abspath(os.path.join(root, 'lib'))
+ path = os.path.join(d2, "lib%s.a" % self.g2c)
+ if os.path.exists(path):
+ opt.append(d2)
+ opt.append(d)
+ # For Macports / Linux, libgfortran and libgcc are not co-located
+ lib_gfortran_dir = self.get_libgfortran_dir()
+ if lib_gfortran_dir:
+ opt.append(lib_gfortran_dir)
+ return opt
+
+ def get_libraries(self):
+ opt = []
+ d = self.get_libgcc_dir()
+ if d is not None:
+ g2c = self.g2c + '-pic'
+ f = self.static_lib_format % (g2c, self.static_lib_extension)
+ if not os.path.isfile(os.path.join(d, f)):
+ g2c = self.g2c
+ else:
+ g2c = self.g2c
+
+ if g2c is not None:
+ opt.append(g2c)
+ c_compiler = self.c_compiler
+ if sys.platform == 'win32' and c_compiler and \
+ c_compiler.compiler_type == 'msvc':
+ opt.append('gcc')
+ if sys.platform == 'darwin':
+ opt.append('cc_dynamic')
+ return opt
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_opt(self):
+ v = self.get_version()
+ if v and v <= '3.3.3':
+ # With this compiler version building Fortran BLAS/LAPACK
+ # with -O3 caused failures in lib.lapack heevr,syevr tests.
+ opt = ['-O2']
+ else:
+ opt = ['-O3']
+ opt.append('-funroll-loops')
+ return opt
+
+ def _c_arch_flags(self):
+ """ Return detected arch flags from CFLAGS """
+ import sysconfig
+ try:
+ cflags = sysconfig.get_config_vars()['CFLAGS']
+ except KeyError:
+ return []
+ arch_re = re.compile(r"-arch\s+(\w+)")
+ arch_flags = []
+ for arch in arch_re.findall(cflags):
+ arch_flags += ['-arch', arch]
+ return arch_flags
+
+ def get_flags_arch(self):
+ return []
+
+ def runtime_library_dir_option(self, dir):
+ if sys.platform == 'win32' or sys.platform == 'cygwin':
+ # Linux/Solaris/Unix support RPATH, Windows does not
+ raise NotImplementedError
+
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
+ if sys.platform == 'darwin':
+ return f'-Wl,-rpath,{dir}'
+ elif sys.platform.startswith(('aix', 'os400')):
+ # AIX RPATH is called LIBPATH
+ return f'-Wl,-blibpath:{dir}'
+ else:
+ return f'-Wl,-rpath={dir}'
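+    # E.g. (a sketch) for dir='/opt/lib': '-Wl,-rpath=/opt/lib' on Linux,
+    # '-Wl,-rpath,/opt/lib' on macOS, '-Wl,-blibpath:/opt/lib' on AIX.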
+
+
+class Gnu95FCompiler(GnuFCompiler):
+ compiler_type = 'gnu95'
+ compiler_aliases = ('gfortran', )
+ description = 'GNU Fortran 95 compiler'
+
+ def version_match(self, version_string):
+ v = self.gnu_version_match(version_string)
+ if not v or v[0] != 'gfortran':
+ return None
+ v = v[1]
+ if LooseVersion(v) >= "4":
+ # gcc-4 series releases do not support -mno-cygwin option
+ pass
+ else:
+ # use -mno-cygwin flag for gfortran when Python is not
+ # Cygwin-Python
+ if sys.platform == 'win32':
+ for key in [
+ 'version_cmd', 'compiler_f77', 'compiler_f90',
+ 'compiler_fix', 'linker_so', 'linker_exe'
+ ]:
+ self.executables[key].append('-mno-cygwin')
+ return v
+
+ possible_executables = ['gfortran', 'f95']
+ executables = {
+ 'version_cmd' : ["<F90>", "-dumpversion"],
+ 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
+ "-fno-second-underscore"],
+ 'compiler_f90' : [None, "-Wall", "-g",
+ "-fno-second-underscore"],
+ 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
+ "-fno-second-underscore"],
+ 'linker_so' : ["<F90>", "-Wall", "-g"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"],
+ 'linker_exe' : [None, "-Wall"]
+ }
+
+ module_dir_switch = '-J'
+ module_include_switch = '-I'
+
+ if sys.platform.startswith(('aix', 'os400')):
+ executables['linker_so'].append('-lpthread')
+ if platform.architecture()[0][:2] == '64':
+ for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']:
+ executables[key].append('-maix64')
+
+ g2c = 'gfortran'
+
+ def _universal_flags(self, cmd):
+ """Return a list of -arch flags for every supported architecture."""
+        if sys.platform != 'darwin':
+ return []
+ arch_flags = []
+ # get arches the C compiler gets.
+ c_archs = self._c_arch_flags()
+ if "i386" in c_archs:
+ c_archs[c_archs.index("i386")] = "i686"
+ # check the arches the Fortran compiler supports, and compare with
+ # arch flags from C compiler
+ for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]:
+ if _can_target(cmd, arch) and arch in c_archs:
+ arch_flags.extend(["-arch", arch])
+ return arch_flags
+
+ def get_flags(self):
+ flags = GnuFCompiler.get_flags(self)
+ arch_flags = self._universal_flags(self.compiler_f90)
+ if arch_flags:
+ flags[:0] = arch_flags
+ return flags
+
+ def get_flags_linker_so(self):
+ flags = GnuFCompiler.get_flags_linker_so(self)
+ arch_flags = self._universal_flags(self.linker_so)
+ if arch_flags:
+ flags[:0] = arch_flags
+ return flags
+
+ def get_library_dirs(self):
+ opt = GnuFCompiler.get_library_dirs(self)
+ if sys.platform == 'win32':
+ c_compiler = self.c_compiler
+ if c_compiler and c_compiler.compiler_type == "msvc":
+ target = self.get_target()
+ if target:
+ d = os.path.normpath(self.get_libgcc_dir())
+ root = os.path.join(d, *((os.pardir, ) * 4))
+ path = os.path.join(root, "lib")
+ mingwdir = os.path.normpath(path)
+ if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
+ opt.append(mingwdir)
+ # For Macports / Linux, libgfortran and libgcc are not co-located
+ lib_gfortran_dir = self.get_libgfortran_dir()
+ if lib_gfortran_dir:
+ opt.append(lib_gfortran_dir)
+ return opt
+
+ def get_libraries(self):
+ opt = GnuFCompiler.get_libraries(self)
+ if sys.platform == 'darwin':
+ opt.remove('cc_dynamic')
+ if sys.platform == 'win32':
+ c_compiler = self.c_compiler
+ if c_compiler and c_compiler.compiler_type == "msvc":
+ if "gcc" in opt:
+ i = opt.index("gcc")
+ opt.insert(i + 1, "mingwex")
+ opt.insert(i + 1, "mingw32")
+                return []
+ return opt
+
+ def get_target(self):
+ try:
+ p = subprocess.Popen(
+ self.compiler_f77 + ['-v'],
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = p.communicate()
+ output = (stdout or b"") + (stderr or b"")
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
+ m = TARGET_R.search(output)
+ if m:
+ return m.group(1)
+ return ""
+
+ def _hash_files(self, filenames):
+ h = hashlib.sha1()
+ for fn in filenames:
+ with open(fn, 'rb') as f:
+ while True:
+ block = f.read(131072)
+ if not block:
+ break
+ h.update(block)
+ text = base64.b32encode(h.digest())
+ text = text.decode('ascii')
+ return text.rstrip('=')
+
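+    # Digest sketch: b32encode of the 20-byte SHA-1 digest is exactly 32
+    # characters and needs no '=' padding at that size, so the rstrip is
+    # purely defensive; the result embeds cleanly in the wrapper DLL/lib
+    # file names built below.
+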
+ def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
+ chained_dlls, is_archive):
+ """Create a wrapper shared library for the given objects
+
+ Return an MSVC-compatible lib
+ """
+
+ c_compiler = self.c_compiler
+ if c_compiler.compiler_type != "msvc":
+ raise ValueError("This method only supports MSVC")
+
+ object_hash = self._hash_files(list(objects) + list(chained_dlls))
+
+ if is_win64():
+ tag = 'win_amd64'
+ else:
+ tag = 'win32'
+
+ basename = 'lib' + os.path.splitext(
+ os.path.basename(objects[0]))[0][:8]
+ root_name = basename + '.' + object_hash + '.gfortran-' + tag
+ dll_name = root_name + '.dll'
+ def_name = root_name + '.def'
+ lib_name = root_name + '.lib'
+ dll_path = os.path.join(extra_dll_dir, dll_name)
+ def_path = os.path.join(output_dir, def_name)
+ lib_path = os.path.join(output_dir, lib_name)
+
+ if os.path.isfile(lib_path):
+ # Nothing to do
+ return lib_path, dll_path
+
+ if is_archive:
+ objects = (["-Wl,--whole-archive"] + list(objects) +
+ ["-Wl,--no-whole-archive"])
+ self.link_shared_object(
+ objects,
+ dll_name,
+ output_dir=extra_dll_dir,
+ extra_postargs=list(chained_dlls) + [
+ '-Wl,--allow-multiple-definition',
+ '-Wl,--output-def,' + def_path,
+ '-Wl,--export-all-symbols',
+ '-Wl,--enable-auto-import',
+ '-static',
+ '-mlong-double-64',
+ ])
+
+ # No PowerPC!
+ if is_win64():
+ specifier = '/MACHINE:X64'
+ else:
+ specifier = '/MACHINE:X86'
+
+ # MSVC specific code
+ lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
+ if not c_compiler.initialized:
+ c_compiler.initialize()
+ c_compiler.spawn([c_compiler.lib] + lib_args)
+
+ return lib_path, dll_path
+
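+    # Naming sketch (illustrative): for objects[0] == 'foo.o' on 64-bit
+    # Windows this emits libfoo.<32-char-hash>.gfortran-win_amd64.dll plus
+    # matching .def/.lib files, so identical inputs reuse the cached .lib.
+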
+ def can_ccompiler_link(self, compiler):
+ # MSVC cannot link objects compiled by GNU fortran
+ return compiler.compiler_type not in ("msvc", )
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+        linker into a file that is compatible.
+ """
+ if self.c_compiler.compiler_type == "msvc":
+ # Compile a DLL and return the lib for the DLL as
+ # the object. Also keep track of previous DLLs that
+ # we have compiled so that we can link against them.
+
+ # If there are .a archives, assume they are self-contained
+ # static libraries, and build separate DLLs for each
+ archives = []
+ plain_objects = []
+ for obj in objects:
+ if obj.lower().endswith('.a'):
+ archives.append(obj)
+ else:
+ plain_objects.append(obj)
+
+ chained_libs = []
+ chained_dlls = []
+ for archive in archives[::-1]:
+ lib, dll = self._link_wrapper_lib(
+ [archive],
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=True)
+ chained_libs.insert(0, lib)
+ chained_dlls.insert(0, dll)
+
+ if not plain_objects:
+ return chained_libs
+
+ lib, dll = self._link_wrapper_lib(
+ plain_objects,
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=False)
+ return [lib] + chained_libs
+ else:
+ raise ValueError("Unsupported C compiler")
+
+
+def _can_target(cmd, arch):
+ """Return true if the architecture supports the -arch flag"""
+ newcmd = cmd[:]
+ fid, filename = tempfile.mkstemp(suffix=".f")
+ os.close(fid)
+ try:
+ d = os.path.dirname(filename)
+ output = os.path.splitext(filename)[0] + ".o"
+ try:
+ newcmd.extend(["-arch", arch, "-c", filename])
+ p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
+ p.communicate()
+ return p.returncode == 0
+ finally:
+ if os.path.exists(output):
+ os.remove(output)
+ finally:
+ os.remove(filename)
+
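+# Illustrative call (a sketch): _can_target(['gfortran'], 'x86_64') compiles
+# a dummy source with '-arch x86_64' and reports whether the compiler accepts
+# the flag; this is only meaningful for Apple-style universal toolchains.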
+
+if __name__ == '__main__':
+ from distutils import log
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+
+ print(customized_fcompiler('gnu').get_version())
+ try:
+ print(customized_fcompiler('g95').get_version())
+ except Exception as e:
+ print(e)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py
new file mode 100644
index 00000000..09e6483b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py
@@ -0,0 +1,41 @@
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['HPUXFCompiler']
+
+class HPUXFCompiler(FCompiler):
+
+ compiler_type = 'hpux'
+ description = 'HP Fortran 90 Compiler'
+ version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
+
+ executables = {
+ 'version_cmd' : ["f90", "+version"],
+ 'compiler_f77' : ["f90"],
+ 'compiler_fix' : ["f90"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["ld", "-b"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = None #XXX: fix me
+ module_include_switch = None #XXX: fix me
+ pic_flags = ['+Z']
+ def get_flags(self):
+ return self.pic_flags + ['+ppu', '+DD64']
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_libraries(self):
+ return ['m']
+ def get_library_dirs(self):
+ opt = ['/usr/lib/hpux64']
+ return opt
+ def get_version(self, force=0, ok_status=[256, 0, 1]):
+ # XXX status==256 may indicate 'unrecognized option' or
+ # 'no input file'. So, version_cmd needs more work.
+ return FCompiler.get_version(self, force, ok_status)
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(10)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='hpux').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py
new file mode 100644
index 00000000..eff24401
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py
@@ -0,0 +1,97 @@
+import os
+import re
+import sys
+import subprocess
+
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils.misc_util import make_temp_file
+from distutils import log
+
+compilers = ['IBMFCompiler']
+
+class IBMFCompiler(FCompiler):
+ compiler_type = 'ibm'
+ description = 'IBM XL Fortran Compiler'
+ version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
+ #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
+
+ executables = {
+ 'version_cmd' : ["<F77>", "-qversion"],
+ 'compiler_f77' : ["xlf"],
+ 'compiler_fix' : ["xlf90", "-qfixed"],
+ 'compiler_f90' : ["xlf90"],
+ 'linker_so' : ["xlf95"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_version(self,*args,**kwds):
+ version = FCompiler.get_version(self,*args,**kwds)
+
+ if version is None and sys.platform.startswith('aix'):
+ # use lslpp to find out xlf version
+            lslpp = find_executable('lslpp')
+            xlf = find_executable('xlf')
+            if xlf and lslpp and os.path.exists(xlf) \
+                    and os.path.exists(lslpp):
+                try:
+                    o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
+                except (OSError, subprocess.CalledProcessError):
+                    pass
+                else:
+                    # check_output returns bytes; decode before matching
+                    o = o.decode('utf-8', errors='replace')
+                    m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
+                    if m: version = m.group('version')
+
+ xlf_dir = '/etc/opt/ibmcmp/xlf'
+ if version is None and os.path.isdir(xlf_dir):
+ # linux:
+ # If the output of xlf does not contain version info
+ # (that's the case with xlf 8.1, for instance) then
+ # let's try another method:
+            l = sorted(os.listdir(xlf_dir), reverse=True)
+ l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
+ if l:
+ from distutils.version import LooseVersion
+ self.version = version = LooseVersion(l[0])
+ return version
+
+ def get_flags(self):
+ return ['-qextname']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_linker_so(self):
+ opt = []
+ if sys.platform=='darwin':
+ opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
+ else:
+ opt.append('-bshared')
+ version = self.get_version(ok_status=[0, 40])
+ if version is not None:
+ if sys.platform.startswith('aix'):
+ xlf_cfg = '/etc/xlf.cfg'
+ else:
+ xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
+ fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
+ log.info('Creating '+new_cfg)
+ with open(xlf_cfg, 'r') as fi:
+ crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match
+ for line in fi:
+ m = crt1_match(line)
+ if m:
+ fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
+ else:
+ fo.write(line)
+ fo.close()
+ opt.append('-F'+new_cfg)
+ return opt
+
+ def get_flags_opt(self):
+ return ['-O3']
+
+if __name__ == '__main__':
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+ print(customized_fcompiler(compiler='ibm').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py
new file mode 100644
index 00000000..1d606590
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py
@@ -0,0 +1,211 @@
+# http://developer.intel.com/software/products/compilers/flin/
+import sys
+
+from numpy.distutils.ccompiler import simple_version_match
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+
+compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
+ 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
+ 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
+
+
+def intel_version_match(type):
+ # Match against the important stuff in the version string
+ return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,))
+
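+# Banner sketch (illustrative): intel_version_match('32-bit|IA-32') accepts a
+# banner such as 'Intel(R) Fortran Compiler ... IA-32 ... Version 11.1', and
+# simple_version_match then extracts the version number that follows.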
+
+class BaseIntelFCompiler(FCompiler):
+ def update_executables(self):
+ f = dummy_fortran_file()
+ self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c',
+ f + '.f', '-o', f + '.o']
+
+ def runtime_library_dir_option(self, dir):
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
+ return '-Wl,-rpath=%s' % dir
+
+
+class IntelFCompiler(BaseIntelFCompiler):
+
+ compiler_type = 'intel'
+ compiler_aliases = ('ifort',)
+ description = 'Intel Fortran Compiler for 32-bit apps'
+ version_match = intel_version_match('32-bit|IA-32')
+
+ possible_executables = ['ifort', 'ifc']
+
+ executables = {
+ 'version_cmd' : None, # set by update_executables
+ 'compiler_f77' : [None, "-72", "-w90", "-w95"],
+ 'compiler_f90' : [None],
+ 'compiler_fix' : [None, "-FI"],
+ 'linker_so' : ["<F90>", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ pic_flags = ['-fPIC']
+ module_dir_switch = '-module ' # Don't remove ending space!
+ module_include_switch = '-I'
+
+ def get_flags_free(self):
+ return ['-FR']
+
+ def get_flags(self):
+ return ['-fPIC']
+
+ def get_flags_opt(self): # Scipy test failures with -O2
+ v = self.get_version()
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ return ['-fp-model', 'strict', '-O1',
+ '-assume', 'minus0', '-{}'.format(mpopt)]
+
+ def get_flags_arch(self):
+ return []
+
+ def get_flags_linker_so(self):
+ opt = FCompiler.get_flags_linker_so(self)
+ v = self.get_version()
+ if v and v >= '8.0':
+ opt.append('-nofor_main')
+ if sys.platform == 'darwin':
+ # Here, it's -dynamiclib
+ try:
+ idx = opt.index('-shared')
+ opt.remove('-shared')
+ except ValueError:
+ idx = 0
+ opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
+ return opt
+
+
+class IntelItaniumFCompiler(IntelFCompiler):
+ compiler_type = 'intele'
+ compiler_aliases = ()
+ description = 'Intel Fortran Compiler for Itanium apps'
+
+ version_match = intel_version_match('Itanium|IA-64')
+
+ possible_executables = ['ifort', 'efort', 'efc']
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None, "-FI", "-w90", "-w95"],
+ 'compiler_fix' : [None, "-FI"],
+ 'compiler_f90' : [None],
+ 'linker_so' : ['<F90>', "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+
+class IntelEM64TFCompiler(IntelFCompiler):
+ compiler_type = 'intelem'
+ compiler_aliases = ()
+ description = 'Intel Fortran Compiler for 64-bit apps'
+
+ version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit')
+
+ possible_executables = ['ifort', 'efort', 'efc']
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None, "-FI"],
+ 'compiler_fix' : [None, "-FI"],
+ 'compiler_f90' : [None],
+ 'linker_so' : ['<F90>', "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+# Is there no difference in the version string between the above compilers
+# and the Visual compilers?
+
+
+class IntelVisualFCompiler(BaseIntelFCompiler):
+ compiler_type = 'intelv'
+ description = 'Intel Visual Fortran Compiler for 32-bit apps'
+ version_match = intel_version_match('32-bit|IA-32')
+
+ def update_executables(self):
+ f = dummy_fortran_file()
+ self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
+ f + '.f', '/o', f + '.o']
+
+ ar_exe = 'lib.exe'
+ possible_executables = ['ifort', 'ifl']
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None],
+ 'compiler_fix' : [None],
+ 'compiler_f90' : [None],
+ 'linker_so' : [None],
+ 'archiver' : [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib' : None
+ }
+
+ compile_switch = '/c '
+ object_switch = '/Fo' # No space after /Fo!
+ library_switch = '/OUT:' # No space after /OUT:!
+ module_dir_switch = '/module:' # No space after /module:
+ module_include_switch = '/I'
+
+ def get_flags(self):
+ opt = ['/nologo', '/MD', '/nbs', '/names:lowercase',
+ '/assume:underscore', '/fpp']
+ return opt
+
+ def get_flags_free(self):
+ return []
+
+ def get_flags_debug(self):
+ return ['/4Yb', '/d2']
+
+ def get_flags_opt(self):
+ return ['/O1', '/assume:minus0'] # Scipy test failures with /O2
+
+ def get_flags_arch(self):
+ return ["/arch:IA32", "/QaxSSE3"]
+
+ def runtime_library_dir_option(self, dir):
+ raise NotImplementedError
+
+
+class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
+ compiler_type = 'intelev'
+ description = 'Intel Visual Fortran Compiler for Itanium apps'
+
+ version_match = intel_version_match('Itanium')
+
+ possible_executables = ['efl'] # XXX this is a wild guess
+ ar_exe = IntelVisualFCompiler.ar_exe
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None, "-FI", "-w90", "-w95"],
+ 'compiler_fix' : [None, "-FI", "-4L72", "-w"],
+ 'compiler_f90' : [None],
+ 'linker_so' : ['<F90>', "-shared"],
+ 'archiver' : [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib' : None
+ }
+
+
+class IntelEM64VisualFCompiler(IntelVisualFCompiler):
+ compiler_type = 'intelvem'
+ description = 'Intel Visual Fortran Compiler for 64-bit apps'
+
+ version_match = simple_version_match(start=r'Intel\(R\).*?64,')
+
+ def get_flags_arch(self):
+ return []
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='intel').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py
new file mode 100644
index 00000000..e9258382
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py
@@ -0,0 +1,45 @@
+import os
+
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['LaheyFCompiler']
+
+class LaheyFCompiler(FCompiler):
+
+ compiler_type = 'lahey'
+ description = 'Lahey/Fujitsu Fortran 95 Compiler'
+ version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
+
+ executables = {
+ 'version_cmd' : ["<F90>", "--version"],
+ 'compiler_f77' : ["lf95", "--fix"],
+ 'compiler_fix' : ["lf95", "--fix"],
+ 'compiler_f90' : ["lf95"],
+ 'linker_so' : ["lf95", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ module_dir_switch = None #XXX Fix me
+ module_include_switch = None #XXX Fix me
+
+ def get_flags_opt(self):
+ return ['-O']
+ def get_flags_debug(self):
+ return ['-g', '--chk', '--chkglobal']
+ def get_library_dirs(self):
+ opt = []
+ d = os.environ.get('LAHEY')
+ if d:
+ opt.append(os.path.join(d, 'lib'))
+ return opt
+ def get_libraries(self):
+ opt = []
+ opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='lahey').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py
new file mode 100644
index 00000000..a0973804
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py
@@ -0,0 +1,54 @@
+from numpy.distutils.cpuinfo import cpu
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['MIPSFCompiler']
+
+class MIPSFCompiler(FCompiler):
+
+ compiler_type = 'mips'
+ description = 'MIPSpro Fortran Compiler'
+ version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)'
+
+ executables = {
+ 'version_cmd' : ["<F90>", "-version"],
+ 'compiler_f77' : ["f77", "-f77"],
+ 'compiler_fix' : ["f90", "-fixedform"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["f90", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : None
+ }
+ module_dir_switch = None #XXX: fix me
+ module_include_switch = None #XXX: fix me
+ pic_flags = ['-KPIC']
+
+ def get_flags(self):
+ return self.pic_flags + ['-n32']
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_arch(self):
+ opt = []
+ for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split():
+ if getattr(cpu, 'is_IP%s'%a)():
+ opt.append('-TARG:platform=IP%s' % a)
+ break
+ return opt
+ def get_flags_arch_f77(self):
+ r = None
+ if cpu.is_r10000(): r = 10000
+ elif cpu.is_r12000(): r = 12000
+ elif cpu.is_r8000(): r = 8000
+ elif cpu.is_r5000(): r = 5000
+ elif cpu.is_r4000(): r = 4000
+ if r is not None:
+ return ['r%s' % (r)]
+ return []
+ def get_flags_arch_f90(self):
+ r = self.get_flags_arch_f77()
+ if r:
+ r[0] = '-' + r[0]
+ return r
+
+if __name__ == '__main__':
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='mips').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py
new file mode 100644
index 00000000..939201f4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py
@@ -0,0 +1,87 @@
+import sys
+import re
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['NAGFCompiler', 'NAGFORCompiler']
+
+class BaseNAGFCompiler(FCompiler):
+ version_pattern = r'NAG.* Release (?P<version>[^(\s]*)'
+
+ def version_match(self, version_string):
+ m = re.search(self.version_pattern, version_string)
+ if m:
+ return m.group('version')
+ else:
+ return None
+
+ def get_flags_linker_so(self):
+ return ["-Wl,-shared"]
+ def get_flags_opt(self):
+ return ['-O4']
+ def get_flags_arch(self):
+ return []
+
+class NAGFCompiler(BaseNAGFCompiler):
+
+ compiler_type = 'nag'
+ description = 'NAGWare Fortran 95 Compiler'
+
+ executables = {
+ 'version_cmd' : ["<F90>", "-V"],
+ 'compiler_f77' : ["f95", "-fixed"],
+ 'compiler_fix' : ["f95", "-fixed"],
+ 'compiler_f90' : ["f95"],
+ 'linker_so' : ["<F90>"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_flags_linker_so(self):
+ if sys.platform == 'darwin':
+ return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress']
+ return BaseNAGFCompiler.get_flags_linker_so(self)
+ def get_flags_arch(self):
+ version = self.get_version()
+ if version and version < '5.1':
+ return ['-target=native']
+ else:
+ return BaseNAGFCompiler.get_flags_arch(self)
+ def get_flags_debug(self):
+ return ['-g', '-gline', '-g90', '-nan', '-C']
+
+class NAGFORCompiler(BaseNAGFCompiler):
+
+ compiler_type = 'nagfor'
+ description = 'NAG Fortran Compiler'
+
+ executables = {
+ 'version_cmd' : ["nagfor", "-V"],
+ 'compiler_f77' : ["nagfor", "-fixed"],
+ 'compiler_fix' : ["nagfor", "-fixed"],
+ 'compiler_f90' : ["nagfor"],
+ 'linker_so' : ["nagfor"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_flags_linker_so(self):
+ if sys.platform == 'darwin':
+ return ['-unsharedrts',
+ '-Wl,-bundle,-flat_namespace,-undefined,suppress']
+ return BaseNAGFCompiler.get_flags_linker_so(self)
+ def get_flags_debug(self):
+ version = self.get_version()
+ if version and version > '6.1':
+ return ['-g', '-u', '-nan', '-C=all', '-thread_safe',
+ '-kind=unique', '-Warn=allocation', '-Warn=subnormal']
+ else:
+ return ['-g', '-nan', '-C=all', '-u', '-thread_safe']
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ compiler = customized_fcompiler(compiler='nagfor')
+ print(compiler.get_version())
+ print(compiler.get_flags_debug())
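Example (not part of the diff): the NAG classes above key their flags off get_version(), which returns the version as a plain string, so comparisons such as version > '6.1' are lexicographic, not numeric. A minimal sketch of how that behaves:

    # Lexicographic ordering matches numeric ordering for single-digit
    # majors, but a hypothetical two-digit release would sort low.
    for v in ['5.3', '6.1', '6.2', '7.0', '10.1']:
        print(v, v > '6.1')   # '10.1' > '6.1' is False, since '1' < '6'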
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py
new file mode 100644
index 00000000..ef411fff
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py
@@ -0,0 +1,28 @@
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils import customized_fcompiler
+
+compilers = ['NoneFCompiler']
+
+class NoneFCompiler(FCompiler):
+
+ compiler_type = 'none'
+ description = 'Fake Fortran compiler'
+
+ executables = {'compiler_f77': None,
+ 'compiler_f90': None,
+ 'compiler_fix': None,
+ 'linker_so': None,
+ 'linker_exe': None,
+ 'archiver': None,
+ 'ranlib': None,
+ 'version_cmd': None,
+ }
+
+ def find_executables(self):
+ pass
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ print(customized_fcompiler(compiler='none').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py
new file mode 100644
index 00000000..212f3480
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py
@@ -0,0 +1,53 @@
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['NVHPCFCompiler']
+
+class NVHPCFCompiler(FCompiler):
+ """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler
+
+ https://developer.nvidia.com/hpc-sdk
+
+ Since August 2020 the NVIDIA HPC SDK includes the compilers formerly
+ known as the Portland Group compilers (https://www.pgroup.com/index.htm).
+ See also `numpy.distutils.fcompiler.pg`.
+ """
+
+ compiler_type = 'nv'
+ description = 'NVIDIA HPC SDK'
+ version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P<version>[\d.-]+).*'
+
+ executables = {
+ 'version_cmd': ["<F90>", "-V"],
+ 'compiler_f77': ["nvfortran"],
+ 'compiler_fix': ["nvfortran", "-Mfixed"],
+ 'compiler_f90': ["nvfortran"],
+ 'linker_so': ["<F90>"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+
+ module_dir_switch = '-module '
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ opt = ['-Minform=inform', '-Mnosecond_underscore']
+ return self.pic_flags + opt
+
+ def get_flags_opt(self):
+ return ['-fast']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_linker_so(self):
+ return ["-shared", '-fpic']
+
+ def runtime_library_dir_option(self, dir):
+ return '-R%s' % dir
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='nv').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py
new file mode 100644
index 00000000..0768cb12
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py
@@ -0,0 +1,33 @@
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['PathScaleFCompiler']
+
+class PathScaleFCompiler(FCompiler):
+
+ compiler_type = 'pathf95'
+ description = 'PathScale Fortran Compiler'
+ version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
+
+ executables = {
+ 'version_cmd' : ["pathf95", "-version"],
+ 'compiler_f77' : ["pathf95", "-fixedform"],
+ 'compiler_fix' : ["pathf95", "-fixedform"],
+ 'compiler_f90' : ["pathf95"],
+ 'linker_so' : ["pathf95", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-fPIC']
+ module_dir_switch = '-module ' # Don't remove ending space!
+ module_include_switch = '-I'
+
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_debug(self):
+ return ['-g']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='pathf95').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py
new file mode 100644
index 00000000..72442c4f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py
@@ -0,0 +1,128 @@
+# http://www.pgroup.com
+import sys
+import functools
+
+from numpy.distutils.fcompiler import FCompiler
+from sys import platform
+from os.path import join, dirname, normpath
+
+compilers = ['PGroupFCompiler', 'PGroupFlangCompiler']
+
+
+class PGroupFCompiler(FCompiler):
+
+ compiler_type = 'pg'
+ description = 'Portland Group Fortran Compiler'
+ version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'
+
+ if platform == 'darwin':
+ executables = {
+ 'version_cmd': ["<F77>", "-V"],
+ 'compiler_f77': ["pgfortran", "-dynamiclib"],
+ 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
+ 'compiler_f90': ["pgfortran", "-dynamiclib"],
+ 'linker_so': ["libtool"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': ["ranlib"]
+ }
+ pic_flags = ['']
+ else:
+ executables = {
+ 'version_cmd': ["<F77>", "-V"],
+ 'compiler_f77': ["pgfortran"],
+ 'compiler_fix': ["pgfortran", "-Mfixed"],
+ 'compiler_f90': ["pgfortran"],
+ 'linker_so': ["<F90>"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+
+ module_dir_switch = '-module '
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ opt = ['-Minform=inform', '-Mnosecond_underscore']
+ return self.pic_flags + opt
+
+ def get_flags_opt(self):
+ return ['-fast']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ if platform == 'darwin':
+ def get_flags_linker_so(self):
+ return ["-dynamic", '-undefined', 'dynamic_lookup']
+
+ else:
+ def get_flags_linker_so(self):
+ return ["-shared", '-fpic']
+
+ def runtime_library_dir_option(self, dir):
+ return '-R%s' % dir
+
+
+class PGroupFlangCompiler(FCompiler):
+ compiler_type = 'flang'
+ description = 'Portland Group Fortran LLVM Compiler'
+ version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'
+
+ ar_exe = 'lib.exe'
+ possible_executables = ['flang']
+
+ executables = {
+ 'version_cmd': ["<F77>", "--version"],
+ 'compiler_f77': ["flang"],
+ 'compiler_fix': ["flang"],
+ 'compiler_f90': ["flang"],
+ 'linker_so': [None],
+ 'archiver': [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib': None
+ }
+
+ library_switch = '/OUT:' # No space after /OUT:!
+ module_dir_switch = '-module ' # Don't remove ending space!
+
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ opt.extend(['flang', 'flangrti', 'ompstub'])
+ return opt
+
+ @functools.lru_cache(maxsize=128)
+ def get_library_dirs(self):
+ """List of compiler library directories."""
+ opt = FCompiler.get_library_dirs(self)
+ flang_dir = dirname(self.executables['compiler_f77'][0])
+ opt.append(normpath(join(flang_dir, '..', 'lib')))
+
+ return opt
+
+ def get_flags(self):
+ return []
+
+ def get_flags_free(self):
+ return []
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_opt(self):
+ return ['-O3']
+
+ def get_flags_arch(self):
+ return []
+
+ def runtime_library_dir_option(self, dir):
+ raise NotImplementedError
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ if 'flang' in sys.argv:
+ print(customized_fcompiler(compiler='flang').get_version())
+ else:
+ print(customized_fcompiler(compiler='pg').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py
new file mode 100644
index 00000000..d039f0b2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py
@@ -0,0 +1,51 @@
+from numpy.distutils.ccompiler import simple_version_match
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['SunFCompiler']
+
+class SunFCompiler(FCompiler):
+
+ compiler_type = 'sun'
+ description = 'Sun or Forte Fortran 95 Compiler'
+ # ex:
+ # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28
+ version_match = simple_version_match(
+ start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95')
+
+ executables = {
+ 'version_cmd' : ["<F90>", "-V"],
+ 'compiler_f77' : ["f90"],
+ 'compiler_fix' : ["f90", "-fixed"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["<F90>", "-Bdynamic", "-G"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = '-moddir='
+ module_include_switch = '-M'
+ pic_flags = ['-xcode=pic32']
+
+ def get_flags_f77(self):
+ ret = ["-ftrap=%none"]
+ if (self.get_version() or '') >= '7':
+ ret.append("-f77")
+ else:
+ ret.append("-fixed")
+ return ret
+ def get_opt(self):
+ return ['-fast', '-dalign']
+ def get_arch(self):
+ return ['-xtarget=generic']
+ def get_libraries(self):
+ opt = []
+ opt.extend(['fsu', 'sunmath', 'mvec'])
+ return opt
+
+ def runtime_library_dir_option(self, dir):
+ return '-R%s' % dir
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='sun').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py
new file mode 100644
index 00000000..92a1647b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py
@@ -0,0 +1,52 @@
+import os
+
+from numpy.distutils.fcompiler.gnu import GnuFCompiler
+
+compilers = ['VastFCompiler']
+
+class VastFCompiler(GnuFCompiler):
+ compiler_type = 'vast'
+ compiler_aliases = ()
+ description = 'Pacific-Sierra Research Fortran 90 Compiler'
+ version_pattern = (r'\s*Pacific-Sierra Research vf90 '
+ r'(Personal|Professional)\s+(?P<version>[^\s]*)')
+
+ # VAST f90 does not support -o with -c, so object files are created
+ # in the current directory and then moved to the build directory.
+ object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
+
+ executables = {
+ 'version_cmd' : ["vf90", "-v"],
+ 'compiler_f77' : ["g77"],
+ 'compiler_fix' : ["f90", "-Wv,-ya"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["<F90>"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = None #XXX Fix me
+ module_include_switch = None #XXX Fix me
+
+ def find_executables(self):
+ pass
+
+ def get_version_cmd(self):
+ f90 = self.compiler_f90[0]
+ d, b = os.path.split(f90)
+ vf90 = os.path.join(d, 'v'+b)
+ return vf90
+
+ def get_flags_arch(self):
+ vast_version = self.get_version()
+ gnu = GnuFCompiler()
+ gnu.customize(None)
+ self.version = gnu.get_version()
+ opt = GnuFCompiler.get_flags_arch(self)
+ self.version = vast_version
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='vast').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/from_template.py b/venv/lib/python3.9/site-packages/numpy/distutils/from_template.py
new file mode 100644
index 00000000..90d1f4c3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/from_template.py
@@ -0,0 +1,261 @@
+#!/usr/bin/env python3
+"""
+
+process_file(filename)
+
+ takes templated file .xxx.src and produces .xxx file where .xxx
+ is .pyf .f90 or .f using the following template rules:
+
+ '<..>' denotes a template.
+
+ All function and subroutine blocks in a source file with names that
+ contain '<..>' will be replicated according to the rules in '<..>'.
+
+ The number of comma-separated words in '<..>' will determine the number of
+ replicates.
+
+ '<..>' may have two different forms, named and short. For example,
+
+ named:
+ <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
+ 'd', 's', 'z', and 'c' for each replicate of the block.
+
+ <_c> is already defined: <_c=s,d,c,z>
+ <_t> is already defined: <_t=real,double precision,complex,double complex>
+
+ short:
+ <s,d,c,z>, a short form of the named, useful when no <p> appears inside
+ a block.
+
+ In general, '<..>' contains a comma-separated list of arbitrary
+ expressions. If these expressions must contain a comma|leftarrow|rightarrow,
+ then prepend the comma|leftarrow|rightarrow with a backslash.
+
+ If an expression matches '\\<index>' then it will be replaced
+ by the <index>-th expression.
+
+ Note that all '<..>' forms in a block must have the same number of
+ comma-separated entries.
+
+ Predefined named template rules:
+ <prefix=s,d,c,z>
+ <ftype=real,double precision,complex,double complex>
+ <ftypereal=real,double precision,\\0,\\1>
+ <ctype=float,double,complex_float,complex_double>
+ <ctypereal=float,double,\\0,\\1>
+
+"""
+__all__ = ['process_str', 'process_file']
+
+import os
+import sys
+import re
+
+routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+ """ Return a list of tuples for each function or subroutine each
+ tuple is the start and end of a subroutine or function to be
+ expanded.
+ """
+
+ spanlist = []
+ ind = 0
+ while True:
+ m = routine_start_re.search(astr, ind)
+ if m is None:
+ break
+ start = m.start()
+ if function_start_re.match(astr, start, m.end()):
+ while True:
+ i = astr.rfind('\n', ind, start)
+ if i==-1:
+ break
+ start = i
+ if astr[i:i+7]!='\n $':
+ break
+ start += 1
+ m = routine_end_re.search(astr, m.end())
+ ind = end = m and m.end()-1 or len(astr)
+ spanlist.append((start, end))
+ return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+ reps = named_re.findall(astr)
+ names = {}
+ for rep in reps:
+ name = rep[0].strip() or unique_key(names)
+ repl = rep[1].replace(r'\,', '@comma@')
+ thelist = conv(repl)
+ names[name] = thelist
+ return names
+
+def find_and_remove_repl_patterns(astr):
+ names = find_repl_patterns(astr)
+ astr = re.subn(named_re, '', astr)[0]
+ return astr, names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+ b = astr.split(',')
+ l = [x.strip() for x in b]
+ for i in range(len(l)):
+ m = item_re.match(l[i])
+ if m:
+ j = int(m.group('index'))
+ l[i] = l[j]
+ return ','.join(l)
+
+def unique_key(adict):
+ """ Obtain a unique key given a dictionary."""
+ allkeys = list(adict.keys())
+ done = False
+ n = 1
+ while not done:
+ newkey = '__l%s' % (n)
+ if newkey in allkeys:
+ n += 1
+ else:
+ done = True
+ return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+ substr = substr.replace(r'\>', '@rightarrow@')
+ substr = substr.replace(r'\<', '@leftarrow@')
+ lnames = find_repl_patterns(substr)
+ substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
+
+ def listrepl(mobj):
+ thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
+ if template_name_re.match(thelist):
+ return "<%s>" % (thelist)
+ name = None
+ for key in lnames.keys(): # see if list is already in dictionary
+ if lnames[key] == thelist:
+ name = key
+ if name is None: # this list is not in the dictionary yet
+ name = unique_key(lnames)
+ lnames[name] = thelist
+ return "<%s>" % name
+
+ substr = list_re.sub(listrepl, substr) # convert all lists to named templates
+ # newnames are constructed as needed
+
+ numsubs = None
+ base_rule = None
+ rules = {}
+ for r in template_re.findall(substr):
+ if r not in rules:
+ thelist = lnames.get(r, names.get(r, None))
+ if thelist is None:
+ raise ValueError('No replicates found for <%s>' % (r))
+ if r not in names and not thelist.startswith('_'):
+ names[r] = thelist
+ rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
+ num = len(rule)
+
+ if numsubs is None:
+ numsubs = num
+ rules[r] = rule
+ base_rule = r
+ elif num == numsubs:
+ rules[r] = rule
+ else:
+ print("Mismatch in number of replacements (base <%s=%s>)"
+ " for <%s=%s>. Ignoring." %
+ (base_rule, ','.join(rules[base_rule]), r, thelist))
+ if not rules:
+ return substr
+
+ def namerepl(mobj):
+ name = mobj.group(1)
+ return rules.get(name, (k+1)*[name])[k]
+
+ newstr = ''
+ for k in range(numsubs):
+ newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+ newstr = newstr.replace('@rightarrow@', '>')
+ newstr = newstr.replace('@leftarrow@', '<')
+ return newstr
+
+def process_str(allstr):
+ newstr = allstr
+ writestr = ''
+
+ struct = parse_structure(newstr)
+
+ oldend = 0
+ names = {}
+ names.update(_special_names)
+ for sub in struct:
+ cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
+ writestr += cleanedstr
+ names.update(defs)
+ writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+ oldend = sub[1]
+ writestr += newstr[oldend:]
+
+ return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
+
+def resolve_includes(source):
+ d = os.path.dirname(source)
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
+ else:
+ lines.append(line)
+ return lines
+
+def process_file(source):
+ lines = resolve_includes(source)
+ return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ctype=float,double,complex_float,complex_double>
+<ftypereal=real,double precision,\\0,\\1>
+<ctypereal=float,double,\\0,\\1>
+''')
+
+def main():
+ try:
+ file = sys.argv[1]
+ except IndexError:
+ fid = sys.stdin
+ outfile = sys.stdout
+ else:
+ fid = open(file, 'r')
+ (base, ext) = os.path.splitext(file)
+ newname = base
+ outfile = open(newname, 'w')
+
+ allstr = fid.read()
+ writestr = process_str(allstr)
+ outfile.write(writestr)
+
+
+if __name__ == "__main__":
+ main()
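Example (not part of the diff): a minimal sketch of the template rules documented in the module docstring above. <p=s,d> defines a named rule with two replicates, so the routine block is emitted twice; the inline <t=...> rule must carry the same number of entries.

    from numpy.distutils.from_template import process_str

    src = (
        "      subroutine <p=s,d>add(a, b, c)\n"
        "      <t=real,double precision> a, b, c\n"
        "      c = a + b\n"
        "      end subroutine <p>add\n"
    )
    # Expands to an 'sadd' routine typed 'real' and a 'dadd' routine
    # typed 'double precision'.
    print(process_str(src))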
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py
new file mode 100644
index 00000000..0fa1c11d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py
@@ -0,0 +1,111 @@
+import platform
+
+from distutils.unixccompiler import UnixCCompiler
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils.ccompiler import simple_version_match
+if platform.system() == 'Windows':
+ from numpy.distutils.msvc9compiler import MSVCCompiler
+
+
+class IntelCCompiler(UnixCCompiler):
+ """A modified Intel compiler compatible with a GCC-built Python."""
+ compiler_type = 'intel'
+ cc_exe = 'icc'
+ cc_args = 'fPIC'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+
+ v = self.get_version()
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
+ '-fomit-frame-pointer -{}').format(mpopt)
+ compiler = self.cc_exe
+
+ if platform.system() == 'Darwin':
+ shared_flag = '-Wl,-undefined,dynamic_lookup'
+ else:
+ shared_flag = '-shared'
+ self.set_executables(compiler=compiler,
+ compiler_so=compiler,
+ compiler_cxx=compiler,
+ archiver='xiar' + ' cru',
+ linker_exe=compiler + ' -shared-intel',
+ linker_so=compiler + ' ' + shared_flag +
+ ' -shared-intel')
+
+
+class IntelItaniumCCompiler(IntelCCompiler):
+ compiler_type = 'intele'
+
+ # On Itanium, the Intel Compiler used to be called ecc, let's search for
+ # it (now it's also icc, so ecc is last in the search).
+ for cc_exe in map(find_executable, ['icc', 'ecc']):
+ if cc_exe:
+ break
+
+
+class IntelEM64TCCompiler(UnixCCompiler):
+ """
+ A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
+ """
+ compiler_type = 'intelem'
+ cc_exe = 'icc -m64'
+ cc_args = '-fPIC'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+
+ v = self.get_version()
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
+ '-fomit-frame-pointer -{}').format(mpopt)
+ compiler = self.cc_exe
+
+ if platform.system() == 'Darwin':
+ shared_flag = '-Wl,-undefined,dynamic_lookup'
+ else:
+ shared_flag = '-shared'
+ self.set_executables(compiler=compiler,
+ compiler_so=compiler,
+ compiler_cxx=compiler,
+ archiver='xiar' + ' cru',
+ linker_exe=compiler + ' -shared-intel',
+ linker_so=compiler + ' ' + shared_flag +
+ ' -shared-intel')
+
+
+if platform.system() == 'Windows':
+ class IntelCCompilerW(MSVCCompiler):
+ """
+ A modified Intel compiler compatible with an MSVC-built Python.
+ """
+ compiler_type = 'intelw'
+ compiler_cxx = 'icl'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ MSVCCompiler.__init__(self, verbose, dry_run, force)
+ version_match = simple_version_match(start=r'Intel\(R\).*?32,')
+ self.__version = version_match
+
+ def initialize(self, plat_name=None):
+ MSVCCompiler.initialize(self, plat_name)
+ self.cc = self.find_exe('icl.exe')
+ self.lib = self.find_exe('xilib')
+ self.linker = self.find_exe('xilink')
+ self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
+ '/Qstd=c99']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
+ '/Qstd=c99', '/Z7', '/D_DEBUG']
+
+ class IntelEM64TCCompilerW(IntelCCompilerW):
+ """
+ A modified Intel x86_64 compiler compatible with
+ a 64bit MSVC-built Python.
+ """
+ compiler_type = 'intelemw'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ MSVCCompiler.__init__(self, verbose, dry_run, force)
+ version_match = simple_version_match(start=r'Intel\(R\).*?64,')
+ self.__version = version_match
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py b/venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py
new file mode 100644
index 00000000..851682c6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py
@@ -0,0 +1,116 @@
+import re
+import sys
+import subprocess
+
+__doc__ = """This module generates a DEF file from the symbols in
+an MSVC-compiled DLL import library. It correctly discriminates between
+data and functions. The data is collected from the output of the program
+nm(1).
+
+Usage:
+ python lib2def.py [libname.lib] [output.def]
+or
+ python lib2def.py [libname.lib] > output.def
+
+libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
+
+Author: Robert Kern <kernr@mail.ncifcrf.gov>
+Last Update: April 30, 1999
+"""
+
+__version__ = '0.1a'
+
+py_ver = "%d%d" % tuple(sys.version_info[:2])
+
+DEFAULT_NM = ['nm', '-Cs']
+
+DEF_HEADER = """LIBRARY python%s.dll
+;CODE PRELOAD MOVEABLE DISCARDABLE
+;DATA PRELOAD SINGLE
+
+EXPORTS
+""" % py_ver
+# the header of the DEF file
+
+FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
+DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
+
+def parse_cmd():
+ """Parses the command-line arguments.
+
+libfile, deffile = parse_cmd()"""
+ if len(sys.argv) == 3:
+ if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
+ libfile, deffile = sys.argv[1:]
+ elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
+ deffile, libfile = sys.argv[1:]
+ else:
+ print("I'm assuming that your first argument is the library")
+ print("and the second is the DEF file.")
+ libfile, deffile = sys.argv[1:]
+ elif len(sys.argv) == 2:
+ if sys.argv[1][-4:] == '.def':
+ deffile = sys.argv[1]
+ libfile = 'python%s.lib' % py_ver
+ elif sys.argv[1][-4:] == '.lib':
+ deffile = None
+ libfile = sys.argv[1]
+ else:
+ libfile = 'python%s.lib' % py_ver
+ deffile = None
+ return libfile, deffile
+
+def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True):
+ """Returns the output of nm_cmd via a pipe.
+
+nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
+ p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, text=True)
+ nm_output, nm_err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError('failed to run "%s": "%s"' % (
+ ' '.join(nm_cmd), nm_err))
+ return nm_output
+
+def parse_nm(nm_output):
+ """Returns a tuple of lists: dlist for the list of data
+symbols and flist for the list of function symbols.
+
+dlist, flist = parse_nm(nm_output)"""
+ data = DATA_RE.findall(nm_output)
+ func = FUNC_RE.findall(nm_output)
+
+ flist = []
+ for sym in data:
+ if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):
+ flist.append(sym)
+
+ dlist = []
+ for sym in data:
+ if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'):
+ dlist.append(sym)
+
+ dlist.sort()
+ flist.sort()
+ return dlist, flist
+
+def output_def(dlist, flist, header, file = sys.stdout):
+ """Outputs the final DEF file to a file defaulting to stdout.
+
+output_def(dlist, flist, header, file = sys.stdout)"""
+ for data_sym in dlist:
+ header = header + '\t%s DATA\n' % data_sym
+ header = header + '\n' # blank line
+ for func_sym in flist:
+ header = header + '\t%s\n' % func_sym
+ file.write(header)
+
+if __name__ == '__main__':
+ libfile, deffile = parse_cmd()
+ if deffile is None:
+ deffile = sys.stdout
+ else:
+ deffile = open(deffile, 'w')
+ nm_cmd = DEFAULT_NM + [str(libfile)]
+ nm_output = getnm(nm_cmd, shell=False)
+ dlist, flist = parse_nm(nm_output)
+ output_def(dlist, flist, DEF_HEADER, deffile)
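Example (not part of the diff): the parse/emit pipeline above, fed a hand-written stand-in for nm(1) output instead of a real .lib file. The 'in pythonXY.dll' suffix must match the running interpreter, since FUNC_RE and DATA_RE are built from sys.version_info at import time ('39' here, matching this python3.9 tree).

    import io
    from numpy.distutils.lib2def import parse_nm, output_def, DEF_HEADER

    nm_output = (
        "PyList_New in python39.dll\n"        # bare and _imp__: a function
        "_imp__PyList_New in python39.dll\n"
        "_imp__Py_Version in python39.dll\n"  # _imp__ only: a data symbol
    )
    dlist, flist = parse_nm(nm_output)   # (['Py_Version'], ['PyList_New'])
    buf = io.StringIO()
    output_def(dlist, flist, DEF_HEADER, buf)
    print(buf.getvalue())                # DEF file with an EXPORTS section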
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py b/venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py
new file mode 100644
index 00000000..686e5ebd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py
@@ -0,0 +1,77 @@
+""" Functions for converting from DOS to UNIX line endings
+
+"""
+import os
+import re
+import sys
+
+
+def dos2unix(file):
+ "Replace CRLF with LF in argument files. Print names of changed files."
+ if os.path.isdir(file):
+ print(file, "Directory!")
+ return
+
+ with open(file, "rb") as fp:
+ data = fp.read()
+ if b'\0' in data:
+ print(file, "Binary!")
+ return
+
+ newdata = re.sub("\r\n", "\n", data)
+ if newdata != data:
+ print('dos2unix:', file)
+ with open(file, "wb") as f:
+ f.write(newdata)
+ return file
+ else:
+ print(file, 'ok')
+
+def dos2unix_one_dir(modified_files, dir_name, file_names):
+ for file in file_names:
+ full_path = os.path.join(dir_name, file)
+ file = dos2unix(full_path)
+ if file is not None:
+ modified_files.append(file)
+
+def dos2unix_dir(dir_name):
+ modified_files = []
+ # os.path.walk was removed in Python 3; drive the per-directory
+ # helper with os.walk instead.
+ for dirpath, dirnames, filenames in os.walk(dir_name):
+ dos2unix_one_dir(modified_files, dirpath, filenames)
+ return modified_files
+#----------------------------------
+
+def unix2dos(file):
+ "Replace LF with CRLF in argument files. Print names of changed files."
+ if os.path.isdir(file):
+ print(file, "Directory!")
+ return
+
+ with open(file, "rb") as fp:
+ data = fp.read()
+ if b'\0' in data:
+ print(file, "Binary!")
+ return
+ newdata = re.sub("\r\n", "\n", data)
+ newdata = re.sub("\n", "\r\n", newdata)
+ if newdata != data:
+ print('unix2dos:', file)
+ with open(file, "wb") as f:
+ f.write(newdata)
+ return file
+ else:
+ print(file, 'ok')
+
+def unix2dos_one_dir(modified_files, dir_name, file_names):
+ for file in file_names:
+ full_path = os.path.join(dir_name, file)
+ file = unix2dos(full_path)
+ if file is not None:
+ modified_files.append(file)
+
+def unix2dos_dir(dir_name):
+ modified_files = []
+ # os.path.walk was removed in Python 3; drive the per-directory
+ # helper with os.walk instead.
+ for dirpath, dirnames, filenames in os.walk(dir_name):
+ unix2dos_one_dir(modified_files, dirpath, filenames)
+ return modified_files
+
+if __name__ == "__main__":
+ dos2unix_dir(sys.argv[1])
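Usage sketch (not part of the diff), relying on the byte-mode fixes applied above — the module reads and writes files in binary mode:

    import os, tempfile
    from numpy.distutils.line_endings import dos2unix

    fd, path = tempfile.mkstemp(suffix='.txt')
    os.close(fd)
    with open(path, 'wb') as f:
        f.write(b'line one\r\nline two\r\n')
    dos2unix(path)                      # prints: dos2unix: <path>
    with open(path, 'rb') as f:
        assert f.read() == b'line one\nline two\n'
    os.remove(path)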
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/log.py b/venv/lib/python3.9/site-packages/numpy/distutils/log.py
new file mode 100644
index 00000000..3347f56d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/log.py
@@ -0,0 +1,111 @@
+# Colored log
+import sys
+from distutils.log import * # noqa: F403
+from distutils.log import Log as old_Log
+from distutils.log import _global_log
+
+from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
+ green_text, is_sequence, is_string)
+
+
+def _fix_args(args,flag=1):
+ if is_string(args):
+ return args.replace('%', '%%')
+ if flag and is_sequence(args):
+ return tuple([_fix_args(a, flag=0) for a in args])
+ return args
+
+
+class Log(old_Log):
+ def _log(self, level, msg, args):
+ if level >= self.threshold:
+ if args:
+ msg = msg % _fix_args(args)
+ print(_global_color_map[level](msg))
+ sys.stdout.flush()
+
+ def good(self, msg, *args):
+ """
+ If we log WARN messages, log this message as a 'nice' anti-warn
+ message.
+
+ """
+ if WARN >= self.threshold:
+ if args:
+ print(green_text(msg % _fix_args(args)))
+ else:
+ print(green_text(msg))
+ sys.stdout.flush()
+
+
+_global_log.__class__ = Log
+
+good = _global_log.good
+
+def set_threshold(level, force=False):
+ prev_level = _global_log.threshold
+ # If we're already running at DEBUG, don't change the threshold unless
+ # forced, as there's likely a good reason why we're running at this level.
+ if prev_level > DEBUG or force:
+ _global_log.threshold = level
+ if level <= DEBUG:
+ info('set_threshold: setting threshold to DEBUG level,'
+ ' it can be changed only with force argument')
+ else:
+ info('set_threshold: not changing threshold from DEBUG level'
+ ' %s to %s' % (prev_level, level))
+ return prev_level
+
+def get_threshold():
+ return _global_log.threshold
+
+def set_verbosity(v, force=False):
+ prev_level = _global_log.threshold
+ if v < 0:
+ set_threshold(ERROR, force)
+ elif v == 0:
+ set_threshold(WARN, force)
+ elif v == 1:
+ set_threshold(INFO, force)
+ elif v >= 2:
+ set_threshold(DEBUG, force)
+ return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1)
+
+
+_global_color_map = {
+ DEBUG:cyan_text,
+ INFO:default_text,
+ WARN:red_text,
+ ERROR:red_text,
+ FATAL:red_text
+}
+
+# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
+set_verbosity(0, force=True)
+
+
+_error = error
+_warn = warn
+_info = info
+_debug = debug
+
+
+def error(msg, *a, **kw):
+ _error(f"ERROR: {msg}", *a, **kw)
+
+
+def warn(msg, *a, **kw):
+ _warn(f"WARN: {msg}", *a, **kw)
+
+
+def info(msg, *a, **kw):
+ _info(f"INFO: {msg}", *a, **kw)
+
+
+def debug(msg, *a, **kw):
+ _debug(f"DEBUG: {msg}", *a, **kw)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c
new file mode 100644
index 00000000..485a675d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c
@@ -0,0 +1,6 @@
+int _get_output_format(void)
+{
+ return 0;
+}
+
+int _imp____lc_codepage = 0;
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py
new file mode 100644
index 00000000..5d1c27a6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py
@@ -0,0 +1,592 @@
+"""
+Support code for building Python extensions on Windows.
+
+ # NT stuff
+ # 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+ # 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+ # 3. Force windows to use g77
+
+"""
+import os
+import platform
+import sys
+import subprocess
+import re
+import textwrap
+
+# Overwrite certain distutils.ccompiler functions:
+import numpy.distutils.ccompiler # noqa: F401
+from numpy.distutils import log
+# NT stuff
+# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+# --> this is done in numpy/distutils/ccompiler.py
+# 3. Force windows to use g77
+
+import distutils.cygwinccompiler
+from distutils.unixccompiler import UnixCCompiler
+from distutils.msvccompiler import get_build_version as get_build_msvc_version
+from distutils.errors import UnknownFileError
+from numpy.distutils.misc_util import (msvc_runtime_library,
+ msvc_runtime_version,
+ msvc_runtime_major,
+ get_build_architecture)
+
+def get_msvcr_replacement():
+ """Replacement for outdated version of get_msvcr from cygwinccompiler"""
+ msvcr = msvc_runtime_library()
+ return [] if msvcr is None else [msvcr]
+
+
+# Useful to generate table of symbols from a dll
+_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
+_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
+
+# the same as cygwin plus some additional parameters
+class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
+ """ A modified MingW32 compiler compatible with an MSVC built Python.
+
+ """
+
+ compiler_type = 'mingw32'
+
+ def __init__ (self,
+ verbose=0,
+ dry_run=0,
+ force=0):
+
+ distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,
+ dry_run, force)
+
+ # **changes: eric jones 4/11/01
+ # 1. Check for import library on Windows. Build if it doesn't exist.
+
+ build_import_library()
+
+ # Check for custom msvc runtime library on Windows. Build if it doesn't exist.
+ msvcr_success = build_msvcr_library()
+ msvcr_dbg_success = build_msvcr_library(debug=True)
+ if msvcr_success or msvcr_dbg_success:
+ # add preprocessor statement for using customized msvcr lib
+ self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
+
+ # Define the MSVC version as hint for MinGW
+ msvcr_version = msvc_runtime_version()
+ if msvcr_version:
+ self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version)
+
+ # MS_WIN64 should be defined when building for amd64 on windows,
+ # but python headers define it only for MS compilers, which has all
+ # kind of bad consequences, like using Py_ModuleInit4 instead of
+ # Py_ModuleInit4_64, etc... So we add it here
+ if get_build_architecture() == 'AMD64':
+ self.set_executables(
+ compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
+ compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall '
+ '-Wstrict-prototypes',
+ linker_exe='gcc -g',
+ linker_so='gcc -g -shared')
+ else:
+ self.set_executables(
+ compiler='gcc -O2 -Wall',
+ compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
+ linker_exe='g++ ',
+ linker_so='g++ -shared')
+ # added for python2.3 support
+ # we can't pass it through set_executables because pre 2.2 would fail
+ self.compiler_cxx = ['g++']
+
+ # Maybe we should also append -mthreads, but then the finished dlls
+ # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
+ # thread-safe exception handling on `Mingw32')
+
+ # no additional libraries needed
+ #self.dll_libraries=[]
+ return
+
+ # __init__ ()
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir,
+ libraries,
+ library_dirs,
+ runtime_library_dirs,
+ export_symbols = None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ # Include the appropriate MSVC runtime library if Python was built
+ # with MSVC >= 7.0 (MinGW standard is msvcrt)
+ runtime_library = msvc_runtime_library()
+ if runtime_library:
+ if not libraries:
+ libraries = []
+ libraries.append(runtime_library)
+ args = (self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir,
+ libraries,
+ library_dirs,
+ runtime_library_dirs,
+ None, #export_symbols, we do this in our def-file
+ debug,
+ extra_preargs,
+ extra_postargs,
+ build_temp,
+ target_lang)
+ func = UnixCCompiler.link
+ func(*args[:func.__code__.co_argcount])
+ return
+
+ def object_filenames (self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+ (base, ext) = os.path.splitext (os.path.normcase(src_name))
+
+ # added these lines to strip off windows drive letters
+ # without it, .o files are placed next to .c files
+ # instead of the build directory
+ drv, base = os.path.splitdrive(base)
+ if drv:
+ base = base[1:]
+
+ if ext not in (self.src_extensions + ['.rc', '.res']):
+ raise UnknownFileError(
+ "unknown file type '%s' (from '%s')" % \
+ (ext, src_name))
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext == '.res' or ext == '.rc':
+ # these need to be compiled to object files
+ obj_names.append (os.path.join (output_dir,
+ base + ext + self.obj_extension))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+ # object_filenames ()
+
+
+def find_python_dll():
+ # We can't do much here:
+ # - find it in the virtualenv (sys.prefix)
+ # - find it in python main dir (sys.base_prefix, if in a virtualenv)
+ # - in system32,
+ # - otherwise (SxS), I don't know how to get it.
+ stems = [sys.prefix]
+ if sys.base_prefix != sys.prefix:
+ stems.append(sys.base_prefix)
+
+ sub_dirs = ['', 'lib', 'bin']
+ # generate possible combinations of directory trees and sub-directories
+ lib_dirs = []
+ for stem in stems:
+ for folder in sub_dirs:
+ lib_dirs.append(os.path.join(stem, folder))
+
+ # add system directory as well
+ if 'SYSTEMROOT' in os.environ:
+ lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32'))
+
+ # search in the file system for possible candidates
+ major_version, minor_version = tuple(sys.version_info[:2])
+ implementation = platform.python_implementation()
+ if implementation == 'CPython':
+ dllname = f'python{major_version}{minor_version}.dll'
+ elif implementation == 'PyPy':
+ dllname = f'libpypy{major_version}-c.dll'
+ else:
+ dllname = f'Unknown platform {implementation}'
+ print("Looking for %s" % dllname)
+ for folder in lib_dirs:
+ dll = os.path.join(folder, dllname)
+ if os.path.exists(dll):
+ return dll
+
+ raise ValueError("%s not found in %s" % (dllname, lib_dirs))
+
+def dump_table(dll):
+ st = subprocess.check_output(["objdump.exe", "-p", dll])
+ return st.split(b'\n')
+
+def generate_def(dll, dfile):
+ """Given a dll file location, get all its exported symbols and dump them
+ into the given def file.
+
+ The .def file will be overwritten"""
+ dump = dump_table(dll)
+ for i in range(len(dump)):
+ if _START.match(dump[i].decode()):
+ break
+ else:
+ raise ValueError("Symbol table not found")
+
+ syms = []
+ for j in range(i+1, len(dump)):
+ m = _TABLE.match(dump[j].decode())
+ if m:
+ syms.append((int(m.group(1).strip()), m.group(2)))
+ else:
+ break
+
+ if len(syms) == 0:
+ log.warn('No symbols found in %s' % dll)
+
+ with open(dfile, 'w') as d:
+ d.write('LIBRARY %s\n' % os.path.basename(dll))
+ d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
+ d.write(';DATA PRELOAD SINGLE\n')
+ d.write('\nEXPORTS\n')
+ for s in syms:
+ #d.write('@%d %s\n' % (s[0], s[1]))
+ d.write('%s\n' % s[1])
+
+def find_dll(dll_name):
+
+ arch = {'AMD64' : 'amd64',
+ 'Intel' : 'x86'}[get_build_architecture()]
+
+ def _find_dll_in_winsxs(dll_name):
+ # Walk through the WinSxS directory to find the dll.
+ winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'),
+ 'winsxs')
+ if not os.path.exists(winsxs_path):
+ return None
+ for root, dirs, files in os.walk(winsxs_path):
+ if dll_name in files and arch in root:
+ return os.path.join(root, dll_name)
+ return None
+
+ def _find_dll_in_path(dll_name):
+ # First, look in the Python directory, then scan PATH for
+ # the given dll name.
+ for path in [sys.prefix] + os.environ['PATH'].split(';'):
+ filepath = os.path.join(path, dll_name)
+ if os.path.exists(filepath):
+ return os.path.abspath(filepath)
+
+ return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
+
+def build_msvcr_library(debug=False):
+ if os.name != 'nt':
+ return False
+
+ # If the version number is None, then we couldn't find the MSVC runtime at
+ # all, because we are running on a Python distribution which is custom
+ # compiled; trust that the compiler is the same as the one available to us
+ # now, and that it is capable of linking with the correct runtime without
+ # any extra options.
+ msvcr_ver = msvc_runtime_major()
+ if msvcr_ver is None:
+ log.debug('Skip building import library: '
+ 'Runtime is not compiled with MSVC')
+ return False
+
+ # Skip using a custom library for versions < MSVC 8.0
+ if msvcr_ver < 80:
+ log.debug('Skip building msvcr library:'
+ ' custom functionality not present')
+ return False
+
+ msvcr_name = msvc_runtime_library()
+ if debug:
+ msvcr_name += 'd'
+
+ # Skip if custom library already exists
+ out_name = "lib%s.a" % msvcr_name
+ out_file = os.path.join(sys.prefix, 'libs', out_name)
+ if os.path.isfile(out_file):
+ log.debug('Skip building msvcr library: "%s" exists' %
+ (out_file,))
+ return True
+
+ # Find the msvcr dll
+ msvcr_dll_name = msvcr_name + '.dll'
+ dll_file = find_dll(msvcr_dll_name)
+ if not dll_file:
+ log.warn('Cannot build msvcr library: "%s" not found' %
+ msvcr_dll_name)
+ return False
+
+ def_name = "lib%s.def" % msvcr_name
+ def_file = os.path.join(sys.prefix, 'libs', def_name)
+
+ log.info('Building msvcr library: "%s" (from %s)' \
+ % (out_file, dll_file))
+
+ # Generate a symbol definition file from the msvcr dll
+ generate_def(dll_file, def_file)
+
+ # Create a custom mingw library for the given symbol definitions
+ cmd = ['dlltool', '-d', def_file, '-l', out_file]
+ retcode = subprocess.call(cmd)
+
+ # Clean up symbol definitions
+ os.remove(def_file)
+
+ return (not retcode)
+
+def build_import_library():
+ if os.name != 'nt':
+ return
+
+ arch = get_build_architecture()
+ if arch == 'AMD64':
+ return _build_import_library_amd64()
+ elif arch == 'Intel':
+ return _build_import_library_x86()
+ else:
+ raise ValueError("Unhandled arch %s" % arch)
+
+def _check_for_import_lib():
+ """Check if an import library for the Python runtime already exists."""
+ major_version, minor_version = tuple(sys.version_info[:2])
+
+ # patterns for the file name of the library itself
+ patterns = ['libpython%d%d.a',
+ 'libpython%d%d.dll.a',
+ 'libpython%d.%d.dll.a']
+
+ # directory trees that may contain the library
+ stems = [sys.prefix]
+ if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
+ stems.append(sys.base_prefix)
+ elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+ stems.append(sys.real_prefix)
+
+ # possible subdirectories within those trees where it is placed
+ sub_dirs = ['libs', 'lib']
+
+ # generate a list of candidate locations
+ candidates = []
+ for pat in patterns:
+ filename = pat % (major_version, minor_version)
+ for stem_dir in stems:
+ for folder in sub_dirs:
+ candidates.append(os.path.join(stem_dir, folder, filename))
+
+ # test the filesystem to see if we can find any of these
+ for fullname in candidates:
+ if os.path.isfile(fullname):
+ # already exists, in location given
+ return (True, fullname)
+
+ # needs to be built, preferred location given first
+ return (False, candidates[0])
+
+def _build_import_library_amd64():
+ out_exists, out_file = _check_for_import_lib()
+ if out_exists:
+ log.debug('Skip building import library: "%s" exists', out_file)
+ return
+
+ # get the runtime dll for which we are building import library
+ dll_file = find_python_dll()
+ log.info('Building import library (arch=AMD64): "%s" (from %s)' %
+ (out_file, dll_file))
+
+ # generate symbol list from this library
+ def_name = "python%d%d.def" % tuple(sys.version_info[:2])
+ def_file = os.path.join(sys.prefix, 'libs', def_name)
+ generate_def(dll_file, def_file)
+
+ # generate import library from this symbol list
+ cmd = ['dlltool', '-d', def_file, '-l', out_file]
+ subprocess.check_call(cmd)
+
+def _build_import_library_x86():
+ """ Build the import libraries for Mingw32-gcc on Windows
+ """
+ out_exists, out_file = _check_for_import_lib()
+ if out_exists:
+ log.debug('Skip building import library: "%s" exists', out_file)
+ return
+
+ lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
+ lib_file = os.path.join(sys.prefix, 'libs', lib_name)
+ if not os.path.isfile(lib_file):
+ # didn't find library file in virtualenv, try base distribution, too,
+ # and use that instead if found there. for Python 2.7 venvs, the base
+ # directory is in attribute real_prefix instead of base_prefix.
+ if hasattr(sys, 'base_prefix'):
+ base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
+ elif hasattr(sys, 'real_prefix'):
+ base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
+ else:
+ base_lib = '' # os.path.isfile('') == False
+
+ if os.path.isfile(base_lib):
+ lib_file = base_lib
+ else:
+ log.warn('Cannot build import library: "%s" not found', lib_file)
+ return
+ log.info('Building import library (ARCH=x86): "%s"', out_file)
+
+ from numpy.distutils import lib2def
+
+ def_name = "python%d%d.def" % tuple(sys.version_info[:2])
+ def_file = os.path.join(sys.prefix, 'libs', def_name)
+ nm_output = lib2def.getnm(
+ lib2def.DEFAULT_NM + [lib_file], shell=False)
+ dlist, flist = lib2def.parse_nm(nm_output)
+ with open(def_file, 'w') as fid:
+ lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)
+
+ dll_name = find_python_dll ()
+
+ cmd = ["dlltool",
+ "--dllname", dll_name,
+ "--def", def_file,
+ "--output-lib", out_file]
+ # check_output's return value is dlltool's stdout, not a failure
+ # indicator, and it raises on a non-zero exit status anyway; mirror
+ # _build_import_library_amd64 and use check_call.
+ subprocess.check_call(cmd)
+ return
+
+#=====================================
+# Dealing with Visual Studio MANIFESTS
+#=====================================
+
+# Functions to deal with Visual Studio manifests. Manifests are a mechanism
+# to enforce strong DLL versioning on Windows, and have nothing to do with
+# the distutils MANIFEST. Manifests are XML files with version info, used by
+# the OS loader; they are necessary when linking against a DLL not in the
+# system path; in particular, the official python 2.6 binary is built against
+# MS runtime 9 (the one from VS 2008), which is not available on most Windows
+# systems; the python 2.6 installer does install it in the Win SxS (side by
+# side) directory, but a manifest is required for this to work. This is a big
+# mess, thanks MS for a wonderful system.
+
+# XXX: ideally, we should use exactly the same version as used by python. I
+# submitted a patch to get this version, but it was only included for python
+# 2.6.1 and above. So for versions below, we use a "best guess".
+_MSVCRVER_TO_FULLVER = {}
+if sys.platform == 'win32':
+ try:
+ import msvcrt
+ # I took one version in my SxS directory: no idea if it is the good
+ # one, and we can't retrieve it from python
+ _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
+ _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
+ # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
+ # on Windows XP:
+ _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+ crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None)
+ if crt_ver is not None: # Available at least back to Python 3.3
+ maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups()
+ _MSVCRVER_TO_FULLVER[maj + min] = crt_ver
+ del maj, min
+ del crt_ver
+ except ImportError:
+ # If we are here, means python was not built with MSVC. Not sure what
+ # to do in that case: manifest building will fail, but it should not be
+ # used in that case anyway
+ log.warn('Cannot import msvcrt: using manifest will not be possible')
+
+def msvc_manifest_xml(maj, min):
+ """Given a major and minor version of the MSVCR, returns the
+ corresponding XML file."""
+ try:
+ fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
+ except KeyError:
+ raise ValueError("Version %d,%d of MSVCRT not supported yet" %
+ (maj, min)) from None
+ # Don't be fooled, it looks like an XML, but it is not. In particular, it
+ # should not have any space before starting, and its size should be
+ # divisible by 4, most likely for alignment constraints when the xml is
+ # embedded in the binary...
+ # This template was copied directly from the python 2.6 binary (using
+ # strings.exe from mingw on python.exe).
+ template = textwrap.dedent("""\
+ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+ <dependency>
+ <dependentAssembly>
+ <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+ </dependentAssembly>
+ </dependency>
+ </assembly>""")
+
+ return template % {'fullver': fullver, 'maj': maj, 'min': min}
+
+def manifest_rc(name, type='dll'):
+ """Return the rc file used to generate the res file which will be embedded
+ as manifest for given manifest file name, of given type ('dll' or
+ 'exe').
+
+ Parameters
+ ----------
+ name : str
+ name of the manifest file to embed
+ type : str {'dll', 'exe'}
+ type of the binary which will embed the manifest
+
+ """
+ if type == 'dll':
+ rctype = 2
+ elif type == 'exe':
+ rctype = 1
+ else:
+ raise ValueError("Type %s not supported" % type)
+
+ return """\
+#include "winuser.h"
+%d RT_MANIFEST %s""" % (rctype, name)
+
+def check_embedded_msvcr_match_linked(msver):
+ """msver is the ms runtime version used for the MANIFEST."""
+ # check msvcr major version are the same for linking and
+ # embedding
+ maj = msvc_runtime_major()
+ if maj:
+ if not maj == int(msver):
+ raise ValueError(
+ "Discrepancy between linked msvcr " \
+ "(%d) and the one about to be embedded " \
+ "(%d)" % (int(msver), maj))
+
+def configtest_name(config):
+ base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
+ return os.path.splitext(base)[0]
+
+def manifest_name(config):
+ # Get configtest name (including suffix)
+ root = configtest_name(config)
+ exext = config.compiler.exe_extension
+ return root + exext + ".manifest"
+
+def rc_name(config):
+ # Get configtest name (including suffix)
+ root = configtest_name(config)
+ return root + ".rc"
+
+def generate_manifest(config):
+ msver = get_build_msvc_version()
+ if msver is not None:
+ if msver >= 8:
+ check_embedded_msvcr_match_linked(msver)
+ ma_str, mi_str = str(msver).split('.')
+ # Write the manifest file
+ manxml = msvc_manifest_xml(int(ma_str), int(mi_str))
+ with open(manifest_name(config), "w") as man:
+ config.temp_files.append(manifest_name(config))
+ man.write(manxml)
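Example (not part of the diff): the manifest helpers above in miniature. manifest_rc is pure string templating and runs anywhere; msvc_manifest_xml depends on _MSVCRVER_TO_FULLVER, which is populated only on win32, so that half assumes a Windows interpreter.

    from numpy.distutils.mingw32ccompiler import manifest_rc

    print(manifest_rc('python.exe.manifest', type='exe'))
    # #include "winuser.h"
    # 1 RT_MANIFEST python.exe.manifest

    # On Windows only:
    #   from numpy.distutils.mingw32ccompiler import msvc_manifest_xml
    #   xml = msvc_manifest_xml(9, 0)   # references Microsoft.VC90.CRT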
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py b/venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py
new file mode 100644
index 00000000..79ba0851
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py
@@ -0,0 +1,2493 @@
+import os
+import re
+import sys
+import copy
+import glob
+import atexit
+import tempfile
+import subprocess
+import shutil
+import multiprocessing
+import textwrap
+import importlib.util
+from threading import local as tlocal
+from functools import reduce
+
+import distutils
+from distutils.errors import DistutilsError
+
+# stores temporary directory of each thread to only create one per thread
+_tdata = tlocal()
+
+# store all created temporary directories so they can be deleted on exit
+_tmpdirs = []
+def clean_up_temporary_directory():
+ if _tmpdirs is not None:
+ for d in _tmpdirs:
+ try:
+ shutil.rmtree(d)
+ except OSError:
+ pass
+
+atexit.register(clean_up_temporary_directory)
+
+__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
+ 'dict_append', 'appendpath', 'generate_config_py',
+ 'get_cmd', 'allpath', 'get_mathlibs',
+ 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
+ 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
+ 'has_f_sources', 'has_cxx_sources', 'filter_sources',
+ 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
+ 'get_script_files', 'get_lib_source_files', 'get_data_files',
+ 'dot_join', 'get_frame', 'minrelpath', 'njoin',
+ 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
+ 'get_build_architecture', 'get_info', 'get_pkg_info',
+ 'get_num_build_jobs', 'sanitize_cxx_flags',
+ 'exec_mod_from_location']
+
+class InstallableLib:
+ """
+ Container to hold information on an installable library.
+
+ Parameters
+ ----------
+ name : str
+ Name of the installed library.
+ build_info : dict
+ Dictionary holding build information.
+ target_dir : str
+ Absolute path specifying where to install the library.
+
+ See Also
+ --------
+ Configuration.add_installed_library
+
+ Notes
+ -----
+ The three parameters are stored as attributes with the same names.
+
+ """
+ def __init__(self, name, build_info, target_dir):
+ self.name = name
+ self.build_info = build_info
+ self.target_dir = target_dir
+
+
+def get_num_build_jobs():
+ """
+ Get the number of parallel build jobs set by the --parallel command line
+ argument of setup.py.
+ If the command did not receive a setting, the environment variable
+ NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
+ processors on the system, with a maximum of 8 (to prevent
+ overloading the system if there are a lot of CPUs).
+
+ Returns
+ -------
+ out : int
+ number of parallel jobs that can be run
+
+ """
+ from numpy.distutils.core import get_distribution
+ try:
+ cpu_count = len(os.sched_getaffinity(0))
+ except AttributeError:
+ cpu_count = multiprocessing.cpu_count()
+ cpu_count = min(cpu_count, 8)
+ envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
+ dist = get_distribution()
+ # may be None during configuration
+ if dist is None:
+ return envjobs
+
+ # any of these three may have the job set, take the largest
+ cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
+ getattr(dist.get_command_obj('build_ext'), 'parallel', None),
+ getattr(dist.get_command_obj('build_clib'), 'parallel', None))
+ if all(x is None for x in cmdattr):
+ return envjobs
+ else:
+ return max(x for x in cmdattr if x is not None)
+
+def quote_args(args):
+ """Quote list of arguments.
+
+ .. deprecated:: 1.22.
+ """
+ import warnings
+ warnings.warn('"quote_args" is deprecated.',
+ DeprecationWarning, stacklevel=2)
+ # don't use _nt_quote_args as it does not check whether
+ # args items already have quotes or not.
+ args = list(args)
+ for i in range(len(args)):
+ a = args[i]
+ if ' ' in a and a[0] not in '"\'':
+ args[i] = '"%s"' % (a)
+ return args
+
+def allpath(name):
+ "Convert a /-separated pathname to one using the OS's path separator."
+ split = name.split('/')
+ return os.path.join(*split)
+
+def rel_path(path, parent_path):
+ """Return path relative to parent_path."""
+ # Use realpath to avoid issues with symlinked dirs (see gh-7707)
+ pd = os.path.realpath(os.path.abspath(parent_path))
+ apath = os.path.realpath(os.path.abspath(path))
+ if len(apath) < len(pd):
+ return path
+ if apath == pd:
+ return ''
+ if pd == apath[:len(pd)]:
+ assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
+ path = apath[len(pd)+1:]
+ return path
+
+def get_path_from_frame(frame, parent_path=None):
+ """Return path of the module given a frame object from the call stack.
+
+ Returned path is relative to parent_path when given,
+ otherwise it is absolute path.
+ """
+
+ # First, try to find if the file name is in the frame.
+ try:
+ caller_file = eval('__file__', frame.f_globals, frame.f_locals)
+ d = os.path.dirname(os.path.abspath(caller_file))
+ except NameError:
+ # __file__ is not defined, so let's try __name__. We try this second
+ # because setuptools spoofs __name__ to be '__main__' even though
+ # sys.modules['__main__'] might be something else, like easy_install(1).
+ caller_name = eval('__name__', frame.f_globals, frame.f_locals)
+ __import__(caller_name)
+ mod = sys.modules[caller_name]
+ if hasattr(mod, '__file__'):
+ d = os.path.dirname(os.path.abspath(mod.__file__))
+ else:
+ # we're probably running setup.py as execfile("setup.py")
+ # (likely we're building an egg)
+ d = os.path.abspath('.')
+
+ if parent_path is not None:
+ d = rel_path(d, parent_path)
+
+ return d or '.'
+
+def njoin(*path):
+ """Join two or more pathname components +
+ - convert a /-separated pathname to one using the OS's path separator.
+ - resolve `..` and `.` from path.
+
+    Either n arguments as in njoin('a','b'), a sequence of n names as in
+    njoin(['a','b']), or a mixture of such arguments is handled.
+ """
+ paths = []
+ for p in path:
+ if is_sequence(p):
+ # njoin(['a', 'b'], 'c')
+ paths.append(njoin(*p))
+ else:
+ assert is_string(p)
+ paths.append(p)
+ path = paths
+ if not path:
+ # njoin()
+ joined = ''
+ else:
+ # njoin('a', 'b')
+ joined = os.path.join(*path)
+ if os.path.sep != '/':
+ joined = joined.replace('/', os.path.sep)
+ return minrelpath(joined)
+
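+# Illustrative behaviour of njoin (results shown for a POSIX os.sep):
+#
+#     njoin('a', 'b/c')        # -> 'a/b/c'
+#     njoin(['a', 'b'], 'c')   # -> 'a/b/c'
+#     njoin('a', '../b')       # -> 'b'  (minrelpath resolves the '..')
+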
+def get_mathlibs(path=None):
+ """Return the MATHLIB line from numpyconfig.h
+ """
+ if path is not None:
+ config_file = os.path.join(path, '_numpyconfig.h')
+ else:
+ # Look for the file in each of the numpy include directories.
+ dirs = get_numpy_include_dirs()
+ for path in dirs:
+ fn = os.path.join(path, '_numpyconfig.h')
+ if os.path.exists(fn):
+ config_file = fn
+ break
+ else:
+ raise DistutilsError('_numpyconfig.h not found in numpy include '
+ 'dirs %r' % (dirs,))
+
+ with open(config_file) as fid:
+ mathlibs = []
+ s = '#define MATHLIB'
+ for line in fid:
+ if line.startswith(s):
+ value = line[len(s):].strip()
+ if value:
+ mathlibs.extend(value.split(','))
+ return mathlibs
+
+def minrelpath(path):
+ """Resolve `..` and '.' from path.
+ """
+ if not is_string(path):
+ return path
+ if '.' not in path:
+ return path
+ l = path.split(os.sep)
+ while l:
+ try:
+ i = l.index('.', 1)
+ except ValueError:
+ break
+ del l[i]
+ j = 1
+ while l:
+ try:
+ i = l.index('..', j)
+ except ValueError:
+ break
+ if l[i-1]=='..':
+ j += 1
+ else:
+ del l[i], l[i-1]
+ j = 1
+ if not l:
+ return ''
+ return os.sep.join(l)
+
+def sorted_glob(fileglob):
+ """sorts output of python glob for https://bugs.python.org/issue30461
+ to allow extensions to have reproducible build results"""
+ return sorted(glob.glob(fileglob))
+
+def _fix_paths(paths, local_path, include_non_existing):
+ assert is_sequence(paths), repr(type(paths))
+ new_paths = []
+ assert not is_string(paths), repr(paths)
+ for n in paths:
+ if is_string(n):
+ if '*' in n or '?' in n:
+ p = sorted_glob(n)
+ p2 = sorted_glob(njoin(local_path, n))
+ if p2:
+ new_paths.extend(p2)
+ elif p:
+ new_paths.extend(p)
+ else:
+ if include_non_existing:
+ new_paths.append(n)
+ print('could not resolve pattern in %r: %r' %
+ (local_path, n))
+ else:
+ n2 = njoin(local_path, n)
+ if os.path.exists(n2):
+ new_paths.append(n2)
+ else:
+ if os.path.exists(n):
+ new_paths.append(n)
+ elif include_non_existing:
+ new_paths.append(n)
+ if not os.path.exists(n):
+ print('non-existing path in %r: %r' %
+ (local_path, n))
+
+ elif is_sequence(n):
+ new_paths.extend(_fix_paths(n, local_path, include_non_existing))
+ else:
+ new_paths.append(n)
+ return [minrelpath(p) for p in new_paths]
+
+def gpaths(paths, local_path='', include_non_existing=True):
+ """Apply glob to paths and prepend local_path if needed.
+ """
+ if is_string(paths):
+ paths = (paths,)
+ return _fix_paths(paths, local_path, include_non_existing)
+
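+# Illustrative use of gpaths (hypothetical layout): with a file pkg/src/a.c
+# on disk, gpaths('src/*.c', local_path='pkg') returns ['pkg/src/a.c'];
+# non-glob entries that do not exist are kept as-is because
+# include_non_existing defaults to True.
+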
+def make_temp_file(suffix='', prefix='', text=True):
+ if not hasattr(_tdata, 'tempdir'):
+ _tdata.tempdir = tempfile.mkdtemp()
+ _tmpdirs.append(_tdata.tempdir)
+ fid, name = tempfile.mkstemp(suffix=suffix,
+ prefix=prefix,
+ dir=_tdata.tempdir,
+ text=text)
+ fo = os.fdopen(fid, 'w')
+ return fo, name
+
+# Hooks for colored terminal output.
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
+def terminal_has_colors():
+ if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
+ # Avoid importing curses that causes illegal operation
+ # with a message:
+ # PYTHON2 caused an invalid page fault in
+ # module CYGNURSES7.DLL as 015f:18bbfc28
+ # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
+ # ssh to Win32 machine from debian
+ # curses.version is 2.2
+ # CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
+ return 0
+ if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+ try:
+ import curses
+ curses.setupterm()
+ if (curses.tigetnum("colors") >= 0
+ and curses.tigetnum("pairs") >= 0
+ and ((curses.tigetstr("setf") is not None
+ and curses.tigetstr("setb") is not None)
+ or (curses.tigetstr("setaf") is not None
+ and curses.tigetstr("setab") is not None)
+ or curses.tigetstr("scp") is not None)):
+ return 1
+ except Exception:
+ pass
+ return 0
+
+if terminal_has_colors():
+ _colour_codes = dict(black=0, red=1, green=2, yellow=3,
+ blue=4, magenta=5, cyan=6, white=7, default=9)
+ def colour_text(s, fg=None, bg=None, bold=False):
+ seq = []
+ if bold:
+ seq.append('1')
+ if fg:
+ fgcode = 30 + _colour_codes.get(fg.lower(), 0)
+ seq.append(str(fgcode))
+ if bg:
+ bgcode = 40 + _colour_codes.get(bg.lower(), 7)
+ seq.append(str(bgcode))
+ if seq:
+ return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
+ else:
+ return s
+else:
+    def colour_text(s, fg=None, bg=None, bold=False):
+ return s
+
+def default_text(s):
+ return colour_text(s, 'default')
+def red_text(s):
+ return colour_text(s, 'red')
+def green_text(s):
+ return colour_text(s, 'green')
+def yellow_text(s):
+ return colour_text(s, 'yellow')
+def cyan_text(s):
+ return colour_text(s, 'cyan')
+def blue_text(s):
+ return colour_text(s, 'blue')
+
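+# Illustrative output when the terminal supports colours:
+#
+#     red_text('fail')                       # -> '\x1b[31mfail\x1b[0m'
+#     colour_text('ok', 'green', bold=True)  # -> '\x1b[1;32mok\x1b[0m'
+#
+# On terminals without colour support the text is returned unchanged.
+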
+#########################
+
+def cyg2win32(path: str) -> str:
+ """Convert a path from Cygwin-native to Windows-native.
+
+ Uses the cygpath utility (part of the Base install) to do the
+ actual conversion. Falls back to returning the original path if
+ this fails.
+
+ Handles the default ``/cygdrive`` mount prefix as well as the
+ ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such
+ as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or
+    ``/home/username``.
+
+ Parameters
+ ----------
+ path : str
+ The path to convert
+
+ Returns
+ -------
+ converted_path : str
+ The converted path
+
+ Notes
+ -----
+ Documentation for cygpath utility:
+ https://cygwin.com/cygwin-ug-net/cygpath.html
+ Documentation for the C function it wraps:
+ https://cygwin.com/cygwin-api/func-cygwin-conv-path.html
+
+ """
+ if sys.platform != "cygwin":
+ return path
+ return subprocess.check_output(
+ ["/usr/bin/cygpath", "--windows", path], text=True
+ )
+
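+# Illustrative conversions on a default Cygwin install (hypothetical paths):
+# cyg2win32('/cygdrive/c/Users/me') maps to the Windows form 'C:\\Users\\me',
+# and '/usr/src' to something like 'C:\\cygwin64\\usr\\src'; on non-Cygwin
+# platforms the input path is returned untouched.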
+
+def mingw32():
+ """Return true when using mingw32 environment.
+ """
+ if sys.platform=='win32':
+ if os.environ.get('OSTYPE', '')=='msys':
+ return True
+ if os.environ.get('MSYSTEM', '')=='MINGW32':
+ return True
+ return False
+
+def msvc_runtime_version():
+ "Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
+ msc_pos = sys.version.find('MSC v.')
+ if msc_pos != -1:
+ msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
+ else:
+ msc_ver = None
+ return msc_ver
+
+def msvc_runtime_library():
+ "Return name of MSVC runtime library if Python was built with MSVC >= 7"
+    ver = msvc_runtime_major()
+ if ver:
+ if ver < 140:
+ return "msvcr%i" % ver
+ else:
+ return "vcruntime%i" % ver
+ else:
+ return None
+
+def msvc_runtime_major():
+ "Return major version of MSVC runtime coded like get_build_msvc_version"
+ major = {1300: 70, # MSVC 7.0
+ 1310: 71, # MSVC 7.1
+ 1400: 80, # MSVC 8
+ 1500: 90, # MSVC 9 (aka 2008)
+ 1600: 100, # MSVC 10 (aka 2010)
+ 1900: 140, # MSVC 14 (aka 2015)
+ }.get(msvc_runtime_version(), None)
+ return major
+
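+# Illustrative chain for a Python built with "MSC v.1900":
+# msvc_runtime_version() -> 1900, msvc_runtime_major() -> 140 and
+# msvc_runtime_library() -> 'vcruntime140'; majors below 140 map to
+# 'msvcr%d' names such as 'msvcr90'.
+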
+#########################
+
+#XXX need support for .C that is also C++
+cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
+fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
+f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
+f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
+def _get_f90_modules(source):
+ """Return a list of Fortran f90 module names that
+ given source file defines.
+ """
+ if not f90_ext_match(source):
+ return []
+ modules = []
+ with open(source, 'r') as f:
+ for line in f:
+ m = f90_module_name_match(line)
+ if m:
+ name = m.group('name')
+ modules.append(name)
+ # break # XXX can we assume that there is one module per file?
+ return modules
+
+def is_string(s):
+ return isinstance(s, str)
+
+def all_strings(lst):
+ """Return True if all items in lst are string objects. """
+ for item in lst:
+ if not is_string(item):
+ return False
+ return True
+
+def is_sequence(seq):
+ if is_string(seq):
+ return False
+ try:
+ len(seq)
+ except Exception:
+ return False
+ return True
+
+def is_glob_pattern(s):
+ return is_string(s) and ('*' in s or '?' in s)
+
+def as_list(seq):
+ if is_sequence(seq):
+ return list(seq)
+ else:
+ return [seq]
+
+def get_language(sources):
+ # not used in numpy/scipy packages, use build_ext.detect_language instead
+ """Determine language value (c,f77,f90) from sources """
+ language = None
+ for source in sources:
+ if isinstance(source, str):
+ if f90_ext_match(source):
+ language = 'f90'
+ break
+ elif fortran_ext_match(source):
+ language = 'f77'
+ return language
+
+def has_f_sources(sources):
+ """Return True if sources contains Fortran files """
+ for source in sources:
+ if fortran_ext_match(source):
+ return True
+ return False
+
+def has_cxx_sources(sources):
+ """Return True if sources contains C++ files """
+ for source in sources:
+ if cxx_ext_match(source):
+ return True
+ return False
+
+def filter_sources(sources):
+ """Return four lists of filenames containing
+ C, C++, Fortran, and Fortran 90 module sources,
+ respectively.
+ """
+ c_sources = []
+ cxx_sources = []
+ f_sources = []
+ fmodule_sources = []
+ for source in sources:
+ if fortran_ext_match(source):
+ modules = _get_f90_modules(source)
+ if modules:
+ fmodule_sources.append(source)
+ else:
+ f_sources.append(source)
+ elif cxx_ext_match(source):
+ cxx_sources.append(source)
+ else:
+ c_sources.append(source)
+ return c_sources, cxx_sources, f_sources, fmodule_sources
+
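+# Illustrative split (hypothetical file names; note that .f90 files are
+# opened and scanned, and land in the fourth list only when they define a
+# Fortran module):
+#
+#     filter_sources(['a.c', 'b.cpp', 'c.f', 'm.f90'])
+#     # -> (['a.c'], ['b.cpp'], ['c.f'], ['m.f90']) when m.f90 defines a
+#     #    module, otherwise m.f90 ends up in the third list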
+
+def _get_headers(directory_list):
+ # get *.h files from list of directories
+ headers = []
+ for d in directory_list:
+ head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
+ headers.extend(head)
+ return headers
+
+def _get_directories(list_of_sources):
+ # get unique directories from list of sources.
+ direcs = []
+ for f in list_of_sources:
+ d = os.path.split(f)
+        if d[0] != '' and d[0] not in direcs:
+ direcs.append(d[0])
+ return direcs
+
+def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
+ """
+ Return commandline representation used to determine if a file needs
+ to be recompiled
+ """
+ cmdline = 'commandline: '
+ cmdline += ' '.join(cc_args)
+ cmdline += ' '.join(extra_postargs)
+ cmdline += ' '.join(pp_opts) + '\n'
+ return cmdline
+
+
+def get_dependencies(sources):
+ #XXX scan sources for include statements
+ return _get_headers(_get_directories(sources))
+
+def is_local_src_dir(directory):
+ """Return true if directory is local directory.
+ """
+ if not is_string(directory):
+ return False
+ abs_dir = os.path.abspath(directory)
+ c = os.path.commonprefix([os.getcwd(), abs_dir])
+ new_dir = abs_dir[len(c):].split(os.sep)
+ if new_dir and not new_dir[0]:
+ new_dir = new_dir[1:]
+ if new_dir and new_dir[0]=='build':
+ return False
+ new_dir = os.sep.join(new_dir)
+ return os.path.isdir(new_dir)
+
+def general_source_files(top_path):
+ pruned_directories = {'CVS':1, '.svn':1, 'build':1}
+ prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
+ for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
+ pruned = [ d for d in dirnames if d not in pruned_directories ]
+ dirnames[:] = pruned
+ for f in filenames:
+ if not prune_file_pat.search(f):
+ yield os.path.join(dirpath, f)
+
+def general_source_directories_files(top_path):
+ """Return a directory name relative to top_path and
+ files contained.
+ """
+ pruned_directories = ['CVS', '.svn', 'build']
+ prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
+ for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
+ pruned = [ d for d in dirnames if d not in pruned_directories ]
+ dirnames[:] = pruned
+ for d in dirnames:
+ dpath = os.path.join(dirpath, d)
+ rpath = rel_path(dpath, top_path)
+ files = []
+ for f in os.listdir(dpath):
+ fn = os.path.join(dpath, f)
+ if os.path.isfile(fn) and not prune_file_pat.search(fn):
+ files.append(fn)
+ yield rpath, files
+ dpath = top_path
+ rpath = rel_path(dpath, top_path)
+ filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
+ if not prune_file_pat.search(f)]
+ files = [f for f in filenames if os.path.isfile(f)]
+ yield rpath, files
+
+
+def get_ext_source_files(ext):
+ # Get sources and any include files in the same directory.
+ filenames = []
+ sources = [_m for _m in ext.sources if is_string(_m)]
+ filenames.extend(sources)
+ filenames.extend(get_dependencies(sources))
+ for d in ext.depends:
+ if is_local_src_dir(d):
+ filenames.extend(list(general_source_files(d)))
+ elif os.path.isfile(d):
+ filenames.append(d)
+ return filenames
+
+def get_script_files(scripts):
+ scripts = [_m for _m in scripts if is_string(_m)]
+ return scripts
+
+def get_lib_source_files(lib):
+ filenames = []
+ sources = lib[1].get('sources', [])
+ sources = [_m for _m in sources if is_string(_m)]
+ filenames.extend(sources)
+ filenames.extend(get_dependencies(sources))
+ depends = lib[1].get('depends', [])
+ for d in depends:
+ if is_local_src_dir(d):
+ filenames.extend(list(general_source_files(d)))
+ elif os.path.isfile(d):
+ filenames.append(d)
+ return filenames
+
+def get_shared_lib_extension(is_python_ext=False):
+ """Return the correct file extension for shared libraries.
+
+ Parameters
+ ----------
+ is_python_ext : bool, optional
+ Whether the shared library is a Python extension. Default is False.
+
+ Returns
+ -------
+ so_ext : str
+ The shared library extension.
+
+ Notes
+ -----
+ For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
+ and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
+ POSIX systems according to PEP 3149.
+
+ """
+ confvars = distutils.sysconfig.get_config_vars()
+ so_ext = confvars.get('EXT_SUFFIX', '')
+
+ if not is_python_ext:
+ # hardcode known values, config vars (including SHLIB_SUFFIX) are
+ # unreliable (see #3182)
+ # darwin, windows and debug linux are wrong in 3.3.1 and older
+ if (sys.platform.startswith('linux') or
+ sys.platform.startswith('gnukfreebsd')):
+ so_ext = '.so'
+ elif sys.platform.startswith('darwin'):
+ so_ext = '.dylib'
+ elif sys.platform.startswith('win'):
+ so_ext = '.dll'
+ else:
+ # fall back to config vars for unknown platforms
+ # fix long extension for Python >=3.2, see PEP 3149.
+ if 'SOABI' in confvars:
+ # Does nothing unless SOABI config var exists
+ so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
+
+ return so_ext
+
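+# Illustrative values (platform dependent): on Linux
+# get_shared_lib_extension() returns '.so', while
+# get_shared_lib_extension(is_python_ext=True) returns the full EXT_SUFFIX,
+# e.g. something like '.cpython-39-x86_64-linux-gnu.so'.
+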
+def get_data_files(data):
+ if is_string(data):
+ return [data]
+ sources = data[1]
+ filenames = []
+ for s in sources:
+ if hasattr(s, '__call__'):
+ continue
+ if is_local_src_dir(s):
+ filenames.extend(list(general_source_files(s)))
+ elif is_string(s):
+ if os.path.isfile(s):
+ filenames.append(s)
+ else:
+                print('Non-existing data file:', s)
+ else:
+ raise TypeError(repr(s))
+ return filenames
+
+def dot_join(*args):
+ return '.'.join([a for a in args if a])
+
+def get_frame(level=0):
+ """Return frame object from call stack with given level.
+ """
+ try:
+ return sys._getframe(level+1)
+ except AttributeError:
+ frame = sys.exc_info()[2].tb_frame
+ for _ in range(level+1):
+ frame = frame.f_back
+ return frame
+
+
+######################
+
+class Configuration:
+
+ _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
+ 'libraries', 'headers', 'scripts', 'py_modules',
+ 'installed_libraries', 'define_macros']
+ _dict_keys = ['package_dir', 'installed_pkg_config']
+ _extra_keys = ['name', 'version']
+
+ numpy_include_dirs = []
+
+ def __init__(self,
+ package_name=None,
+ parent_name=None,
+ top_path=None,
+ package_path=None,
+ caller_level=1,
+ setup_name='setup.py',
+ **attrs):
+ """Construct configuration instance of a package.
+
+ package_name -- name of the package
+ Ex.: 'distutils'
+ parent_name -- name of the parent package
+ Ex.: 'numpy'
+ top_path -- directory of the toplevel package
+ Ex.: the directory where the numpy package source sits
+ package_path -- directory of package. Will be computed by magic from the
+ directory of the caller module if not specified
+ Ex.: the directory where numpy.distutils is
+ caller_level -- frame level to caller namespace, internal parameter.
+ """
+ self.name = dot_join(parent_name, package_name)
+ self.version = None
+
+ caller_frame = get_frame(caller_level)
+ self.local_path = get_path_from_frame(caller_frame, top_path)
+        # local_path -- directory of a file (usually setup.py) that
+        #               defines a configuration() function.
+ if top_path is None:
+ top_path = self.local_path
+ self.local_path = ''
+ if package_path is None:
+ package_path = self.local_path
+ elif os.path.isdir(njoin(self.local_path, package_path)):
+ package_path = njoin(self.local_path, package_path)
+ if not os.path.isdir(package_path or '.'):
+ raise ValueError("%r is not a directory" % (package_path,))
+ self.top_path = top_path
+ self.package_path = package_path
+ # this is the relative path in the installed package
+ self.path_in_package = os.path.join(*self.name.split('.'))
+
+ self.list_keys = self._list_keys[:]
+ self.dict_keys = self._dict_keys[:]
+
+ for n in self.list_keys:
+ v = copy.copy(attrs.get(n, []))
+ setattr(self, n, as_list(v))
+
+ for n in self.dict_keys:
+ v = copy.copy(attrs.get(n, {}))
+ setattr(self, n, v)
+
+ known_keys = self.list_keys + self.dict_keys
+ self.extra_keys = self._extra_keys[:]
+ for n in attrs.keys():
+ if n in known_keys:
+ continue
+ a = attrs[n]
+ setattr(self, n, a)
+ if isinstance(a, list):
+ self.list_keys.append(n)
+ elif isinstance(a, dict):
+ self.dict_keys.append(n)
+ else:
+ self.extra_keys.append(n)
+
+ if os.path.exists(njoin(package_path, '__init__.py')):
+ self.packages.append(self.name)
+ self.package_dir[self.name] = package_path
+
+ self.options = dict(
+ ignore_setup_xxx_py = False,
+ assume_default_configuration = False,
+ delegate_options_to_subpackages = False,
+ quiet = False,
+ )
+
+ caller_instance = None
+ for i in range(1, 3):
+ try:
+ f = get_frame(i)
+ except ValueError:
+ break
+ try:
+ caller_instance = eval('self', f.f_globals, f.f_locals)
+ break
+ except NameError:
+ pass
+ if isinstance(caller_instance, self.__class__):
+ if caller_instance.options['delegate_options_to_subpackages']:
+ self.set_options(**caller_instance.options)
+
+ self.setup_name = setup_name
+
+ def todict(self):
+ """
+ Return a dictionary compatible with the keyword arguments of distutils
+ setup function.
+
+ Examples
+ --------
+ >>> setup(**config.todict()) #doctest: +SKIP
+ """
+
+ self._optimize_data_files()
+ d = {}
+ known_keys = self.list_keys + self.dict_keys + self.extra_keys
+ for n in known_keys:
+ a = getattr(self, n)
+ if a:
+ d[n] = a
+ return d
+
+ def info(self, message):
+ if not self.options['quiet']:
+ print(message)
+
+ def warn(self, message):
+ sys.stderr.write('Warning: %s\n' % (message,))
+
+ def set_options(self, **options):
+ """
+ Configure Configuration instance.
+
+ The following options are available:
+ - ignore_setup_xxx_py
+ - assume_default_configuration
+ - delegate_options_to_subpackages
+ - quiet
+
+ """
+ for key, value in options.items():
+ if key in self.options:
+ self.options[key] = value
+ else:
+ raise ValueError('Unknown option: '+key)
+
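+    # A minimal illustrative call (option names as listed above):
+    #
+    #     config.set_options(quiet=True, assume_default_configuration=True)
+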
+ def get_distribution(self):
+ """Return the distutils distribution object for self."""
+ from numpy.distutils.core import get_distribution
+ return get_distribution()
+
+ def _wildcard_get_subpackage(self, subpackage_name,
+ parent_name,
+ caller_level = 1):
+ l = subpackage_name.split('.')
+ subpackage_path = njoin([self.local_path]+l)
+ dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
+ config_list = []
+ for d in dirs:
+ if not os.path.isfile(njoin(d, '__init__.py')):
+ continue
+ if 'build' in d.split(os.sep):
+ continue
+ n = '.'.join(d.split(os.sep)[-len(l):])
+ c = self.get_subpackage(n,
+ parent_name = parent_name,
+ caller_level = caller_level+1)
+ config_list.extend(c)
+ return config_list
+
+ def _get_configuration_from_setup_py(self, setup_py,
+ subpackage_name,
+ subpackage_path,
+ parent_name,
+ caller_level = 1):
+ # In case setup_py imports local modules:
+ sys.path.insert(0, os.path.dirname(setup_py))
+ try:
+ setup_name = os.path.splitext(os.path.basename(setup_py))[0]
+ n = dot_join(self.name, subpackage_name, setup_name)
+ setup_module = exec_mod_from_location(
+ '_'.join(n.split('.')), setup_py)
+ if not hasattr(setup_module, 'configuration'):
+ if not self.options['assume_default_configuration']:
+ self.warn('Assuming default configuration '\
+ '(%s does not define configuration())'\
+ % (setup_module))
+ config = Configuration(subpackage_name, parent_name,
+ self.top_path, subpackage_path,
+ caller_level = caller_level + 1)
+ else:
+ pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
+ args = (pn,)
+ if setup_module.configuration.__code__.co_argcount > 1:
+ args = args + (self.top_path,)
+ config = setup_module.configuration(*args)
+ if config.name!=dot_join(parent_name, subpackage_name):
+ self.warn('Subpackage %r configuration returned as %r' % \
+ (dot_join(parent_name, subpackage_name), config.name))
+ finally:
+ del sys.path[0]
+ return config
+
+ def get_subpackage(self,subpackage_name,
+ subpackage_path=None,
+ parent_name=None,
+ caller_level = 1):
+ """Return list of subpackage configurations.
+
+ Parameters
+ ----------
+ subpackage_name : str or None
+            Name of the subpackage to get the configuration for. A '*' in
+            subpackage_name is handled as a wildcard.
+ subpackage_path : str
+ If None, then the path is assumed to be the local path plus the
+ subpackage_name. If a setup.py file is not found in the
+ subpackage_path, then a default configuration is used.
+ parent_name : str
+ Parent name.
+ """
+ if subpackage_name is None:
+ if subpackage_path is None:
+ raise ValueError(
+ "either subpackage_name or subpackage_path must be specified")
+ subpackage_name = os.path.basename(subpackage_path)
+
+ # handle wildcards
+ l = subpackage_name.split('.')
+ if subpackage_path is None and '*' in subpackage_name:
+ return self._wildcard_get_subpackage(subpackage_name,
+ parent_name,
+ caller_level = caller_level+1)
+ assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
+ if subpackage_path is None:
+ subpackage_path = njoin([self.local_path] + l)
+ else:
+ subpackage_path = njoin([subpackage_path] + l[:-1])
+ subpackage_path = self.paths([subpackage_path])[0]
+ setup_py = njoin(subpackage_path, self.setup_name)
+ if not self.options['ignore_setup_xxx_py']:
+ if not os.path.isfile(setup_py):
+ setup_py = njoin(subpackage_path,
+ 'setup_%s.py' % (subpackage_name))
+ if not os.path.isfile(setup_py):
+ if not self.options['assume_default_configuration']:
+ self.warn('Assuming default configuration '\
+ '(%s/{setup_%s,setup}.py was not found)' \
+ % (os.path.dirname(setup_py), subpackage_name))
+ config = Configuration(subpackage_name, parent_name,
+ self.top_path, subpackage_path,
+ caller_level = caller_level+1)
+ else:
+ config = self._get_configuration_from_setup_py(
+ setup_py,
+ subpackage_name,
+ subpackage_path,
+ parent_name,
+ caller_level = caller_level + 1)
+ if config:
+ return [config]
+ else:
+ return []
+
+ def add_subpackage(self,subpackage_name,
+ subpackage_path=None,
+ standalone = False):
+ """Add a sub-package to the current Configuration instance.
+
+ This is useful in a setup.py script for adding sub-packages to a
+ package.
+
+ Parameters
+ ----------
+ subpackage_name : str
+ name of the subpackage
+ subpackage_path : str
+            if given, the subpackage path such that the subpackage is in
+            subpackage_path / subpackage_name. If None, the subpackage is
+ assumed to be located in the local path / subpackage_name.
+ standalone : bool
+ """
+
+ if standalone:
+ parent_name = None
+ else:
+ parent_name = self.name
+ config_list = self.get_subpackage(subpackage_name, subpackage_path,
+ parent_name = parent_name,
+ caller_level = 2)
+ if not config_list:
+ self.warn('No configuration returned, assuming unavailable.')
+ for config in config_list:
+ d = config
+ if isinstance(config, Configuration):
+ d = config.todict()
+ assert isinstance(d, dict), repr(type(d))
+
+ self.info('Appending %s configuration to %s' \
+ % (d.get('name'), self.name))
+ self.dict_append(**d)
+
+ dist = self.get_distribution()
+ if dist is not None:
+ self.warn('distutils distribution has been initialized,'\
+ ' it may be too late to add a subpackage '+ subpackage_name)
+
+ def add_data_dir(self, data_path):
+ """Recursively add files under data_path to data_files list.
+
+ Recursively add files under data_path to the list of data_files to be
+ installed (and distributed). The data_path can be either a relative
+ path-name, or an absolute path-name, or a 2-tuple where the first
+ argument shows where in the install directory the data directory
+ should be installed to.
+
+ Parameters
+ ----------
+ data_path : seq or str
+ Argument can be either
+
+ * 2-sequence (<datadir suffix>, <path to data directory>)
+ * path to data directory where python datadir suffix defaults
+ to package dir.
+
+ Notes
+ -----
+ Rules for installation paths::
+
+ foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+ (gun, foo/bar) -> parent/gun
+ foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+            (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> parent/gun
+ (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+ /foo/bar -> (bar, /foo/bar) -> parent/bar
+ (gun, /foo/bar) -> parent/gun
+ (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+
+ Examples
+ --------
+ For example suppose the source directory contains fun/foo.dat and
+ fun/bar/car.dat:
+
+ >>> self.add_data_dir('fun') #doctest: +SKIP
+ >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
+ >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
+
+ Will install data-files to the locations::
+
+ <package install directory>/
+ fun/
+ foo.dat
+ bar/
+ car.dat
+ sun/
+ foo.dat
+ bar/
+ car.dat
+              gun/
+                foo.dat
+                bar/
+                  car.dat
+
+ """
+ if is_sequence(data_path):
+ d, data_path = data_path
+ else:
+ d = None
+ if is_sequence(data_path):
+            for p in data_path:
+                self.add_data_dir((d, p))
+ return
+ if not is_string(data_path):
+ raise TypeError("not a string: %r" % (data_path,))
+ if d is None:
+ if os.path.isabs(data_path):
+ return self.add_data_dir((os.path.basename(data_path), data_path))
+ return self.add_data_dir((data_path, data_path))
+ paths = self.paths(data_path, include_non_existing=False)
+ if is_glob_pattern(data_path):
+ if is_glob_pattern(d):
+ pattern_list = allpath(d).split(os.sep)
+ pattern_list.reverse()
+ # /a/*//b/ -> /a/*/b
+                for i in reversed(range(len(pattern_list)-1)):
+ if not pattern_list[i]:
+ del pattern_list[i]
+ #
+ for path in paths:
+ if not os.path.isdir(path):
+ print('Not a directory, skipping', path)
+ continue
+ rpath = rel_path(path, self.local_path)
+ path_list = rpath.split(os.sep)
+ path_list.reverse()
+ target_list = []
+ i = 0
+ for s in pattern_list:
+ if is_glob_pattern(s):
+ if i>=len(path_list):
+ raise ValueError('cannot fill pattern %r with %r' \
+ % (d, path))
+ target_list.append(path_list[i])
+ else:
+ assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
+ target_list.append(s)
+ i += 1
+ if path_list[i:]:
+ self.warn('mismatch of pattern_list=%s and path_list=%s'\
+ % (pattern_list, path_list))
+ target_list.reverse()
+ self.add_data_dir((os.sep.join(target_list), path))
+ else:
+ for path in paths:
+ self.add_data_dir((d, path))
+ return
+ assert not is_glob_pattern(d), repr(d)
+
+ dist = self.get_distribution()
+ if dist is not None and dist.data_files is not None:
+ data_files = dist.data_files
+ else:
+ data_files = self.data_files
+
+ for path in paths:
+ for d1, f in list(general_source_directories_files(path)):
+ target_path = os.path.join(self.path_in_package, d, d1)
+ data_files.append((target_path, f))
+
+ def _optimize_data_files(self):
+ data_dict = {}
+ for p, files in self.data_files:
+ if p not in data_dict:
+ data_dict[p] = set()
+ for f in files:
+ data_dict[p].add(f)
+ self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
+
+ def add_data_files(self,*files):
+ """Add data files to configuration data_files.
+
+ Parameters
+ ----------
+ files : sequence
+ Argument(s) can be either
+
+ * 2-sequence (<datadir prefix>,<path to data file(s)>)
+ * paths to data files where python datadir prefix defaults
+ to package dir.
+
+ Notes
+ -----
+ The form of each element of the files sequence is very flexible
+ allowing many combinations of where to get the files from the package
+ and where they should ultimately be installed on the system. The most
+ basic usage is for an element of the files argument sequence to be a
+ simple filename. This will cause that file from the local path to be
+ installed to the installation path of the self.name package (package
+ path). The file argument can also be a relative path in which case the
+ entire relative path will be installed into the package directory.
+ Finally, the file can be an absolute path name in which case the file
+ will be found at the absolute path name but installed to the package
+ path.
+
+ This basic behavior can be augmented by passing a 2-tuple in as the
+ file argument. The first element of the tuple should specify the
+ relative path (under the package install directory) where the
+ remaining sequence of files should be installed to (it has nothing to
+ do with the file-names in the source distribution). The second element
+ of the tuple is the sequence of files that should be installed. The
+ files in this sequence can be filenames, relative paths, or absolute
+ paths. For absolute paths the file will be installed in the top-level
+ package installation directory (regardless of the first argument).
+ Filenames and relative path names will be installed in the package
+ install directory under the path name given as the first element of
+ the tuple.
+
+ Rules for installation paths:
+
+        #. file.txt -> (., file.txt) -> parent/file.txt
+ #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
+ #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
+ #. ``*``.txt -> parent/a.txt, parent/b.txt
+        #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+ #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
+ #. (sun, file.txt) -> parent/sun/file.txt
+ #. (sun, bar/file.txt) -> parent/sun/file.txt
+ #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
+ #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+ An additional feature is that the path to a data-file can actually be
+ a function that takes no arguments and returns the actual path(s) to
+ the data-files. This is useful when the data files are generated while
+ building the package.
+
+ Examples
+ --------
+ Add files to the list of data_files to be included with the package.
+
+ >>> self.add_data_files('foo.dat',
+ ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
+ ... 'bar/cat.dat',
+ ... '/full/path/to/can.dat') #doctest: +SKIP
+
+ will install these data files to::
+
+ <package install directory>/
+ foo.dat
+ fun/
+ gun.dat
+ nun/
+ pun.dat
+ sun.dat
+ bar/
+                 cat.dat
+ can.dat
+
+ where <package install directory> is the package (or sub-package)
+        directory such as '/usr/lib/python2.4/site-packages/mypackage'
+        ('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
+        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
+        ('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
+ """
+
+ if len(files)>1:
+ for f in files:
+ self.add_data_files(f)
+ return
+ assert len(files)==1
+ if is_sequence(files[0]):
+ d, files = files[0]
+ else:
+ d = None
+ if is_string(files):
+ filepat = files
+ elif is_sequence(files):
+ if len(files)==1:
+ filepat = files[0]
+ else:
+ for f in files:
+ self.add_data_files((d, f))
+ return
+ else:
+ raise TypeError(repr(type(files)))
+
+ if d is None:
+ if hasattr(filepat, '__call__'):
+ d = ''
+ elif os.path.isabs(filepat):
+ d = ''
+ else:
+ d = os.path.dirname(filepat)
+ self.add_data_files((d, files))
+ return
+
+ paths = self.paths(filepat, include_non_existing=False)
+ if is_glob_pattern(filepat):
+ if is_glob_pattern(d):
+ pattern_list = d.split(os.sep)
+ pattern_list.reverse()
+ for path in paths:
+ path_list = path.split(os.sep)
+ path_list.reverse()
+ path_list.pop() # filename
+ target_list = []
+ i = 0
+ for s in pattern_list:
+ if is_glob_pattern(s):
+ target_list.append(path_list[i])
+ i += 1
+ else:
+ target_list.append(s)
+ target_list.reverse()
+ self.add_data_files((os.sep.join(target_list), path))
+ else:
+ self.add_data_files((d, paths))
+ return
+ assert not is_glob_pattern(d), repr((d, filepat))
+
+ dist = self.get_distribution()
+ if dist is not None and dist.data_files is not None:
+ data_files = dist.data_files
+ else:
+ data_files = self.data_files
+
+ data_files.append((os.path.join(self.path_in_package, d), paths))
+
+ ### XXX Implement add_py_modules
+
+ def add_define_macros(self, macros):
+ """Add define macros to configuration
+
+        Add the given sequence of (macro name, value) tuples to the end
+        of the define_macros list. This list will be visible to all extension
+        modules of the current package.
+ """
+ dist = self.get_distribution()
+ if dist is not None:
+ if not hasattr(dist, 'define_macros'):
+ dist.define_macros = []
+ dist.define_macros.extend(macros)
+ else:
+ self.define_macros.extend(macros)
+
+
+ def add_include_dirs(self,*paths):
+ """Add paths to configuration include directories.
+
+        Add the given sequence of paths to the end of the include_dirs
+ list. This list will be visible to all extension modules of the
+ current package.
+ """
+ include_dirs = self.paths(paths)
+ dist = self.get_distribution()
+ if dist is not None:
+ if dist.include_dirs is None:
+ dist.include_dirs = []
+ dist.include_dirs.extend(include_dirs)
+ else:
+ self.include_dirs.extend(include_dirs)
+
+ def add_headers(self,*files):
+ """Add installable headers to configuration.
+
+        Add the given sequence of files to the end of the headers list.
+        By default, headers will be installed under the
+        <python-include>/<self.name.replace('.','/')>/ directory. If an item
+        of files is a tuple, then its first argument specifies the actual
+        installation location relative to the <python-include> path.
+
+ Parameters
+ ----------
+ files : str or seq
+ Argument(s) can be either:
+
+ * 2-sequence (<includedir suffix>,<path to header file(s)>)
+ * path(s) to header file(s) where python includedir suffix will
+ default to package name.
+ """
+ headers = []
+ for path in files:
+ if is_string(path):
+                for p in self.paths(path):
+                    headers.append((self.name, p))
+ else:
+ if not isinstance(path, (tuple, list)) or len(path) != 2:
+ raise TypeError(repr(path))
+                for p in self.paths(path[1]):
+                    headers.append((path[0], p))
+ dist = self.get_distribution()
+ if dist is not None:
+ if dist.headers is None:
+ dist.headers = []
+ dist.headers.extend(headers)
+ else:
+ self.headers.extend(headers)
+
+ def paths(self,*paths,**kws):
+ """Apply glob to paths and prepend local_path if needed.
+
+ Applies glob.glob(...) to each path in the sequence (if needed) and
+ pre-pends the local_path if needed. Because this is called on all
+ source lists, this allows wildcard characters to be specified in lists
+        of sources for extension modules, libraries, and scripts, and allows
+        path-names to be relative to the source directory.
+
+ """
+ include_non_existing = kws.get('include_non_existing', True)
+ return gpaths(paths,
+ local_path = self.local_path,
+ include_non_existing=include_non_existing)
+
+ def _fix_paths_dict(self, kw):
+ for k in kw.keys():
+ v = kw[k]
+ if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
+ 'module_dirs', 'extra_objects']:
+ new_v = self.paths(v)
+ kw[k] = new_v
+
+ def add_extension(self,name,sources,**kw):
+ """Add extension to configuration.
+
+ Create and add an Extension instance to the ext_modules list. This
+ method also takes the following optional keyword arguments that are
+ passed on to the Extension constructor.
+
+ Parameters
+ ----------
+ name : str
+ name of the extension
+ sources : seq
+ list of the sources. The list of sources may contain functions
+ (called source generators) which must take an extension instance
+ and a build directory as inputs and return a source file or list of
+ source files or None. If None is returned then no sources are
+ generated. If the Extension instance has no sources after
+ processing all source generators, then no extension module is
+ built.
+ include_dirs :
+ define_macros :
+ undef_macros :
+ library_dirs :
+ libraries :
+ runtime_library_dirs :
+ extra_objects :
+ extra_compile_args :
+ extra_link_args :
+ extra_f77_compile_args :
+ extra_f90_compile_args :
+ export_symbols :
+ swig_opts :
+ depends :
+ The depends list contains paths to files or directories that the
+ sources of the extension module depend on. If any path in the
+ depends list is newer than the extension module, then the module
+ will be rebuilt.
+ language :
+ f2py_options :
+ module_dirs :
+ extra_info : dict or list
+ dict or list of dict of keywords to be appended to keywords.
+
+ Notes
+ -----
+ The self.paths(...) method is applied to all lists that may contain
+ paths.
+ """
+ ext_args = copy.copy(kw)
+ ext_args['name'] = dot_join(self.name, name)
+ ext_args['sources'] = sources
+
+ if 'extra_info' in ext_args:
+ extra_info = ext_args['extra_info']
+ del ext_args['extra_info']
+ if isinstance(extra_info, dict):
+ extra_info = [extra_info]
+ for info in extra_info:
+ assert isinstance(info, dict), repr(info)
+ dict_append(ext_args,**info)
+
+ self._fix_paths_dict(ext_args)
+
+ # Resolve out-of-tree dependencies
+ libraries = ext_args.get('libraries', [])
+ libnames = []
+ ext_args['libraries'] = []
+ for libname in libraries:
+ if isinstance(libname, tuple):
+ self._fix_paths_dict(libname[1])
+
+ # Handle library names of the form libname@relative/path/to/library
+ if '@' in libname:
+ lname, lpath = libname.split('@', 1)
+ lpath = os.path.abspath(njoin(self.local_path, lpath))
+ if os.path.isdir(lpath):
+ c = self.get_subpackage(None, lpath,
+ caller_level = 2)
+ if isinstance(c, Configuration):
+ c = c.todict()
+ for l in [l[0] for l in c.get('libraries', [])]:
+ llname = l.split('__OF__', 1)[0]
+ if llname == lname:
+ c.pop('name', None)
+ dict_append(ext_args,**c)
+ break
+ continue
+ libnames.append(libname)
+
+ ext_args['libraries'] = libnames + ext_args['libraries']
+ ext_args['define_macros'] = \
+ self.define_macros + ext_args.get('define_macros', [])
+
+ from numpy.distutils.core import Extension
+ ext = Extension(**ext_args)
+ self.ext_modules.append(ext)
+
+ dist = self.get_distribution()
+ if dist is not None:
+ self.warn('distutils distribution has been initialized,'\
+ ' it may be too late to add an extension '+name)
+ return ext
+
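+    # Illustrative use inside a setup.py configuration() function (module
+    # name, sources and directories are hypothetical):
+    #
+    #     config.add_extension('fastmod',
+    #                          sources=['fastmodmodule.c', 'src/*.c'],
+    #                          include_dirs=['include'],
+    #                          define_macros=[('MY_DEBUG', None)])
+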
+ def add_library(self,name,sources,**build_info):
+ """
+ Add library to configuration.
+
+ Parameters
+ ----------
+ name : str
+            Name of the library.
+ sources : sequence
+ List of the sources. The list of sources may contain functions
+ (called source generators) which must take an extension instance
+ and a build directory as inputs and return a source file or list of
+ source files or None. If None is returned then no sources are
+ generated. If the Extension instance has no sources after
+ processing all source generators, then no extension module is
+ built.
+ build_info : dict, optional
+ The following keys are allowed:
+
+ * depends
+ * macros
+ * include_dirs
+ * extra_compiler_args
+ * extra_f77_compile_args
+ * extra_f90_compile_args
+ * f2py_options
+ * language
+
+ """
+ self._add_library(name, sources, None, build_info)
+
+ dist = self.get_distribution()
+ if dist is not None:
+ self.warn('distutils distribution has been initialized,'\
+ ' it may be too late to add a library '+ name)
+
+ def _add_library(self, name, sources, install_dir, build_info):
+ """Common implementation for add_library and add_installed_library. Do
+ not use directly"""
+ build_info = copy.copy(build_info)
+ build_info['sources'] = sources
+
+        # Sometimes, depends is not set to an empty list by default, and if
+        # depends is not given to add_library, distutils barfs (#1134)
+        if 'depends' not in build_info:
+            build_info['depends'] = []
+
+ self._fix_paths_dict(build_info)
+
+        # Add to libraries list so that it is built with build_clib
+ self.libraries.append((name, build_info))
+
+ def add_installed_library(self, name, sources, install_dir, build_info=None):
+ """
+ Similar to add_library, but the specified library is installed.
+
+ Most C libraries used with `distutils` are only used to build python
+ extensions, but libraries built through this method will be installed
+ so that they can be reused by third-party packages.
+
+ Parameters
+ ----------
+ name : str
+ Name of the installed library.
+ sources : sequence
+ List of the library's source files. See `add_library` for details.
+ install_dir : str
+ Path to install the library, relative to the current sub-package.
+ build_info : dict, optional
+ The following keys are allowed:
+
+ * depends
+ * macros
+ * include_dirs
+ * extra_compiler_args
+ * extra_f77_compile_args
+ * extra_f90_compile_args
+ * f2py_options
+ * language
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ add_library, add_npy_pkg_config, get_info
+
+ Notes
+ -----
+ The best way to encode the options required to link against the specified
+ C libraries is to use a "libname.ini" file, and use `get_info` to
+ retrieve the required options (see `add_npy_pkg_config` for more
+ information).
+
+ """
+ if not build_info:
+ build_info = {}
+
+ install_dir = os.path.join(self.package_path, install_dir)
+ self._add_library(name, sources, install_dir, build_info)
+ self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
+
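+    # Illustrative pairing with add_npy_pkg_config/get_info (all names are
+    # hypothetical):
+    #
+    #     config.add_installed_library('mylib', sources=['src/mylib.c'],
+    #                                  install_dir='lib')
+    #     config.add_npy_pkg_config('mylib.ini.in', 'lib', {'foo': 'bar'})
+    #     # third parties can then query get_info('mylib') via the .ini file
+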
+ def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
+ """
+ Generate and install a npy-pkg config file from a template.
+
+ The config file generated from `template` is installed in the
+ given install directory, using `subst_dict` for variable substitution.
+
+ Parameters
+ ----------
+ template : str
+            The path of the template, relative to the current package path.
+ install_dir : str
+            Where to install the npy-pkg config file, relative to the current
+ package path.
+ subst_dict : dict, optional
+ If given, any string of the form ``@key@`` will be replaced by
+ ``subst_dict[key]`` in the template file when installed. The install
+ prefix is always available through the variable ``@prefix@``, since the
+ install prefix is not easy to get reliably from setup.py.
+
+        See Also
+ --------
+ add_installed_library, get_info
+
+ Notes
+ -----
+ This works for both standard installs and in-place builds, i.e. the
+        ``@prefix@`` refers to the source directory for in-place builds.
+
+ Examples
+ --------
+ ::
+
+ config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
+
+ Assuming the foo.ini.in file has the following content::
+
+ [meta]
+ Name=@foo@
+ Version=1.0
+ Description=dummy description
+
+ [default]
+ Cflags=-I@prefix@/include
+ Libs=
+
+ The generated file will have the following content::
+
+ [meta]
+ Name=bar
+ Version=1.0
+ Description=dummy description
+
+ [default]
+ Cflags=-Iprefix_dir/include
+ Libs=
+
+ and will be installed as foo.ini in the 'lib' subpath.
+
+ When cross-compiling with numpy distutils, it might be necessary to
+ use modified npy-pkg-config files. Using the default/generated files
+ will link with the host libraries (i.e. libnpymath.a). For
+        cross-compilation you of course need to link with target libraries,
+ while using the host Python installation.
+
+ You can copy out the numpy/core/lib/npy-pkg-config directory, add a
+ pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
+ variable to point to the directory with the modified npy-pkg-config
+ files.
+
+ Example npymath.ini modified for cross-compilation::
+
+ [meta]
+ Name=npymath
+ Description=Portable, core math library implementing C99 standard
+ Version=0.1
+
+ [variables]
+ pkgname=numpy.core
+ pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
+ prefix=${pkgdir}
+ libdir=${prefix}/lib
+ includedir=${prefix}/include
+
+ [default]
+ Libs=-L${libdir} -lnpymath
+ Cflags=-I${includedir}
+ Requires=mlib
+
+ [msvc]
+ Libs=/LIBPATH:${libdir} npymath.lib
+ Cflags=/INCLUDE:${includedir}
+ Requires=mlib
+
+ """
+ if subst_dict is None:
+ subst_dict = {}
+ template = os.path.join(self.package_path, template)
+
+ if self.name in self.installed_pkg_config:
+ self.installed_pkg_config[self.name].append((template, install_dir,
+ subst_dict))
+ else:
+ self.installed_pkg_config[self.name] = [(template, install_dir,
+ subst_dict)]
+
+
+ def add_scripts(self,*files):
+ """Add scripts to configuration.
+
+        Add the sequence of files to the end of the scripts list.
+ Scripts will be installed under the <prefix>/bin/ directory.
+
+ """
+ scripts = self.paths(files)
+ dist = self.get_distribution()
+ if dist is not None:
+ if dist.scripts is None:
+ dist.scripts = []
+ dist.scripts.extend(scripts)
+ else:
+ self.scripts.extend(scripts)
+
+ def dict_append(self,**dict):
+ for key in self.list_keys:
+ a = getattr(self, key)
+ a.extend(dict.get(key, []))
+ for key in self.dict_keys:
+ a = getattr(self, key)
+ a.update(dict.get(key, {}))
+ known_keys = self.list_keys + self.dict_keys + self.extra_keys
+ for key in dict.keys():
+ if key not in known_keys:
+ a = getattr(self, key, None)
+ if a and a==dict[key]: continue
+ self.warn('Inheriting attribute %r=%r from %r' \
+ % (key, dict[key], dict.get('name', '?')))
+ setattr(self, key, dict[key])
+ self.extra_keys.append(key)
+ elif key in self.extra_keys:
+ self.info('Ignoring attempt to set %r (from %r to %r)' \
+ % (key, getattr(self, key), dict[key]))
+ elif key in known_keys:
+ # key is already processed above
+ pass
+ else:
+ raise ValueError("Don't know about key=%r" % (key))
+
+ def __str__(self):
+ from pprint import pformat
+ known_keys = self.list_keys + self.dict_keys + self.extra_keys
+ s = '<'+5*'-' + '\n'
+ s += 'Configuration of '+self.name+':\n'
+ known_keys.sort()
+ for k in known_keys:
+ a = getattr(self, k, None)
+ if a:
+ s += '%s = %s\n' % (k, pformat(a))
+ s += 5*'-' + '>'
+ return s
+
+ def get_config_cmd(self):
+ """
+ Returns the numpy.distutils config command instance.
+ """
+ cmd = get_cmd('config')
+ cmd.ensure_finalized()
+ cmd.dump_source = 0
+ cmd.noisy = 0
+ old_path = os.environ.get('PATH')
+ if old_path:
+ path = os.pathsep.join(['.', old_path])
+ os.environ['PATH'] = path
+ return cmd
+
+ def get_build_temp_dir(self):
+ """
+ Return a path to a temporary directory where temporary files should be
+ placed.
+ """
+ cmd = get_cmd('build')
+ cmd.ensure_finalized()
+ return cmd.build_temp
+
+ def have_f77c(self):
+ """Check for availability of Fortran 77 compiler.
+
+        Use it inside a source generating function to ensure that the
+        setup distribution instance has been initialized.
+
+ Notes
+ -----
+        Returns True if a Fortran 77 compiler is available (i.e. a simple
+        Fortran 77 program could be compiled successfully).
+ """
+ simple_fortran_subroutine = '''
+ subroutine simple
+ end
+ '''
+ config_cmd = self.get_config_cmd()
+ flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
+ return flag
+
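+    # Illustrative guard inside a source generating function (library name
+    # and source file are hypothetical):
+    #
+    #     if config.have_f77c():
+    #         config.add_library('flib', sources=['flib.f'])
+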
+ def have_f90c(self):
+ """Check for availability of Fortran 90 compiler.
+
+        Use it inside a source generating function to ensure that the
+        setup distribution instance has been initialized.
+
+ Notes
+ -----
+        Returns True if a Fortran 90 compiler is available (i.e. a simple
+        Fortran 90 program could be compiled successfully).
+ """
+ simple_fortran_subroutine = '''
+ subroutine simple
+ end
+ '''
+ config_cmd = self.get_config_cmd()
+ flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
+ return flag
+
+ def append_to(self, extlib):
+ """Append libraries, include_dirs to extension or library item.
+ """
+ if is_sequence(extlib):
+ lib_name, build_info = extlib
+ dict_append(build_info,
+ libraries=self.libraries,
+ include_dirs=self.include_dirs)
+ else:
+ from numpy.distutils.core import Extension
+ assert isinstance(extlib, Extension), repr(extlib)
+ extlib.libraries.extend(self.libraries)
+ extlib.include_dirs.extend(self.include_dirs)
+
+ def _get_svn_revision(self, path):
+ """Return path's SVN revision number.
+ """
+ try:
+ output = subprocess.check_output(['svnversion'], cwd=path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ m = re.match(rb'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
+ if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
+ entries = njoin(path, '_svn', 'entries')
+ else:
+ entries = njoin(path, '.svn', 'entries')
+ if os.path.isfile(entries):
+ with open(entries) as f:
+ fstr = f.read()
+ if fstr[:5] == '<?xml': # pre 1.4
+ m = re.search(r'revision="(?P<revision>\d+)"', fstr)
+ if m:
+ return int(m.group('revision'))
+            else: # non-xml entries file (SVN >= 1.4)
+ m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
+ if m:
+ return int(m.group('revision'))
+ return None
+
+ def _get_hg_revision(self, path):
+ """Return path's Mercurial revision number.
+ """
+ try:
+ output = subprocess.check_output(
+ ['hg', 'identify', '--num'], cwd=path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ m = re.match(rb'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
+ branch_fn = njoin(path, '.hg', 'branch')
+ branch_cache_fn = njoin(path, '.hg', 'branch.cache')
+
+ if os.path.isfile(branch_fn):
+ branch0 = None
+ with open(branch_fn) as f:
+ revision0 = f.read().strip()
+
+ branch_map = {}
+ with open(branch_cache_fn, 'r') as f:
+ for line in f:
+ branch1, revision1 = line.split()[:2]
+ if revision1==revision0:
+ branch0 = branch1
+ try:
+ revision1 = int(revision1)
+ except ValueError:
+ continue
+ branch_map[branch1] = revision1
+
+ return branch_map.get(branch0)
+
+ return None
+
+
+ def get_version(self, version_file=None, version_variable=None):
+ """Try to get version string of a package.
+
+ Return a version string of the current package or None if the version
+ information could not be detected.
+
+ Notes
+ -----
+        This method scans files named
+        __version__.py, <packagename>_version.py, version.py,
+        __svn_version__.py, and __hg_version__.py for string variables
+        version, __version__, and <packagename>_version, until a version
+        number is found.
+ """
+ version = getattr(self, 'version', None)
+ if version is not None:
+ return version
+
+ # Get version from version file.
+ if version_file is None:
+ files = ['__version__.py',
+ self.name.split('.')[-1]+'_version.py',
+ 'version.py',
+ '__svn_version__.py',
+ '__hg_version__.py']
+ else:
+ files = [version_file]
+ if version_variable is None:
+ version_vars = ['version',
+ '__version__',
+ self.name.split('.')[-1]+'_version']
+ else:
+ version_vars = [version_variable]
+ for f in files:
+ fn = njoin(self.local_path, f)
+ if os.path.isfile(fn):
+ name = os.path.splitext(os.path.basename(fn))[0]
+ n = dot_join(self.name, name)
+ try:
+ version_module = exec_mod_from_location(
+ '_'.join(n.split('.')), fn)
+ except ImportError as e:
+ self.warn(str(e))
+ version_module = None
+ if version_module is None:
+ continue
+
+ for a in version_vars:
+ version = getattr(version_module, a, None)
+ if version is not None:
+ break
+
+                # Try versioneer-style get_versions()
+ try:
+ version = version_module.get_versions()['version']
+ except AttributeError:
+ pass
+
+ if version is not None:
+ break
+
+ if version is not None:
+ self.version = version
+ return version
+
+ # Get version as SVN or Mercurial revision number
+ revision = self._get_svn_revision(self.local_path)
+ if revision is None:
+ revision = self._get_hg_revision(self.local_path)
+
+ if revision is not None:
+ version = str(revision)
+ self.version = version
+
+ return version
+
+ def make_svn_version_py(self, delete=True):
+ """Appends a data function to the data_files list that will generate
+        a __svn_version__.py file in the current package directory.
+
+        Generate the package __svn_version__.py file from the SVN revision
+        number; it will be removed after python exits but will be available
+        when sdist, etc. commands are executed.
+
+ Notes
+ -----
+ If __svn_version__.py existed before, nothing is done.
+
+        This is intended for working with source directories that are in an
+        SVN repository.
+ """
+ target = njoin(self.local_path, '__svn_version__.py')
+ revision = self._get_svn_revision(self.local_path)
+ if os.path.isfile(target) or revision is None:
+ return
+ else:
+ def generate_svn_version_py():
+ if not os.path.isfile(target):
+ version = str(revision)
+ self.info('Creating %s (version=%r)' % (target, version))
+ with open(target, 'w') as f:
+ f.write('version = %r\n' % (version))
+
+ def rm_file(f=target,p=self.info):
+ if delete:
+ try: os.remove(f); p('removed '+f)
+ except OSError: pass
+ try: os.remove(f+'c'); p('removed '+f+'c')
+ except OSError: pass
+
+ atexit.register(rm_file)
+
+ return target
+
+ self.add_data_files(('', generate_svn_version_py()))
+
+ def make_hg_version_py(self, delete=True):
+ """Appends a data function to the data_files list that will generate
+        a __hg_version__.py file in the current package directory.
+
+        Generate the package __hg_version__.py file from the Mercurial
+        revision; it will be removed after python exits but will be available
+        when sdist, etc. commands are executed.
+
+ Notes
+ -----
+ If __hg_version__.py existed before, nothing is done.
+
+ This is intended for working with source directories that are
+        in a Mercurial repository.
+ """
+ target = njoin(self.local_path, '__hg_version__.py')
+ revision = self._get_hg_revision(self.local_path)
+ if os.path.isfile(target) or revision is None:
+ return
+ else:
+ def generate_hg_version_py():
+ if not os.path.isfile(target):
+ version = str(revision)
+ self.info('Creating %s (version=%r)' % (target, version))
+ with open(target, 'w') as f:
+ f.write('version = %r\n' % (version))
+
+ def rm_file(f=target,p=self.info):
+ if delete:
+ try: os.remove(f); p('removed '+f)
+ except OSError: pass
+ try: os.remove(f+'c'); p('removed '+f+'c')
+ except OSError: pass
+
+ atexit.register(rm_file)
+
+ return target
+
+ self.add_data_files(('', generate_hg_version_py()))
+
+ def make_config_py(self,name='__config__'):
+ """Generate package __config__.py file containing system_info
+ information used during building the package.
+
+        This file is installed to the package installation directory.
+
+ """
+ self.py_modules.append((self.name, name, generate_config_py))
+
+ def get_info(self,*names):
+ """Get resources information.
+
+ Return information (from system_info.get_info) for all of the names in
+ the argument list in a single dictionary.
+ """
+ from .system_info import get_info, dict_append
+ info_dict = {}
+ for a in names:
+ dict_append(info_dict,**get_info(a))
+ return info_dict
+
+
+def get_cmd(cmdname, _cache={}):
+ if cmdname not in _cache:
+ import distutils.core
+ dist = distutils.core._setup_distribution
+ if dist is None:
+ from distutils.errors import DistutilsInternalError
+ raise DistutilsInternalError(
+ 'setup distribution instance not initialized')
+ cmd = dist.get_command_obj(cmdname)
+ _cache[cmdname] = cmd
+ return _cache[cmdname]
+
+def get_numpy_include_dirs():
+ # numpy_include_dirs are set by numpy/core/setup.py, otherwise []
+ include_dirs = Configuration.numpy_include_dirs[:]
+ if not include_dirs:
+ import numpy
+ include_dirs = [ numpy.get_include() ]
+ # else running numpy/core/setup.py
+ return include_dirs
+
+def get_npy_pkg_dir():
+ """Return the path where to find the npy-pkg-config directory.
+
+ If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
+ is returned. Otherwise, a path inside the location of the numpy module is
+ returned.
+
+ The NPY_PKG_CONFIG_PATH can be useful when cross-compiling: maintain
+ customized npy-pkg-config .ini files for the cross-compilation
+ environment and point the variable at them.
+
+ """
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d is not None:
+ return d
+ spec = importlib.util.find_spec('numpy')
+ d = os.path.join(os.path.dirname(spec.origin),
+ 'core', 'lib', 'npy-pkg-config')
+ return d
+
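+# A minimal usage sketch (hedged; the path below is hypothetical):
+# setting NPY_PKG_CONFIG_PATH overrides the default lookup, which is
+# handy when cross-compiling with customized .ini files.
+#
+#     os.environ['NPY_PKG_CONFIG_PATH'] = '/opt/cross/npy-pkg-config'
+#     get_npy_pkg_dir()  # -> '/opt/cross/npy-pkg-config'
+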
+def get_pkg_info(pkgname, dirs=None):
+ """
+ Return library info for the given package.
+
+ Parameters
+ ----------
+ pkgname : str
+ Name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini).
+ dirs : sequence, optional
+ If given, should be a sequence of additional directories where to look
+ for npy-pkg-config files. Those directories are searched prior to the
+ NumPy directory.
+
+ Returns
+ -------
+ pkginfo : class instance
+ The `LibraryInfo` instance containing the build information.
+
+ Raises
+ ------
+ PkgNotFound
+ If the package is not found.
+
+ See Also
+ --------
+ Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+ get_info
+
+ """
+ from numpy.distutils.npy_pkg_config import read_config
+
+ if dirs:
+ dirs.append(get_npy_pkg_dir())
+ else:
+ dirs = [get_npy_pkg_dir()]
+ return read_config(pkgname, dirs)
+
+def get_info(pkgname, dirs=None):
+ """
+ Return an info dict for a given C library.
+
+ The info dict contains the necessary options to use the C library.
+
+ Parameters
+ ----------
+ pkgname : str
+ Name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini).
+ dirs : sequence, optional
+ If given, should be a sequence of additional directories where to look
+ for npy-pkg-config files. Those directories are searched prior to the
+ NumPy directory.
+
+ Returns
+ -------
+ info : dict
+ The dictionary with build information.
+
+ Raises
+ ------
+ PkgNotFound
+ If the package is not found.
+
+ See Also
+ --------
+ Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+ get_pkg_info
+
+ Examples
+ --------
+ To get the necessary information for the npymath library from NumPy:
+
+ >>> npymath_info = np.distutils.misc_util.get_info('npymath')
+ >>> npymath_info #doctest: +SKIP
+ {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
+ ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
+
+ This info dict can then be used as input to a `Configuration` instance::
+
+ config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+
+ """
+ from numpy.distutils.npy_pkg_config import parse_flags
+ pkg_info = get_pkg_info(pkgname, dirs)
+
+ # Translate LibraryInfo instance into a build_info dict
+ info = parse_flags(pkg_info.cflags())
+ for k, v in parse_flags(pkg_info.libs()).items():
+ info[k].extend(v)
+
+ # add_extension's extra_info argument only accepts a fixed set of keys
+ info['define_macros'] = info['macros']
+ del info['macros']
+ del info['ignored']
+
+ return info
+
+def is_bootstrapping():
+ import builtins
+
+ try:
+ builtins.__NUMPY_SETUP__
+ return True
+ except AttributeError:
+ return False
+
+
+#########################
+
+def default_config_dict(name=None, parent_name=None, local_path=None):
+ """Return a configuration dictionary for usage in
+ configuration() function defined in file setup_<name>.py.
+ """
+ import warnings
+ warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
+ 'deprecated default_config_dict(%r,%r,%r)'
+ % (name, parent_name, local_path,
+ name, parent_name, local_path,
+ ), stacklevel=2)
+ c = Configuration(name, parent_name, local_path)
+ return c.todict()
+
+
+def dict_append(d, **kws):
+ for k, v in kws.items():
+ if k in d:
+ ov = d[k]
+ if isinstance(ov, str):
+ d[k] = v
+ else:
+ d[k].extend(v)
+ else:
+ d[k] = v
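+
+# A small illustrative sketch of the merge semantics (not from the
+# original source): list values are extended, string values are
+# replaced, and new keys are simply set.
+#
+#     d = {'libraries': ['m'], 'language': 'c'}
+#     dict_append(d, libraries=['npymath'], language='c99')
+#     # d['libraries'] -> ['m', 'npymath']; d['language'] -> 'c99'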
+
+def appendpath(prefix, path):
+ if os.path.sep != '/':
+ prefix = prefix.replace('/', os.path.sep)
+ path = path.replace('/', os.path.sep)
+ drive = ''
+ if os.path.isabs(path):
+ drive = os.path.splitdrive(prefix)[0]
+ absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
+ pathdrive, path = os.path.splitdrive(path)
+ d = os.path.commonprefix([absprefix, path])
+ if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
+ or os.path.join(path[:len(d)], path[len(d):]) != path:
+ # Handle invalid paths
+ d = os.path.dirname(d)
+ subpath = path[len(d):]
+ if os.path.isabs(subpath):
+ subpath = subpath[1:]
+ else:
+ subpath = path
+ return os.path.normpath(njoin(drive + prefix, subpath))
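+
+# Rough behaviour sketch (illustrative, assuming a POSIX os.sep): absolute
+# paths are re-rooted under the prefix, relative paths are simply joined.
+#
+#     appendpath('build/lib', 'pkg/sub')   # -> 'build/lib/pkg/sub'
+#     appendpath('build/lib', '/pkg/sub')  # -> 'build/lib/pkg/sub'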
+
+def generate_config_py(target):
+ """Generate config.py file containing system_info information
+ used during building the package.
+
+ Usage:
+ config['py_modules'].append((packagename, '__config__', generate_config_py))
+ """
+ from numpy.distutils.system_info import system_info
+ from distutils.dir_util import mkpath
+ mkpath(os.path.dirname(target))
+ with open(target, 'w') as f:
+ f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
+ f.write('# It contains system_info results at the time of building this package.\n')
+ f.write('__all__ = ["get_info","show"]\n\n')
+
+ # For gfortran+msvc combination, extra shared libraries may exist
+ f.write(textwrap.dedent("""
+ import os
+ import sys
+
+ extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
+
+ if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.add_dll_directory(extra_dll_dir)
+
+ """))
+
+ for k, i in system_info.saved_results.items():
+ f.write('%s=%r\n' % (k, i))
+ f.write(textwrap.dedent(r'''
+ def get_info(name):
+ g = globals()
+ return g.get(name, g.get(name + "_info", {}))
+
+ def show():
+ """
+ Show libraries in the system on which NumPy was built.
+
+ Print information about various resources (libraries, library
+ directories, include directories, etc.) in the system on which
+ NumPy was built.
+
+ See Also
+ --------
+ get_include : Returns the directory containing NumPy C
+ header files.
+
+ Notes
+ -----
+ 1. Classes specifying the information to be printed are defined
+ in the `numpy.distutils.system_info` module.
+
+ Information may include:
+
+ * ``language``: language used to write the libraries (mostly
+ C or f77)
+ * ``libraries``: names of libraries found in the system
+ * ``library_dirs``: directories containing the libraries
+ * ``include_dirs``: directories containing library header files
+ * ``src_dirs``: directories containing library source files
+ * ``define_macros``: preprocessor macros used by
+ ``distutils.setup``
+ * ``baseline``: minimum CPU features required
+ * ``found``: dispatched features supported in the system
+ * ``not found``: dispatched features that are not supported
+ in the system
+
+ 2. NumPy BLAS/LAPACK Installation Notes
+
+ Installing a numpy wheel (``pip install numpy`` or force it
+ via ``pip install numpy --only-binary :numpy: numpy``) includes
+ an OpenBLAS implementation of the BLAS and LAPACK linear algebra
+ APIs. In this case, ``library_dirs`` reports the original build
+ time configuration as compiled with gcc/gfortran; at run time
+ the OpenBLAS library is in
+ ``site-packages/numpy.libs/`` (linux), or
+ ``site-packages/numpy/.dylibs/`` (macOS), or
+ ``site-packages/numpy/.libs/`` (windows).
+
+ Installing numpy from source
+ (``pip install numpy --no-binary numpy``) searches for BLAS and
+ LAPACK dynamic link libraries at build time as influenced by
+ environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
+ NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
+ or the optional file ``~/.numpy-site.cfg``.
+ NumPy remembers those locations and expects to load the same
+ libraries at run-time.
+ In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
+ library) is in the default build-time search order after
+ 'openblas'.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.show_config()
+ blas_opt_info:
+ language = c
+ define_macros = [('HAVE_CBLAS', None)]
+ libraries = ['openblas', 'openblas']
+ library_dirs = ['/usr/local/lib']
+ """
+ from numpy.core._multiarray_umath import (
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+ )
+ for name,info_dict in globals().items():
+ if name[0] == "_" or type(info_dict) is not type({}): continue
+ print(name + ":")
+ if not info_dict:
+ print(" NOT AVAILABLE")
+ for k,v in info_dict.items():
+ v = str(v)
+ if k == "sources" and len(v) > 200:
+ v = v[:60] + " ...\n... " + v[-60:]
+ print(" %s = %s" % (k,v))
+
+ features_found, features_not_found = [], []
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ features_found.append(feature)
+ else:
+ features_not_found.append(feature)
+
+ print("Supported SIMD extensions in this NumPy install:")
+ print(" baseline = %s" % (','.join(__cpu_baseline__)))
+ print(" found = %s" % (','.join(features_found)))
+ print(" not found = %s" % (','.join(features_not_found)))
+
+ '''))
+
+ return target
+
+def msvc_version(compiler):
+ """Return version major and minor of compiler instance if it is
+ MSVC, raise an exception otherwise."""
+ if compiler.compiler_type != "msvc":
+ raise ValueError("Compiler instance is not msvc (%s)"
+ % compiler.compiler_type)
+ return compiler._MSVCCompiler__version
+
+def get_build_architecture():
+ # Importing distutils.msvccompiler triggers a warning on non-Windows
+ # systems, so delay the import to here.
+ from distutils.msvccompiler import get_build_architecture
+ return get_build_architecture()
+
+
+_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'}
+
+
+def sanitize_cxx_flags(cxxflags):
+ '''
+ Some flags are valid for C but not C++. Prune them.
+ '''
+ return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]
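+
+# For instance (illustrative only):
+#
+#     sanitize_cxx_flags(['-O2', '-std=c99', '-Wall'])  # -> ['-O2', '-Wall']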
+
+
+def exec_mod_from_location(modname, modfile):
+ '''
+ Use importlib machinery to import a module `modname` from the file
+ `modfile`. Depending on the `spec.loader`, the module may not be
+ registered in sys.modules.
+ '''
+ spec = importlib.util.spec_from_file_location(modname, modfile)
+ foo = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(foo)
+ return foo
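+
+# A hedged usage sketch (the module name and path are hypothetical):
+#
+#     mod = exec_mod_from_location('my_setup', '/path/to/my_setup.py')
+#     mod.configuration  # attributes of the executed module are available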
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py
new file mode 100644
index 00000000..68239495
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py
@@ -0,0 +1,63 @@
+import os
+from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler
+
+from .system_info import platform_bits
+
+
+def _merge(old, new):
+ """Concatenate two environment paths avoiding repeats.
+
+ Here `old` is the environment string before the base class initialize
+ function is called and `new` is the string after the call. The new string
+ will be a fixed string if it is not obtained from the current environment,
+ or the same as the old string if obtained from the same environment. The aim
+ here is not to append the new string if it is already contained in the old
+ string so as to limit the growth of the environment string.
+
+ Parameters
+ ----------
+ old : string
+ Previous environment string.
+ new : string
+ New environment string.
+
+ Returns
+ -------
+ ret : string
+ Updated environment string.
+
+ """
+ if not old:
+ return new
+ if new in old:
+ return old
+
+ # Neither new nor old is empty. Give old priority.
+ return ';'.join([old, new])
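+
+# Behaviour sketch (illustrative values, not from the original source):
+#
+#     _merge('', 'C:/new')        # -> 'C:/new'
+#     _merge('C:/old', 'C:/old')  # -> 'C:/old' (already contained)
+#     _merge('C:/old', 'C:/new')  # -> 'C:/old;C:/new'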
+
+
+class MSVCCompiler(_MSVCCompiler):
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ _MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+ def initialize(self, plat_name=None):
+ # The 'lib' and 'include' variables may be overwritten
+ # by MSVCCompiler.initialize, so save them for later merge.
+ environ_lib = os.getenv('lib')
+ environ_include = os.getenv('include')
+ _MSVCCompiler.initialize(self, plat_name)
+
+ # Merge current and previous values of 'lib' and 'include'
+ os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
+ os.environ['include'] = _merge(environ_include, os.environ['include'])
+
+ # msvc9 building for 32 bits requires SSE2 to work around a
+ # compiler bug.
+ if platform_bits == 32:
+ self.compile_options += ['/arch:SSE2']
+ self.compile_options_debug += ['/arch:SSE2']
+
+ def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
+ ld_args.append('/MANIFEST')
+ _MSVCCompiler.manifest_setup_ldargs(self, output_filename,
+ build_temp, ld_args)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py
new file mode 100644
index 00000000..2b93221b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py
@@ -0,0 +1,76 @@
+import os
+from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler
+
+from .system_info import platform_bits
+
+
+def _merge(old, new):
+ """Concatenate two environment paths avoiding repeats.
+
+ Here `old` is the environment string before the base class initialize
+ function is called and `new` is the string after the call. The new string
+ will be a fixed string if it is not obtained from the current environment,
+ or the same as the old string if obtained from the same environment. The aim
+ here is not to append the new string if it is already contained in the old
+ string so as to limit the growth of the environment string.
+
+ Parameters
+ ----------
+ old : string
+ Previous environment string.
+ new : string
+ New environment string.
+
+ Returns
+ -------
+ ret : string
+ Updated environment string.
+
+ """
+ if new in old:
+ return old
+ if not old:
+ return new
+
+ # Neither new nor old is empty. Give old priority.
+ return ';'.join([old, new])
+
+
+class MSVCCompiler(_MSVCCompiler):
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ _MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+ def initialize(self):
+ # The 'lib' and 'include' variables may be overwritten
+ # by MSVCCompiler.initialize, so save them for later merge.
+ environ_lib = os.getenv('lib', '')
+ environ_include = os.getenv('include', '')
+ _MSVCCompiler.initialize(self)
+
+ # Merge current and previous values of 'lib' and 'include'
+ os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
+ os.environ['include'] = _merge(environ_include, os.environ['include'])
+
+ # msvc9 building for 32 bits requires SSE2 to work around a
+ # compiler bug.
+ if platform_bits == 32:
+ self.compile_options += ['/arch:SSE2']
+ self.compile_options_debug += ['/arch:SSE2']
+
+
+def lib_opts_if_msvc(build_cmd):
+ """ Add flags if we are using MSVC compiler
+
+ We can't see `build_cmd` in our scope, because we have not initialized
+ the distutils build command, so use this deferred calculation to run
+ when we are building the library.
+ """
+ if build_cmd.compiler.compiler_type != 'msvc':
+ return []
+ # Explicitly disable whole-program optimization.
+ flags = ['/GL-']
+ # Disable voltbl section for vc142 to allow link using mingw-w64; see:
+ # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171
+ if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']):
+ flags.append('-d2VolatileMetadata-')
+ return flags
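+
+# A hedged sketch of intended use: pass this function itself where a build
+# step accepts deferred flag callables, so it runs once build_cmd.compiler
+# actually exists (the build_cmd object below is hypothetical):
+#
+#     extra_flags = lib_opts_if_msvc(build_cmd)  # [] on non-MSVC builds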
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py b/venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py
new file mode 100644
index 00000000..f6e3ad39
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py
@@ -0,0 +1,437 @@
+import sys
+import re
+import os
+
+from configparser import RawConfigParser
+
+__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
+ 'read_config', 'parse_flags']
+
+_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
+
+class FormatError(OSError):
+ """
+ Exception thrown when there is a problem parsing a configuration file.
+
+ """
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+class PkgNotFound(OSError):
+ """Exception raised when a package can not be located."""
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+def parse_flags(line):
+ """
+ Parse a line from a config file containing compile flags.
+
+ Parameters
+ ----------
+ line : str
+ A single line containing one or more compile flags.
+
+ Returns
+ -------
+ d : dict
+ Dictionary of parsed flags, split into relevant categories.
+ These categories are the keys of `d`:
+
+ * 'include_dirs'
+ * 'library_dirs'
+ * 'libraries'
+ * 'macros'
+ * 'ignored'
+
+ """
+ d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
+ 'macros': [], 'ignored': []}
+
+ flags = (' ' + line).split(' -')
+ for flag in flags:
+ flag = '-' + flag
+ if len(flag) > 1:  # skip the empty element produced by the split
+ if flag.startswith('-I'):
+ d['include_dirs'].append(flag[2:].strip())
+ elif flag.startswith('-L'):
+ d['library_dirs'].append(flag[2:].strip())
+ elif flag.startswith('-l'):
+ d['libraries'].append(flag[2:].strip())
+ elif flag.startswith('-D'):
+ d['macros'].append(flag[2:].strip())
+ else:
+ d['ignored'].append(flag)
+
+ return d
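+
+# Illustrative example (not in the original source):
+#
+#     parse_flags('-I/usr/include -L/usr/lib -lm -DFOO')
+#     # -> {'include_dirs': ['/usr/include'], 'library_dirs': ['/usr/lib'],
+#     #     'libraries': ['m'], 'macros': ['FOO'], 'ignored': []}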
+
+def _escape_backslash(val):
+ return val.replace('\\', '\\\\')
+
+class LibraryInfo:
+ """
+ Object containing build information about a library.
+
+ Parameters
+ ----------
+ name : str
+ The library name.
+ description : str
+ Description of the library.
+ version : str
+ Version string.
+ sections : dict
+ The sections of the configuration file for the library. The keys are
+ the section headers, the values the text under each header.
+ vars : class instance
+ A `VariableSet` instance, which contains ``(name, value)`` pairs for
+ variables defined in the configuration file for the library.
+ requires : sequence, optional
+ The required libraries for the library to be installed.
+
+ Notes
+ -----
+ All input parameters (except "sections" which is a method) are available as
+ attributes of the same name.
+
+ """
+ def __init__(self, name, description, version, sections, vars, requires=None):
+ self.name = name
+ self.description = description
+ if requires:
+ self.requires = requires
+ else:
+ self.requires = []
+ self.version = version
+ self._sections = sections
+ self.vars = vars
+
+ def sections(self):
+ """
+ Return the section headers of the config file.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ keys : list of str
+ The list of section headers.
+
+ """
+ return list(self._sections.keys())
+
+ def cflags(self, section="default"):
+ val = self.vars.interpolate(self._sections[section]['cflags'])
+ return _escape_backslash(val)
+
+ def libs(self, section="default"):
+ val = self.vars.interpolate(self._sections[section]['libs'])
+ return _escape_backslash(val)
+
+ def __str__(self):
+ m = ['Name: %s' % self.name, 'Description: %s' % self.description]
+ if self.requires:
+ m.append('Requires: %s' % ",".join(self.requires))
+ else:
+ m.append('Requires:')
+ m.append('Version: %s' % self.version)
+
+ return "\n".join(m)
+
+class VariableSet:
+ """
+ Container object for the variables defined in a config file.
+
+ `VariableSet` can be used as a plain dictionary, with the variable names
+ as keys.
+
+ Parameters
+ ----------
+ d : dict
+ Dict of items in the "variables" section of the configuration file.
+
+ """
+ def __init__(self, d):
+ self._raw_data = dict(d)
+
+ self._re = {}
+ self._re_sub = {}
+
+ self._init_parse()
+
+ def _init_parse(self):
+ for k, v in self._raw_data.items():
+ self._init_parse_var(k, v)
+
+ def _init_parse_var(self, name, value):
+ self._re[name] = re.compile(r'\$\{%s\}' % name)
+ self._re_sub[name] = value
+
+ def interpolate(self, value):
+ # Brute force: we keep interpolating until there is no '${var}' anymore
+ # or until interpolated string is equal to input string
+ def _interpolate(value):
+ for k in self._re.keys():
+ value = self._re[k].sub(self._re_sub[k], value)
+ return value
+ while _VAR.search(value):
+ nvalue = _interpolate(value)
+ if nvalue == value:
+ break
+ value = nvalue
+
+ return value
+
+ def variables(self):
+ """
+ Return the list of variable names.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ names : list of str
+ The names of all variables in the `VariableSet` instance.
+
+ """
+ return list(self._raw_data.keys())
+
+ # Emulate a dict to set/get variables values
+ def __getitem__(self, name):
+ return self._raw_data[name]
+
+ def __setitem__(self, name, value):
+ self._raw_data[name] = value
+ self._init_parse_var(name, value)
+
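+# A short interpolation sketch (illustrative values):
+#
+#     vs = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
+#     vs.interpolate('-L${libdir} -lnpymath')  # -> '-L/usr/lib -lnpymath'
+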
+def parse_meta(config):
+ if not config.has_section('meta'):
+ raise FormatError("No meta section found!")
+
+ d = dict(config.items('meta'))
+
+ for k in ['name', 'description', 'version']:
+ if k not in d:
+ raise FormatError("Option %s (section [meta]) is mandatory, "
+ "but not found" % k)
+
+ if 'requires' not in d:
+ d['requires'] = []
+
+ return d
+
+def parse_variables(config):
+ if not config.has_section('variables'):
+ raise FormatError("No variables section found!")
+
+ d = {}
+
+ for name, value in config.items("variables"):
+ d[name] = value
+
+ return VariableSet(d)
+
+def parse_sections(config):
+ # Return the names of the "normal" sections, i.e. everything except
+ # the special 'meta' and 'variables' sections.
+ return [s for s in config.sections() if s not in ('meta', 'variables')]
+
+def pkg_to_filename(pkg_name):
+ return "%s.ini" % pkg_name
+
+def parse_config(filename, dirs=None):
+ if dirs:
+ filenames = [os.path.join(d, filename) for d in dirs]
+ else:
+ filenames = [filename]
+
+ config = RawConfigParser()
+
+ n = config.read(filenames)
+ if not n:
+ raise PkgNotFound("Could not find file(s) %s" % str(filenames))
+
+ # Parse meta and variables sections
+ meta = parse_meta(config)
+
+ vars = {}
+ if config.has_section('variables'):
+ for name, value in config.items("variables"):
+ vars[name] = _escape_backslash(value)
+
+ # Parse "normal" sections
+ secs = [s for s in config.sections() if s not in ['meta', 'variables']]
+ sections = {}
+
+ requires = {}
+ for s in secs:
+ d = {}
+ if config.has_option(s, "requires"):
+ requires[s] = config.get(s, 'requires')
+
+ for name, value in config.items(s):
+ d[name] = value
+ sections[s] = d
+
+ return meta, vars, sections, requires
+
+def _read_config_imp(filenames, dirs=None):
+ def _read_config(f):
+ meta, vars, sections, reqs = parse_config(f, dirs)
+ # recursively add sections and variables of required libraries
+ for rname, rvalue in reqs.items():
+ nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
+
+ # Update var dict for variables not in 'top' config file
+ for k, v in nvars.items():
+ if k not in vars:
+ vars[k] = v
+
+ # Update sec dict
+ for oname, ovalue in nsections[rname].items():
+ if ovalue:
+ sections[rname][oname] += ' %s' % ovalue
+
+ return meta, vars, sections, reqs
+
+ meta, vars, sections, reqs = _read_config(filenames)
+
+ # FIXME: document this. If pkgname is defined in the variables section, and
+ # there is no pkgdir variable defined, pkgdir is automatically defined to
+ # the path of pkgname. This requires the package to be imported to work
+ if 'pkgdir' not in vars and "pkgname" in vars:
+ pkgname = vars["pkgname"]
+ if pkgname not in sys.modules:
+ raise ValueError("You should import %s to get information on %s" %
+ (pkgname, meta["name"]))
+
+ mod = sys.modules[pkgname]
+ vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
+
+ return LibraryInfo(name=meta["name"], description=meta["description"],
+ version=meta["version"], sections=sections, vars=VariableSet(vars))
+
+# Trivial cache for LibraryInfo instance creation. To be really
+# efficient, the cache should be handled in read_config, since the same
+# file can be parsed many times outside LibraryInfo creation, but I doubt
+# this will be a problem in practice
+_CACHE = {}
+def read_config(pkgname, dirs=None):
+ """
+ Return library info for a package from its configuration file.
+
+ Parameters
+ ----------
+ pkgname : str
+ Name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini).
+ dirs : sequence, optional
+ If given, should be a sequence of directories - usually including
+ the NumPy base directory - where to look for npy-pkg-config files.
+
+ Returns
+ -------
+ pkginfo : class instance
+ The `LibraryInfo` instance containing the build information.
+
+ Raises
+ ------
+ PkgNotFound
+ If the package is not found.
+
+ See Also
+ --------
+ misc_util.get_info, misc_util.get_pkg_info
+
+ Examples
+ --------
+ >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
+ >>> type(npymath_info)
+ <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
+ >>> print(npymath_info)
+ Name: npymath
+ Description: Portable, core math library implementing C99 standard
+ Requires:
+ Version: 0.1 #random
+
+ """
+ try:
+ return _CACHE[pkgname]
+ except KeyError:
+ v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+ _CACHE[pkgname] = v
+ return v
+
+# TODO:
+# - implements version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+ from optparse import OptionParser
+ import glob
+
+ parser = OptionParser()
+ parser.add_option("--cflags", dest="cflags", action="store_true",
+ help="output all preprocessor and compiler flags")
+ parser.add_option("--libs", dest="libs", action="store_true",
+ help="output all linker flags")
+ parser.add_option("--use-section", dest="section",
+ help="use this section instead of default for options")
+ parser.add_option("--version", dest="version", action="store_true",
+ help="output version")
+ parser.add_option("--atleast-version", dest="min_version",
+ help="Minimal version")
+ parser.add_option("--list-all", dest="list_all", action="store_true",
+ help="list all packages found in the current directory")
+ parser.add_option("--define-variable", dest="define_variable",
+ help="Replace variable with the given value")
+
+ (options, args) = parser.parse_args(sys.argv)
+
+ if len(args) < 2:
+ raise ValueError("Expected a package name on the command line")
+
+ if options.list_all:
+ files = glob.glob("*.ini")
+ for f in files:
+ info = read_config(f)
+ print("%s\t%s - %s" % (info.name, info.name, info.description))
+
+ pkg_name = args[1]
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d:
+ info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
+ else:
+ info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+
+ if options.section:
+ section = options.section
+ else:
+ section = "default"
+
+ if options.define_variable:
+ m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+ if not m:
+ raise ValueError("--define-variable option should be of "
+ "the form --define-variable=foo=bar")
+ else:
+ name = m.group(1)
+ value = m.group(2)
+ info.vars[name] = value
+
+ if options.cflags:
+ print(info.cflags(section))
+ if options.libs:
+ print(info.libs(section))
+ if options.version:
+ print(info.version)
+ if options.min_version:
+ print(info.version >= options.min_version)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py b/venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py
new file mode 100644
index 00000000..ea818265
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py
@@ -0,0 +1,17 @@
+# XXX: Handle setuptools ?
+from distutils.core import Distribution
+
+# This class is used because we add new files (sconscripts, and so on) with the
+# scons command
+class NumpyDistribution(Distribution):
+ def __init__(self, attrs = None):
+ # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
+ self.scons_data = []
+ # A list of installable libraries
+ self.installed_libraries = []
+ # A dict of pkg_config files to generate/install
+ self.installed_pkg_config = {}
+ Distribution.__init__(self, attrs)
+
+ def has_scons_scripts(self):
+ return bool(self.scons_data)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py
new file mode 100644
index 00000000..48051810
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py
@@ -0,0 +1,21 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class PathScaleCCompiler(UnixCCompiler):
+
+ """
+ PathScale compiler compatible with a gcc-built Python.
+ """
+
+ compiler_type = 'pathcc'
+ cc_exe = 'pathcc'
+ cxx_exe = 'pathCC'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+ cc_compiler = self.cc_exe
+ cxx_compiler = self.cxx_exe
+ self.set_executables(compiler=cc_compiler,
+ compiler_so=cc_compiler,
+ compiler_cxx=cxx_compiler,
+ linker_exe=cc_compiler,
+ linker_so=cc_compiler + ' -shared')
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/setup.py b/venv/lib/python3.9/site-packages/numpy/distutils/setup.py
new file mode 100644
index 00000000..522756fc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/setup.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('distutils', parent_package, top_path)
+ config.add_subpackage('command')
+ config.add_subpackage('fcompiler')
+ config.add_subpackage('tests')
+ config.add_data_files('site.cfg')
+ config.add_data_files('mingw/gfortran_vs2003_hack.c')
+ config.add_data_dir('checks')
+ config.add_data_files('*.pyi')
+ config.make_config_py()
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/system_info.py b/venv/lib/python3.9/site-packages/numpy/distutils/system_info.py
new file mode 100644
index 00000000..d5a1687d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/system_info.py
@@ -0,0 +1,3172 @@
+#!/usr/bin/env python3
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Usage:
+ info_dict = get_info(<name>)
+ where <name> is a string 'atlas','x11','fftw','lapack','blas',
+ 'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+ see the definition of get_info() function below.
+
+ Returned info_dict is a dictionary which is compatible with
+ distutils.setup keyword arguments. If info_dict == {}, then the
+ requested resource is not available (system_info could not find it).
+
+ Several *_info classes use an environment variable to specify
+ the location of the software. When the corresponding environment
+ variable is set to 'None', the software will be ignored, even when
+ it is available on the system.
+
+Global parameters:
+ system_info.search_static_first - search static libraries (.a)
+ in precedence to shared ones (.so, .sl) if enabled.
+ system_info.verbosity - output the results to stdout if enabled.
+
+The file 'site.cfg' is looked for in
+
+1) Directory of main setup.py file being run.
+2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
+3) System wide directory (location of this file...)
+
+ The first one found is used to get system configuration options. The
+ format is that used by ConfigParser (i.e., Windows .INI style). The
+ section ALL is not intended for general use.
+
+Appropriate defaults are used if nothing is specified.
+
+The order of finding the locations of resources is the following:
+ 1. environment variable
+ 2. section in site.cfg
+ 3. DEFAULT section in site.cfg
+ 4. System default search paths (see ``default_*`` variables below).
+Only the first complete match is returned.
+
+Currently, the following classes are available, along with their section names:
+
+ Numeric_info:Numeric
+ _numpy_info:Numeric
+ _pkg_config_info:None
+ accelerate_info:accelerate
+ agg2_info:agg2
+ amd_info:amd
+ atlas_3_10_blas_info:atlas
+ atlas_3_10_blas_threads_info:atlas
+ atlas_3_10_info:atlas
+ atlas_3_10_threads_info:atlas
+ atlas_blas_info:atlas
+ atlas_blas_threads_info:atlas
+ atlas_info:atlas
+ atlas_threads_info:atlas
+ blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix)
+ blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS)
+ blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix)
+ blas_info:blas
+ blas_mkl_info:mkl
+ blas_opt_info:ALL # usage recommended
+ blas_src_info:blas_src
+ blis_info:blis
+ boost_python_info:boost_python
+ dfftw_info:fftw
+ dfftw_threads_info:fftw
+ djbfft_info:djbfft
+ f2py_info:ALL
+ fft_opt_info:ALL
+ fftw2_info:fftw
+ fftw3_info:fftw3
+ fftw_info:fftw
+ fftw_threads_info:fftw
+ flame_info:flame
+ freetype2_info:freetype2
+ gdk_2_info:gdk_2
+ gdk_info:gdk
+ gdk_pixbuf_2_info:gdk_pixbuf_2
+ gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2
+ gdk_x11_2_info:gdk_x11_2
+ gtkp_2_info:gtkp_2
+ gtkp_x11_2_info:gtkp_x11_2
+ lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)
+ lapack_atlas_3_10_info:atlas
+ lapack_atlas_3_10_threads_info:atlas
+ lapack_atlas_info:atlas
+ lapack_atlas_threads_info:atlas
+ lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK)
+ lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix)
+ lapack_info:lapack
+ lapack_mkl_info:mkl
+ lapack_opt_info:ALL # usage recommended
+ lapack_src_info:lapack_src
+ mkl_info:mkl
+ numarray_info:numarray
+ numerix_info:numerix
+ numpy_info:numpy
+ openblas64__info:openblas64_
+ openblas64__lapack_info:openblas64_
+ openblas_clapack_info:openblas
+ openblas_ilp64_info:openblas_ilp64
+ openblas_ilp64_lapack_info:openblas_ilp64
+ openblas_info:openblas
+ openblas_lapack_info:openblas
+ sfftw_info:fftw
+ sfftw_threads_info:fftw
+ system_info:ALL
+ umfpack_info:umfpack
+ wx_info:wx
+ x11_info:x11
+ xft_info:xft
+
+Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER
+and NPY_LAPACK_ORDER environment variables to determine the order in which
+specific BLAS and LAPACK libraries are searched for.
+
+This search (or autodetection) can be bypassed by defining the environment
+ variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the
+ exact linker flags to use (language will be set to F77). This is useful,
+ for instance, for building against Netlib BLAS/LAPACK or stub files, in
+ order to be able to switch BLAS and LAPACK implementations at runtime.
+ If using this to build NumPy itself, it is
+ recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a
+CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized
+otherwise).
+
+Example:
+----------
+[DEFAULT]
+# default section
+library_dirs = /usr/lib:/usr/local/lib:/opt/lib
+include_dirs = /usr/include:/usr/local/include:/opt/include
+src_dirs = /usr/local/src:/opt/src
+# search static libraries (.a) in preference to shared ones (.so)
+search_static_first = 0
+
+[fftw]
+libraries = rfftw, fftw
+
+[atlas]
+library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
+# for overriding the names of the atlas libraries
+libraries = lapack, f77blas, cblas, atlas
+
+[x11]
+library_dirs = /usr/X11R6/lib
+include_dirs = /usr/X11R6/include
+----------
+
+Note that the ``libraries`` key is the default setting for libraries.
+
+Authors:
+ Pearu Peterson <pearu@cens.ioc.ee>, February 2002
+ David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+"""
+import sys
+import os
+import re
+import copy
+import warnings
+import subprocess
+import textwrap
+
+from glob import glob
+from functools import reduce
+from configparser import NoOptionError
+from configparser import RawConfigParser as ConfigParser
+# It seems that some people are importing ConfigParser from here, so it
+# is good to keep its class name. Use of RawConfigParser is needed in
+# order to be able to load path names with a percent in them, like
+# `feature%2Fcool`, which is common in git flow branch names.
+
+from distutils.errors import DistutilsError
+from distutils.dist import Distribution
+import sysconfig
+from numpy.distutils import log
+from distutils.util import get_platform
+
+from numpy.distutils.exec_command import (
+ find_executable, filepath_from_subprocess_output,
+ )
+from numpy.distutils.misc_util import (is_sequence, is_string,
+ get_shared_lib_extension)
+from numpy.distutils.command.config import config as cmd_config
+from numpy.distutils import customized_ccompiler as _customized_ccompiler
+from numpy.distutils import _shell_utils
+import distutils.ccompiler
+import tempfile
+import shutil
+
+__all__ = ['system_info']
+
+# Determine number of bits
+import platform
+_bits = {'32bit': 32, '64bit': 64}
+platform_bits = _bits[platform.architecture()[0]]
+
+
+global_compiler = None
+
+def customized_ccompiler():
+ global global_compiler
+ if not global_compiler:
+ global_compiler = _customized_ccompiler()
+ return global_compiler
+
+
+def _c_string_literal(s):
+ """
+ Convert a python string into a literal suitable for inclusion into C code
+ """
+ # only these three characters are forbidden in C strings
+ s = s.replace('\\', r'\\')
+ s = s.replace('"', r'\"')
+ s = s.replace('\n', r'\n')
+ return '"{}"'.format(s)
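+
+# For example (illustrative): the result is a double-quoted C literal with
+# backslashes, quotes and newlines escaped:
+#
+#     _c_string_literal('dir with "quotes"\n')
+#     # -> the C literal "dir with \"quotes\"\n"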
+
+
+def libpaths(paths, bits):
+ """Return a list of library paths valid on 32 or 64 bit systems.
+
+ Inputs:
+ paths : sequence
+ A sequence of strings (typically paths)
+ bits : int
+ An integer, the only valid values are 32 or 64. A ValueError exception
+ is raised otherwise.
+
+ Examples:
+
+ Consider a list of directories
+ >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
+
+ For a 32-bit platform, this is already valid:
+ >>> np.distutils.system_info.libpaths(paths,32)
+ ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
+
+ On 64 bits, each path also gets a '64'-suffixed variant, tried first:
+ >>> np.distutils.system_info.libpaths(paths,64)
+ ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
+ '/usr/lib64', '/usr/lib']
+ """
+ if bits not in (32, 64):
+ raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
+
+ # Handle 32bit case
+ if bits == 32:
+ return paths
+
+ # Handle 64bit case
+ out = []
+ for p in paths:
+ out.extend([p + '64', p])
+
+ return out
+
+
+if sys.platform == 'win32':
+ default_lib_dirs = ['C:\\',
+ os.path.join(sysconfig.get_config_var('exec_prefix'),
+ 'libs')]
+ default_runtime_dirs = []
+ default_include_dirs = []
+ default_src_dirs = ['.']
+ default_x11_lib_dirs = []
+ default_x11_include_dirs = []
+ _include_dirs = [
+ 'include',
+ 'include/suitesparse',
+ ]
+ _lib_dirs = [
+ 'lib',
+ ]
+
+ _include_dirs = [d.replace('/', os.sep) for d in _include_dirs]
+ _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs]
+ def add_system_root(library_root):
+ """Add a package manager root to the include directories"""
+ global default_lib_dirs
+ global default_include_dirs
+
+ library_root = os.path.normpath(library_root)
+
+ default_lib_dirs.extend(
+ os.path.join(library_root, d) for d in _lib_dirs)
+ default_include_dirs.extend(
+ os.path.join(library_root, d) for d in _include_dirs)
+
+ # vcpkg is the de-facto package manager on Windows for C/C++
+ # libraries. If it is on the PATH, then we append its paths here.
+ vcpkg = shutil.which('vcpkg')
+ if vcpkg:
+ vcpkg_dir = os.path.dirname(vcpkg)
+ if platform.architecture()[0] == '32bit':
+ specifier = 'x86'
+ else:
+ specifier = 'x64'
+
+ vcpkg_installed = os.path.join(vcpkg_dir, 'installed')
+ for vcpkg_root in [
+ os.path.join(vcpkg_installed, specifier + '-windows'),
+ os.path.join(vcpkg_installed, specifier + '-windows-static'),
+ ]:
+ add_system_root(vcpkg_root)
+
+ # Conda is another popular package manager that provides libraries
+ conda = shutil.which('conda')
+ if conda:
+ conda_dir = os.path.dirname(conda)
+ add_system_root(os.path.join(conda_dir, '..', 'Library'))
+ add_system_root(os.path.join(conda_dir, 'Library'))
+
+else:
+ default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
+ '/opt/local/lib', '/sw/lib'], platform_bits)
+ default_runtime_dirs = []
+ default_include_dirs = ['/usr/local/include',
+ '/opt/include',
+ # path of umfpack under macports
+ '/opt/local/include/ufsparse',
+ '/opt/local/include', '/sw/include',
+ '/usr/include/suitesparse']
+ default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
+
+ default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
+ '/usr/lib'], platform_bits)
+ default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include']
+
+ if os.path.exists('/usr/lib/X11'):
+ globbed_x11_dir = glob('/usr/lib/*/libX11.so')
+ if globbed_x11_dir:
+ x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
+ default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
+ default_x11_include_dirs.extend(['/usr/lib/X11/include',
+ '/usr/include/X11'])
+
+ with open(os.devnull, 'w') as tmp:
+ try:
+ p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE,
+ stderr=tmp)
+ except (OSError, DistutilsError):
+ # OSError if gcc is not installed, or SandboxViolation (DistutilsError
+ # subclass) if an old setuptools bug is triggered (see gh-3160).
+ pass
+ else:
+ triplet = str(p.communicate()[0].decode().strip())
+ if p.returncode == 0:
+ # gcc supports the "-print-multiarch" option
+ default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+ default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+
+
+if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
+ default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
+ default_include_dirs.append(os.path.join(sys.prefix, 'include'))
+ default_src_dirs.append(os.path.join(sys.prefix, 'src'))
+
+default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
+default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
+default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
+default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
+
+so_ext = get_shared_lib_extension()
+
+
+def get_standard_file(fname):
+ """Returns a list of files named 'fname' from
+ 1) System-wide directory (directory-location of this module)
+ 2) Users HOME directory (os.environ['HOME'])
+ 3) Local directory
+ """
+ # System-wide file
+ filenames = []
+ try:
+ f = __file__
+ except NameError:
+ f = sys.argv[0]
+ sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
+ fname)
+ if os.path.isfile(sysfile):
+ filenames.append(sysfile)
+
+ # Home directory
+ # And look for the user config file
+ try:
+ f = os.path.expanduser('~')
+ except KeyError:
+ pass
+ else:
+ user_file = os.path.join(f, fname)
+ if os.path.isfile(user_file):
+ filenames.append(user_file)
+
+ # Local file
+ if os.path.isfile(fname):
+ filenames.append(os.path.abspath(fname))
+
+ return filenames
+
+
+def _parse_env_order(base_order, env):
+ """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
+
+ This method will split the environment variable and check its
+ individual elements against `base_order`.
+
+ Items in the environment variable may be negated via a single leading
+ '^' or '!' (e.g. '^itema,itemb'); the prefix must come first and then
+ negates all listed options.
+
+ Raises
+ ------
+ ValueError: for mixed negated and non-negated orders or multiple negated orders
+
+ Parameters
+ ----------
+ base_order : list of str
+ the base list of orders
+ env : str
+ the environment variable to be parsed; if it is not set,
+ `base_order` is returned
+
+ Returns
+ -------
+ allow_order : list of str
+ allowed orders in lower-case
+ unknown_order : list of str
+ for values not overlapping with `base_order`
+ """
+ order_str = os.environ.get(env, None)
+
+ # ensure all base-orders are lower-case (for easier comparison)
+ base_order = [order.lower() for order in base_order]
+ if order_str is None:
+ return base_order, []
+
+ neg = order_str.startswith('^') or order_str.startswith('!')
+ # Check format
+ order_str_l = list(order_str)
+ sum_neg = order_str_l.count('^') + order_str_l.count('!')
+ if neg:
+ if sum_neg > 1:
+ raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
+ # remove prefix
+ order_str = order_str[1:]
+ elif sum_neg > 0:
+ raise ValueError(f"Environment variable '{env}' may not mix negated and non-negated items: {order_str}")
+
+ # Split and lower case
+ orders = order_str.lower().split(',')
+
+ # to inform the caller about non-overlapping elements
+ unknown_order = []
+
+ # if negated, we have to remove from the order
+ if neg:
+ allow_order = base_order.copy()
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order in allow_order:
+ allow_order.remove(order)
+
+ else:
+ allow_order = []
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order not in allow_order:
+ allow_order.append(order)
+
+ return allow_order, unknown_order
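+
+# Behaviour sketch (hypothetical environment values):
+#
+#     # with NPY_BLAS_ORDER='mkl,openblas':
+#     _parse_env_order(['mkl', 'openblas', 'atlas'], 'NPY_BLAS_ORDER')
+#     # -> (['mkl', 'openblas'], [])
+#     # with NPY_BLAS_ORDER='^mkl' the result is (['openblas', 'atlas'], []);
+#     # unknown entries are returned in the second element instead of raising.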
+
+
+def get_info(name, notfound_action=0):
+ """
+ notfound_action:
+ 0 - do nothing
+ 1 - display warning message
+ 2 - raise error
+ """
+ cl = {'armpl': armpl_info,
+ 'blas_armpl': blas_armpl_info,
+ 'lapack_armpl': lapack_armpl_info,
+ 'fftw3_armpl': fftw3_armpl_info,
+ 'atlas': atlas_info, # use lapack_opt or blas_opt instead
+ 'atlas_threads': atlas_threads_info, # ditto
+ 'atlas_blas': atlas_blas_info,
+ 'atlas_blas_threads': atlas_blas_threads_info,
+ 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead
+ 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto
+ 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead
+ 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto
+ 'atlas_3_10_blas': atlas_3_10_blas_info,
+ 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
+ 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
+ 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
+ 'flame': flame_info, # use lapack_opt instead
+ 'mkl': mkl_info,
+ # openblas which may or may not have embedded lapack
+ 'openblas': openblas_info, # use blas_opt instead
+ # openblas with embedded lapack
+ 'openblas_lapack': openblas_lapack_info, # use blas_opt instead
+ 'openblas_clapack': openblas_clapack_info, # use blas_opt instead
+ 'blis': blis_info, # use blas_opt instead
+ 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
+ 'blas_mkl': blas_mkl_info, # use blas_opt instead
+ 'accelerate': accelerate_info, # use blas_opt instead
+ 'openblas64_': openblas64__info,
+ 'openblas64__lapack': openblas64__lapack_info,
+ 'openblas_ilp64': openblas_ilp64_info,
+ 'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
+ 'x11': x11_info,
+ 'fft_opt': fft_opt_info,
+ 'fftw': fftw_info,
+ 'fftw2': fftw2_info,
+ 'fftw3': fftw3_info,
+ 'dfftw': dfftw_info,
+ 'sfftw': sfftw_info,
+ 'fftw_threads': fftw_threads_info,
+ 'dfftw_threads': dfftw_threads_info,
+ 'sfftw_threads': sfftw_threads_info,
+ 'djbfft': djbfft_info,
+ 'blas': blas_info, # use blas_opt instead
+ 'lapack': lapack_info, # use lapack_opt instead
+ 'lapack_src': lapack_src_info,
+ 'blas_src': blas_src_info,
+ 'numpy': numpy_info,
+ 'f2py': f2py_info,
+ 'Numeric': Numeric_info,
+ 'numeric': Numeric_info,
+ 'numarray': numarray_info,
+ 'numerix': numerix_info,
+ 'lapack_opt': lapack_opt_info,
+ 'lapack_ilp64_opt': lapack_ilp64_opt_info,
+ 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
+ 'lapack64__opt': lapack64__opt_info,
+ 'blas_opt': blas_opt_info,
+ 'blas_ilp64_opt': blas_ilp64_opt_info,
+ 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
+ 'blas64__opt': blas64__opt_info,
+ 'boost_python': boost_python_info,
+ 'agg2': agg2_info,
+ 'wx': wx_info,
+ 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
+ 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
+ 'gdk_pixbuf_2': gdk_pixbuf_2_info,
+ 'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
+ 'gdk': gdk_info,
+ 'gdk_2': gdk_2_info,
+ 'gdk-2.0': gdk_2_info,
+ 'gdk_x11_2': gdk_x11_2_info,
+ 'gdk-x11-2.0': gdk_x11_2_info,
+ 'gtkp_x11_2': gtkp_x11_2_info,
+ 'gtk+-x11-2.0': gtkp_x11_2_info,
+ 'gtkp_2': gtkp_2_info,
+ 'gtk+-2.0': gtkp_2_info,
+ 'xft': xft_info,
+ 'freetype2': freetype2_info,
+ 'umfpack': umfpack_info,
+ 'amd': amd_info,
+ }.get(name.lower(), system_info)
+ return cl().get_info(notfound_action)
+
+
+class NotFoundError(DistutilsError):
+ """Some third-party program or library is not found."""
+
+
+class AliasedOptionError(DistutilsError):
+ """
+ Aliased entries in config files should not exist.
+ In section '{section}' we found multiple appearances of options {options}."""
+
+
+class AtlasNotFoundError(NotFoundError):
+ """
+ Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [atlas]) or by setting
+ the ATLAS environment variable."""
+
+
+class FlameNotFoundError(NotFoundError):
+ """
+ FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [flame])."""
+
+
+class LapackNotFoundError(NotFoundError):
+ """
+ Lapack (http://www.netlib.org/lapack/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [lapack]) or by setting
+ the LAPACK environment variable."""
+
+
+class LapackSrcNotFoundError(LapackNotFoundError):
+ """
+ Lapack (http://www.netlib.org/lapack/) sources not found.
+ Directories to search for the sources can be specified in the
+ numpy/distutils/site.cfg file (section [lapack_src]) or by setting
+ the LAPACK_SRC environment variable."""
+
+
+class LapackILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Lapack libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
+
+class BlasOptNotFoundError(NotFoundError):
+ """
+ Optimized (vendor) Blas libraries are not found.
+ Falls back to the netlib Blas library, which has worse performance.
+ Better performance should be easily gained by switching the
+ Blas library."""
+
+class BlasNotFoundError(NotFoundError):
+ """
+ Blas (http://www.netlib.org/blas/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [blas]) or by setting
+ the BLAS environment variable."""
+
+class BlasILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Blas libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
+
+class BlasSrcNotFoundError(BlasNotFoundError):
+ """
+ Blas (http://www.netlib.org/blas/) sources not found.
+ Directories to search for the sources can be specified in the
+ numpy/distutils/site.cfg file (section [blas_src]) or by setting
+ the BLAS_SRC environment variable."""
+
+
+class FFTWNotFoundError(NotFoundError):
+ """
+ FFTW (http://www.fftw.org/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [fftw]) or by setting
+ the FFTW environment variable."""
+
+
+class DJBFFTNotFoundError(NotFoundError):
+ """
+ DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [djbfft]) or by setting
+ the DJBFFT environment variable."""
+
+
+class NumericNotFoundError(NotFoundError):
+ """
+ Numeric (https://www.numpy.org/) module not found.
+ Get it from above location, install it, and retry setup.py."""
+
+
+class X11NotFoundError(NotFoundError):
+ """X11 libraries not found."""
+
+
+class UmfpackNotFoundError(NotFoundError):
+ """
+ UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)
+ not found. Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [umfpack]) or by setting
+ the UMFPACK environment variable."""
+
+
+class system_info:
+
+ """ get_info() is the only public method. Don't use others.
+ """
+ dir_env_var = None
+ # XXX: search_static_first is disabled by default, may disappear in
+ # future unless it is proved to be useful.
+ search_static_first = 0
+ # The base-class section name is a random word "ALL" and is not really
+ # intended for general use. It cannot be None nor can it be DEFAULT as
+ # these break the ConfigParser. See gh-15338
+ section = 'ALL'
+ saved_results = {}
+
+ notfounderror = NotFoundError
+
+ def __init__(self,
+ default_lib_dirs=default_lib_dirs,
+ default_include_dirs=default_include_dirs,
+ ):
+ self.__class__.info = {}
+ self.local_prefixes = []
+ defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
+ 'include_dirs': os.pathsep.join(default_include_dirs),
+ 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
+ 'rpath': '',
+ 'src_dirs': os.pathsep.join(default_src_dirs),
+ 'search_static_first': str(self.search_static_first),
+ 'extra_compile_args': '', 'extra_link_args': ''}
+ self.cp = ConfigParser(defaults)
+ self.files = []
+ self.files.extend(get_standard_file('.numpy-site.cfg'))
+ self.files.extend(get_standard_file('site.cfg'))
+ self.parse_config_files()
+
+ if self.section is not None:
+ self.search_static_first = self.cp.getboolean(
+ self.section, 'search_static_first')
+ assert isinstance(self.search_static_first, int)
+
+ def parse_config_files(self):
+ self.cp.read(self.files)
+ if not self.cp.has_section(self.section):
+ if self.section is not None:
+ self.cp.add_section(self.section)
+
+ def calc_libraries_info(self):
+ libs = self.get_libraries()
+ dirs = self.get_lib_dirs()
+ # The extensions use runtime_library_dirs
+ r_dirs = self.get_runtime_lib_dirs()
+ # Intrinsic distutils uses rpath; we simply append both entries
+ # as though they were one entry
+ r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
+ info = {}
+ for lib in libs:
+ i = self.check_libs(dirs, [lib])
+ if i is not None:
+ dict_append(info, **i)
+ else:
+ log.info('Library %s was not found. Ignoring' % (lib))
+
+ if r_dirs:
+ i = self.check_libs(r_dirs, [lib])
+ if i is not None:
+ # Swap the library keywords found over to runtime_library_dirs:
+ # the libraries are expected to have been defined by the user
+ # via library_dirs, and not necessarily via
+ # runtime_library_dirs
+ del i['libraries']
+ i['runtime_library_dirs'] = i.pop('library_dirs')
+ dict_append(info, **i)
+ else:
+ log.info('Runtime library %s was not found. Ignoring' % (lib))
+
+ return info
+
+ def set_info(self, **info):
+ if info:
+ lib_info = self.calc_libraries_info()
+ dict_append(info, **lib_info)
+ # Update extra information
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+ self.saved_results[self.__class__.__name__] = info
+
+ def get_option_single(self, *options):
+ """ Ensure that only one of `options` is found in the section
+
+ Parameters
+ ----------
+ *options : list of str
+ a list of options to be found in the section (``self.section``)
+
+ Returns
+ -------
+ str :
+ the option that is uniquely found in the section
+
+ Raises
+ ------
+ AliasedOptionError :
+ in case more than one of the options are found
+ """
+ found = [self.cp.has_option(self.section, opt) for opt in options]
+ if sum(found) == 1:
+ return options[found.index(True)]
+ elif sum(found) == 0:
+ # nothing is found anyway
+ return options[0]
+
+ # Else we have more than 1 key found
+ if AliasedOptionError.__doc__ is None:
+ raise AliasedOptionError()
+ raise AliasedOptionError(AliasedOptionError.__doc__.format(
+ section=self.section, options='[{}]'.format(', '.join(options))))
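+
+ # A hedged usage sketch (the option names below are hypothetical
+ # aliases):
+ #
+ #     key = info.get_option_single('libraries', 'library')
+ #     # returns whichever alias appears in the section, the first
+ #     # option if neither does, and raises AliasedOptionError if both do.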
+
+
+ def has_info(self):
+ return self.__class__.__name__ in self.saved_results
+
+ def calc_extra_info(self):
+ """ Updates the current information with the values of
+ these flags:
+ extra_compile_args
+ extra_link_args
+ """
+ info = {}
+ for key in ['extra_compile_args', 'extra_link_args']:
+ # Get values
+ opt = self.cp.get(self.section, key)
+ opt = _shell_utils.NativeParser.split(opt)
+ if opt:
+ tmp = {key: opt}
+ dict_append(info, **tmp)
+ return info
+
+ def get_info(self, notfound_action=0):
+ """ Return a dictionary with items that are compatible
+ with numpy.distutils.setup keyword arguments.
+ """
+ flag = 0
+ if not self.has_info():
+ flag = 1
+ log.info(self.__class__.__name__ + ':')
+ if hasattr(self, 'calc_info'):
+ self.calc_info()
+ if notfound_action:
+ if not self.has_info():
+ if notfound_action == 1:
+ warnings.warn(self.notfounderror.__doc__, stacklevel=2)
+ elif notfound_action == 2:
+ raise self.notfounderror(self.notfounderror.__doc__)
+ else:
+ raise ValueError(repr(notfound_action))
+
+ if not self.has_info():
+ log.info(' NOT AVAILABLE')
+ self.set_info()
+ else:
+ log.info(' FOUND:')
+
+ res = self.saved_results.get(self.__class__.__name__)
+ if log.get_threshold() <= log.INFO and flag:
+ for k, v in res.items():
+ v = str(v)
+ if k in ['sources', 'libraries'] and len(v) > 270:
+ v = v[:120] + '...\n...\n...' + v[-120:]
+ log.info(' %s = %s', k, v)
+ log.info('')
+
+ return copy.deepcopy(res)
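+
+ # notfound_action: 0 ignores a missing package, 1 warns using the
+ # notfounderror docstring, 2 raises the notfounderror. For example,
+ # lapack_info().get_info(notfound_action=2) raises
+ # LapackNotFoundError when no LAPACK is found.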
+
+ def get_paths(self, section, key):
+ dirs = self.cp.get(section, key).split(os.pathsep)
+ env_var = self.dir_env_var
+ if env_var:
+ if is_sequence(env_var):
+ e0 = env_var[-1]
+ for e in env_var:
+ if e in os.environ:
+ e0 = e
+ break
+ if env_var[0] != e0:
+ log.info('Setting %s=%s', env_var[0], e0)
+ env_var = e0
+ if env_var and env_var in os.environ:
+ d = os.environ[env_var]
+ if d == 'None':
+ log.info('Disabled %s: %s',
+ self.__class__.__name__, '(%s is None)'
+ % (env_var,))
+ return []
+ if os.path.isfile(d):
+ dirs = [os.path.dirname(d)] + dirs
+ lib_names = getattr(self, '_lib_names', [])
+ if len(lib_names) == 1:
+ b = os.path.basename(d)
+ b = os.path.splitext(b)[0]
+ if b[:3] == 'lib':
+ log.info('Replacing _lib_names[0]==%r with %r',
+ self._lib_names[0], b[3:])
+ self._lib_names[0] = b[3:]
+ else:
+ ds = d.split(os.pathsep)
+ ds2 = []
+ for d in ds:
+ if os.path.isdir(d):
+ ds2.append(d)
+ for dd in ['include', 'lib']:
+ d1 = os.path.join(d, dd)
+ if os.path.isdir(d1):
+ ds2.append(d1)
+ dirs = ds2 + dirs
+ default_dirs = self.cp.get(self.section, key).split(os.pathsep)
+ dirs.extend(default_dirs)
+ ret = []
+ for d in dirs:
+ if len(d) > 0 and not os.path.isdir(d):
+ warnings.warn('Specified path %s is invalid.' % d, stacklevel=2)
+ continue
+
+ if d not in ret:
+ ret.append(d)
+
+ log.debug('( %s = %s )', key, ':'.join(ret))
+ return ret
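+
+ # e.g. FFTW=/opt/fftw prepends /opt/fftw plus its include/ and lib/
+ # subdirectories to the search path, while FFTW=None disables the
+ # package entirely.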
+
+ def get_lib_dirs(self, key='library_dirs'):
+ return self.get_paths(self.section, key)
+
+ def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
+ path = self.get_paths(self.section, key)
+ if path == ['']:
+ path = []
+ return path
+
+ def get_include_dirs(self, key='include_dirs'):
+ return self.get_paths(self.section, key)
+
+ def get_src_dirs(self, key='src_dirs'):
+ return self.get_paths(self.section, key)
+
+ def get_libs(self, key, default):
+ try:
+ libs = self.cp.get(self.section, key)
+ except NoOptionError:
+ if not default:
+ return []
+ if is_string(default):
+ return [default]
+ return default
+ return [b for b in [a.strip() for a in libs.split(',')] if b]
+
+ def get_libraries(self, key='libraries'):
+ if hasattr(self, '_lib_names'):
+ return self.get_libs(key, default=self._lib_names)
+ else:
+ return self.get_libs(key, '')
+
+ def library_extensions(self):
+ c = customized_ccompiler()
+ static_exts = []
+ if c.compiler_type != 'msvc':
+ # MSVC doesn't understand binutils-style '.a' archives
+ static_exts.append('.a')
+ if sys.platform == 'win32':
+ static_exts.append('.lib') # .lib is used by MSVC and others
+ if self.search_static_first:
+ exts = static_exts + [so_ext]
+ else:
+ exts = [so_ext] + static_exts
+ if sys.platform == 'cygwin':
+ exts.append('.dll.a')
+ if sys.platform == 'darwin':
+ exts.append('.dylib')
+ return exts
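+
+ # e.g. on Linux with a GNU compiler and search_static_first unset
+ # this returns ['.so', '.a'] (a sketch, assuming so_ext is the
+ # platform shared-library suffix determined elsewhere in this module).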
+
+ def check_libs(self, lib_dirs, libs, opt_libs=[]):
+ """If static or shared libraries are available then return
+ their info dictionary.
+
+ Checks for all libraries as shared libraries first, then
+ static (or vice versa if self.search_static_first is True).
+ """
+ exts = self.library_extensions()
+ info = None
+ for ext in exts:
+ info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
+ if info is not None:
+ break
+ if not info:
+ log.info(' libraries %s not found in %s', ','.join(libs),
+ lib_dirs)
+ return info
+
+ def check_libs2(self, lib_dirs, libs, opt_libs=[]):
+ """If static or shared libraries are available then return
+ their info dictionary.
+
+ Checks each library for shared or static.
+ """
+ exts = self.library_extensions()
+ info = self._check_libs(lib_dirs, libs, opt_libs, exts)
+ if not info:
+ log.info(' libraries %s not found in %s', ','.join(libs),
+ lib_dirs)
+
+ return info
+
+ def _find_lib(self, lib_dir, lib, exts):
+ assert is_string(lib_dir)
+ # under windows first try without 'lib' prefix
+ if sys.platform == 'win32':
+ lib_prefixes = ['', 'lib']
+ else:
+ lib_prefixes = ['lib']
+ # for each library name, see if we can find a file for it.
+ for ext in exts:
+ for prefix in lib_prefixes:
+ p = self.combine_paths(lib_dir, prefix + lib + ext)
+ if p:
+ break
+ if p:
+ assert len(p) == 1
+ # ??? splitext on p[0] would do this for cygwin
+ # doesn't seem correct
+ if ext == '.dll.a':
+ lib += '.dll'
+ if ext == '.lib':
+ lib = prefix + lib
+ return lib
+
+ return False
+
+ def _find_libs(self, lib_dirs, libs, exts):
+ # make sure we preserve the order of libs, as it can be important
+ found_dirs, found_libs = [], []
+ for lib in libs:
+ for lib_dir in lib_dirs:
+ found_lib = self._find_lib(lib_dir, lib, exts)
+ if found_lib:
+ found_libs.append(found_lib)
+ if lib_dir not in found_dirs:
+ found_dirs.append(lib_dir)
+ break
+ return found_dirs, found_libs
+
+ def _check_libs(self, lib_dirs, libs, opt_libs, exts):
+ """Find mandatory and optional libs in expected paths.
+
+ Missing optional libraries are silently forgotten.
+ """
+ if not is_sequence(lib_dirs):
+ lib_dirs = [lib_dirs]
+ # First, try to find the mandatory libraries
+ found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)
+ if len(found_libs) > 0 and len(found_libs) == len(libs):
+ # Now, check for optional libraries
+ opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)
+ found_libs.extend(opt_found_libs)
+ for lib_dir in opt_found_dirs:
+ if lib_dir not in found_dirs:
+ found_dirs.append(lib_dir)
+ info = {'libraries': found_libs, 'library_dirs': found_dirs}
+ return info
+ else:
+ return None
+
+ def combine_paths(self, *args):
+ """Return a list of existing paths composed by all combinations
+ of items from the arguments.
+ """
+ return combine_paths(*args)
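+
+ # e.g. combine_paths('/usr', ['lib', 'lib64'], 'libfftw3*') globs
+ # /usr/lib/libfftw3* and /usr/lib64/libfftw3* and returns whatever
+ # exists (a sketch; the module-level combine_paths helper does the
+ # globbing).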
+
+
+class fft_opt_info(system_info):
+
+ def calc_info(self):
+ info = {}
+ fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
+ djbfft_info = get_info('djbfft')
+ if fftw_info:
+ dict_append(info, **fftw_info)
+ if djbfft_info:
+ dict_append(info, **djbfft_info)
+ self.set_info(**info)
+ return
+
+
+class fftw_info(system_info):
+ # variables to override
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name':'fftw3',
+ 'libs':['fftw3'],
+ 'includes':['fftw3.h'],
+ 'macros':[('SCIPY_FFTW3_H', None)]},
+ {'name':'fftw2',
+ 'libs':['rfftw', 'fftw'],
+ 'includes':['fftw.h', 'rfftw.h'],
+ 'macros':[('SCIPY_FFTW_H', None)]}]
+
+ def calc_ver_info(self, ver_param):
+ """Returns True on successful version detection, else False"""
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+
+ opt = self.get_option_single(self.section + '_libs', 'libraries')
+ libs = self.get_libs(opt, ver_param['libs'])
+ info = self.check_libs(lib_dirs, libs)
+ if info is not None:
+ flag = 0
+ for d in incl_dirs:
+ if len(self.combine_paths(d, ver_param['includes'])) \
+ == len(ver_param['includes']):
+ dict_append(info, include_dirs=[d])
+ flag = 1
+ break
+ if flag:
+ dict_append(info, define_macros=ver_param['macros'])
+ else:
+ info = None
+ if info is not None:
+ self.set_info(**info)
+ return True
+ else:
+ log.info(' %s not found' % (ver_param['name']))
+ return False
+
+ def calc_info(self):
+ for i in self.ver_info:
+ if self.calc_ver_info(i):
+ break
+
+
+class fftw2_info(fftw_info):
+ # variables to override
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name':'fftw2',
+ 'libs':['rfftw', 'fftw'],
+ 'includes':['fftw.h', 'rfftw.h'],
+ 'macros':[('SCIPY_FFTW_H', None)]}
+ ]
+
+
+class fftw3_info(fftw_info):
+ # variables to override
+ section = 'fftw3'
+ dir_env_var = 'FFTW3'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name':'fftw3',
+ 'libs':['fftw3'],
+ 'includes':['fftw3.h'],
+ 'macros':[('SCIPY_FFTW3_H', None)]},
+ ]
+
+
+class fftw3_armpl_info(fftw_info):
+ section = 'fftw3'
+ dir_env_var = 'ARMPL_DIR'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name': 'fftw3',
+ 'libs': ['armpl_lp64_mp'],
+ 'includes': ['fftw3.h'],
+ 'macros': [('SCIPY_FFTW3_H', None)]}]
+
+
+class dfftw_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'dfftw',
+ 'libs':['drfftw', 'dfftw'],
+ 'includes':['dfftw.h', 'drfftw.h'],
+ 'macros':[('SCIPY_DFFTW_H', None)]}]
+
+
+class sfftw_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'sfftw',
+ 'libs':['srfftw', 'sfftw'],
+ 'includes':['sfftw.h', 'srfftw.h'],
+ 'macros':[('SCIPY_SFFTW_H', None)]}]
+
+
+class fftw_threads_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'fftw threads',
+ 'libs':['rfftw_threads', 'fftw_threads'],
+ 'includes':['fftw_threads.h', 'rfftw_threads.h'],
+ 'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
+
+
+class dfftw_threads_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'dfftw threads',
+ 'libs':['drfftw_threads', 'dfftw_threads'],
+ 'includes':['dfftw_threads.h', 'drfftw_threads.h'],
+ 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
+
+
+class sfftw_threads_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'sfftw threads',
+ 'libs':['srfftw_threads', 'sfftw_threads'],
+ 'includes':['sfftw_threads.h', 'srfftw_threads.h'],
+ 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
+
+
+class djbfft_info(system_info):
+ section = 'djbfft'
+ dir_env_var = 'DJBFFT'
+ notfounderror = DJBFFTNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend(self.combine_paths(d, ['djbfft']) + [d])
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+ info = None
+ for d in lib_dirs:
+ p = self.combine_paths(d, ['djbfft.a'])
+ if p:
+ info = {'extra_objects': p}
+ break
+ p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])
+ if p:
+ info = {'libraries': ['djbfft'], 'library_dirs': [d]}
+ break
+ if info is None:
+ return
+ for d in incl_dirs:
+ if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:
+ dict_append(info, include_dirs=[d],
+ define_macros=[('SCIPY_DJBFFT_H', None)])
+ self.set_info(**info)
+ return
+ return
+
+
+class mkl_info(system_info):
+ section = 'mkl'
+ dir_env_var = 'MKLROOT'
+ _lib_mkl = ['mkl_rt']
+
+ def get_mkl_rootdir(self):
+ mklroot = os.environ.get('MKLROOT', None)
+ if mklroot is not None:
+ return mklroot
+ paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
+ ld_so_conf = '/etc/ld.so.conf'
+ if os.path.isfile(ld_so_conf):
+ with open(ld_so_conf, 'r') as f:
+ for d in f:
+ d = d.strip()
+ if d:
+ paths.append(d)
+ intel_mkl_dirs = []
+ for path in paths:
+ path_atoms = path.split(os.sep)
+ for m in path_atoms:
+ if m.startswith('mkl'):
+ d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
+ intel_mkl_dirs.append(d)
+ break
+ for d in paths:
+ dirs = glob(os.path.join(d, 'mkl', '*'))
+ dirs += glob(os.path.join(d, 'mkl*'))
+ for sub_dir in dirs:
+ if os.path.isdir(os.path.join(sub_dir, 'lib')):
+ return sub_dir
+ return None
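+
+ # e.g. with MKLROOT=/opt/intel/mkl set, that path is returned as-is;
+ # otherwise LD_LIBRARY_PATH and /etc/ld.so.conf are scanned for mkl*
+ # directories containing a 'lib' subdirectory.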
+
+ def __init__(self):
+ mklroot = self.get_mkl_rootdir()
+ if mklroot is None:
+ system_info.__init__(self)
+ else:
+ from .cpuinfo import cpu
+ if cpu.is_Itanium():
+ plt = '64'
+ elif cpu.is_Intel() and cpu.is_64bit():
+ plt = 'intel64'
+ else:
+ plt = '32'
+ system_info.__init__(
+ self,
+ default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
+ default_include_dirs=[os.path.join(mklroot, 'include')])
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+ opt = self.get_option_single('mkl_libs', 'libraries')
+ mkl_libs = self.get_libs(opt, self._lib_mkl)
+ info = self.check_libs2(lib_dirs, mkl_libs)
+ if info is None:
+ return
+ dict_append(info,
+ define_macros=[('SCIPY_MKL_H', None),
+ ('HAVE_CBLAS', None)],
+ include_dirs=incl_dirs)
+ if sys.platform != 'win32':
+ # win32 has no pthread library
+ dict_append(info, libraries=['pthread'])
+ self.set_info(**info)
+
+
+class lapack_mkl_info(mkl_info):
+ pass
+
+
+class blas_mkl_info(mkl_info):
+ pass
+
+
+class armpl_info(system_info):
+ section = 'armpl'
+ dir_env_var = 'ARMPL_DIR'
+ _lib_armpl = ['armpl_lp64_mp']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+ armpl_libs = self.get_libs('armpl_libs', self._lib_armpl)
+ info = self.check_libs2(lib_dirs, armpl_libs)
+ if info is None:
+ return
+ dict_append(info,
+ define_macros=[('SCIPY_MKL_H', None),
+ ('HAVE_CBLAS', None)],
+ include_dirs=incl_dirs)
+ self.set_info(**info)
+
+class lapack_armpl_info(armpl_info):
+ pass
+
+class blas_armpl_info(armpl_info):
+ pass
+
+
+class atlas_info(system_info):
+ section = 'atlas'
+ dir_env_var = 'ATLAS'
+ _lib_names = ['f77blas', 'cblas']
+ if sys.platform[:7] == 'freebsd':
+ _lib_atlas = ['atlas_r']
+ _lib_lapack = ['alapack_r']
+ else:
+ _lib_atlas = ['atlas']
+ _lib_lapack = ['lapack']
+
+ notfounderror = AtlasNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
+ 'sse', '3dnow', 'sse2']) + [d])
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ info = {}
+ opt = self.get_option_single('atlas_libs', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
+ lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
+ atlas = None
+ lapack = None
+ atlas_1 = None
+ for d in lib_dirs:
+ atlas = self.check_libs2(d, atlas_libs, [])
+ if atlas is not None:
+ lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
+ lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
+ if lapack is not None:
+ break
+ if atlas:
+ atlas_1 = atlas
+ log.info(self.__class__)
+ if atlas is None:
+ atlas = atlas_1
+ if atlas is None:
+ return
+ include_dirs = self.get_include_dirs()
+ h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
+ h = h[0]
+ if h:
+ h = os.path.dirname(h)
+ dict_append(info, include_dirs=[h])
+ info['language'] = 'c'
+ if lapack is not None:
+ dict_append(info, **lapack)
+ dict_append(info, **atlas)
+ elif 'lapack_atlas' in atlas['libraries']:
+ dict_append(info, **atlas)
+ dict_append(info,
+ define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
+ self.set_info(**info)
+ return
+ else:
+ dict_append(info, **atlas)
+ dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
+ message = textwrap.dedent("""
+ *********************************************************************
+ Could not find lapack library within the ATLAS installation.
+ *********************************************************************
+ """)
+ warnings.warn(message, stacklevel=2)
+ self.set_info(**info)
+ return
+
+ # Check if lapack library is complete, only warn if it is not.
+ lapack_dir = lapack['library_dirs'][0]
+ lapack_name = lapack['libraries'][0]
+ lapack_lib = None
+ lib_prefixes = ['lib']
+ if sys.platform == 'win32':
+ lib_prefixes.append('')
+ for e in self.library_extensions():
+ for prefix in lib_prefixes:
+ fn = os.path.join(lapack_dir, prefix + lapack_name + e)
+ if os.path.exists(fn):
+ lapack_lib = fn
+ break
+ if lapack_lib:
+ break
+ if lapack_lib is not None:
+ sz = os.stat(lapack_lib).st_size
+ if sz <= 4000 * 1024:
+ message = textwrap.dedent("""
+ *********************************************************************
+ Lapack library (from ATLAS) is probably incomplete:
+ size of %s is %sk (expected >4000k)
+
+ Follow the instructions in the KNOWN PROBLEMS section of the file
+ numpy/INSTALL.txt.
+ *********************************************************************
+ """) % (lapack_lib, sz / 1024)
+ warnings.warn(message, stacklevel=2)
+ else:
+ info['language'] = 'f77'
+
+ atlas_version, atlas_extra_info = get_atlas_version(**atlas)
+ dict_append(info, **atlas_extra_info)
+
+ self.set_info(**info)
+
+
+class atlas_blas_info(atlas_info):
+ _lib_names = ['f77blas', 'cblas']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ info = {}
+ opt = self.get_option_single('atlas_libs', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
+ atlas = self.check_libs2(lib_dirs, atlas_libs, [])
+ if atlas is None:
+ return
+ include_dirs = self.get_include_dirs()
+ h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
+ h = h[0]
+ if h:
+ h = os.path.dirname(h)
+ dict_append(info, include_dirs=[h])
+ info['language'] = 'c'
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+
+ atlas_version, atlas_extra_info = get_atlas_version(**atlas)
+ dict_append(atlas, **atlas_extra_info)
+
+ dict_append(info, **atlas)
+
+ self.set_info(**info)
+ return
+
+
+class atlas_threads_info(atlas_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['ptf77blas', 'ptcblas']
+
+
+class atlas_blas_threads_info(atlas_blas_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['ptf77blas', 'ptcblas']
+
+
+class lapack_atlas_info(atlas_info):
+ _lib_names = ['lapack_atlas'] + atlas_info._lib_names
+
+
+class lapack_atlas_threads_info(atlas_threads_info):
+ _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
+
+
+class atlas_3_10_info(atlas_info):
+ _lib_names = ['satlas']
+ _lib_atlas = _lib_names
+ _lib_lapack = _lib_names
+
+
+class atlas_3_10_blas_info(atlas_3_10_info):
+ _lib_names = ['satlas']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ info = {}
+ opt = self.get_option_single('atlas_lib', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names)
+ atlas = self.check_libs2(lib_dirs, atlas_libs, [])
+ if atlas is None:
+ return
+ include_dirs = self.get_include_dirs()
+ h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
+ h = h[0]
+ if h:
+ h = os.path.dirname(h)
+ dict_append(info, include_dirs=[h])
+ info['language'] = 'c'
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+
+ atlas_version, atlas_extra_info = get_atlas_version(**atlas)
+ dict_append(atlas, **atlas_extra_info)
+
+ dict_append(info, **atlas)
+
+ self.set_info(**info)
+ return
+
+
+class atlas_3_10_threads_info(atlas_3_10_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['tatlas']
+ _lib_atlas = _lib_names
+ _lib_lapack = _lib_names
+
+
+class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['tatlas']
+
+
+class lapack_atlas_3_10_info(atlas_3_10_info):
+ pass
+
+
+class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
+ pass
+
+
+class lapack_info(system_info):
+ section = 'lapack'
+ dir_env_var = 'LAPACK'
+ _lib_names = ['lapack']
+ notfounderror = LapackNotFoundError
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+
+ opt = self.get_option_single('lapack_libs', 'libraries')
+ lapack_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, lapack_libs, [])
+ if info is None:
+ return
+ info['language'] = 'f77'
+ self.set_info(**info)
+
+
+class lapack_src_info(system_info):
+ # LAPACK_SRC is deprecated, please do not use this!
+ # Build or install a LAPACK library via your package manager or from
+ # source separately.
+ section = 'lapack_src'
+ dir_env_var = 'LAPACK_SRC'
+ notfounderror = LapackSrcNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'dgesv.f')):
+ src_dir = d
+ break
+ if not src_dir:
+ #XXX: Get sources from netlib. Maybe ask first.
+ return
+ # The following is extracted from LAPACK-3.0/SRC/Makefile.
+ # Added missing names from lapack-lite-3.1.1/SRC/Makefile
+ # while keeping removed names for Lapack-3.0 compatibility.
+ allaux = '''
+ ilaenv ieeeck lsame lsamen xerbla
+ iparmq
+ ''' # *.f
+ laux = '''
+ bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
+ laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
+ lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
+ larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
+ lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
+ lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
+ stebz stedc steqr sterf
+
+ larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
+ lazq3 lazq4
+ ''' # [s|d]*.f
+ lasrc = '''
+ gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
+ gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
+ gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
+ geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
+ gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
+ gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
+ ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
+ hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
+ lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
+ lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
+ laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
+ lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
+ latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
+ pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
+ potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
+ pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
+ spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
+ sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
+ tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
+ trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
+ tzrqf tzrzf
+
+ lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
+ ''' # [s|c|d|z]*.f
+ sd_lasrc = '''
+ laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
+ org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
+ orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
+ ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
+ sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
+ stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
+ sygvx sytd2 sytrd
+ ''' # [s|d]*.f
+ cz_lasrc = '''
+ bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
+ heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
+ hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
+ hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
+ hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
+ laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
+ laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
+ spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
+ ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
+ unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
+ ''' # [c|z]*.f
+ #######
+ sclaux = laux + ' econd ' # s*.f
+ dzlaux = laux + ' secnd ' # d*.f
+ slasrc = lasrc + sd_lasrc # s*.f
+ dlasrc = lasrc + sd_lasrc # d*.f
+ clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
+ zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
+ oclasrc = ' icmax1 scsum1 ' # *.f
+ ozlasrc = ' izmax1 dzsum1 ' # *.f
+ sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ + ['c%s.f' % f for f in (clasrc).split()] \
+ + ['z%s.f' % f for f in (zlasrc).split()] \
+ + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
+ sources = [os.path.join(src_dir, f) for f in sources]
+ # Lapack 3.1:
+ src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
+ sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
+ # Lapack 3.2.1:
+ sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
+ sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
+ sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
+ # Should we check here actual existence of source files?
+ # Yes, the file listing is different between 3.0 and 3.1
+ # versions.
+ sources = [f for f in sources if os.path.isfile(f)]
+ info = {'sources': sources, 'language': 'f77'}
+ self.set_info(**info)
+
+atlas_version_c_text = r'''
+/* This file is generated from numpy/distutils/system_info.py */
+void ATL_buildinfo(void);
+int main(void) {
+ ATL_buildinfo();
+ return 0;
+}
+'''
+
+_cached_atlas_version = {}
+
+
+def get_atlas_version(**config):
+ libraries = config.get('libraries', [])
+ library_dirs = config.get('library_dirs', [])
+ key = (tuple(libraries), tuple(library_dirs))
+ if key in _cached_atlas_version:
+ return _cached_atlas_version[key]
+ c = cmd_config(Distribution())
+ atlas_version = None
+ info = {}
+ try:
+ s, o = c.get_output(atlas_version_c_text,
+ libraries=libraries, library_dirs=library_dirs,
+ )
+ if s and re.search(r'undefined reference to `_gfortran', o, re.M):
+ s, o = c.get_output(atlas_version_c_text,
+ libraries=libraries + ['gfortran'],
+ library_dirs=library_dirs,
+ )
+ if not s:
+ warnings.warn(textwrap.dedent("""
+ *****************************************************
+ Linkage with ATLAS requires gfortran. Use
+
+ python setup.py config_fc --fcompiler=gnu95 ...
+
+ when building extension libraries that use ATLAS.
+ Make sure that -lgfortran is used for C++ extensions.
+ *****************************************************
+ """), stacklevel=2)
+ dict_append(info, language='f90',
+ define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
+ except Exception: # failed to get version from file -- maybe on Windows
+ # look at directory name
+ for o in library_dirs:
+ m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
+ if m:
+ atlas_version = m.group('version')
+ if atlas_version is not None:
+ break
+
+ # final choice --- look at ATLAS_VERSION environment
+ # variable
+ if atlas_version is None:
+ atlas_version = os.environ.get('ATLAS_VERSION', None)
+ if atlas_version:
+ dict_append(info, define_macros=[(
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
+ ])
+ else:
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
+ return atlas_version or '?.?.?', info
+
+ if not s:
+ m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
+ if m:
+ atlas_version = m.group('version')
+ if atlas_version is None:
+ if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
+ atlas_version = '3.2.1_pre3.3.6'
+ else:
+ log.info('Status: %d', s)
+ log.info('Output: %s', o)
+
+ elif atlas_version == '3.2.1_pre3.3.6':
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
+ else:
+ dict_append(info, define_macros=[(
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
+ ])
+ result = _cached_atlas_version[key] = atlas_version, info
+ return result
+
+
+class lapack_opt_info(system_info):
+ notfounderror = LapackNotFoundError
+
+ # List of all known LAPACK libraries, in the default order
+ lapack_order = ['armpl', 'mkl', 'openblas', 'flame',
+ 'accelerate', 'atlas', 'lapack']
+ order_env_var_name = 'NPY_LAPACK_ORDER'
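+
+ # e.g. NPY_LAPACK_ORDER='mkl,openblas' limits the search to those two
+ # backends, in that order, while a leading '^' or '!' removes the
+ # listed entries from the default order (see _parse_env_order).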
+
+ def _calc_info_armpl(self):
+ info = get_info('lapack_armpl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_mkl(self):
+ info = get_info('lapack_mkl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_openblas(self):
+ info = get_info('openblas_lapack')
+ if info:
+ self.set_info(**info)
+ return True
+ info = get_info('openblas_clapack')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_flame(self):
+ info = get_info('flame')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_atlas(self):
+ info = get_info('atlas_3_10_threads')
+ if not info:
+ info = get_info('atlas_3_10')
+ if not info:
+ info = get_info('atlas_threads')
+ if not info:
+ info = get_info('atlas')
+ if info:
+ # Figure out if ATLAS has lapack...
+ # If not we need the lapack library, but not BLAS!
+ macros = info.get('define_macros', [])
+ if ('ATLAS_WITH_LAPACK_ATLAS', None) in macros \
+ or ('ATLAS_WITHOUT_LAPACK', None) in macros:
+ # Get LAPACK (with possible warnings)
+ # If not found we don't accept anything
+ # since we can't use ATLAS with LAPACK!
+ lapack_info = self._get_info_lapack()
+ if not lapack_info:
+ return False
+ dict_append(info, **lapack_info)
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_accelerate(self):
+ info = get_info('accelerate')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _get_info_blas(self):
+ # Default to get the optimized BLAS implementation
+ info = get_info('blas_opt')
+ if not info:
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
+ info_src = get_info('blas_src')
+ if not info_src:
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return {}
+ dict_append(info, libraries=[('fblas_src', info_src)])
+ return info
+
+ def _get_info_lapack(self):
+ info = get_info('lapack')
+ if not info:
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3)
+ info_src = get_info('lapack_src')
+ if not info_src:
+ warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return {}
+ dict_append(info, libraries=[('flapack_src', info_src)])
+ return info
+
+ def _calc_info_lapack(self):
+ info = self._get_info_lapack()
+ if info:
+ info_blas = self._get_info_blas()
+ dict_append(info, **info_blas)
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_from_envvar(self):
+ info = {}
+ info['language'] = 'f77'
+ info['libraries'] = []
+ info['include_dirs'] = []
+ info['define_macros'] = []
+ info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split()
+ self.set_info(**info)
+ return True
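+
+ # e.g. NPY_LAPACK_LIBS='-L/opt/lapack -llapack' makes the build link
+ # with exactly those flags, skipping all autodetection.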
+
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
+ def calc_info(self):
+ lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("lapack_opt_info user defined "
+ "LAPACK order has unacceptable "
+ "values: {}".format(unknown_order))
+
+ if 'NPY_LAPACK_LIBS' in os.environ:
+ # Bypass autodetection, set language to F77 and use env var linker
+ # flags directly
+ self._calc_info_from_envvar()
+ return
+
+ for lapack in lapack_order:
+ if self._calc_info(lapack):
+ return
+
+ if 'lapack' not in lapack_order:
+ # Since the user may request *not* to use any library, we still need
+ # to raise warnings to signal missing packages!
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)
+ warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
+
+
+class _ilp64_opt_info_mixin:
+ symbol_suffix = None
+ symbol_prefix = None
+
+ def _check_info(self, info):
+ macros = dict(info.get('define_macros', []))
+ prefix = macros.get('BLAS_SYMBOL_PREFIX', '')
+ suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')
+
+ if self.symbol_prefix not in (None, prefix):
+ return False
+
+ if self.symbol_suffix not in (None, suffix):
+ return False
+
+ return bool(info)
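+
+ # e.g. an openblas64_ build defines BLAS_SYMBOL_SUFFIX='64_', so
+ # dgemm_ is exported as dgemm_64_; _check_info only accepts info
+ # whose macros match the expected prefix/suffix of the class.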
+
+
+class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = LapackILP64NotFoundError
+ lapack_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name + '_lapack')
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):
+ # Same as lapack_ilp64_opt_info, but with fixed symbol names
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class lapack64__opt_info(lapack_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+
+class blas_opt_info(system_info):
+ notfounderror = BlasNotFoundError
+ # List of all known BLAS libraries, in the default order
+
+ blas_order = ['armpl', 'mkl', 'blis', 'openblas',
+ 'accelerate', 'atlas', 'blas']
+ order_env_var_name = 'NPY_BLAS_ORDER'
+
+ def _calc_info_armpl(self):
+ info = get_info('blas_armpl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_mkl(self):
+ info = get_info('blas_mkl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_blis(self):
+ info = get_info('blis')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_openblas(self):
+ info = get_info('openblas')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_atlas(self):
+ info = get_info('atlas_3_10_blas_threads')
+ if not info:
+ info = get_info('atlas_3_10_blas')
+ if not info:
+ info = get_info('atlas_blas_threads')
+ if not info:
+ info = get_info('atlas_blas')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_accelerate(self):
+ info = get_info('accelerate')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_blas(self):
+ # Warn about a non-optimized BLAS library
+ warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3)
+ info = {}
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+
+ blas = get_info('blas')
+ if blas:
+ dict_append(info, **blas)
+ else:
+ # Not even BLAS was found!
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
+
+ blas_src = get_info('blas_src')
+ if not blas_src:
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return False
+ dict_append(info, libraries=[('fblas_src', blas_src)])
+
+ self.set_info(**info)
+ return True
+
+ def _calc_info_from_envvar(self):
+ info = {}
+ info['language'] = 'f77'
+ info['libraries'] = []
+ info['include_dirs'] = []
+ info['define_macros'] = []
+ info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split()
+ if 'NPY_CBLAS_LIBS' in os.environ:
+ info['define_macros'].append(('HAVE_CBLAS', None))
+ info['extra_link_args'].extend(
+ os.environ['NPY_CBLAS_LIBS'].split())
+ self.set_info(**info)
+ return True
+
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
+ def calc_info(self):
+ blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order))
+
+ if 'NPY_BLAS_LIBS' in os.environ:
+ # Bypass autodetection, set language to F77 and use env var linker
+ # flags directly
+ self._calc_info_from_envvar()
+ return
+
+ for blas in blas_order:
+ if self._calc_info(blas):
+ return
+
+ if 'blas' not in blas_order:
+ # Since the user may request *not* to use any library, we still need
+ # to raise warnings to signal missing packages!
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2)
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
+
+
+class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = BlasILP64NotFoundError
+ blas_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_BLAS_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name)
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class blas_ilp64_plain_opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class blas64__opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+
+class cblas_info(system_info):
+ section = 'cblas'
+ dir_env_var = 'CBLAS'
+ # No default as it's used only in blas_info
+ _lib_names = []
+ notfounderror = BlasNotFoundError
+
+
+class blas_info(system_info):
+ section = 'blas'
+ dir_env_var = 'BLAS'
+ _lib_names = ['blas']
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ opt = self.get_option_single('blas_libs', 'libraries')
+ blas_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, blas_libs, [])
+ if info is None:
+ return
+ else:
+ info['include_dirs'] = self.get_include_dirs()
+ if platform.system() == 'Windows':
+ # The check for windows is needed because get_cblas_libs uses the
+ # same compiler that was used to compile Python and msvc is
+ # often not installed when mingw is being used. This rough
+ # treatment is not desirable, but windows is tricky.
+ info['language'] = 'f77' # XXX: is it generally true?
+ # If cblas is given as an option, use those
+ cblas_info_obj = cblas_info()
+ cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
+ cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
+ if cblas_libs:
+ info['libraries'] = cblas_libs + blas_libs
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ else:
+ lib = self.get_cblas_libs(info)
+ if lib is not None:
+ info['language'] = 'c'
+ info['libraries'] = lib
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ self.set_info(**info)
+
+ def get_cblas_libs(self, info):
+ """ Check whether we can link with CBLAS interface
+
+ This method will search through several combinations of libraries
+ to check whether CBLAS is present:
+
+ 1. Libraries in ``info['libraries']``, as is
+ 2. As 1. but also explicitly adding ``'cblas'`` as a library
+ 3. As 1. but also explicitly adding ``'blas'`` as a library
+ 4. Check only library ``'cblas'``
+ 5. Check only library ``'blas'``
+
+ Parameters
+ ----------
+ info : dict
+ system information dictionary for compilation and linking
+
+ Returns
+ -------
+ libraries : list of str or None
+ a list of libraries that enables the use of CBLAS interface.
+ Returns None if not found or a compilation error occurs.
+
+ Since 1.17 returns a list.
+ """
+ # primitive cblas check by looking for the header and trying to link
+ # cblas or blas
+ c = customized_ccompiler()
+ tmpdir = tempfile.mkdtemp()
+ s = textwrap.dedent("""\
+ #include <cblas.h>
+ int main(int argc, const char *argv[])
+ {
+ double a[4] = {1,2,3,4};
+ double b[4] = {5,6,7,8};
+ return cblas_ddot(4, a, 1, b, 1) > 10;
+ }""")
+ src = os.path.join(tmpdir, 'source.c')
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+
+ try:
+ # check we can compile (find headers)
+ obj = c.compile([src], output_dir=tmpdir,
+ include_dirs=self.get_include_dirs())
+ except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
+ return None
+
+ # check we can link (find library)
+ # some systems have separate cblas and blas libs.
+ for libs in [info['libraries'], ['cblas'] + info['libraries'],
+ ['blas'] + info['libraries'], ['cblas'], ['blas']]:
+ try:
+ c.link_executable(obj, os.path.join(tmpdir, "a.out"),
+ libraries=libs,
+ library_dirs=info['library_dirs'],
+ extra_postargs=info.get('extra_link_args', []))
+ return libs
+ except distutils.ccompiler.LinkError:
+ pass
+ finally:
+ shutil.rmtree(tmpdir)
+ return None
+
+
+class openblas_info(blas_info):
+ section = 'openblas'
+ dir_env_var = 'OPENBLAS'
+ _lib_names = ['openblas']
+ _require_symbols = []
+ notfounderror = BlasNotFoundError
+
+ @property
+ def symbol_prefix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_prefix')
+ except NoOptionError:
+ return ''
+
+ @property
+ def symbol_suffix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_suffix')
+ except NoOptionError:
+ return ''
+
+ def _calc_info(self):
+ c = customized_ccompiler()
+
+ lib_dirs = self.get_lib_dirs()
+
+ # Either 'openblas_libs' or the generic 'libraries' option may name
+ # the libraries (but not both at once)
+ opt = self.get_option_single('openblas_libs', 'libraries')
+ openblas_libs = self.get_libs(opt, self._lib_names)
+
+ info = self.check_libs(lib_dirs, openblas_libs, [])
+
+ if c.compiler_type == "msvc" and info is None:
+ from numpy.distutils.fcompiler import new_fcompiler
+ f = new_fcompiler(c_compiler=c)
+ if f and f.compiler_type == 'gnu95':
+ # Try gfortran-compatible library files
+ info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
+ # Skip the LAPACK symbol check; we'd need build_ext to do it
+ skip_symbol_check = True
+ elif info:
+ info['language'] = 'c'
+
+ if info is None:
+ return None
+
+ # Add extra info for OpenBLAS
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+
+ if not (skip_symbol_check or self.check_symbols(info)):
+ return None
+
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ if self.symbol_prefix:
+ info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)]
+ if self.symbol_suffix:
+ info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)]
+
+ return info
+
+ def calc_info(self):
+ info = self._calc_info()
+ if info is not None:
+ self.set_info(**info)
+
+ def check_msvc_gfortran_libs(self, library_dirs, libraries):
+ # First, find the full path to each library file
+ library_paths = []
+ for library in libraries:
+ for library_dir in library_dirs:
+ # MinGW static ext will be .a
+ fullpath = os.path.join(library_dir, library + '.a')
+ if os.path.isfile(fullpath):
+ library_paths.append(fullpath)
+ break
+ else:
+ return None
+
+ # Generate numpy.distutils virtual static library file
+ basename = self.__class__.__name__
+ tmpdir = os.path.join(os.getcwd(), 'build', basename)
+ if not os.path.isdir(tmpdir):
+ os.makedirs(tmpdir)
+
+ info = {'library_dirs': [tmpdir],
+ 'libraries': [basename],
+ 'language': 'f77'}
+
+ fake_lib_file = os.path.join(tmpdir, basename + '.fobjects')
+ fake_clib_file = os.path.join(tmpdir, basename + '.cobjects')
+ with open(fake_lib_file, 'w') as f:
+ f.write("\n".join(library_paths))
+ with open(fake_clib_file, 'w') as f:
+ pass
+
+ return info
+
+ def check_symbols(self, info):
+ res = False
+ c = customized_ccompiler()
+
+ tmpdir = tempfile.mkdtemp()
+
+ prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
+ calls = "\n".join("%s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
+ s = textwrap.dedent("""\
+ %(prototypes)s
+ int main(int argc, const char *argv[])
+ {
+ %(calls)s
+ return 0;
+ }""") % dict(prototypes=prototypes, calls=calls)
+ src = os.path.join(tmpdir, 'source.c')
+ out = os.path.join(tmpdir, 'a.out')
+ # Add the additional "extra" arguments
+ try:
+ extra_args = info['extra_link_args']
+ except Exception:
+ extra_args = []
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+ obj = c.compile([src], output_dir=tmpdir)
+ try:
+ c.link_executable(obj, out, libraries=info['libraries'],
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
+ res = True
+ except distutils.ccompiler.LinkError:
+ res = False
+ finally:
+ shutil.rmtree(tmpdir)
+ return res
+
+class openblas_lapack_info(openblas_info):
+ section = 'openblas'
+ dir_env_var = 'OPENBLAS'
+ _lib_names = ['openblas']
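+ # zungqr_ comes from OpenBLAS's bundled LAPACK; requiring it weeds
+ # out BLAS-only builds.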
+ _require_symbols = ['zungqr_']
+ notfounderror = BlasNotFoundError
+
+class openblas_clapack_info(openblas_lapack_info):
+ _lib_names = ['openblas', 'lapack']
+
+class openblas_ilp64_info(openblas_info):
+ section = 'openblas_ilp64'
+ dir_env_var = 'OPENBLAS_ILP64'
+ _lib_names = ['openblas64']
+ _require_symbols = ['dgemm_', 'cblas_dgemm']
+ notfounderror = BlasILP64NotFoundError
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info is not None:
+ info['define_macros'] += [('HAVE_BLAS_ILP64', None)]
+ return info
+
+class openblas_ilp64_lapack_info(openblas_ilp64_info):
+ _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr']
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info:
+ info['define_macros'] += [('HAVE_LAPACKE', None)]
+ return info
+
+class openblas64__info(openblas_ilp64_info):
+ # ILP64 OpenBLAS, with the default symbol suffix
+ section = 'openblas64_'
+ dir_env_var = 'OPENBLAS64_'
+ _lib_names = ['openblas64_']
+ symbol_suffix = '64_'
+ symbol_prefix = ''
+
+class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info):
+ pass
+
+class blis_info(blas_info):
+ section = 'blis'
+ dir_env_var = 'BLIS'
+ _lib_names = ['blis']
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ opt = self.get_option_single('blis_libs', 'libraries')
+ blis_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs2(lib_dirs, blis_libs, [])
+ if info is None:
+ return
+
+ # Add include dirs
+ incl_dirs = self.get_include_dirs()
+ dict_append(info,
+ language='c',
+ define_macros=[('HAVE_CBLAS', None)],
+ include_dirs=incl_dirs)
+ self.set_info(**info)
+
+
+class flame_info(system_info):
+ """ Usage of libflame for LAPACK operations
+
+ This requires libflame to be compiled with lapack wrappers:
+
+ ./configure --enable-lapack2flame ...
+
+ Be aware that libflame 5.1.0 has some missing names in the shared library, so
+ if you have problems, try the static flame library.
+ """
+ section = 'flame'
+ _lib_names = ['flame']
+ notfounderror = FlameNotFoundError
+
+ def check_embedded_lapack(self, info):
+ """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """
+ c = customized_ccompiler()
+
+ tmpdir = tempfile.mkdtemp()
+ s = textwrap.dedent("""\
+ void zungqr_();
+ int main(int argc, const char *argv[])
+ {
+ zungqr_();
+ return 0;
+ }""")
+ src = os.path.join(tmpdir, 'source.c')
+ out = os.path.join(tmpdir, 'a.out')
+ # Add the additional "extra" arguments
+ extra_args = info.get('extra_link_args', [])
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+ obj = c.compile([src], output_dir=tmpdir)
+ try:
+ c.link_executable(obj, out, libraries=info['libraries'],
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
+ return True
+ except distutils.ccompiler.LinkError:
+ return False
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ flame_libs = self.get_libs('libraries', self._lib_names)
+
+ info = self.check_libs2(lib_dirs, flame_libs, [])
+ if info is None:
+ return
+
+ # Add the extra flag args to info
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+
+ if self.check_embedded_lapack(info):
+ # check if the user has supplied all information required
+ self.set_info(**info)
+ else:
+ # Try to get the BLAS lib to see if we can get it to work
+ blas_info = get_info('blas_opt')
+ if not blas_info:
+ # since we already failed once, this isn't going to work either
+ return
+
+ # Now we need to merge the two dictionaries
+ for key in blas_info:
+ if isinstance(blas_info[key], list):
+ info[key] = info.get(key, []) + blas_info[key]
+ elif isinstance(blas_info[key], tuple):
+ info[key] = info.get(key, ()) + blas_info[key]
+ else:
+ info[key] = info.get(key, '') + blas_info[key]
+
+ # Now check again
+ if self.check_embedded_lapack(info):
+ self.set_info(**info)
+
+
+class accelerate_info(system_info):
+ section = 'accelerate'
+ _lib_names = ['accelerate', 'veclib']
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+ # Make it possible to enable/disable from the config file or env var
+ libraries = os.environ.get('ACCELERATE')
+ if libraries:
+ libraries = [libraries]
+ else:
+ libraries = self.get_libs('libraries', self._lib_names)
+ libraries = [lib.strip().lower() for lib in libraries]
+
+ if (sys.platform == 'darwin' and
+ not os.getenv('_PYTHON_HOST_PLATFORM', None)):
+ # Use the system BLAS from Accelerate or vecLib under OSX
+ args = []
+ link_args = []
+ if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
+ 'x86_64' in get_platform() or \
+ 'i386' in platform.platform():
+ intel = 1
+ else:
+ intel = 0
+ if (os.path.exists('/System/Library/Frameworks'
+ '/Accelerate.framework/') and
+ 'accelerate' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
+ elif (os.path.exists('/System/Library/Frameworks'
+ '/vecLib.framework/') and
+ 'veclib' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
+
+ if args:
+ self.set_info(extra_compile_args=args,
+ extra_link_args=link_args,
+ define_macros=[('NO_ATLAS_INFO', 3),
+ ('HAVE_CBLAS', None)])
+
+ return
+
+class blas_src_info(system_info):
+ # BLAS_SRC is deprecated, please do not use this!
+ # Build or install a BLAS library via your package manager or from
+ # source separately.
+ section = 'blas_src'
+ dir_env_var = 'BLAS_SRC'
+ notfounderror = BlasSrcNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['blas']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'daxpy.f')):
+ src_dir = d
+ break
+ if not src_dir:
+ #XXX: Get sources from netlib. Maybe ask first.
+ return
+ blas1 = '''
+ caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
+ dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
+ srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
+ dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
+ snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
+ scabs1
+ '''
+ blas2 = '''
+ cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
+ chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
+ dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
+ sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
+ stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
+ zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
+ ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
+ '''
+ blas3 = '''
+ cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
+ dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
+ ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
+ '''
+ sources = [os.path.join(src_dir, f + '.f') \
+ for f in (blas1 + blas2 + blas3).split()]
+ #XXX: should we check here actual existence of source files?
+ sources = [f for f in sources if os.path.isfile(f)]
+ info = {'sources': sources, 'language': 'f77'}
+ self.set_info(**info)
+
+
+class x11_info(system_info):
+ section = 'x11'
+ notfounderror = X11NotFoundError
+ _lib_names = ['X11']
+
+ def __init__(self):
+ system_info.__init__(self,
+ default_lib_dirs=default_x11_lib_dirs,
+ default_include_dirs=default_x11_include_dirs)
+
+ def calc_info(self):
+ if sys.platform in ['win32']:
+ return
+ lib_dirs = self.get_lib_dirs()
+ include_dirs = self.get_include_dirs()
+ opt = self.get_option_single('x11_libs', 'libraries')
+ x11_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, x11_libs, [])
+ if info is None:
+ return
+ inc_dir = None
+ for d in include_dirs:
+ if self.combine_paths(d, 'X11/X.h'):
+ inc_dir = d
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir])
+ self.set_info(**info)
+
+
+class _numpy_info(system_info):
+ section = 'Numeric'
+ modulename = 'Numeric'
+ notfounderror = NumericNotFoundError
+
+ def __init__(self):
+ include_dirs = []
+ try:
+ module = __import__(self.modulename)
+
+ # Ask the module for its own include path before attempting
+ # anything else
+ try:
+ include_dirs.append(module.get_include())
+ except AttributeError:
+ pass
+
+ include_dirs.append(sysconfig.get_path('include'))
+ except ImportError:
+ pass
+ py_incl_dir = sysconfig.get_path('include')
+ include_dirs.append(py_incl_dir)
+ py_pincl_dir = sysconfig.get_path('platinclude')
+ if py_pincl_dir not in include_dirs:
+ include_dirs.append(py_pincl_dir)
+ for d in default_include_dirs:
+ d = os.path.join(d, os.path.basename(py_incl_dir))
+ if d not in include_dirs:
+ include_dirs.append(d)
+ system_info.__init__(self,
+ default_lib_dirs=[],
+ default_include_dirs=include_dirs)
+
+ def calc_info(self):
+ try:
+ module = __import__(self.modulename)
+ except ImportError:
+ return
+ info = {}
+ macros = []
+ for v in ['__version__', 'version']:
+ vrs = getattr(module, v, None)
+ if vrs is None:
+ continue
+ macros = [(self.modulename.upper() + '_VERSION',
+ _c_string_literal(vrs)),
+ (self.modulename.upper(), None)]
+ break
+ dict_append(info, define_macros=macros)
+ include_dirs = self.get_include_dirs()
+ inc_dir = None
+ for d in include_dirs:
+ if self.combine_paths(d,
+ os.path.join(self.modulename,
+ 'arrayobject.h')):
+ inc_dir = d
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir])
+ if info:
+ self.set_info(**info)
+ return
+
+
+class numarray_info(_numpy_info):
+ section = 'numarray'
+ modulename = 'numarray'
+
+
+class Numeric_info(_numpy_info):
+ section = 'Numeric'
+ modulename = 'Numeric'
+
+
+class numpy_info(_numpy_info):
+ section = 'numpy'
+ modulename = 'numpy'
+
+
+class numerix_info(system_info):
+ section = 'numerix'
+
+ def calc_info(self):
+ which = None, None
+ if os.getenv("NUMERIX"):
+ which = os.getenv("NUMERIX"), "environment var"
+ # If all the above fail, default to numpy.
+ if which[0] is None:
+ which = "numpy", "defaulted"
+ try:
+ import numpy # noqa: F401
+ which = "numpy", "defaulted"
+ except ImportError as e:
+ msg1 = str(e)
+ try:
+ import Numeric # noqa: F401
+ which = "numeric", "defaulted"
+ except ImportError as e:
+ msg2 = str(e)
+ try:
+ import numarray # noqa: F401
+ which = "numarray", "defaulted"
+ except ImportError as e:
+ msg3 = str(e)
+ log.info(msg1)
+ log.info(msg2)
+ log.info(msg3)
+ which = which[0].strip().lower(), which[1]
+ if which[0] not in ["numeric", "numarray", "numpy"]:
+ raise ValueError("numerix selector must be either 'Numeric' "
+ "or 'numarray' or 'numpy' but the value obtained"
+ " from the %s was '%s'." % (which[1], which[0]))
+ os.environ['NUMERIX'] = which[0]
+ self.set_info(**get_info(which[0]))
+
+
+class f2py_info(system_info):
+ def calc_info(self):
+ try:
+ import numpy.f2py as f2py
+ except ImportError:
+ return
+ f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
+ self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
+ include_dirs=[f2py_dir])
+ return
+
+
+class boost_python_info(system_info):
+ section = 'boost_python'
+ dir_env_var = 'BOOST'
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['boost*']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
+ 'module.cpp')):
+ src_dir = d
+ break
+ if not src_dir:
+ return
+ py_incl_dirs = [sysconfig.get_path('include')]
+ py_pincl_dir = sysconfig.get_path('platinclude')
+ if py_pincl_dir not in py_incl_dirs:
+ py_incl_dirs.append(py_pincl_dir)
+ srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
+ bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
+ bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
+ info = {'libraries': [('boost_python_src',
+ {'include_dirs': [src_dir] + py_incl_dirs,
+ 'sources':bpl_srcs}
+ )],
+ 'include_dirs': [src_dir],
+ }
+ if info:
+ self.set_info(**info)
+ return
+
+
+class agg2_info(system_info):
+ section = 'agg2'
+ dir_env_var = 'AGG2'
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['agg2*']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
+ src_dir = d
+ break
+ if not src_dir:
+ return
+ if sys.platform == 'win32':
+ agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
+ 'win32', 'agg_win32_bmp.cpp'))
+ else:
+ agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
+ agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
+ 'X11',
+ 'agg_platform_support.cpp')]
+
+ info = {'libraries':
+ [('agg2_src',
+ {'sources': agg2_srcs,
+ 'include_dirs': [os.path.join(src_dir, 'include')],
+ }
+ )],
+ 'include_dirs': [os.path.join(src_dir, 'include')],
+ }
+ if info:
+ self.set_info(**info)
+ return
+
+
+class _pkg_config_info(system_info):
+ section = None
+ config_env_var = 'PKG_CONFIG'
+ default_config_exe = 'pkg-config'
+ append_config_exe = ''
+ version_macro_name = None
+ release_macro_name = None
+ version_flag = '--modversion'
+ cflags_flag = '--cflags'
+
+ def get_config_exe(self):
+ if self.config_env_var in os.environ:
+ return os.environ[self.config_env_var]
+ return self.default_config_exe
+
+ def get_config_output(self, config_exe, option):
+ cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
+ try:
+ # Pass an argument list: a single command string would be
+ # treated as just the program name on POSIX and always fail.
+ o = subprocess.check_output(cmd.split())
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ o = filepath_from_subprocess_output(o)
+ return o
+
+    def calc_info(self):
+        exe_name = self.get_config_exe()
+        config_exe = find_executable(exe_name)
+        if not config_exe:
+            log.warn('File not found: %s. Cannot determine %s info.'
+                     % (exe_name, self.section))
+            return
+ info = {}
+ macros = []
+ libraries = []
+ library_dirs = []
+ include_dirs = []
+ extra_link_args = []
+ extra_compile_args = []
+ version = self.get_config_output(config_exe, self.version_flag)
+ if version:
+ macros.append((self.__class__.__name__.split('.')[-1].upper(),
+ _c_string_literal(version)))
+ if self.version_macro_name:
+ macros.append((self.version_macro_name + '_%s'
+ % (version.replace('.', '_')), None))
+ if self.release_macro_name:
+ release = self.get_config_output(config_exe, '--release')
+ if release:
+ macros.append((self.release_macro_name + '_%s'
+ % (release.replace('.', '_')), None))
+ opts = self.get_config_output(config_exe, '--libs')
+ if opts:
+ for opt in opts.split():
+ if opt[:2] == '-l':
+ libraries.append(opt[2:])
+ elif opt[:2] == '-L':
+ library_dirs.append(opt[2:])
+ else:
+ extra_link_args.append(opt)
+ opts = self.get_config_output(config_exe, self.cflags_flag)
+ if opts:
+ for opt in opts.split():
+ if opt[:2] == '-I':
+ include_dirs.append(opt[2:])
+ elif opt[:2] == '-D':
+ if '=' in opt:
+                        n, v = opt[2:].split('=', 1)
+ macros.append((n, v))
+ else:
+ macros.append((opt[2:], None))
+ else:
+ extra_compile_args.append(opt)
+ if macros:
+ dict_append(info, define_macros=macros)
+ if libraries:
+ dict_append(info, libraries=libraries)
+ if library_dirs:
+ dict_append(info, library_dirs=library_dirs)
+ if include_dirs:
+ dict_append(info, include_dirs=include_dirs)
+ if extra_link_args:
+ dict_append(info, extra_link_args=extra_link_args)
+ if extra_compile_args:
+ dict_append(info, extra_compile_args=extra_compile_args)
+ if info:
+ self.set_info(**info)
+ return
+
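+# A sketch of how calc_info() above maps pkg-config style output onto a
+# distutils info dict (hypothetical flags; real values depend on the system):
+#
+#     $ pkg-config freetype2 --cflags --libs
+#     -I/usr/include/freetype2 -DSOME_MACRO=1 -L/usr/lib -lfreetype
+#
+# parses into roughly:
+#
+#     {'include_dirs': ['/usr/include/freetype2'],
+#      'define_macros': [('SOME_MACRO', '1')],
+#      'library_dirs': ['/usr/lib'],
+#      'libraries': ['freetype']}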
+
+class wx_info(_pkg_config_info):
+ section = 'wx'
+ config_env_var = 'WX_CONFIG'
+ default_config_exe = 'wx-config'
+ append_config_exe = ''
+ version_macro_name = 'WX_VERSION'
+ release_macro_name = 'WX_RELEASE'
+ version_flag = '--version'
+ cflags_flag = '--cxxflags'
+
+
+class gdk_pixbuf_xlib_2_info(_pkg_config_info):
+ section = 'gdk_pixbuf_xlib_2'
+ append_config_exe = 'gdk-pixbuf-xlib-2.0'
+ version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
+
+
+class gdk_pixbuf_2_info(_pkg_config_info):
+ section = 'gdk_pixbuf_2'
+ append_config_exe = 'gdk-pixbuf-2.0'
+ version_macro_name = 'GDK_PIXBUF_VERSION'
+
+
+class gdk_x11_2_info(_pkg_config_info):
+ section = 'gdk_x11_2'
+ append_config_exe = 'gdk-x11-2.0'
+ version_macro_name = 'GDK_X11_VERSION'
+
+
+class gdk_2_info(_pkg_config_info):
+ section = 'gdk_2'
+ append_config_exe = 'gdk-2.0'
+ version_macro_name = 'GDK_VERSION'
+
+
+class gdk_info(_pkg_config_info):
+ section = 'gdk'
+ append_config_exe = 'gdk'
+ version_macro_name = 'GDK_VERSION'
+
+
+class gtkp_x11_2_info(_pkg_config_info):
+ section = 'gtkp_x11_2'
+ append_config_exe = 'gtk+-x11-2.0'
+ version_macro_name = 'GTK_X11_VERSION'
+
+
+class gtkp_2_info(_pkg_config_info):
+ section = 'gtkp_2'
+ append_config_exe = 'gtk+-2.0'
+ version_macro_name = 'GTK_VERSION'
+
+
+class xft_info(_pkg_config_info):
+ section = 'xft'
+ append_config_exe = 'xft'
+ version_macro_name = 'XFT_VERSION'
+
+
+class freetype2_info(_pkg_config_info):
+ section = 'freetype2'
+ append_config_exe = 'freetype2'
+ version_macro_name = 'FREETYPE2_VERSION'
+
+
+class amd_info(system_info):
+ section = 'amd'
+ dir_env_var = 'AMD'
+ _lib_names = ['amd']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+
+ opt = self.get_option_single('amd_libs', 'libraries')
+ amd_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, amd_libs, [])
+ if info is None:
+ return
+
+ include_dirs = self.get_include_dirs()
+
+ inc_dir = None
+ for d in include_dirs:
+ p = self.combine_paths(d, 'amd.h')
+ if p:
+ inc_dir = os.path.dirname(p[0])
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir],
+ define_macros=[('SCIPY_AMD_H', None)],
+ swig_opts=['-I' + inc_dir])
+
+ self.set_info(**info)
+ return
+
+
+class umfpack_info(system_info):
+ section = 'umfpack'
+ dir_env_var = 'UMFPACK'
+ notfounderror = UmfpackNotFoundError
+ _lib_names = ['umfpack']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+
+ opt = self.get_option_single('umfpack_libs', 'libraries')
+ umfpack_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, umfpack_libs, [])
+ if info is None:
+ return
+
+ include_dirs = self.get_include_dirs()
+
+ inc_dir = None
+ for d in include_dirs:
+ p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
+ if p:
+ inc_dir = os.path.dirname(p[0])
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir],
+ define_macros=[('SCIPY_UMFPACK_H', None)],
+ swig_opts=['-I' + inc_dir])
+
+ dict_append(info, **get_info('amd'))
+
+ self.set_info(**info)
+ return
+
+
+def combine_paths(*args, **kws):
+ """ Return a list of existing paths composed by all combinations of
+ items from arguments.
+ """
+ r = []
+ for a in args:
+ if not a:
+ continue
+ if is_string(a):
+ a = [a]
+ r.append(a)
+ args = r
+ if not args:
+ return []
+ if len(args) == 1:
+ result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
+ elif len(args) == 2:
+ result = []
+ for a0 in args[0]:
+ for a1 in args[1]:
+ result.extend(glob(os.path.join(a0, a1)))
+ else:
+ result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
+ log.debug('(paths: %s)', ','.join(result))
+ return result
+
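+# combine_paths() globs every combination of its arguments and keeps only
+# the paths that actually exist; a sketch with hypothetical directories:
+#     combine_paths('/opt/atlas', ['lib', 'lib64'], 'libatlas*')
+# globs '/opt/atlas/lib/libatlas*' and '/opt/atlas/lib64/libatlas*' and
+# returns whatever matches.
+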
+language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
+inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
+
+
+def dict_append(d, **kws):
+ languages = []
+ for k, v in kws.items():
+ if k == 'language':
+ languages.append(v)
+ continue
+ if k in d:
+ if k in ['library_dirs', 'include_dirs',
+ 'extra_compile_args', 'extra_link_args',
+ 'runtime_library_dirs', 'define_macros']:
+                for vv in v:
+                    if vv not in d[k]:
+                        d[k].append(vv)
+ else:
+ d[k].extend(v)
+ else:
+ d[k] = v
+ if languages:
+ l = inv_language_map[max([language_map.get(l, 0) for l in languages])]
+ d['language'] = l
+ return
+
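+# dict_append() merges info dicts in place: list-valued keys are extended
+# (with de-duplication for the path-like keys listed above) and 'language'
+# resolves to the most demanding language seen. A small sketch:
+#     d = {'libraries': ['amd'], 'language': 'c'}
+#     dict_append(d, libraries=['umfpack'], language='f77')
+#     # d == {'libraries': ['amd', 'umfpack'], 'language': 'f77'}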
+
+def parseCmdLine(argv=(None,)):
+ import optparse
+ parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
+ parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
+ default=False,
+ help='be verbose and print more messages')
+
+ opts, args = parser.parse_args(args=argv[1:])
+ return opts, args
+
+
+def show_all(argv=None):
+ import inspect
+ if argv is None:
+ argv = sys.argv
+ opts, args = parseCmdLine(argv)
+ if opts.verbose:
+ log.set_threshold(log.DEBUG)
+ else:
+ log.set_threshold(log.INFO)
+ show_only = []
+ for n in args:
+ if n[-5:] != '_info':
+ n = n + '_info'
+ show_only.append(n)
+ show_all = not show_only
+ _gdict_ = globals().copy()
+ for name, c in _gdict_.items():
+ if not inspect.isclass(c):
+ continue
+ if not issubclass(c, system_info) or c is system_info:
+ continue
+ if not show_all:
+ if name not in show_only:
+ continue
+            show_only.remove(name)
+ conf = c()
+ conf.verbosity = 2
+ # we don't need the result, but we want
+ # the side effect of printing diagnostics
+ conf.get_info()
+ if show_only:
+ log.info('Info classes not defined: %s', ','.join(show_only))
+
+if __name__ == "__main__":
+ show_all()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py
new file mode 100644
index 00000000..372100fc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py
@@ -0,0 +1,74 @@
+'''Tests for numpy.distutils.build_ext.'''
+
+import os
+import subprocess
+import sys
+from textwrap import indent, dedent
+import pytest
+from numpy.testing import IS_WASM
+
+@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm")
+@pytest.mark.slow
+def test_multi_fortran_libs_link(tmp_path):
+ '''
+ Ensures multiple "fake" static libraries are correctly linked.
+ see gh-18295
+ '''
+
+ # We need to make sure we actually have an f77 compiler.
+ # This is nontrivial, so we'll borrow the utilities
+ # from f2py tests:
+ from numpy.f2py.tests.util import has_f77_compiler
+ if not has_f77_compiler():
+ pytest.skip('No F77 compiler found')
+
+ # make some dummy sources
+ with open(tmp_path / '_dummy1.f', 'w') as fid:
+ fid.write(indent(dedent('''\
+ FUNCTION dummy_one()
+ RETURN
+ END FUNCTION'''), prefix=' '*6))
+ with open(tmp_path / '_dummy2.f', 'w') as fid:
+ fid.write(indent(dedent('''\
+ FUNCTION dummy_two()
+ RETURN
+ END FUNCTION'''), prefix=' '*6))
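+    # the six-space indent keeps the statements out of columns 1-6, which
+    # fixed-form Fortran reserves for labels and continuation markers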
+ with open(tmp_path / '_dummy.c', 'w') as fid:
+ # doesn't need to load - just needs to exist
+ fid.write('int PyInit_dummyext;')
+
+ # make a setup file
+ with open(tmp_path / 'setup.py', 'w') as fid:
+ srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+ fid.write(dedent(f'''\
+ def configuration(parent_package="", top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration("", parent_package, top_path)
+ config.add_library("dummy1", sources=["_dummy1.f"])
+ config.add_library("dummy2", sources=["_dummy2.f"])
+ config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"])
+ return config
+
+
+ if __name__ == "__main__":
+ import sys
+ sys.path.insert(0, r"{srctree}")
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path="").todict())'''))
+
+    # build the test extension and "install" into a temporary directory
+ build_dir = tmp_path
+ subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
+ '--prefix', str(tmp_path / 'installdir'),
+ '--record', str(tmp_path / 'tmp_install_log.txt'),
+ ],
+ cwd=str(build_dir),
+ )
+ # get the path to the so
+ so = None
+    with open(tmp_path / 'tmp_install_log.txt') as fid:
+ for line in fid:
+ if 'dummyext' in line:
+ so = line.strip()
+ break
+ assert so is not None
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py
new file mode 100644
index 00000000..657ebdb6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py
@@ -0,0 +1,808 @@
+import re, textwrap, os
+import sys
+from os import path
+from distutils.errors import DistutilsError
+
+is_standalone = __name__ == '__main__' and __package__ is None
+if is_standalone:
+ import unittest, contextlib, tempfile, shutil
+ sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
+ from ccompiler_opt import CCompilerOpt
+
+ # from numpy/testing/_private/utils.py
+ @contextlib.contextmanager
+ def tempdir(*args, **kwargs):
+ tmpdir = tempfile.mkdtemp(*args, **kwargs)
+ try:
+ yield tmpdir
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def assert_(expr, msg=''):
+ if not expr:
+ raise AssertionError(msg)
+else:
+ from numpy.distutils.ccompiler_opt import CCompilerOpt
+ from numpy.testing import assert_, tempdir
+
+# architectures and compilers to test
+arch_compilers = dict(
+ x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ ppc64 = ("gcc", "clang"),
+ ppc64le = ("gcc", "clang"),
+ armhf = ("gcc", "clang"),
+ aarch64 = ("gcc", "clang"),
+ s390x = ("gcc", "clang"),
+ noarch = ("gcc",)
+)
+
+class FakeCCompilerOpt(CCompilerOpt):
+ fake_info = ""
+ def __init__(self, trap_files="", trap_flags="", *args, **kwargs):
+ self.fake_trap_files = trap_files
+ self.fake_trap_flags = trap_flags
+ CCompilerOpt.__init__(self, None, **kwargs)
+
+ def __repr__(self):
+ return textwrap.dedent("""\
+ <<<<
+ march : {}
+ compiler : {}
+ ----------------
+ {}
+ >>>>
+ """).format(self.cc_march, self.cc_name, self.report())
+
+ def dist_compile(self, sources, flags, **kwargs):
+ assert(isinstance(sources, list))
+ assert(isinstance(flags, list))
+ if self.fake_trap_files:
+ for src in sources:
+ if re.match(self.fake_trap_files, src):
+ self.dist_error("source is trapped by a fake interface")
+ if self.fake_trap_flags:
+ for f in flags:
+ if re.match(self.fake_trap_flags, f):
+ self.dist_error("flag is trapped by a fake interface")
+ # fake objects
+ return zip(sources, [' '.join(flags)] * len(sources))
+
+ def dist_info(self):
+ return FakeCCompilerOpt.fake_info
+
+ @staticmethod
+ def dist_log(*args, stderr=False):
+ pass
+
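+# FakeCCompilerOpt never invokes a real compiler: dist_compile() returns
+# (source, flags) pairs as fake objects, and the trap regexes simulate
+# broken builds; e.g. constructing with trap_files=".*cpu_avx2.c" behaves
+# as if the AVX2 probe source failed to compile, so that feature (and
+# anything that requires it) drops out of the computed sets.
+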
+class _Test_CCompilerOpt:
+ arch = None # x86_64
+ cc = None # gcc
+
+ def setup_class(self):
+ FakeCCompilerOpt.conf_nocache = True
+ self._opt = None
+
+ def nopt(self, *args, **kwargs):
+ FakeCCompilerOpt.fake_info = (self.arch, self.cc, "")
+ return FakeCCompilerOpt(*args, **kwargs)
+
+ def opt(self):
+ if not self._opt:
+ self._opt = self.nopt()
+ return self._opt
+
+ def march(self):
+ return self.opt().cc_march
+
+ def cc_name(self):
+ return self.opt().cc_name
+
+ def get_targets(self, targets, groups, **kwargs):
+ FakeCCompilerOpt.conf_target_groups = groups
+ opt = self.nopt(
+ cpu_baseline=kwargs.get("baseline", "min"),
+ cpu_dispatch=kwargs.get("dispatch", "max"),
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ with tempdir() as tmpdir:
+ file = os.path.join(tmpdir, "test_targets.c")
+ with open(file, 'w') as f:
+ f.write(targets)
+ gtargets = []
+ gflags = {}
+ fake_objects = opt.try_dispatch([file])
+ for source, flags in fake_objects:
+ gtar = path.basename(source).split('.')[1:-1]
+ glen = len(gtar)
+ if glen == 0:
+ gtar = "baseline"
+ elif glen == 1:
+ gtar = gtar[0].upper()
+ else:
+                    # convert a multi-target tuple into the parenthesized str
+                    # format, matching the configuration statement syntax
+ gtar = ('('+' '.join(gtar)+')').upper()
+ gtargets.append(gtar)
+ gflags[gtar] = flags
+
+ has_baseline, targets = opt.sources_status[file]
+ targets = targets + ["baseline"] if has_baseline else targets
+            # convert tuples that represent multi-targets into the
+            # parenthesized str format
+ targets = [
+ '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar
+ for tar in targets
+ ]
+ if len(targets) != len(gtargets) or not all(t in gtargets for t in targets):
+ raise AssertionError(
+ "'sources_status' returns different targets than the compiled targets\n"
+ "%s != %s" % (targets, gtargets)
+ )
+        # return targets from 'sources_status' since the order matters
+ return targets, gflags
+
+ def arg_regex(self, **kwargs):
+ map2origin = dict(
+ x64 = "x86",
+ ppc64le = "ppc64",
+ aarch64 = "armhf",
+ clang = "gcc",
+ )
+ march = self.march(); cc_name = self.cc_name()
+ map_march = map2origin.get(march, march)
+ map_cc = map2origin.get(cc_name, cc_name)
+ for key in (
+ march, cc_name, map_march, map_cc,
+ march + '_' + cc_name,
+ map_march + '_' + cc_name,
+ march + '_' + map_cc,
+ map_march + '_' + map_cc,
+ ) :
+ regex = kwargs.pop(key, None)
+ if regex is not None:
+ break
+ if regex:
+ if isinstance(regex, dict):
+ for k, v in regex.items():
+ if v[-1:] not in ')}$?\\.+*':
+ regex[k] = v + '$'
+ else:
+ assert(isinstance(regex, str))
+ if regex[-1:] not in ')}$?\\.+*':
+ regex += '$'
+ return regex
+
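+    # arg_regex() picks the expected-value kwarg for the current
+    # arch/compiler pair: it probes the bare arch and compiler names, their
+    # fallbacks (x64->x86, ppc64le->ppc64, aarch64->armhf, clang->gcc) and
+    # the combined '<arch>_<compiler>' forms, returning the first one the
+    # caller supplied; None means "no expectation for this setup".
+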
+ def expect(self, dispatch, baseline="", **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ opt = self.nopt(
+ cpu_baseline=baseline, cpu_dispatch=dispatch,
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ features = ' '.join(opt.cpu_dispatch_names())
+ if not match:
+ if len(features) != 0:
+ raise AssertionError(
+ 'expected empty features, not "%s"' % features
+ )
+ return
+ if not re.match(match, features, re.IGNORECASE):
+ raise AssertionError(
+                'dispatch features "%s" do not match "%s"' % (features, match)
+ )
+
+ def expect_baseline(self, baseline, dispatch="", **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ opt = self.nopt(
+ cpu_baseline=baseline, cpu_dispatch=dispatch,
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ features = ' '.join(opt.cpu_baseline_names())
+ if not match:
+ if len(features) != 0:
+ raise AssertionError(
+ 'expected empty features, not "%s"' % features
+ )
+ return
+ if not re.match(match, features, re.IGNORECASE):
+ raise AssertionError(
+                'baseline features "%s" do not match "%s"' % (features, match)
+ )
+
+ def expect_flags(self, baseline, dispatch="", **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ opt = self.nopt(
+ cpu_baseline=baseline, cpu_dispatch=dispatch,
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ flags = ' '.join(opt.cpu_baseline_flags())
+ if not match:
+ if len(flags) != 0:
+ raise AssertionError(
+ 'expected empty flags not "%s"' % flags
+ )
+ return
+ if not re.match(match, flags):
+ raise AssertionError(
+                'flags "%s" do not match "%s"' % (flags, match)
+ )
+
+ def expect_targets(self, targets, groups={}, **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs)
+ targets = ' '.join(targets)
+ if not match:
+ if len(targets) != 0:
+ raise AssertionError(
+ 'expected empty targets, not "%s"' % targets
+ )
+ return
+ if not re.match(match, targets, re.IGNORECASE):
+ raise AssertionError(
+                'targets "%s" do not match "%s"' % (targets, match)
+ )
+
+ def expect_target_flags(self, targets, groups={}, **kwargs):
+ match_dict = self.arg_regex(**kwargs)
+ if match_dict is None:
+ return
+ assert(isinstance(match_dict, dict))
+ _, tar_flags = self.get_targets(targets=targets, groups=groups)
+
+ for match_tar, match_flags in match_dict.items():
+ if match_tar not in tar_flags:
+ raise AssertionError(
+ 'expected to find target "%s"' % match_tar
+ )
+ flags = tar_flags[match_tar]
+ if not match_flags:
+ if len(flags) != 0:
+ raise AssertionError(
+ 'expected to find empty flags in target "%s"' % match_tar
+ )
+ if not re.match(match_flags, flags):
+ raise AssertionError(
+                    '"%s" flags "%s" do not match "%s"' % (match_tar, flags, match_flags)
+ )
+
+ def test_interface(self):
+ wrong_arch = "ppc64" if self.arch != "ppc64" else "x86"
+ wrong_cc = "clang" if self.cc != "clang" else "icc"
+ opt = self.opt()
+ assert_(getattr(opt, "cc_on_" + self.arch))
+ assert_(not getattr(opt, "cc_on_" + wrong_arch))
+ assert_(getattr(opt, "cc_is_" + self.cc))
+ assert_(not getattr(opt, "cc_is_" + wrong_cc))
+
+ def test_args_empty(self):
+ for baseline, dispatch in (
+ ("", "none"),
+ (None, ""),
+ ("none +none", "none - none"),
+ ("none -max", "min - max"),
+ ("+vsx2 -VSX2", "vsx avx2 avx512f -max"),
+ ("max -vsx - avx + avx512f neon -MAX ",
+ "min -min + max -max -vsx + avx2 -avx2 +NONE")
+ ) :
+ opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
+ assert(len(opt.cpu_baseline_names()) == 0)
+ assert(len(opt.cpu_dispatch_names()) == 0)
+
+ def test_args_validation(self):
+ if self.march() == "unknown":
+ return
+        # sanity-check the validation of the arguments
+ for baseline, dispatch in (
+ ("unkown_feature - max +min", "unknown max min"), # unknowing features
+ ("#avx2", "$vsx") # groups and polices aren't acceptable
+ ) :
+ try:
+ self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
+ raise AssertionError("excepted an exception for invalid arguments")
+ except DistutilsError:
+ pass
+
+ def test_skip(self):
+        # take only what the platform supports and skip the others
+        # without raising exceptions
+ self.expect(
+ "sse vsx neon",
+ x86="sse", ppc64="vsx", armhf="neon", unknown=""
+ )
+ self.expect(
+ "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd",
+ x86 = "sse41 avx avx2",
+ ppc64 = "vsx2 vsx3",
+ armhf = "neon_vfpv4 asimd",
+ unknown = ""
+ )
+        # any feature in cpu_dispatch must be ignored if it's part of the baseline
+ self.expect(
+ "sse neon vsx", baseline="sse neon vsx",
+ x86="", ppc64="", armhf=""
+ )
+ self.expect(
+ "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp",
+ x86="", ppc64="", armhf=""
+ )
+
+ def test_implies(self):
+        # the baseline combines implied features, so we rely on it
+        # instead of testing 'feature_implies()' directly
+ self.expect_baseline(
+ "fma3 avx2 asimd vsx3",
+            # a .* between two spaces matches any features in between
+ x86 = "sse .* sse41 .* fma3.*avx2",
+ ppc64 = "vsx vsx2 vsx3",
+ armhf = "neon neon_fp16 neon_vfpv4 asimd"
+ )
+ """
+ special cases
+ """
+ # in icc and msvc, FMA3 and AVX2 can't be separated
+ # both need to implies each other, same for avx512f & cd
+ for f0, f1 in (
+ ("fma3", "avx2"),
+ ("avx512f", "avx512cd"),
+ ):
+ diff = ".* sse42 .* %s .*%s$" % (f0, f1)
+ self.expect_baseline(f0,
+ x86_gcc=".* sse42 .* %s$" % f0,
+ x86_icc=diff, x86_iccw=diff
+ )
+ self.expect_baseline(f1,
+ x86_gcc=".* avx .* %s$" % f1,
+ x86_icc=diff, x86_iccw=diff
+ )
+        # in msvc, the following features can't be separated either
+ for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")):
+ for ff in f:
+ self.expect_baseline(ff,
+ x86_msvc=".*%s" % ' '.join(f)
+ )
+
+        # on ppc64le, VSX and VSX2 can't be separated
+ self.expect_baseline("vsx", ppc64le="vsx vsx2")
+        # on aarch64, the following features can't be separated
+ for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"):
+ self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd")
+
+ def test_args_options(self):
+ # max & native
+ for o in ("max", "native"):
+ if o == "native" and self.cc_name() == "msvc":
+ continue
+ self.expect(o,
+ trap_files=".*cpu_(sse|vsx|neon|vx).c",
+ x86="", ppc64="", armhf="", s390x=""
+ )
+ self.expect(o,
+ trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c",
+ x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16",
+ aarch64="", ppc64le="", s390x="vx"
+ )
+ self.expect(o,
+ trap_files=".*cpu_(popcnt|vsx3).c",
+ x86="sse .* sse41", ppc64="vsx vsx2",
+ armhf="neon neon_fp16 .* asimd .*",
+ s390x="vx vxe vxe2"
+ )
+ self.expect(o,
+ x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+            # in icc, xop and fma4 aren't supported
+ x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+ x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+ # in msvc, avx512_knl avx512_knm aren't supported
+ x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*",
+ armhf=".* asimd asimdhp asimddp .*",
+ ppc64="vsx vsx2 vsx3 vsx4.*",
+ s390x="vx vxe vxe2.*"
+ )
+ # min
+ self.expect("min",
+ x86="sse sse2", x64="sse sse2 sse3",
+ armhf="", aarch64="neon neon_fp16 .* asimd",
+ ppc64="", ppc64le="vsx vsx2", s390x=""
+ )
+ self.expect(
+ "min", trap_files=".*cpu_(sse2|vsx2).c",
+ x86="", ppc64le=""
+ )
+        # an exception must be triggered if the native flag isn't supported
+        # when option "native" is activated through the args
+ try:
+ self.expect("native",
+ trap_flags=".*(-march=native|-xHost|/QxHost).*",
+ x86=".*", ppc64=".*", armhf=".*", s390x=".*"
+ )
+ if self.march() != "unknown":
+ raise AssertionError(
+                    "expected an exception for %s" % self.march()
+ )
+ except DistutilsError:
+ if self.march() == "unknown":
+ raise AssertionError("excepted no exceptions")
+
+ def test_flags(self):
+ self.expect_flags(
+ "sse sse2 vsx vsx2 neon neon_fp16 vx vxe",
+ x86_gcc="-msse -msse2", x86_icc="-msse -msse2",
+ x86_iccw="/arch:SSE2",
+ x86_msvc="/arch:SSE2" if self.march() == "x86" else "",
+ ppc64_gcc= "-mcpu=power8",
+ ppc64_clang="-maltivec -mvsx -mpower8-vector",
+ armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee",
+ aarch64="",
+ s390x="-mzvector -march=arch12"
+ )
+        # test normalizing -march
+ self.expect_flags(
+ "asimd",
+ aarch64="",
+ armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd"
+ )
+ self.expect_flags(
+ "asimdhp",
+ aarch64_gcc=r"-march=armv8.2-a\+fp16",
+ armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16"
+ )
+ self.expect_flags(
+ "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod"
+ )
+ self.expect_flags(
+ # asimdfhm implies asimdhp
+ "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml"
+ )
+ self.expect_flags(
+ "asimddp asimdhp asimdfhm",
+ aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml"
+ )
+ self.expect_flags(
+ "vx vxe vxe2",
+ s390x=r"-mzvector -march=arch13"
+ )
+
+ def test_targets_exceptions(self):
+ for targets in (
+ "bla bla", "/*@targets",
+ "/*@targets */",
+ "/*@targets unknown */",
+ "/*@targets $unknown_policy avx2 */",
+ "/*@targets #unknown_group avx2 */",
+ "/*@targets $ */",
+ "/*@targets # vsx */",
+ "/*@targets #$ vsx */",
+ "/*@targets vsx avx2 ) */",
+ "/*@targets vsx avx2 (avx2 */",
+ "/*@targets vsx avx2 () */",
+ "/*@targets vsx avx2 ($autovec) */", # no features
+ "/*@targets vsx avx2 (xxx) */",
+ "/*@targets vsx avx2 (baseline) */",
+ ) :
+ try:
+ self.expect_targets(
+ targets,
+ x86="", armhf="", ppc64="", s390x=""
+ )
+ if self.march() != "unknown":
+ raise AssertionError(
+                        "expected an exception for %s" % self.march()
+ )
+ except DistutilsError:
+ if self.march() == "unknown":
+ raise AssertionError("excepted no exceptions")
+
+ def test_targets_syntax(self):
+ for targets in (
+ "/*@targets $keep_baseline sse vsx neon vx*/",
+ "/*@targets,$keep_baseline,sse,vsx,neon vx*/",
+ "/*@targets*$keep_baseline*sse*vsx*neon*vx*/",
+ """
+ /*
+ ** @targets
+ ** $keep_baseline, sse vsx,neon, vx
+ */
+ """,
+ """
+ /*
+ ************@targets****************
+ ** $keep_baseline, sse vsx, neon, vx
+ ************************************
+ */
+ """,
+ """
+ /*
+ /////////////@targets/////////////////
+ //$keep_baseline//sse//vsx//neon//vx
+ /////////////////////////////////////
+ */
+ """,
+ """
+ /*
+ @targets
+ $keep_baseline
+ SSE VSX NEON VX*/
+ """
+ ) :
+ self.expect_targets(targets,
+ x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown=""
+ )
+
+ def test_targets(self):
+ # test skipping baseline features
+ self.expect_targets(
+ """
+ /*@targets
+ sse sse2 sse41 avx avx2 avx512f
+ vsx vsx2 vsx3 vsx4
+ neon neon_fp16 asimdhp asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="avx vsx2 asimd vx vxe",
+ x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3",
+ s390x="vxe2"
+ )
+ # test skipping non-dispatch features
+ self.expect_targets(
+ """
+ /*@targets
+ sse41 avx avx2 avx512f
+ vsx2 vsx3 vsx4
+ asimd asimdhp asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2",
+ x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2"
+ )
+        # test skipping features that aren't supported
+ self.expect_targets(
+ """
+ /*@targets
+ sse2 sse41 avx2 avx512f
+ vsx2 vsx3 vsx4
+ neon asimdhp asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="",
+ trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c",
+ x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon",
+ s390x="vxe vx"
+ )
+        # test skipping features that imply each other
+ self.expect_targets(
+ """
+ /*@targets
+ sse sse2 avx fma3 avx2 avx512f avx512cd
+ vsx vsx2 vsx3
+ neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp
+ asimddp asimdfhm
+ */
+ """,
+ baseline="",
+ x86_gcc="avx512cd avx512f avx2 fma3 avx sse2",
+ x86_msvc="avx512cd avx2 avx sse2",
+ x86_icc="avx512cd avx2 avx sse2",
+ x86_iccw="avx512cd avx2 avx sse2",
+ ppc64="vsx3 vsx2 vsx",
+ ppc64le="vsx3 vsx2",
+ armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon",
+ aarch64="asimdfhm asimddp asimdhp asimd"
+ )
+
+ def test_targets_policies(self):
+ # 'keep_baseline', generate objects for baseline features
+ self.expect_targets(
+ """
+ /*@targets
+ $keep_baseline
+ sse2 sse42 avx2 avx512f
+ vsx2 vsx3
+ neon neon_vfpv4 asimd asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="sse41 avx2 vsx2 asimd vsx3 vxe",
+ x86="avx512f avx2 sse42 sse2",
+ ppc64="vsx3 vsx2",
+ armhf="asimddp asimd neon_vfpv4 neon",
+            # neon, neon_vfpv4, asimd imply each other
+ aarch64="asimddp asimd",
+ s390x="vxe2 vxe vx"
+ )
+ # 'keep_sort', leave the sort as-is
+ self.expect_targets(
+ """
+ /*@targets
+ $keep_baseline $keep_sort
+ avx512f sse42 avx2 sse2
+ vsx2 vsx3
+ asimd neon neon_vfpv4 asimddp
+ vxe vxe2
+ */
+ """,
+ x86="avx512f sse42 avx2 sse2",
+ ppc64="vsx2 vsx3",
+ armhf="asimd neon neon_vfpv4 asimddp",
+            # neon, neon_vfpv4, asimd imply each other
+ aarch64="asimd asimddp",
+ s390x="vxe vxe2"
+ )
+        # 'autovec', skip features that can't be
+        # vectorized by the compiler
+ self.expect_targets(
+ """
+ /*@targets
+ $keep_baseline $keep_sort $autovec
+ avx512f avx2 sse42 sse41 sse2
+ vsx3 vsx2
+ asimddp asimd neon_vfpv4 neon
+ */
+ """,
+ x86_gcc="avx512f avx2 sse42 sse41 sse2",
+ x86_icc="avx512f avx2 sse42 sse41 sse2",
+ x86_iccw="avx512f avx2 sse42 sse41 sse2",
+ x86_msvc="avx512f avx2 sse2"
+ if self.march() == 'x86' else "avx512f avx2",
+ ppc64="vsx3 vsx2",
+ armhf="asimddp asimd neon_vfpv4 neon",
+            # neon, neon_vfpv4, asimd imply each other
+ aarch64="asimddp asimd"
+ )
+ for policy in ("$maxopt", "$autovec"):
+ # 'maxopt' and autovec set the max acceptable optimization flags
+ self.expect_target_flags(
+ "/*@targets baseline %s */" % policy,
+ gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"},
+ iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"},
+ unknown={"baseline":".*"}
+ )
+
+ # 'werror', force compilers to treat warnings as errors
+ self.expect_target_flags(
+ "/*@targets baseline $werror */",
+ gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"},
+ iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"},
+ unknown={"baseline":".*"}
+ )
+
+ def test_targets_groups(self):
+ self.expect_targets(
+ """
+ /*@targets $keep_baseline baseline #test_group */
+ """,
+ groups=dict(
+ test_group=("""
+ $keep_baseline
+ asimddp sse2 vsx2 avx2 vsx3
+ avx512f asimdhp
+ """)
+ ),
+ x86="avx512f avx2 sse2 baseline",
+ ppc64="vsx3 vsx2 baseline",
+ armhf="asimddp asimdhp baseline"
+ )
+        # test skipping duplicates and sorting
+ self.expect_targets(
+ """
+ /*@targets
+ * sse42 avx avx512f
+ * #test_group_1
+ * vsx2
+ * #test_group_2
+ * asimddp asimdfhm
+ */
+ """,
+ groups=dict(
+ test_group_1=("""
+ VSX2 vsx3 asimd avx2 SSE41
+ """),
+ test_group_2=("""
+ vsx2 vsx3 asImd aVx2 sse41
+ """)
+ ),
+ x86="avx512f avx2 avx sse42 sse41",
+ ppc64="vsx3 vsx2",
+            # vsx2 is part of the default baseline of ppc64le, option ("min")
+ ppc64le="vsx3",
+ armhf="asimdfhm asimddp asimd",
+            # asimd is part of the default baseline of aarch64, option ("min")
+ aarch64="asimdfhm asimddp"
+ )
+
+ def test_targets_multi(self):
+ self.expect_targets(
+ """
+ /*@targets
+ (avx512_clx avx512_cnl) (asimdhp asimddp)
+ */
+ """,
+ x86=r"\(avx512_clx avx512_cnl\)",
+ armhf=r"\(asimdhp asimddp\)",
+ )
+ # test skipping implied features and auto-sort
+ self.expect_targets(
+ """
+ /*@targets
+ f16c (sse41 avx sse42) (sse3 avx2 avx512f)
+ vsx2 (vsx vsx3 vsx2)
+ (neon neon_vfpv4 asimd asimdhp asimddp)
+ */
+ """,
+ x86="avx512f f16c avx",
+ ppc64="vsx3 vsx2",
+ ppc64le="vsx3", # vsx2 part of baseline
+ armhf=r"\(asimdhp asimddp\)",
+ )
+ # test skipping implied features and keep sort
+ self.expect_targets(
+ """
+ /*@targets $keep_sort
+ (sse41 avx sse42) (sse3 avx2 avx512f)
+ (vsx vsx3 vsx2)
+ (asimddp neon neon_vfpv4 asimd asimdhp)
+ (vx vxe vxe2)
+ */
+ """,
+ x86="avx avx512f",
+ ppc64="vsx3",
+ armhf=r"\(asimdhp asimddp\)",
+ s390x="vxe2"
+ )
+        # test compiler variety and avoiding duplicates
+ self.expect_targets(
+ """
+ /*@targets $keep_sort
+ fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3
+ */
+ """,
+ x86_gcc=r"fma3 avx2 \(fma3 avx2\)",
+ x86_icc="avx2", x86_iccw="avx2",
+ x86_msvc="avx2"
+ )
+
+def new_test(arch, cc):
+    if is_standalone:
+        return textwrap.dedent("""\
+ class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase):
+ arch = '{arch}'
+ cc = '{cc}'
+ def __init__(self, methodName="runTest"):
+ unittest.TestCase.__init__(self, methodName)
+ self.setup_class()
+ """).format(
+ class_name=arch + '_' + cc, arch=arch, cc=cc
+ )
+ return textwrap.dedent("""\
+ class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt):
+ arch = '{arch}'
+ cc = '{cc}'
+ """).format(
+ class_name=arch + '_' + cc, arch=arch, cc=cc
+ )
+"""
+if 1 and is_standalone:
+ FakeCCompilerOpt.fake_info = "x86_icc"
+ cco = FakeCCompilerOpt(None, cpu_baseline="avx2")
+ print(' '.join(cco.cpu_baseline_names()))
+ print(cco.cpu_baseline_flags())
+ unittest.main()
+ sys.exit()
+"""
+for arch, compilers in arch_compilers.items():
+ for cc in compilers:
+ exec(new_test(arch, cc))
+
+if is_standalone:
+ unittest.main()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py
new file mode 100644
index 00000000..d9e8b2b0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py
@@ -0,0 +1,176 @@
+import unittest
+import sys
+from os import path
+
+is_standalone = __name__ == '__main__' and __package__ is None
+if is_standalone:
+ sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
+ from ccompiler_opt import CCompilerOpt
+else:
+ from numpy.distutils.ccompiler_opt import CCompilerOpt
+
+arch_compilers = dict(
+ x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ ppc64 = ("gcc", "clang"),
+ ppc64le = ("gcc", "clang"),
+ armhf = ("gcc", "clang"),
+ aarch64 = ("gcc", "clang"),
+    noarch = ("gcc",)
+)
+
+class FakeCCompilerOpt(CCompilerOpt):
+ fake_info = ("arch", "compiler", "extra_args")
+ def __init__(self, *args, **kwargs):
+ CCompilerOpt.__init__(self, None, **kwargs)
+ def dist_compile(self, sources, flags, **kwargs):
+ return sources
+ def dist_info(self):
+ return FakeCCompilerOpt.fake_info
+ @staticmethod
+ def dist_log(*args, stderr=False):
+ pass
+
+class _TestConfFeatures(FakeCCompilerOpt):
+ """A hook to check the sanity of configured features
+- before it called by the abstract class '_Feature'
+ """
+
+ def conf_features_partial(self):
+ conf_all = self.conf_features
+ for feature_name, feature in conf_all.items():
+ self.test_feature(
+ "attribute conf_features",
+ conf_all, feature_name, feature
+ )
+
+ conf_partial = FakeCCompilerOpt.conf_features_partial(self)
+ for feature_name, feature in conf_partial.items():
+ self.test_feature(
+ "conf_features_partial()",
+ conf_partial, feature_name, feature
+ )
+ return conf_partial
+
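+    # overriding conf_features_partial() makes it a choke point: the
+    # abstract base calls it during initialization, so every configured
+    # feature is validated before anything else consumes it
+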
+ def test_feature(self, log, search_in, feature_name, feature_dict):
+ error_msg = (
+ "during validate '{}' within feature '{}', "
+ "march '{}' and compiler '{}'\n>> "
+ ).format(log, feature_name, self.cc_march, self.cc_name)
+
+ if not feature_name.isupper():
+ raise AssertionError(error_msg + "feature name must be in uppercase")
+
+ for option, val in feature_dict.items():
+ self.test_option_types(error_msg, option, val)
+ self.test_duplicates(error_msg, option, val)
+
+ self.test_implies(error_msg, search_in, feature_name, feature_dict)
+ self.test_group(error_msg, search_in, feature_name, feature_dict)
+ self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
+
+ def test_option_types(self, error_msg, option, val):
+ for tp, available in (
+ ((str, list), (
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
+ )),
+ ((str,), ("disable",)),
+ ((int,), ("interest",)),
+ ((bool,), ("implies_detect",)),
+ ((bool, type(None)), ("autovec",)),
+ ) :
+ found_it = option in available
+ if not found_it:
+ continue
+ if not isinstance(val, tp):
+                error_tp = [t.__name__ for t in tp]
+ error_tp = ' or '.join(error_tp)
+ raise AssertionError(error_msg +
+ "expected '%s' type for option '%s' not '%s'" % (
+ error_tp, option, type(val).__name__
+ ))
+ break
+
+ if not found_it:
+ raise AssertionError(error_msg + "invalid option name '%s'" % option)
+
+ def test_duplicates(self, error_msg, option, val):
+ if option not in (
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
+ ) : return
+
+ if isinstance(val, str):
+ val = val.split()
+
+ if len(val) != len(set(val)):
+ raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
+
+ def test_implies(self, error_msg, search_in, feature_name, feature_dict):
+ if feature_dict.get("disabled") is not None:
+ return
+ implies = feature_dict.get("implies", "")
+ if not implies:
+ return
+ if isinstance(implies, str):
+ implies = implies.split()
+
+ if feature_name in implies:
+ raise AssertionError(error_msg + "feature implies itself")
+
+ for impl in implies:
+ impl_dict = search_in.get(impl)
+ if impl_dict is not None:
+ if "disable" in impl_dict:
+ raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
+ continue
+ raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)
+
+ def test_group(self, error_msg, search_in, feature_name, feature_dict):
+ if feature_dict.get("disabled") is not None:
+ return
+ group = feature_dict.get("group", "")
+ if not group:
+ return
+ if isinstance(group, str):
+ group = group.split()
+
+ for f in group:
+ impl_dict = search_in.get(f)
+ if not impl_dict or "disable" in impl_dict:
+ continue
+ raise AssertionError(error_msg +
+ "in option 'group', '%s' already exists as a feature name" % f
+ )
+
+ def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
+ if feature_dict.get("disabled") is not None:
+ return
+ extra_checks = feature_dict.get("extra_checks", "")
+ if not extra_checks:
+ return
+ if isinstance(extra_checks, str):
+ extra_checks = extra_checks.split()
+
+ for f in extra_checks:
+ impl_dict = search_in.get(f)
+ if not impl_dict or "disable" in impl_dict:
+ continue
+ raise AssertionError(error_msg +
+ "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
+ )
+
+class TestConfFeatures(unittest.TestCase):
+ def __init__(self, methodName="runTest"):
+ unittest.TestCase.__init__(self, methodName)
+ self._setup()
+
+ def _setup(self):
+ FakeCCompilerOpt.conf_nocache = True
+
+ def test_features(self):
+ for arch, compilers in arch_compilers.items():
+ for cc in compilers:
+ FakeCCompilerOpt.fake_info = (arch, cc, "")
+ _TestConfFeatures()
+
+if is_standalone:
+ unittest.main()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py
new file mode 100644
index 00000000..d1a20056
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py
@@ -0,0 +1,217 @@
+import os
+import pytest
+import sys
+from tempfile import TemporaryFile
+
+from numpy.distutils import exec_command
+from numpy.distutils.exec_command import get_pythonexe
+from numpy.testing import tempdir, assert_, assert_warns, IS_WASM
+
+
+# In python 3 stdout, stderr are text (unicode compliant) devices, so to
+# emulate them import StringIO from the io module.
+from io import StringIO
+
+class redirect_stdout:
+ """Context manager to redirect stdout for exec_command test."""
+ def __init__(self, stdout=None):
+ self._stdout = stdout or sys.stdout
+
+ def __enter__(self):
+ self.old_stdout = sys.stdout
+ sys.stdout = self._stdout
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._stdout.flush()
+ sys.stdout = self.old_stdout
+        # note: sys.stdout has been restored, so closing the replacement
+        # stream won't close the real stdout.
+ self._stdout.close()
+
+class redirect_stderr:
+ """Context manager to redirect stderr for exec_command test."""
+ def __init__(self, stderr=None):
+ self._stderr = stderr or sys.stderr
+
+ def __enter__(self):
+ self.old_stderr = sys.stderr
+ sys.stderr = self._stderr
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._stderr.flush()
+ sys.stderr = self.old_stderr
+        # note: sys.stderr has been restored, so closing the replacement
+        # stream won't close the real stderr.
+ self._stderr.close()
+
+class emulate_nonposix:
+ """Context manager to emulate os.name != 'posix' """
+ def __init__(self, osname='non-posix'):
+ self._new_name = osname
+
+ def __enter__(self):
+ self._old_name = os.name
+ os.name = self._new_name
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ os.name = self._old_name
+
+
+def test_exec_command_stdout():
+ # Regression test for gh-2999 and gh-2915.
+ # There are several packages (nose, scipy.weave.inline, Sage inline
+ # Fortran) that replace stdout, in which case it doesn't have a fileno
+ # method. This is tested here, with a do-nothing command that fails if the
+ # presence of fileno() is assumed in exec_command.
+
+ # The code has a special case for posix systems, so if we are on posix test
+ # both that the special case works and that the generic code works.
+
+ # Test posix version:
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(TemporaryFile()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+ if os.name == 'posix':
+ # Test general (non-posix) version:
+ with emulate_nonposix():
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(TemporaryFile()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+def test_exec_command_stderr():
+ # Test posix version:
+ with redirect_stdout(TemporaryFile(mode='w+')):
+ with redirect_stderr(StringIO()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+ if os.name == 'posix':
+ # Test general (non-posix) version:
+ with emulate_nonposix():
+ with redirect_stdout(TemporaryFile()):
+ with redirect_stderr(StringIO()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+class TestExecCommand:
+ def setup_method(self):
+ self.pyexe = get_pythonexe()
+
+ def check_nt(self, **kws):
+ s, o = exec_command.exec_command('cmd /C echo path=%path%')
+ assert_(s == 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
+ assert_(s == 0)
+ assert_(o == 'win32')
+
+ def check_posix(self, **kws):
+ s, o = exec_command.exec_command("echo Hello", **kws)
+ assert_(s == 0)
+ assert_(o == 'Hello')
+
+ s, o = exec_command.exec_command('echo $AAA', **kws)
+ assert_(s == 0)
+ assert_(o == '')
+
+ s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
+ assert_(s == 0)
+ assert_(o == 'Tere')
+
+ s, o = exec_command.exec_command('echo "$AAA"', **kws)
+ assert_(s == 0)
+ assert_(o == '')
+
+ if 'BBB' not in os.environ:
+ os.environ['BBB'] = 'Hi'
+ s, o = exec_command.exec_command('echo "$BBB"', **kws)
+ assert_(s == 0)
+ assert_(o == 'Hi')
+
+ s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
+ assert_(s == 0)
+ assert_(o == 'Hey')
+
+ s, o = exec_command.exec_command('echo "$BBB"', **kws)
+ assert_(s == 0)
+ assert_(o == 'Hi')
+
+ del os.environ['BBB']
+
+ s, o = exec_command.exec_command('echo "$BBB"', **kws)
+ assert_(s == 0)
+ assert_(o == '')
+
+
+ s, o = exec_command.exec_command('this_is_not_a_command', **kws)
+ assert_(s != 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command('echo path=$PATH', **kws)
+ assert_(s == 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
+ self.pyexe, **kws)
+ assert_(s == 0)
+ assert_(o == 'posix')
+
+    def check_basic(self, **kws):
+ s, o = exec_command.exec_command(
+ '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
+ assert_(s != 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys;sys.stderr.write(\'0\');'
+ 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
+ self.pyexe, **kws)
+ assert_(s == 0)
+ assert_(o == '012')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
+ assert_(s == 15)
+ assert_(o == '')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
+ assert_(s == 0)
+ assert_(o == 'Heipa')
+
+ def check_execute_in(self, **kws):
+ with tempdir() as tmpdir:
+ fn = "file"
+ tmpfile = os.path.join(tmpdir, fn)
+ with open(tmpfile, 'w') as f:
+ f.write('Hello')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
+ (self.pyexe, fn), **kws)
+ assert_(s != 0)
+ assert_(o != '')
+ s, o = exec_command.exec_command(
+ '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
+ 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
+ assert_(s == 0)
+ assert_(o == 'Hello')
+
+ def test_basic(self):
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(StringIO()):
+ with assert_warns(DeprecationWarning):
+ if os.name == "posix":
+ self.check_posix(use_tee=0)
+ self.check_posix(use_tee=1)
+ elif os.name == "nt":
+ self.check_nt(use_tee=0)
+ self.check_nt(use_tee=1)
+ self.check_execute_in(use_tee=0)
+ self.check_execute_in(use_tee=1)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py
new file mode 100644
index 00000000..dd97f1e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py
@@ -0,0 +1,43 @@
+from numpy.testing import assert_
+import numpy.distutils.fcompiler
+
+customizable_flags = [
+ ('f77', 'F77FLAGS'),
+ ('f90', 'F90FLAGS'),
+ ('free', 'FREEFLAGS'),
+ ('arch', 'FARCH'),
+ ('debug', 'FDEBUG'),
+ ('flags', 'FFLAGS'),
+ ('linker_so', 'LDFLAGS'),
+]
+
+
+def test_fcompiler_flags(monkeypatch):
+ monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
+ flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
+
+ for opt, envvar in customizable_flags:
+ new_flag = '-dummy-{}-flag'.format(opt)
+ prev_flags = getattr(flag_vars, opt)
+
+ monkeypatch.setenv(envvar, new_flag)
+ new_flags = getattr(flag_vars, opt)
+
+ monkeypatch.delenv(envvar)
+ assert_(new_flags == [new_flag])
+
+ monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
+
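+    # with NPY_DISTUTILS_APPEND_FLAGS=1 the env var should be appended to
+    # the compiler's default flags instead of replacing them; the second
+    # loop below checks both outcomes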
+ for opt, envvar in customizable_flags:
+ new_flag = '-dummy-{}-flag'.format(opt)
+ prev_flags = getattr(flag_vars, opt)
+ monkeypatch.setenv(envvar, new_flag)
+ new_flags = getattr(flag_vars, opt)
+
+ monkeypatch.delenv(envvar)
+ if prev_flags is None:
+ assert_(new_flags == [new_flag])
+ else:
+ assert_(new_flags == prev_flags + [new_flag])
+
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py
new file mode 100644
index 00000000..0817ae58
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -0,0 +1,55 @@
+from numpy.testing import assert_
+
+import numpy.distutils.fcompiler
+
+g77_version_strings = [
+ ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'),
+ ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'),
+ ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'),
+ ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'),
+ ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2'
+ ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'),
+]
+
+gfortran_version_strings = [
+ ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))',
+ '4.0.3'),
+ ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'),
+ ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'),
+ ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'),
+ ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'),
+ ('4.8.0', '4.8.0'),
+ ('4.0.3-7', '4.0.3'),
+ ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1",
+ '4.9.1'),
+ ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
+ "gfortran: warning: yet another warning\n4.9.1",
+ '4.9.1'),
+ ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
+]
+
+class TestG77Versions:
+ def test_g77_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
+ for vs, version in g77_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version, (vs, v))
+
+ def test_not_g77(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
+ for vs, _ in gfortran_version_strings:
+ v = fc.version_match(vs)
+ assert_(v is None, (vs, v))
+
+class TestGFortranVersions:
+ def test_gfortran_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
+ for vs, version in gfortran_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version, (vs, v))
+
+ def test_not_gfortran(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
+ for vs, _ in g77_version_strings:
+ v = fc.version_match(vs)
+ assert_(v is None, (vs, v))
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py
new file mode 100644
index 00000000..45c9cdac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py
@@ -0,0 +1,30 @@
+import numpy.distutils.fcompiler
+from numpy.testing import assert_
+
+
+intel_32bit_version_strings = [
+ ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"
+ "running on Intel(R) 32, Version 11.1", '11.1'),
+]
+
+intel_64bit_version_strings = [
+ ("Intel(R) Fortran IA-64 Compiler Professional for applications"
+ "running on IA-64, Version 11.0", '11.0'),
+ ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"
+ "running on Intel(R) 64, Version 11.1", '11.1')
+]
+
+class TestIntelFCompilerVersions:
+ def test_32bit_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
+ for vs, version in intel_32bit_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version)
+
+
+class TestIntelEM64TFCompilerVersions:
+ def test_64bit_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
+ for vs, version in intel_64bit_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py
new file mode 100644
index 00000000..2e04f526
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py
@@ -0,0 +1,22 @@
+from numpy.testing import assert_
+import numpy.distutils.fcompiler
+
+nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
+ '6.2(Chiyoda) Build 6200', '6.2'),
+ ('nagfor', 'NAG Fortran Compiler Release '
+ '6.1(Tozai) Build 6136', '6.1'),
+ ('nagfor', 'NAG Fortran Compiler Release '
+ '6.0(Hibiya) Build 1021', '6.0'),
+ ('nagfor', 'NAG Fortran Compiler Release '
+ '5.3.2(971)', '5.3.2'),
+ ('nag', 'NAGWare Fortran 95 compiler Release 5.1'
+ '(347,355-367,375,380-383,389,394,399,401-402,407,'
+ '431,435,437,446,459-460,463,472,494,496,503,508,'
+ '511,517,529,555,557,565)', '5.1')]
+
+class TestNagFCompilerVersions:
+ def test_version_match(self):
+ for comp, vs, version in nag_version_strings:
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
+ v = fc.version_match(vs)
+ assert_(v == version)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py
new file mode 100644
index 00000000..58817549
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py
@@ -0,0 +1,44 @@
+
+from numpy.distutils.from_template import process_str
+from numpy.testing import assert_equal
+
+
+pyf_src = """
+python module foo
+ <_rd=real,double precision>
+ interface
+ subroutine <s,d>foosub(tol)
+ <_rd>, intent(in,out) :: tol
+ end subroutine <s,d>foosub
+ end interface
+end python module foo
+"""
+
+expected_pyf = """
+python module foo
+ interface
+ subroutine sfoosub(tol)
+ real, intent(in,out) :: tol
+ end subroutine sfoosub
+ subroutine dfoosub(tol)
+ double precision, intent(in,out) :: tol
+ end subroutine dfoosub
+ end interface
+end python module foo
+"""
+
+
+def normalize_whitespace(s):
+ """
+ Remove leading and trailing whitespace, and convert internal
+ stretches of whitespace to a single space.
+ """
+ return ' '.join(s.split())
+
+
+def test_from_template():
+ """Regression test for gh-10712."""
+ pyf = process_str(pyf_src)
+ normalized_pyf = normalize_whitespace(pyf)
+ normalized_expected_pyf = normalize_whitespace(expected_pyf)
+ assert_equal(normalized_pyf, normalized_expected_pyf)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py
new file mode 100644
index 00000000..72fddf37
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py
@@ -0,0 +1,34 @@
+import io
+import re
+from contextlib import redirect_stdout
+
+import pytest
+
+from numpy.distutils import log
+
+
+def setup_module():
+ f = io.StringIO() # changing verbosity also logs here, capture that
+ with redirect_stdout(f):
+ log.set_verbosity(2, force=True) # i.e. DEBUG
+
+
+def teardown_module():
+ log.set_verbosity(0, force=True) # the default
+
+
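+# matches ANSI escape sequences, since log output may be colorized and the
+# exact-prefix assertion below needs plain text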
+r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+
+
+@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"])
+def test_log_prefix(func_name):
+ func = getattr(log, func_name)
+ msg = f"{func_name} message"
+ f = io.StringIO()
+ with redirect_stdout(f):
+ func(msg)
+ out = f.getvalue()
+ assert out # sanity check
+ clean_out = r_ansi.sub("", out)
+    line = clean_out.splitlines()[0]
+ assert line == f"{func_name.upper()}: {msg}"
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py
new file mode 100644
index 00000000..ebedacb3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py
@@ -0,0 +1,42 @@
+import shutil
+import subprocess
+import sys
+import pytest
+
+from numpy.distutils import mingw32ccompiler
+
+
+@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
+def test_build_import():
+ '''Test the mingw32ccompiler.build_import_library, which builds a
+ `python.a` from the MSVC `python.lib`
+ '''
+
+ # make sure `nm.exe` exists and supports the current python version. This
+ # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
+ try:
+ out = subprocess.check_output(['nm.exe', '--help'])
+ except FileNotFoundError:
+ pytest.skip("'nm.exe' not on path, is mingw installed?")
+ supported = out[out.find(b'supported targets:'):]
+ if sys.maxsize < 2**32:
+ if b'pe-i386' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 32-bit "
+ "dlls when using 32-bit python. Supported "
+ "formats: '%s'" % supported)
+ elif b'pe-x86-64' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 64-bit "
+ "dlls when using 64-bit python. Supported "
+ "formats: '%s'" % supported)
+ # Hide the import library to force a build
+ has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
+ if has_import_lib:
+ shutil.move(fullpath, fullpath + '.bak')
+
+ try:
+ # Whew, now we can actually test the function
+ mingw32ccompiler.build_import_library()
+
+ finally:
+ if has_import_lib:
+ shutil.move(fullpath + '.bak', fullpath)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py
new file mode 100644
index 00000000..605c8048
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py
@@ -0,0 +1,82 @@
+from os.path import join, sep, dirname
+
+from numpy.distutils.misc_util import (
+ appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
+ )
+from numpy.testing import (
+ assert_, assert_equal
+ )
+
+ajoin = lambda *paths: join(*((sep,)+paths))
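+# e.g. ajoin('prefix', 'name') == join(sep, 'prefix', 'name'), i.e. an
+# absolute '/prefix/name' on posix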
+
+class TestAppendpath:
+
+ def test_1(self):
+ assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
+ assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
+ assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
+ assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
+
+ def test_2(self):
+ assert_equal(appendpath('prefix/sub', 'name'),
+ join('prefix', 'sub', 'name'))
+ assert_equal(appendpath('prefix/sub', 'sup/name'),
+ join('prefix', 'sub', 'sup', 'name'))
+ assert_equal(appendpath('/prefix/sub', '/prefix/name'),
+ ajoin('prefix', 'sub', 'name'))
+
+ def test_3(self):
+ assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
+ ajoin('prefix', 'sub', 'sup', 'name'))
+ assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
+ ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
+ assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
+ ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
+
+class TestMinrelpath:
+
+ def test_1(self):
+ n = lambda path: path.replace('/', sep)
+ assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
+ assert_equal(minrelpath('..'), '..')
+ assert_equal(minrelpath(n('aa/..')), '')
+ assert_equal(minrelpath(n('aa/../bb')), 'bb')
+ assert_equal(minrelpath(n('aa/bb/..')), 'aa')
+ assert_equal(minrelpath(n('aa/bb/../..')), '')
+ assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
+ assert_equal(minrelpath(n('.././..')), n('../..'))
+ assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
+
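+# For orientation (an illustrative helper, not part of numpy): minrelpath is
+# close in spirit to os.path.normpath, except that a path which cancels out
+# completely collapses to '' rather than '.', as the assertions above show.
+def _normpath_like(path):
+    from os.path import normpath  # sep-aware normalization from the stdlib
+    out = normpath(path)
+    return '' if out == '.' else out
+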
+class TestGpaths:
+
+ def test_gpaths(self):
+ local_path = minrelpath(join(dirname(__file__), '..'))
+ ls = gpaths('command/*.py', local_path)
+ assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
+ f = gpaths('system_info.py', local_path)
+ assert_(join(local_path, 'system_info.py') == f[0], repr(f))
+
+class TestSharedExtension:
+
+ def test_get_shared_lib_extension(self):
+ import sys
+ ext = get_shared_lib_extension(is_python_ext=False)
+ if sys.platform.startswith('linux'):
+ assert_equal(ext, '.so')
+ elif sys.platform.startswith('gnukfreebsd'):
+ assert_equal(ext, '.so')
+ elif sys.platform.startswith('darwin'):
+ assert_equal(ext, '.dylib')
+ elif sys.platform.startswith('win'):
+ assert_equal(ext, '.dll')
+ # just check for no crash
+ assert_(get_shared_lib_extension(is_python_ext=True))
+
+
+def test_installed_npymath_ini():
+ # Regression test for gh-7707. If npymath.ini wasn't installed, then this
+ # will give an error.
+ info = get_info('npymath')
+
+ assert isinstance(info, dict)
+ assert "define_macros" in info
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py
new file mode 100644
index 00000000..b287ebe2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py
@@ -0,0 +1,84 @@
+import os
+
+from numpy.distutils.npy_pkg_config import read_config, parse_flags
+from numpy.testing import temppath, assert_
+
+simple = """\
+[meta]
+Name = foo
+Description = foo lib
+Version = 0.1
+
+[default]
+cflags = -I/usr/include
+libs = -L/usr/lib
+"""
+simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
+ 'version': '0.1', 'name': 'foo'}
+
+simple_variable = """\
+[meta]
+Name = foo
+Description = foo lib
+Version = 0.1
+
+[variables]
+prefix = /foo/bar
+libdir = ${prefix}/lib
+includedir = ${prefix}/include
+
+[default]
+cflags = -I${includedir}
+libs = -L${libdir}
+"""
+simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
+ 'version': '0.1', 'name': 'foo'}
+
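+# The ${var} references above follow the pkg-config convention; a toy
+# expansion over already-resolved variables (not the real implementation,
+# which also resolves variables that reference each other) might be:
+#
+#     import re
+#     def _interp(value, variables):
+#         return re.sub(r'\$\{(\w+)\}', lambda m: variables[m.group(1)], value)
+#
+#     _interp('-I${includedir}', {'includedir': '/foo/bar/include'})
+#     # -> '-I/foo/bar/include'
+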
+class TestLibraryInfo:
+ def test_simple(self):
+ with temppath('foo.ini') as path:
+ with open(path, 'w') as f:
+ f.write(simple)
+ pkg = os.path.splitext(path)[0]
+ out = read_config(pkg)
+
+ assert_(out.cflags() == simple_d['cflags'])
+ assert_(out.libs() == simple_d['libflags'])
+ assert_(out.name == simple_d['name'])
+ assert_(out.version == simple_d['version'])
+
+ def test_simple_variable(self):
+ with temppath('foo.ini') as path:
+ with open(path, 'w') as f:
+ f.write(simple_variable)
+ pkg = os.path.splitext(path)[0]
+ out = read_config(pkg)
+
+ assert_(out.cflags() == simple_variable_d['cflags'])
+ assert_(out.libs() == simple_variable_d['libflags'])
+ assert_(out.name == simple_variable_d['name'])
+ assert_(out.version == simple_variable_d['version'])
+ out.vars['prefix'] = '/Users/david'
+ assert_(out.cflags() == '-I/Users/david/include')
+
+class TestParseFlags:
+ def test_simple_cflags(self):
+ d = parse_flags("-I/usr/include")
+ assert_(d['include_dirs'] == ['/usr/include'])
+
+ d = parse_flags("-I/usr/include -DFOO")
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
+
+ d = parse_flags("-I /usr/include -DFOO")
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
+
+ def test_simple_lflags(self):
+ d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
+
+ d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
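+
+
+# Rough sketch (a simplified stand-in, not the real parse_flags, which
+# handles more flag kinds): -I/-L/-l/-D tokens are routed into include_dirs,
+# library_dirs, libraries and macros, and a flag may be separated from its
+# value by a space, as the assertions above exercise.
+def _parse_flags_sketch(flags):
+    routes = {'-I': 'include_dirs', '-L': 'library_dirs',
+              '-l': 'libraries', '-D': 'macros'}
+    d = {name: [] for name in routes.values()}
+    tokens = flags.split()
+    while tokens:
+        tok = tokens.pop(0)
+        if tok[:2] in routes:
+            # accept both '-Ivalue' and '-I value'
+            value = tok[2:] or (tokens.pop(0) if tokens else '')
+            d[routes[tok[:2]]].append(value)
+    return d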
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py
new file mode 100644
index 00000000..696d38dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py
@@ -0,0 +1,79 @@
+import pytest
+import subprocess
+import json
+import sys
+
+from numpy.distutils import _shell_utils
+from numpy.testing import IS_WASM
+
+argv_cases = [
+ [r'exe'],
+ [r'path/exe'],
+ [r'path\exe'],
+ [r'\\server\path\exe'],
+ [r'path to/exe'],
+ [r'path to\exe'],
+
+ [r'exe', '--flag'],
+ [r'path/exe', '--flag'],
+ [r'path\exe', '--flag'],
+ [r'path to/exe', '--flag'],
+ [r'path to\exe', '--flag'],
+
+ # flags containing literal quotes in their name
+ [r'path to/exe', '--flag-"quoted"'],
+ [r'path to\exe', '--flag-"quoted"'],
+ [r'path to/exe', '"--flag-quoted"'],
+ [r'path to\exe', '"--flag-quoted"'],
+]
+
+
+@pytest.fixture(params=[
+ _shell_utils.WindowsParser,
+ _shell_utils.PosixParser
+])
+def Parser(request):
+ return request.param
+
+
+@pytest.fixture
+def runner(Parser):
+ if Parser != _shell_utils.NativeParser:
+ pytest.skip('Unable to run with non-native parser')
+
+ if Parser == _shell_utils.WindowsParser:
+ return lambda cmd: subprocess.check_output(cmd)
+ elif Parser == _shell_utils.PosixParser:
+ # posix has no non-shell string parsing
+ return lambda cmd: subprocess.check_output(cmd, shell=True)
+ else:
+ raise NotImplementedError
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+@pytest.mark.parametrize('argv', argv_cases)
+def test_join_matches_subprocess(Parser, runner, argv):
+ """
+ Test that join produces strings understood by subprocess
+ """
+ # invoke python to return its arguments as json
+ cmd = [
+ sys.executable, '-c',
+ 'import json, sys; print(json.dumps(sys.argv[1:]))'
+ ]
+ joined = Parser.join(cmd + argv)
+ json_out = runner(joined).decode()
+ assert json.loads(json_out) == argv
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+@pytest.mark.parametrize('argv', argv_cases)
+def test_roundtrip(Parser, argv):
+ """
+ Test that split is the inverse operation of join
+ """
+ try:
+ joined = Parser.join(argv)
+ assert argv == Parser.split(joined)
+ except NotImplementedError:
+ pytest.skip("Not implemented")
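+
+
+# Aside (illustrative, not upstream code): on posix the round-trip property
+# can be sanity-checked against the stdlib, since PosixParser behaves much
+# like shlex quoting and splitting (shlex.join exists on Python >= 3.8).
+def _posix_roundtrip_ok(argv):
+    import shlex
+    return shlex.split(shlex.join(argv)) == argv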
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py
new file mode 100644
index 00000000..eb7235e0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py
@@ -0,0 +1,323 @@
+import os
+import shutil
+import pytest
+from tempfile import mkstemp, mkdtemp
+from subprocess import Popen, PIPE
+from distutils.errors import DistutilsError
+
+from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.distutils import ccompiler, customized_ccompiler
+from numpy.distutils.system_info import system_info, ConfigParser, mkl_info
+from numpy.distutils.system_info import AliasedOptionError
+from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
+from numpy.distutils import _shell_utils
+
+
+def get_class(name, notfound_action=1):
+ """
+ notfound_action:
+ 0 - do nothing
+ 1 - display warning message
+ 2 - raise error
+ """
+ cl = {'temp1': Temp1Info,
+ 'temp2': Temp2Info,
+ 'duplicate_options': DuplicateOptionInfo,
+ }.get(name.lower(), _system_info)
+ return cl()
+
+simple_site = """
+[ALL]
+library_dirs = {dir1:s}{pathsep:s}{dir2:s}
+libraries = {lib1:s},{lib2:s}
+extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
+runtime_library_dirs = {dir1:s}
+
+[temp1]
+library_dirs = {dir1:s}
+libraries = {lib1:s}
+runtime_library_dirs = {dir1:s}
+
+[temp2]
+library_dirs = {dir2:s}
+libraries = {lib2:s}
+extra_link_args = -Wl,-rpath={lib2_escaped:s}
+rpath = {dir2:s}
+
+[duplicate_options]
+mylib_libs = {lib1:s}
+libraries = {lib2:s}
+"""
+site_cfg = simple_site
+
+fakelib_c_text = """
+/* This file is generated by numpy/distutils/tests/test_system_info.py */
+#include<stdio.h>
+void foo(void) {
+ printf("Hello foo");
+}
+void bar(void) {
+ printf("Hello bar");
+}
+"""
+
+def have_compiler():
+ """ Return True if there appears to be an executable compiler
+ """
+ compiler = customized_ccompiler()
+ try:
+ cmd = compiler.compiler # Unix compilers
+ except AttributeError:
+ try:
+ if not compiler.initialized:
+ compiler.initialize() # MSVC is different
+ except (DistutilsError, ValueError):
+ return False
+ cmd = [compiler.cc]
+ try:
+ p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ p.stdout.close()
+ p.stderr.close()
+ p.wait()
+ except OSError:
+ return False
+ return True
+
+
+HAVE_COMPILER = have_compiler()
+
+
+class _system_info(system_info):
+
+ def __init__(self,
+ default_lib_dirs=default_lib_dirs,
+ default_include_dirs=default_include_dirs,
+ verbosity=1,
+ ):
+ self.__class__.info = {}
+ self.local_prefixes = []
+ defaults = {'library_dirs': '',
+ 'include_dirs': '',
+ 'runtime_library_dirs': '',
+ 'rpath': '',
+ 'src_dirs': '',
+ 'search_static_first': "0",
+ 'extra_compile_args': '',
+ 'extra_link_args': ''}
+ self.cp = ConfigParser(defaults)
+ # We have to parse the config files afterwards
+ # to have a consistent temporary filepath
+
+ def _check_libs(self, lib_dirs, libs, opt_libs, exts):
+ """Override _check_libs to return with all dirs """
+ info = {'libraries': libs, 'library_dirs': lib_dirs}
+ return info
+
+
+class Temp1Info(_system_info):
+ """For testing purposes"""
+ section = 'temp1'
+
+
+class Temp2Info(_system_info):
+ """For testing purposes"""
+ section = 'temp2'
+
+class DuplicateOptionInfo(_system_info):
+ """For testing purposes"""
+ section = 'duplicate_options'
+
+
+class TestSystemInfoReading:
+
+ def setup_method(self):
+ """ Create the libraries """
+ # Create 2 sources and 2 libraries
+ self._dir1 = mkdtemp()
+ self._src1 = os.path.join(self._dir1, 'foo.c')
+ self._lib1 = os.path.join(self._dir1, 'libfoo.so')
+ self._dir2 = mkdtemp()
+ self._src2 = os.path.join(self._dir2, 'bar.c')
+ self._lib2 = os.path.join(self._dir2, 'libbar.so')
+ # Update local site.cfg
+ global simple_site, site_cfg
+ site_cfg = simple_site.format(**{
+ 'dir1': self._dir1,
+ 'lib1': self._lib1,
+ 'dir2': self._dir2,
+ 'lib2': self._lib2,
+ 'pathsep': os.pathsep,
+ 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
+ })
+ # Write site.cfg
+ fd, self._sitecfg = mkstemp()
+ os.close(fd)
+ with open(self._sitecfg, 'w') as fd:
+ fd.write(site_cfg)
+ # Write the sources
+ with open(self._src1, 'w') as fd:
+ fd.write(fakelib_c_text)
+ with open(self._src2, 'w') as fd:
+ fd.write(fakelib_c_text)
+        # Create all of the test class instances
+
+ def site_and_parse(c, site_cfg):
+ c.files = [site_cfg]
+ c.parse_config_files()
+ return c
+ self.c_default = site_and_parse(get_class('default'), self._sitecfg)
+ self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
+ self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
+ self.c_dup_options = site_and_parse(get_class('duplicate_options'),
+ self._sitecfg)
+
+ def teardown_method(self):
+ # Do each removal separately
+ try:
+ shutil.rmtree(self._dir1)
+ except Exception:
+ pass
+ try:
+ shutil.rmtree(self._dir2)
+ except Exception:
+ pass
+ try:
+ os.remove(self._sitecfg)
+ except Exception:
+ pass
+
+ def test_all(self):
+ # Read in all information in the ALL block
+ tsi = self.c_default
+ assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
+ assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
+ assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
+ extra = tsi.calc_extra_info()
+ assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os'])
+
+ def test_temp1(self):
+ # Read in all information in the temp1 block
+ tsi = self.c_temp1
+ assert_equal(tsi.get_lib_dirs(), [self._dir1])
+ assert_equal(tsi.get_libraries(), [self._lib1])
+ assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
+
+ def test_temp2(self):
+ # Read in all information in the temp2 block
+ tsi = self.c_temp2
+ assert_equal(tsi.get_lib_dirs(), [self._dir2])
+ assert_equal(tsi.get_libraries(), [self._lib2])
+ # Now from rpath and not runtime_library_dirs
+ assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
+ extra = tsi.calc_extra_info()
+ assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
+
+ def test_duplicate_options(self):
+ # Ensure that duplicates are raising an AliasedOptionError
+ tsi = self.c_dup_options
+ assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries")
+ assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
+ assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])
+
+ @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
+ def test_compile1(self):
+ # Compile source and link the first source
+ c = customized_ccompiler()
+ previousDir = os.getcwd()
+ try:
+ # Change directory to not screw up directories
+ os.chdir(self._dir1)
+ c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
+ # Ensure that the object exists
+ assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
+ os.path.isfile(self._src1.replace('.c', '.obj')))
+ finally:
+ os.chdir(previousDir)
+
+ @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
+ @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
+ reason="Fails with MSVC compiler ")
+ def test_compile2(self):
+ # Compile source and link the second source
+ tsi = self.c_temp2
+ c = customized_ccompiler()
+ extra_link_args = tsi.calc_extra_info()['extra_link_args']
+ previousDir = os.getcwd()
+ try:
+ # Change directory to not screw up directories
+ os.chdir(self._dir2)
+ c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
+ extra_postargs=extra_link_args)
+ # Ensure that the object exists
+ assert_(os.path.isfile(self._src2.replace('.c', '.o')))
+ finally:
+ os.chdir(previousDir)
+
+ HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", [])
+
+ @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if "
+ "numpy is built with MKL support"))
+ def test_overrides(self):
+ previousDir = os.getcwd()
+ cfg = os.path.join(self._dir1, 'site.cfg')
+ shutil.copy(self._sitecfg, cfg)
+ try:
+ os.chdir(self._dir1)
+ # Check that the '[ALL]' section does not override
+ # missing values from other sections
+ info = mkl_info()
+ lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep)
+ assert info.get_lib_dirs() != lib_dirs
+
+ # But if we copy the values to a '[mkl]' section the value
+ # is correct
+ with open(cfg, 'r') as fid:
+ mkl = fid.read().replace('[ALL]', '[mkl]', 1)
+ with open(cfg, 'w') as fid:
+ fid.write(mkl)
+ info = mkl_info()
+ assert info.get_lib_dirs() == lib_dirs
+
+ # Also, the values will be taken from a section named '[DEFAULT]'
+ with open(cfg, 'r') as fid:
+ dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1)
+ with open(cfg, 'w') as fid:
+ fid.write(dflt)
+ info = mkl_info()
+ assert info.get_lib_dirs() == lib_dirs
+ finally:
+ os.chdir(previousDir)
+
+
+def test_distutils_parse_env_order(monkeypatch):
+ from numpy.distutils.system_info import _parse_env_order
+ env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER'
+
+ base_order = list('abcdef')
+
+ monkeypatch.setenv(env, 'b,i,e,f')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 3
+ assert order == list('bef')
+ assert len(unknown) == 1
+
+ # For when LAPACK/BLAS optimization is disabled
+ monkeypatch.setenv(env, '')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 0
+ assert len(unknown) == 0
+
+ for prefix in '^!':
+ monkeypatch.setenv(env, f'{prefix}b,i,e')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 4
+ assert order == list('acdf')
+ assert len(unknown) == 1
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, 'b,^e,i')
+ _parse_env_order(base_order, env)
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, '!b,^e,i')
+ _parse_env_order(base_order, env)
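+
+
+# Sketch of the ordering rule exercised above (a simplified stand-in for the
+# real _parse_env_order, which reads an environment variable by name and
+# compares case-insensitively; this toy takes the value directly): a leading
+# '^' or '!' on the first entry negates the selection, a prefix on any later
+# entry is an error, and entries absent from base_order are reported back.
+def _parse_env_order_sketch(base_order, value):
+    requested = [v for v in value.split(',') if v]
+    negated = bool(requested) and requested[0].startswith(('^', '!'))
+    if negated:
+        requested[0] = requested[0][1:]
+    if any(v.startswith(('^', '!')) for v in requested):
+        raise ValueError("'^'/'!' may only prefix the first entry")
+    unknown = [v for v in requested if v not in base_order]
+    if negated:
+        order = [v for v in base_order if v not in requested]
+    else:
+        order = [v for v in base_order if v in requested]
+    return order, unknown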
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py
new file mode 100644
index 00000000..4884960f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py
@@ -0,0 +1,141 @@
+"""
+unixccompiler - can handle very long argument lists for ar.
+
+"""
+import os
+import sys
+import subprocess
+import shlex
+
+from distutils.errors import CompileError, DistutilsExecError, LibError
+from distutils.unixccompiler import UnixCCompiler
+from numpy.distutils.ccompiler import replace_method
+from numpy.distutils.misc_util import _commandline_dep_string
+from numpy.distutils import log
+
+# Note that UnixCCompiler._compile appeared in Python 2.3
+def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ """Compile a single source files with a Unix-style compiler."""
+ # HP ad-hoc fix, see ticket 1383
+ ccomp = self.compiler_so
+ if ccomp[0] == 'aCC':
+ # remove flags that will trigger ANSI-C mode for aCC
+ if '-Ae' in ccomp:
+ ccomp.remove('-Ae')
+ if '-Aa' in ccomp:
+ ccomp.remove('-Aa')
+ # add flags for (almost) sane C++ handling
+ ccomp += ['-AA']
+ self.compiler_so = ccomp
+ # ensure OPT environment variable is read
+ if 'OPT' in os.environ:
+ # XXX who uses this?
+ from sysconfig import get_config_vars
+ opt = shlex.join(shlex.split(os.environ['OPT']))
+ gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0]))
+ ccomp_s = shlex.join(self.compiler_so)
+ if opt not in ccomp_s:
+ ccomp_s = ccomp_s.replace(gcv_opt, opt)
+ self.compiler_so = shlex.split(ccomp_s)
+ llink_s = shlex.join(self.linker_so)
+ if opt not in llink_s:
+ self.linker_so = self.linker_so + shlex.split(opt)
+
+ display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src)
+
+    # gcc-style automatic dependencies: as a side effect of compilation
+    # (-MMD), write a makefile fragment (-MF) that lists every header the
+    # C file depends on.
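+    # (such a `.d` file holds a make rule like `foo.o: foo.c foo.h ...`)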
+ if getattr(self, '_auto_depends', False):
+ deps = ['-MMD', '-MF', obj + '.d']
+ else:
+ deps = []
+
+ try:
+ self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps +
+ extra_postargs, display = display)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise CompileError(msg) from None
+
+ # add commandline flags to dependency file
+ if deps:
+ # After running the compiler, the file created will be in EBCDIC
+ # but will not be tagged as such. This tags it so the file does not
+ # have multiple different encodings being written to it
+ if sys.platform == 'zos':
+ subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d'])
+ with open(obj + '.d', 'a') as f:
+ f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts))
+
+replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
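+
+# What replace_method does, conceptually (a simplified sketch, not the exact
+# body of the helper imported from numpy.distutils.ccompiler): swap the
+# method on the distutils class for the function above, wrapped so that it
+# binds like an ordinary method.
+#
+#     def replace_method(klass, method_name, func):
+#         setattr(klass, method_name,
+#                 lambda self, *args, **kw: func(self, *args, **kw))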
+
+
+def UnixCCompiler_create_static_lib(self, objects, output_libname,
+ output_dir=None, debug=0, target_lang=None):
+ """
+ Build a static library in a separate sub-process.
+
+ Parameters
+ ----------
+ objects : list or tuple of str
+ List of paths to object files used to build the static library.
+ output_libname : str
+        The library name, given either as an absolute path or as a path
+        relative to `output_dir` (when that argument is used).
+ output_dir : str, optional
+ The path to the output directory. Default is None, in which case
+        the ``output_dir`` attribute of the UnixCCompiler instance is used.
+ debug : bool, optional
+ This parameter is not used.
+ target_lang : str, optional
+ This parameter is not used.
+
+ Returns
+ -------
+ None
+
+ """
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+
+ output_filename = \
+ self.library_filename(output_libname, output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ try:
+ # previous .a may be screwed up; best to remove it first
+ # and recreate.
+ # Also, ar on OS X doesn't handle updating universal archives
+ os.unlink(output_filename)
+ except OSError:
+ pass
+ self.mkpath(os.path.dirname(output_filename))
+ tmp_objects = objects + self.objects
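+        # Feed `ar` at most 50 objects per invocation -- this is the "very
+        # long argument lists" workaround from the module docstring, keeping
+        # each spawned command line within OS limits.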
+ while tmp_objects:
+ objects = tmp_objects[:50]
+ tmp_objects = tmp_objects[50:]
+ display = '%s: adding %d object files to %s' % (
+ os.path.basename(self.archiver[0]),
+ len(objects), output_filename)
+ self.spawn(self.archiver + [output_filename] + objects,
+ display = display)
+
+        # Not many Unices require ranlib anymore -- SunOS 4.x is, I
+        # think, the only major Unix that still does. Maybe we need
+        # some platform intelligence here to skip ranlib if it's not
+        # needed -- or maybe Python's configure script took care of
+        # it for us, hence the check for the leading colon.
+ if self.ranlib:
+            display = '%s: %s' % (os.path.basename(self.ranlib[0]),
+ output_filename)
+ try:
+ self.spawn(self.ranlib + [output_filename],
+ display = display)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise LibError(msg) from None
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+ return
+
+replace_method(UnixCCompiler, 'create_static_lib',
+ UnixCCompiler_create_static_lib)
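+
+
+# Hypothetical usage of the patched method (illustrative only; the object
+# and library names here are made up):
+#
+#     from numpy.distutils.ccompiler import new_compiler
+#     cc = new_compiler()  # on posix, a compiler carrying the overrides above
+#     cc.create_static_lib(['a.o', 'b.o'], 'foo', output_dir='build')
+#     # -> build/libfoo.a, archived at most 50 objects per `ar` call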