done
This commit is contained in:
170
lib/python3.11/site-packages/numpy/__config__.py
Normal file
170
lib/python3.11/site-packages/numpy/__config__.py
Normal file
@ -0,0 +1,170 @@
|
||||
# This file is generated by numpy's build process
|
||||
# It contains system_info results at the time of building this package.
|
||||
from enum import Enum
|
||||
from numpy._core._multiarray_umath import (
|
||||
__cpu_features__,
|
||||
__cpu_baseline__,
|
||||
__cpu_dispatch__,
|
||||
)
|
||||
|
||||
__all__ = ["show_config"]
|
||||
_built_with_meson = True
|
||||
|
||||
|
||||
class DisplayModes(Enum):
    """Verbosity modes accepted by `show` / `show_config`."""
    # Print the configuration to the console (YAML if pyyaml is available,
    # JSON otherwise).
    stdout = "stdout"
    # Return the configuration as a dictionary instead of printing it.
    dicts = "dicts"
|
||||
|
||||
|
||||
def _cleanup(d):
|
||||
"""
|
||||
Removes empty values in a `dict` recursively
|
||||
This ensures we remove values that Meson could not provide to CONFIG
|
||||
"""
|
||||
if isinstance(d, dict):
|
||||
return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)}
|
||||
else:
|
||||
return d
|
||||
|
||||
|
||||
CONFIG = _cleanup(
|
||||
{
|
||||
"Compilers": {
|
||||
"c": {
|
||||
"name": "clang",
|
||||
"linker": r"ld64",
|
||||
"version": "15.0.0",
|
||||
"commands": r"cc",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"cython": {
|
||||
"name": "cython",
|
||||
"linker": r"cython",
|
||||
"version": "3.1.2",
|
||||
"commands": r"cython",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"c++": {
|
||||
"name": "clang",
|
||||
"linker": r"ld64",
|
||||
"version": "15.0.0",
|
||||
"commands": r"c++",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
},
|
||||
"Machine Information": {
|
||||
"host": {
|
||||
"cpu": "aarch64",
|
||||
"family": "aarch64",
|
||||
"endian": "little",
|
||||
"system": "darwin",
|
||||
},
|
||||
"build": {
|
||||
"cpu": "aarch64",
|
||||
"family": "aarch64",
|
||||
"endian": "little",
|
||||
"system": "darwin",
|
||||
},
|
||||
"cross-compiled": bool("False".lower().replace("false", "")),
|
||||
},
|
||||
"Build Dependencies": {
|
||||
"blas": {
|
||||
"name": "accelerate",
|
||||
"found": bool("True".lower().replace("false", "")),
|
||||
"version": "unknown",
|
||||
"detection method": "system",
|
||||
"include directory": r"unknown",
|
||||
"lib directory": r"unknown",
|
||||
"openblas configuration": r"unknown",
|
||||
"pc file directory": r"unknown",
|
||||
},
|
||||
"lapack": {
|
||||
"name": "accelerate",
|
||||
"found": bool("True".lower().replace("false", "")),
|
||||
"version": "unknown",
|
||||
"detection method": "system",
|
||||
"include directory": r"unknown",
|
||||
"lib directory": r"unknown",
|
||||
"openblas configuration": r"unknown",
|
||||
"pc file directory": r"unknown",
|
||||
},
|
||||
},
|
||||
"Python Information": {
|
||||
"path": r"/private/var/folders/y6/nj790rtn62lfktb1sh__79hc0000gn/T/build-env-i54si8eq/bin/python",
|
||||
"version": "3.11",
|
||||
},
|
||||
"SIMD Extensions": {
|
||||
"baseline": __cpu_baseline__,
|
||||
"found": [
|
||||
feature for feature in __cpu_dispatch__ if __cpu_features__[feature]
|
||||
],
|
||||
"not found": [
|
||||
feature for feature in __cpu_dispatch__ if not __cpu_features__[feature]
|
||||
],
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def _check_pyyaml():
    """Return the ``yaml`` (PyYAML) module.

    Raises ModuleNotFoundError when PyYAML is not installed; callers use
    that to fall back to JSON output.
    """
    import importlib

    return importlib.import_module("yaml")
|
||||
|
||||
|
||||
def show(mode=DisplayModes.stdout.value):
|
||||
"""
|
||||
Show libraries and system information on which NumPy was built
|
||||
and is being used
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mode : {`'stdout'`, `'dicts'`}, optional.
|
||||
Indicates how to display the config information.
|
||||
`'stdout'` prints to console, `'dicts'` returns a dictionary
|
||||
of the configuration.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : {`dict`, `None`}
|
||||
If mode is `'dicts'`, a dict is returned, else None
|
||||
|
||||
See Also
|
||||
--------
|
||||
get_include : Returns the directory containing NumPy C
|
||||
header files.
|
||||
|
||||
Notes
|
||||
-----
|
||||
1. The `'stdout'` mode will give more readable
|
||||
output if ``pyyaml`` is installed
|
||||
|
||||
"""
|
||||
if mode == DisplayModes.stdout.value:
|
||||
try: # Non-standard library, check import
|
||||
yaml = _check_pyyaml()
|
||||
|
||||
print(yaml.dump(CONFIG))
|
||||
except ModuleNotFoundError:
|
||||
import warnings
|
||||
import json
|
||||
|
||||
warnings.warn("Install `pyyaml` for better output", stacklevel=1)
|
||||
print(json.dumps(CONFIG, indent=2))
|
||||
elif mode == DisplayModes.dicts.value:
|
||||
return CONFIG
|
||||
else:
|
||||
raise AttributeError(
|
||||
f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}"
|
||||
)
|
||||
|
||||
|
||||
def show_config(mode=DisplayModes.stdout.value):
    # Thin public wrapper around `show`; the real documentation is attached
    # below by copying `show.__doc__`.
    return show(mode)


# Present the wrapper as the public `numpy.show_config` with `show`'s docs.
show_config.__doc__ = show.__doc__
show_config.__module__ = "numpy"
|
||||
102
lib/python3.11/site-packages/numpy/__config__.pyi
Normal file
102
lib/python3.11/site-packages/numpy/__config__.pyi
Normal file
@ -0,0 +1,102 @@
|
||||
from enum import Enum
|
||||
from types import ModuleType
|
||||
from typing import Final, NotRequired, TypedDict, overload, type_check_only
|
||||
from typing import Literal as L
|
||||
|
||||
# Per-compiler metadata recorded by the Meson build.  Functional TypedDict
# syntax is required because "linker args" is not a valid identifier.
_CompilerConfigDictValue = TypedDict(
    "_CompilerConfigDictValue",
    {
        "name": str,
        "linker": str,
        "version": str,
        "commands": str,
        "args": str,
        "linker args": str,
    },
)
# Maps each toolchain ("c", "cython", "c++") to its compiler metadata.
_CompilerConfigDict = TypedDict(
    "_CompilerConfigDict",
    {
        "c": _CompilerConfigDictValue,
        "cython": _CompilerConfigDictValue,
        "c++": _CompilerConfigDictValue,
    },
)
|
||||
_MachineInformationDict = TypedDict(
|
||||
"_MachineInformationDict",
|
||||
{
|
||||
"host": _MachineInformationDictValue,
|
||||
"build": _MachineInformationDictValue,
|
||||
"cross-compiled": NotRequired[L[True]],
|
||||
},
|
||||
)
|
||||
|
||||
@type_check_only
|
||||
class _MachineInformationDictValue(TypedDict):
|
||||
cpu: str
|
||||
family: str
|
||||
endian: L["little", "big"]
|
||||
system: str
|
||||
|
||||
_BuildDependenciesDictValue = TypedDict(
|
||||
"_BuildDependenciesDictValue",
|
||||
{
|
||||
"name": str,
|
||||
"found": NotRequired[L[True]],
|
||||
"version": str,
|
||||
"include directory": str,
|
||||
"lib directory": str,
|
||||
"openblas configuration": str,
|
||||
"pc file directory": str,
|
||||
},
|
||||
)
|
||||
|
||||
# The "Build Dependencies" section of CONFIG: one entry per linear-algebra
# backend.
class _BuildDependenciesDict(TypedDict):
    blas: _BuildDependenciesDictValue
    lapack: _BuildDependenciesDictValue


# The "Python Information" section of CONFIG: interpreter used at build time.
class _PythonInformationDict(TypedDict):
    path: str
    version: str
||||
|
||||
# The "SIMD Extensions" section of CONFIG; functional syntax because
# "not found" is not a valid identifier.
_SIMDExtensionsDict = TypedDict(
    "_SIMDExtensionsDict",
    {
        "baseline": list[str],
        # Dispatched CPU features, split by runtime availability.
        "found": list[str],
        "not found": list[str],
    },
)
|
||||
|
||||
# Top-level shape of numpy.__config__.CONFIG; the keys contain spaces, hence
# the functional TypedDict form.
_ConfigDict = TypedDict(
    "_ConfigDict",
    {
        "Compilers": _CompilerConfigDict,
        "Machine Information": _MachineInformationDict,
        "Build Dependencies": _BuildDependenciesDict,
        "Python Information": _PythonInformationDict,
        "SIMD Extensions": _SIMDExtensionsDict,
    },
)
|
||||
|
||||
###
|
||||
|
||||
__all__ = ["show_config"]

# Build-time configuration dictionary; the concrete value is generated into
# __config__.py by the build process.
CONFIG: Final[_ConfigDict] = ...

class DisplayModes(Enum):
    # Output targets accepted by `show` / `show_config`.
    stdout = "stdout"
    dicts = "dicts"

# Returns the imported ``yaml`` module (raises if PyYAML is absent).
def _check_pyyaml() -> ModuleType: ...

# `show` prints to stdout and returns None by default; with mode="dicts"
# it returns the configuration dictionary instead.
@overload
def show(mode: L["stdout"] = "stdout") -> None: ...
@overload
def show(mode: L["dicts"]) -> _ConfigDict: ...

# Public alias of `show`, exported as `numpy.show_config`.
@overload
def show_config(mode: L["stdout"] = "stdout") -> None: ...
@overload
def show_config(mode: L["dicts"]) -> _ConfigDict: ...
|
||||
1241
lib/python3.11/site-packages/numpy/__init__.cython-30.pxd
Normal file
1241
lib/python3.11/site-packages/numpy/__init__.cython-30.pxd
Normal file
File diff suppressed because it is too large
Load Diff
1154
lib/python3.11/site-packages/numpy/__init__.pxd
Normal file
1154
lib/python3.11/site-packages/numpy/__init__.pxd
Normal file
File diff suppressed because it is too large
Load Diff
928
lib/python3.11/site-packages/numpy/__init__.py
Normal file
928
lib/python3.11/site-packages/numpy/__init__.py
Normal file
@ -0,0 +1,928 @@
|
||||
"""
|
||||
NumPy
|
||||
=====
|
||||
|
||||
Provides
|
||||
1. An array object of arbitrary homogeneous items
|
||||
2. Fast mathematical operations over arrays
|
||||
3. Linear Algebra, Fourier Transforms, Random Number Generation
|
||||
|
||||
How to use the documentation
|
||||
----------------------------
|
||||
Documentation is available in two forms: docstrings provided
|
||||
with the code, and a loose standing reference guide, available from
|
||||
`the NumPy homepage <https://numpy.org>`_.
|
||||
|
||||
We recommend exploring the docstrings using
|
||||
`IPython <https://ipython.org>`_, an advanced Python shell with
|
||||
TAB-completion and introspection capabilities. See below for further
|
||||
instructions.
|
||||
|
||||
The docstring examples assume that `numpy` has been imported as ``np``::
|
||||
|
||||
>>> import numpy as np
|
||||
|
||||
Code snippets are indicated by three greater-than signs::
|
||||
|
||||
>>> x = 42
|
||||
>>> x = x + 1
|
||||
|
||||
Use the built-in ``help`` function to view a function's docstring::
|
||||
|
||||
>>> help(np.sort)
|
||||
... # doctest: +SKIP
|
||||
|
||||
For some objects, ``np.info(obj)`` may provide additional help. This is
|
||||
particularly true if you see the line "Help on ufunc object:" at the top
|
||||
of the help() page. Ufuncs are implemented in C, not Python, for speed.
|
||||
The native Python help() does not know how to view their help, but our
|
||||
np.info() function does.
|
||||
|
||||
Available subpackages
|
||||
---------------------
|
||||
lib
|
||||
Basic functions used by several sub-packages.
|
||||
random
|
||||
Core Random Tools
|
||||
linalg
|
||||
Core Linear Algebra Tools
|
||||
fft
|
||||
Core FFT routines
|
||||
polynomial
|
||||
Polynomial tools
|
||||
testing
|
||||
NumPy testing tools
|
||||
distutils
|
||||
Enhancements to distutils with support for
|
||||
Fortran compilers support and more (for Python <= 3.11)
|
||||
|
||||
Utilities
|
||||
---------
|
||||
test
|
||||
Run numpy unittests
|
||||
show_config
|
||||
Show numpy build configuration
|
||||
__version__
|
||||
NumPy version string
|
||||
|
||||
Viewing documentation using IPython
|
||||
-----------------------------------
|
||||
|
||||
Start IPython and import `numpy` usually under the alias ``np``: `import
|
||||
numpy as np`. Then, directly past or use the ``%cpaste`` magic to paste
|
||||
examples into the shell. To see which functions are available in `numpy`,
|
||||
type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
|
||||
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
|
||||
down the list. To view the docstring for a function, use
|
||||
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
|
||||
the source code).
|
||||
|
||||
Copies vs. in-place operation
|
||||
-----------------------------
|
||||
Most of the functions in `numpy` return a copy of the array argument
|
||||
(e.g., `np.sort`). In-place versions of these functions are often
|
||||
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
|
||||
Exceptions to this rule are documented.
|
||||
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
# If a version with git hash was stored, use that instead
|
||||
from . import version
|
||||
from ._expired_attrs_2_0 import __expired_attributes__
|
||||
from ._globals import _CopyMode, _NoValue
|
||||
from .version import __version__
|
||||
|
||||
# We first need to detect if we're being called as part of the numpy setup
|
||||
# procedure itself in a reliable manner.
|
||||
try:
|
||||
__NUMPY_SETUP__ # noqa: B018
|
||||
except NameError:
|
||||
__NUMPY_SETUP__ = False
|
||||
|
||||
if __NUMPY_SETUP__:
|
||||
sys.stderr.write('Running from numpy source directory.\n')
|
||||
else:
|
||||
# Allow distributors to run custom init code before importing numpy._core
|
||||
from . import _distributor_init
|
||||
|
||||
try:
|
||||
from numpy.__config__ import show_config
|
||||
except ImportError as e:
|
||||
msg = """Error importing numpy: you should not try to import numpy from
|
||||
its source directory; please exit the numpy source tree, and relaunch
|
||||
your python interpreter from there."""
|
||||
raise ImportError(msg) from e
|
||||
|
||||
from . import _core
|
||||
from ._core import (
|
||||
False_,
|
||||
ScalarType,
|
||||
True_,
|
||||
abs,
|
||||
absolute,
|
||||
acos,
|
||||
acosh,
|
||||
add,
|
||||
all,
|
||||
allclose,
|
||||
amax,
|
||||
amin,
|
||||
any,
|
||||
arange,
|
||||
arccos,
|
||||
arccosh,
|
||||
arcsin,
|
||||
arcsinh,
|
||||
arctan,
|
||||
arctan2,
|
||||
arctanh,
|
||||
argmax,
|
||||
argmin,
|
||||
argpartition,
|
||||
argsort,
|
||||
argwhere,
|
||||
around,
|
||||
array,
|
||||
array2string,
|
||||
array_equal,
|
||||
array_equiv,
|
||||
array_repr,
|
||||
array_str,
|
||||
asanyarray,
|
||||
asarray,
|
||||
ascontiguousarray,
|
||||
asfortranarray,
|
||||
asin,
|
||||
asinh,
|
||||
astype,
|
||||
atan,
|
||||
atan2,
|
||||
atanh,
|
||||
atleast_1d,
|
||||
atleast_2d,
|
||||
atleast_3d,
|
||||
base_repr,
|
||||
binary_repr,
|
||||
bitwise_and,
|
||||
bitwise_count,
|
||||
bitwise_invert,
|
||||
bitwise_left_shift,
|
||||
bitwise_not,
|
||||
bitwise_or,
|
||||
bitwise_right_shift,
|
||||
bitwise_xor,
|
||||
block,
|
||||
bool,
|
||||
bool_,
|
||||
broadcast,
|
||||
busday_count,
|
||||
busday_offset,
|
||||
busdaycalendar,
|
||||
byte,
|
||||
bytes_,
|
||||
can_cast,
|
||||
cbrt,
|
||||
cdouble,
|
||||
ceil,
|
||||
character,
|
||||
choose,
|
||||
clip,
|
||||
clongdouble,
|
||||
complex64,
|
||||
complex128,
|
||||
complexfloating,
|
||||
compress,
|
||||
concat,
|
||||
concatenate,
|
||||
conj,
|
||||
conjugate,
|
||||
convolve,
|
||||
copysign,
|
||||
copyto,
|
||||
correlate,
|
||||
cos,
|
||||
cosh,
|
||||
count_nonzero,
|
||||
cross,
|
||||
csingle,
|
||||
cumprod,
|
||||
cumsum,
|
||||
cumulative_prod,
|
||||
cumulative_sum,
|
||||
datetime64,
|
||||
datetime_as_string,
|
||||
datetime_data,
|
||||
deg2rad,
|
||||
degrees,
|
||||
diagonal,
|
||||
divide,
|
||||
divmod,
|
||||
dot,
|
||||
double,
|
||||
dtype,
|
||||
e,
|
||||
einsum,
|
||||
einsum_path,
|
||||
empty,
|
||||
empty_like,
|
||||
equal,
|
||||
errstate,
|
||||
euler_gamma,
|
||||
exp,
|
||||
exp2,
|
||||
expm1,
|
||||
fabs,
|
||||
finfo,
|
||||
flatiter,
|
||||
flatnonzero,
|
||||
flexible,
|
||||
float16,
|
||||
float32,
|
||||
float64,
|
||||
float_power,
|
||||
floating,
|
||||
floor,
|
||||
floor_divide,
|
||||
fmax,
|
||||
fmin,
|
||||
fmod,
|
||||
format_float_positional,
|
||||
format_float_scientific,
|
||||
frexp,
|
||||
from_dlpack,
|
||||
frombuffer,
|
||||
fromfile,
|
||||
fromfunction,
|
||||
fromiter,
|
||||
frompyfunc,
|
||||
fromstring,
|
||||
full,
|
||||
full_like,
|
||||
gcd,
|
||||
generic,
|
||||
geomspace,
|
||||
get_printoptions,
|
||||
getbufsize,
|
||||
geterr,
|
||||
geterrcall,
|
||||
greater,
|
||||
greater_equal,
|
||||
half,
|
||||
heaviside,
|
||||
hstack,
|
||||
hypot,
|
||||
identity,
|
||||
iinfo,
|
||||
indices,
|
||||
inexact,
|
||||
inf,
|
||||
inner,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
int_,
|
||||
intc,
|
||||
integer,
|
||||
intp,
|
||||
invert,
|
||||
is_busday,
|
||||
isclose,
|
||||
isdtype,
|
||||
isfinite,
|
||||
isfortran,
|
||||
isinf,
|
||||
isnan,
|
||||
isnat,
|
||||
isscalar,
|
||||
issubdtype,
|
||||
lcm,
|
||||
ldexp,
|
||||
left_shift,
|
||||
less,
|
||||
less_equal,
|
||||
lexsort,
|
||||
linspace,
|
||||
little_endian,
|
||||
log,
|
||||
log1p,
|
||||
log2,
|
||||
log10,
|
||||
logaddexp,
|
||||
logaddexp2,
|
||||
logical_and,
|
||||
logical_not,
|
||||
logical_or,
|
||||
logical_xor,
|
||||
logspace,
|
||||
long,
|
||||
longdouble,
|
||||
longlong,
|
||||
matmul,
|
||||
matrix_transpose,
|
||||
matvec,
|
||||
max,
|
||||
maximum,
|
||||
may_share_memory,
|
||||
mean,
|
||||
memmap,
|
||||
min,
|
||||
min_scalar_type,
|
||||
minimum,
|
||||
mod,
|
||||
modf,
|
||||
moveaxis,
|
||||
multiply,
|
||||
nan,
|
||||
ndarray,
|
||||
ndim,
|
||||
nditer,
|
||||
negative,
|
||||
nested_iters,
|
||||
newaxis,
|
||||
nextafter,
|
||||
nonzero,
|
||||
not_equal,
|
||||
number,
|
||||
object_,
|
||||
ones,
|
||||
ones_like,
|
||||
outer,
|
||||
partition,
|
||||
permute_dims,
|
||||
pi,
|
||||
positive,
|
||||
pow,
|
||||
power,
|
||||
printoptions,
|
||||
prod,
|
||||
promote_types,
|
||||
ptp,
|
||||
put,
|
||||
putmask,
|
||||
rad2deg,
|
||||
radians,
|
||||
ravel,
|
||||
recarray,
|
||||
reciprocal,
|
||||
record,
|
||||
remainder,
|
||||
repeat,
|
||||
require,
|
||||
reshape,
|
||||
resize,
|
||||
result_type,
|
||||
right_shift,
|
||||
rint,
|
||||
roll,
|
||||
rollaxis,
|
||||
round,
|
||||
sctypeDict,
|
||||
searchsorted,
|
||||
set_printoptions,
|
||||
setbufsize,
|
||||
seterr,
|
||||
seterrcall,
|
||||
shape,
|
||||
shares_memory,
|
||||
short,
|
||||
sign,
|
||||
signbit,
|
||||
signedinteger,
|
||||
sin,
|
||||
single,
|
||||
sinh,
|
||||
size,
|
||||
sort,
|
||||
spacing,
|
||||
sqrt,
|
||||
square,
|
||||
squeeze,
|
||||
stack,
|
||||
std,
|
||||
str_,
|
||||
subtract,
|
||||
sum,
|
||||
swapaxes,
|
||||
take,
|
||||
tan,
|
||||
tanh,
|
||||
tensordot,
|
||||
timedelta64,
|
||||
trace,
|
||||
transpose,
|
||||
true_divide,
|
||||
trunc,
|
||||
typecodes,
|
||||
ubyte,
|
||||
ufunc,
|
||||
uint,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
uintc,
|
||||
uintp,
|
||||
ulong,
|
||||
ulonglong,
|
||||
unsignedinteger,
|
||||
unstack,
|
||||
ushort,
|
||||
var,
|
||||
vdot,
|
||||
vecdot,
|
||||
vecmat,
|
||||
void,
|
||||
vstack,
|
||||
where,
|
||||
zeros,
|
||||
zeros_like,
|
||||
)
|
||||
|
||||
# NOTE: It's still under discussion whether these aliases
|
||||
# should be removed.
|
||||
for ta in ["float96", "float128", "complex192", "complex256"]:
|
||||
try:
|
||||
globals()[ta] = getattr(_core, ta)
|
||||
except AttributeError:
|
||||
pass
|
||||
del ta
|
||||
|
||||
from . import lib
|
||||
from . import matrixlib as _mat
|
||||
from .lib import scimath as emath
|
||||
from .lib._arraypad_impl import pad
|
||||
from .lib._arraysetops_impl import (
|
||||
ediff1d,
|
||||
in1d,
|
||||
intersect1d,
|
||||
isin,
|
||||
setdiff1d,
|
||||
setxor1d,
|
||||
union1d,
|
||||
unique,
|
||||
unique_all,
|
||||
unique_counts,
|
||||
unique_inverse,
|
||||
unique_values,
|
||||
)
|
||||
from .lib._function_base_impl import (
|
||||
angle,
|
||||
append,
|
||||
asarray_chkfinite,
|
||||
average,
|
||||
bartlett,
|
||||
bincount,
|
||||
blackman,
|
||||
copy,
|
||||
corrcoef,
|
||||
cov,
|
||||
delete,
|
||||
diff,
|
||||
digitize,
|
||||
extract,
|
||||
flip,
|
||||
gradient,
|
||||
hamming,
|
||||
hanning,
|
||||
i0,
|
||||
insert,
|
||||
interp,
|
||||
iterable,
|
||||
kaiser,
|
||||
median,
|
||||
meshgrid,
|
||||
percentile,
|
||||
piecewise,
|
||||
place,
|
||||
quantile,
|
||||
rot90,
|
||||
select,
|
||||
sinc,
|
||||
sort_complex,
|
||||
trapezoid,
|
||||
trapz,
|
||||
trim_zeros,
|
||||
unwrap,
|
||||
vectorize,
|
||||
)
|
||||
from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd
|
||||
from .lib._index_tricks_impl import (
|
||||
c_,
|
||||
diag_indices,
|
||||
diag_indices_from,
|
||||
fill_diagonal,
|
||||
index_exp,
|
||||
ix_,
|
||||
mgrid,
|
||||
ndenumerate,
|
||||
ndindex,
|
||||
ogrid,
|
||||
r_,
|
||||
ravel_multi_index,
|
||||
s_,
|
||||
unravel_index,
|
||||
)
|
||||
from .lib._nanfunctions_impl import (
|
||||
nanargmax,
|
||||
nanargmin,
|
||||
nancumprod,
|
||||
nancumsum,
|
||||
nanmax,
|
||||
nanmean,
|
||||
nanmedian,
|
||||
nanmin,
|
||||
nanpercentile,
|
||||
nanprod,
|
||||
nanquantile,
|
||||
nanstd,
|
||||
nansum,
|
||||
nanvar,
|
||||
)
|
||||
from .lib._npyio_impl import (
|
||||
fromregex,
|
||||
genfromtxt,
|
||||
load,
|
||||
loadtxt,
|
||||
packbits,
|
||||
save,
|
||||
savetxt,
|
||||
savez,
|
||||
savez_compressed,
|
||||
unpackbits,
|
||||
)
|
||||
from .lib._polynomial_impl import (
|
||||
poly,
|
||||
poly1d,
|
||||
polyadd,
|
||||
polyder,
|
||||
polydiv,
|
||||
polyfit,
|
||||
polyint,
|
||||
polymul,
|
||||
polysub,
|
||||
polyval,
|
||||
roots,
|
||||
)
|
||||
from .lib._shape_base_impl import (
|
||||
apply_along_axis,
|
||||
apply_over_axes,
|
||||
array_split,
|
||||
column_stack,
|
||||
dsplit,
|
||||
dstack,
|
||||
expand_dims,
|
||||
hsplit,
|
||||
kron,
|
||||
put_along_axis,
|
||||
row_stack,
|
||||
split,
|
||||
take_along_axis,
|
||||
tile,
|
||||
vsplit,
|
||||
)
|
||||
from .lib._stride_tricks_impl import (
|
||||
broadcast_arrays,
|
||||
broadcast_shapes,
|
||||
broadcast_to,
|
||||
)
|
||||
from .lib._twodim_base_impl import (
|
||||
diag,
|
||||
diagflat,
|
||||
eye,
|
||||
fliplr,
|
||||
flipud,
|
||||
histogram2d,
|
||||
mask_indices,
|
||||
tri,
|
||||
tril,
|
||||
tril_indices,
|
||||
tril_indices_from,
|
||||
triu,
|
||||
triu_indices,
|
||||
triu_indices_from,
|
||||
vander,
|
||||
)
|
||||
from .lib._type_check_impl import (
|
||||
common_type,
|
||||
imag,
|
||||
iscomplex,
|
||||
iscomplexobj,
|
||||
isreal,
|
||||
isrealobj,
|
||||
mintypecode,
|
||||
nan_to_num,
|
||||
real,
|
||||
real_if_close,
|
||||
typename,
|
||||
)
|
||||
from .lib._ufunclike_impl import fix, isneginf, isposinf
|
||||
from .lib._utils_impl import get_include, info, show_runtime
|
||||
from .matrixlib import asmatrix, bmat, matrix
|
||||
|
||||
# public submodules are imported lazily, therefore are accessible from
|
||||
# __getattr__. Note that `distutils` (deprecated) and `array_api`
|
||||
# (experimental label) are not added here, because `from numpy import *`
|
||||
# must not raise any warnings - that's too disruptive.
|
||||
__numpy_submodules__ = {
|
||||
"linalg", "fft", "dtypes", "random", "polynomial", "ma",
|
||||
"exceptions", "lib", "ctypeslib", "testing", "typing",
|
||||
"f2py", "test", "rec", "char", "core", "strings",
|
||||
}
|
||||
|
||||
# We build warning messages for former attributes
|
||||
_msg = (
|
||||
"module 'numpy' has no attribute '{n}'.\n"
|
||||
"`np.{n}` was a deprecated alias for the builtin `{n}`. "
|
||||
"To avoid this error in existing code, use `{n}` by itself. "
|
||||
"Doing this will not modify any behavior and is safe. {extended_msg}\n"
|
||||
"The aliases was originally deprecated in NumPy 1.20; for more "
|
||||
"details and guidance see the original release note at:\n"
|
||||
" https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
|
||||
|
||||
_specific_msg = (
|
||||
"If you specifically wanted the numpy scalar type, use `np.{}` here.")
|
||||
|
||||
_int_extended_msg = (
|
||||
"When replacing `np.{}`, you may wish to use e.g. `np.int64` "
|
||||
"or `np.int32` to specify the precision. If you wish to review "
|
||||
"your current use, check the release note link for "
|
||||
"additional information.")
|
||||
|
||||
_type_info = [
|
||||
("object", ""), # The NumPy scalar only exists by name.
|
||||
("float", _specific_msg.format("float64")),
|
||||
("complex", _specific_msg.format("complex128")),
|
||||
("str", _specific_msg.format("str_")),
|
||||
("int", _int_extended_msg.format("int"))]
|
||||
|
||||
__former_attrs__ = {
|
||||
n: _msg.format(n=n, extended_msg=extended_msg)
|
||||
for n, extended_msg in _type_info
|
||||
}
|
||||
|
||||
# Some of these could be defined right away, but most were aliases to
|
||||
# the Python objects and only removed in NumPy 1.24. Defining them should
|
||||
# probably wait for NumPy 1.26 or 2.0.
|
||||
# When defined, these should possibly not be added to `__all__` to avoid
|
||||
# import with `from numpy import *`.
|
||||
__future_scalars__ = {"str", "bytes", "object"}
|
||||
|
||||
__array_api_version__ = "2024.12"
|
||||
|
||||
from ._array_api_info import __array_namespace_info__
|
||||
|
||||
# now that numpy core module is imported, can initialize limits
|
||||
_core.getlimits._register_known_types()
|
||||
|
||||
__all__ = list(
|
||||
__numpy_submodules__ |
|
||||
set(_core.__all__) |
|
||||
set(_mat.__all__) |
|
||||
set(lib._histograms_impl.__all__) |
|
||||
set(lib._nanfunctions_impl.__all__) |
|
||||
set(lib._function_base_impl.__all__) |
|
||||
set(lib._twodim_base_impl.__all__) |
|
||||
set(lib._shape_base_impl.__all__) |
|
||||
set(lib._type_check_impl.__all__) |
|
||||
set(lib._arraysetops_impl.__all__) |
|
||||
set(lib._ufunclike_impl.__all__) |
|
||||
set(lib._arraypad_impl.__all__) |
|
||||
set(lib._utils_impl.__all__) |
|
||||
set(lib._stride_tricks_impl.__all__) |
|
||||
set(lib._polynomial_impl.__all__) |
|
||||
set(lib._npyio_impl.__all__) |
|
||||
set(lib._index_tricks_impl.__all__) |
|
||||
{"emath", "show_config", "__version__", "__array_namespace_info__"}
|
||||
)
|
||||
|
||||
# Filter out Cython harmless warnings
|
||||
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
|
||||
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
|
||||
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
|
||||
|
||||
def __getattr__(attr):
    """
    Module-level attribute hook (PEP 562).

    Lazily imports the public submodules on first access, emits
    FutureWarning/DeprecationWarning for names scheduled to change, and
    raises informative AttributeErrors for names removed in NumPy 2.0.
    """
    # Warn for expired attributes
    import warnings

    # Lazily-imported public submodules; each branch imports on first access
    # and returns the module object (Python caches it on this module).
    if attr == "linalg":
        import numpy.linalg as linalg
        return linalg
    elif attr == "fft":
        import numpy.fft as fft
        return fft
    elif attr == "dtypes":
        import numpy.dtypes as dtypes
        return dtypes
    elif attr == "random":
        import numpy.random as random
        return random
    elif attr == "polynomial":
        import numpy.polynomial as polynomial
        return polynomial
    elif attr == "ma":
        import numpy.ma as ma
        return ma
    elif attr == "ctypeslib":
        import numpy.ctypeslib as ctypeslib
        return ctypeslib
    elif attr == "exceptions":
        import numpy.exceptions as exceptions
        return exceptions
    elif attr == "testing":
        import numpy.testing as testing
        return testing
    elif attr == "matlib":
        import numpy.matlib as matlib
        return matlib
    elif attr == "f2py":
        import numpy.f2py as f2py
        return f2py
    elif attr == "typing":
        import numpy.typing as typing
        return typing
    elif attr == "rec":
        import numpy.rec as rec
        return rec
    elif attr == "char":
        import numpy.char as char
        return char
    elif attr == "array_api":
        # Removed experimental namespace; fail with an explicit message.
        raise AttributeError("`numpy.array_api` is not available from "
                             "numpy 2.0 onwards", name=None)
    elif attr == "core":
        import numpy.core as core
        return core
    elif attr == "strings":
        import numpy.strings as strings
        return strings
    elif attr == "distutils":
        # Only available when this build registered it (Python <= 3.11).
        if 'distutils' in __numpy_submodules__:
            import numpy.distutils as distutils
            return distutils
        else:
            raise AttributeError("`numpy.distutils` is not available from "
                                 "Python 3.12 onwards", name=None)

    if attr in __future_scalars__:
        # And future warnings for those that will change, but also give
        # the AttributeError
        warnings.warn(
            f"In the future `np.{attr}` will be defined as the "
            "corresponding NumPy scalar.", FutureWarning, stacklevel=2)

    if attr in __former_attrs__:
        # Aliases of builtins removed in NumPy 1.24; message explains the fix.
        raise AttributeError(__former_attrs__[attr], name=None)

    if attr in __expired_attributes__:
        # Names removed in the NumPy 2.0 release, with migration guidance.
        raise AttributeError(
            f"`np.{attr}` was removed in the NumPy 2.0 release. "
            f"{__expired_attributes__[attr]}",
            name=None
        )

    if attr == "chararray":
        # Still works, but deprecated: warn and forward to numpy.char.
        warnings.warn(
            "`np.chararray` is deprecated and will be removed from "
            "the main namespace in the future. Use an array with a string "
            "or bytes dtype instead.", DeprecationWarning, stacklevel=2)
        import numpy.char as char
        return char.chararray

    raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
|
||||
|
||||
def __dir__():
|
||||
public_symbols = (
|
||||
globals().keys() | __numpy_submodules__
|
||||
)
|
||||
public_symbols -= {
|
||||
"matrixlib", "matlib", "tests", "conftest", "version",
|
||||
"distutils", "array_api"
|
||||
}
|
||||
return list(public_symbols)
|
||||
|
||||
# Pytest testing
|
||||
from numpy._pytesttester import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
|
||||
def _sanity_check():
    """
    Quick sanity checks for common bugs caused by environment.
    There are some cases e.g. with wrong BLAS ABI that cause wrong
    results under specific runtime conditions that are not necessarily
    achieved during test suite runs, and it is useful to catch those early.

    See https://github.com/numpy/numpy/issues/8577 and other
    similar bug reports.

    """
    try:
        probe = ones(2, dtype=float32)
        # [1, 1] . [1, 1] must equal 2 up to float32 rounding; a broken
        # BLAS ABI typically produces garbage here.
        if not abs(probe.dot(probe) - float32(2.0)) < 1e-5:
            raise AssertionError
    except AssertionError:
        msg = ("The current Numpy installation ({!r}) fails to "
               "pass simple sanity checks. This can be caused for example "
               "by incorrect BLAS library being linked in, or by mixing "
               "package managers (pip, conda, apt, ...). Search closed "
               "numpy issues for similar problems.")
        raise RuntimeError(msg.format(__file__)) from None
|
||||
|
||||
_sanity_check()
|
||||
del _sanity_check
|
||||
|
||||
def _mac_os_check():
    """
    Quick Sanity check for Mac OS look for accelerate build bugs.
    Testing numpy polyfit calls init_dgelsd(LAPACK)
    """
    try:
        coeffs = array([3., 2., 1.])
        xs = linspace(0, 2, 5)
        ys = polyval(coeffs, xs)
        # Fitting an exact quadratic exercises the LAPACK lstsq path; a
        # buggy Accelerate backend emits warnings the caller inspects.
        _ = polyfit(xs, ys, 2, cov=True)
    except ValueError:
        # A failing fit is handled by the caller via captured warnings.
        pass
|
||||
|
||||
if sys.platform == "darwin":
|
||||
from . import exceptions
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
_mac_os_check()
|
||||
# Throw runtime error, if the test failed
|
||||
# Check for warning and report the error_message
|
||||
if len(w) > 0:
|
||||
for _wn in w:
|
||||
if _wn.category is exceptions.RankWarning:
|
||||
# Ignore other warnings, they may not be relevant (see gh-25433)
|
||||
error_message = (
|
||||
f"{_wn.category.__name__}: {_wn.message}"
|
||||
)
|
||||
msg = (
|
||||
"Polyfit sanity test emitted a warning, most likely due "
|
||||
"to using a buggy Accelerate backend."
|
||||
"\nIf you compiled yourself, more information is available at:" # noqa: E501
|
||||
"\nhttps://numpy.org/devdocs/building/index.html"
|
||||
"\nOtherwise report this to the vendor "
|
||||
f"that provided NumPy.\n\n{error_message}\n")
|
||||
raise RuntimeError(msg)
|
||||
del _wn
|
||||
del w
|
||||
del _mac_os_check
|
||||
|
||||
def hugepage_setup():
    """
    We usually use madvise hugepages support, but on some old kernels it
    is slow and thus better avoided. Specifically kernel version 4.6
    had a bug fix which probably fixed this:
    https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
    """
    env_setting = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
    if env_setting is not None:
        # An explicit user setting always wins.
        return int(env_setting)
    if sys.platform != "linux":
        # This is not Linux, so it should not matter, just enable anyway
        return 1
    # If there is an issue with parsing the kernel version,
    # set use_hugepage to 0. Usage of LooseVersion will handle
    # the kernel version parsing better, but avoided since it
    # will increase the import time.
    # See: #16679 for related discussion.
    try:
        major_minor = tuple(
            int(part) for part in os.uname().release.split(".")[:2]
        )
    except ValueError:
        return 0
    return 0 if major_minor < (4, 6) else 1
|
||||
|
||||
# Note that this will currently only make a difference on Linux
|
||||
_core.multiarray._set_madvise_hugepage(hugepage_setup())
|
||||
del hugepage_setup
|
||||
|
||||
# Give a warning if NumPy is reloaded or imported on a sub-interpreter
|
||||
# We do this from python, since the C-module may not be reloaded and
|
||||
# it is tidier organized.
|
||||
_core.multiarray._multiarray_umath._reload_guard()
|
||||
|
||||
# TODO: Remove the environment variable entirely now that it is "weak"
|
||||
if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"):
|
||||
warnings.warn(
|
||||
"NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 "
|
||||
"transition and is ignored after NumPy 2.2.",
|
||||
UserWarning, stacklevel=2)
|
||||
|
||||
# Tell PyInstaller where to find hook-numpy.py
|
||||
def _pyinstaller_hooks_dir():
    """Tell PyInstaller where NumPy's ``hook-numpy.py`` lives.

    Returns
    -------
    list of str
        A one-element list containing the absolute path of the
        ``_pyinstaller`` subpackage next to this file.
    """
    # Imported lazily so that regular `import numpy` pays no cost.
    import pathlib

    hooks = pathlib.Path(__file__).parent / "_pyinstaller"
    return [str(hooks.resolve())]
|
||||
|
||||
|
||||
# Remove symbols imported for internal use
# (keeps `np.os`, `np.sys`, `np.warnings` from leaking into the namespace).
del os, sys, warnings
|
||||
5387
lib/python3.11/site-packages/numpy/__init__.pyi
Normal file
5387
lib/python3.11/site-packages/numpy/__init__.pyi
Normal file
File diff suppressed because it is too large
Load Diff
346
lib/python3.11/site-packages/numpy/_array_api_info.py
Normal file
346
lib/python3.11/site-packages/numpy/_array_api_info.py
Normal file
@ -0,0 +1,346 @@
|
||||
"""
|
||||
Array API Inspection namespace
|
||||
|
||||
This is the namespace for inspection functions as defined by the array API
|
||||
standard. See
|
||||
https://data-apis.org/array-api/latest/API_specification/inspection.html for
|
||||
more details.
|
||||
|
||||
"""
|
||||
from numpy._core import (
|
||||
bool,
|
||||
complex64,
|
||||
complex128,
|
||||
dtype,
|
||||
float32,
|
||||
float64,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
intp,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
)
|
||||
|
||||
|
||||
class __array_namespace_info__:
|
||||
"""
|
||||
Get the array API inspection namespace for NumPy.
|
||||
|
||||
The array API inspection namespace defines the following functions:
|
||||
|
||||
- capabilities()
|
||||
- default_device()
|
||||
- default_dtypes()
|
||||
- dtypes()
|
||||
- devices()
|
||||
|
||||
See
|
||||
https://data-apis.org/array-api/latest/API_specification/inspection.html
|
||||
for more details.
|
||||
|
||||
Returns
|
||||
-------
|
||||
info : ModuleType
|
||||
The array API inspection namespace for NumPy.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> info = np.__array_namespace_info__()
|
||||
>>> info.default_dtypes()
|
||||
{'real floating': numpy.float64,
|
||||
'complex floating': numpy.complex128,
|
||||
'integral': numpy.int64,
|
||||
'indexing': numpy.int64}
|
||||
|
||||
"""
|
||||
|
||||
__module__ = 'numpy'
|
||||
|
||||
def capabilities(self):
|
||||
"""
|
||||
Return a dictionary of array API library capabilities.
|
||||
|
||||
The resulting dictionary has the following keys:
|
||||
|
||||
- **"boolean indexing"**: boolean indicating whether an array library
|
||||
supports boolean indexing. Always ``True`` for NumPy.
|
||||
|
||||
- **"data-dependent shapes"**: boolean indicating whether an array
|
||||
library supports data-dependent output shapes. Always ``True`` for
|
||||
NumPy.
|
||||
|
||||
See
|
||||
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
|
||||
for more details.
|
||||
|
||||
See Also
|
||||
--------
|
||||
__array_namespace_info__.default_device,
|
||||
__array_namespace_info__.default_dtypes,
|
||||
__array_namespace_info__.dtypes,
|
||||
__array_namespace_info__.devices
|
||||
|
||||
Returns
|
||||
-------
|
||||
capabilities : dict
|
||||
A dictionary of array API library capabilities.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> info = np.__array_namespace_info__()
|
||||
>>> info.capabilities()
|
||||
{'boolean indexing': True,
|
||||
'data-dependent shapes': True,
|
||||
'max dimensions': 64}
|
||||
|
||||
"""
|
||||
return {
|
||||
"boolean indexing": True,
|
||||
"data-dependent shapes": True,
|
||||
"max dimensions": 64,
|
||||
}
|
||||
|
||||
def default_device(self):
|
||||
"""
|
||||
The default device used for new NumPy arrays.
|
||||
|
||||
For NumPy, this always returns ``'cpu'``.
|
||||
|
||||
See Also
|
||||
--------
|
||||
__array_namespace_info__.capabilities,
|
||||
__array_namespace_info__.default_dtypes,
|
||||
__array_namespace_info__.dtypes,
|
||||
__array_namespace_info__.devices
|
||||
|
||||
Returns
|
||||
-------
|
||||
device : str
|
||||
The default device used for new NumPy arrays.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> info = np.__array_namespace_info__()
|
||||
>>> info.default_device()
|
||||
'cpu'
|
||||
|
||||
"""
|
||||
return "cpu"
|
||||
|
||||
def default_dtypes(self, *, device=None):
|
||||
"""
|
||||
The default data types used for new NumPy arrays.
|
||||
|
||||
For NumPy, this always returns the following dictionary:
|
||||
|
||||
- **"real floating"**: ``numpy.float64``
|
||||
- **"complex floating"**: ``numpy.complex128``
|
||||
- **"integral"**: ``numpy.intp``
|
||||
- **"indexing"**: ``numpy.intp``
|
||||
|
||||
Parameters
|
||||
----------
|
||||
device : str, optional
|
||||
The device to get the default data types for. For NumPy, only
|
||||
``'cpu'`` is allowed.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dtypes : dict
|
||||
A dictionary describing the default data types used for new NumPy
|
||||
arrays.
|
||||
|
||||
See Also
|
||||
--------
|
||||
__array_namespace_info__.capabilities,
|
||||
__array_namespace_info__.default_device,
|
||||
__array_namespace_info__.dtypes,
|
||||
__array_namespace_info__.devices
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> info = np.__array_namespace_info__()
|
||||
>>> info.default_dtypes()
|
||||
{'real floating': numpy.float64,
|
||||
'complex floating': numpy.complex128,
|
||||
'integral': numpy.int64,
|
||||
'indexing': numpy.int64}
|
||||
|
||||
"""
|
||||
if device not in ["cpu", None]:
|
||||
raise ValueError(
|
||||
'Device not understood. Only "cpu" is allowed, but received:'
|
||||
f' {device}'
|
||||
)
|
||||
return {
|
||||
"real floating": dtype(float64),
|
||||
"complex floating": dtype(complex128),
|
||||
"integral": dtype(intp),
|
||||
"indexing": dtype(intp),
|
||||
}
|
||||
|
||||
def dtypes(self, *, device=None, kind=None):
|
||||
"""
|
||||
The array API data types supported by NumPy.
|
||||
|
||||
Note that this function only returns data types that are defined by
|
||||
the array API.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
device : str, optional
|
||||
The device to get the data types for. For NumPy, only ``'cpu'`` is
|
||||
allowed.
|
||||
kind : str or tuple of str, optional
|
||||
The kind of data types to return. If ``None``, all data types are
|
||||
returned. If a string, only data types of that kind are returned.
|
||||
If a tuple, a dictionary containing the union of the given kinds
|
||||
is returned. The following kinds are supported:
|
||||
|
||||
- ``'bool'``: boolean data types (i.e., ``bool``).
|
||||
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
|
||||
``int16``, ``int32``, ``int64``).
|
||||
- ``'unsigned integer'``: unsigned integer data types (i.e.,
|
||||
``uint8``, ``uint16``, ``uint32``, ``uint64``).
|
||||
- ``'integral'``: integer data types. Shorthand for ``('signed
|
||||
integer', 'unsigned integer')``.
|
||||
- ``'real floating'``: real-valued floating-point data types
|
||||
(i.e., ``float32``, ``float64``).
|
||||
- ``'complex floating'``: complex floating-point data types (i.e.,
|
||||
``complex64``, ``complex128``).
|
||||
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
|
||||
'real floating', 'complex floating')``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dtypes : dict
|
||||
A dictionary mapping the names of data types to the corresponding
|
||||
NumPy data types.
|
||||
|
||||
See Also
|
||||
--------
|
||||
__array_namespace_info__.capabilities,
|
||||
__array_namespace_info__.default_device,
|
||||
__array_namespace_info__.default_dtypes,
|
||||
__array_namespace_info__.devices
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> info = np.__array_namespace_info__()
|
||||
>>> info.dtypes(kind='signed integer')
|
||||
{'int8': numpy.int8,
|
||||
'int16': numpy.int16,
|
||||
'int32': numpy.int32,
|
||||
'int64': numpy.int64}
|
||||
|
||||
"""
|
||||
if device not in ["cpu", None]:
|
||||
raise ValueError(
|
||||
'Device not understood. Only "cpu" is allowed, but received:'
|
||||
f' {device}'
|
||||
)
|
||||
if kind is None:
|
||||
return {
|
||||
"bool": dtype(bool),
|
||||
"int8": dtype(int8),
|
||||
"int16": dtype(int16),
|
||||
"int32": dtype(int32),
|
||||
"int64": dtype(int64),
|
||||
"uint8": dtype(uint8),
|
||||
"uint16": dtype(uint16),
|
||||
"uint32": dtype(uint32),
|
||||
"uint64": dtype(uint64),
|
||||
"float32": dtype(float32),
|
||||
"float64": dtype(float64),
|
||||
"complex64": dtype(complex64),
|
||||
"complex128": dtype(complex128),
|
||||
}
|
||||
if kind == "bool":
|
||||
return {"bool": bool}
|
||||
if kind == "signed integer":
|
||||
return {
|
||||
"int8": dtype(int8),
|
||||
"int16": dtype(int16),
|
||||
"int32": dtype(int32),
|
||||
"int64": dtype(int64),
|
||||
}
|
||||
if kind == "unsigned integer":
|
||||
return {
|
||||
"uint8": dtype(uint8),
|
||||
"uint16": dtype(uint16),
|
||||
"uint32": dtype(uint32),
|
||||
"uint64": dtype(uint64),
|
||||
}
|
||||
if kind == "integral":
|
||||
return {
|
||||
"int8": dtype(int8),
|
||||
"int16": dtype(int16),
|
||||
"int32": dtype(int32),
|
||||
"int64": dtype(int64),
|
||||
"uint8": dtype(uint8),
|
||||
"uint16": dtype(uint16),
|
||||
"uint32": dtype(uint32),
|
||||
"uint64": dtype(uint64),
|
||||
}
|
||||
if kind == "real floating":
|
||||
return {
|
||||
"float32": dtype(float32),
|
||||
"float64": dtype(float64),
|
||||
}
|
||||
if kind == "complex floating":
|
||||
return {
|
||||
"complex64": dtype(complex64),
|
||||
"complex128": dtype(complex128),
|
||||
}
|
||||
if kind == "numeric":
|
||||
return {
|
||||
"int8": dtype(int8),
|
||||
"int16": dtype(int16),
|
||||
"int32": dtype(int32),
|
||||
"int64": dtype(int64),
|
||||
"uint8": dtype(uint8),
|
||||
"uint16": dtype(uint16),
|
||||
"uint32": dtype(uint32),
|
||||
"uint64": dtype(uint64),
|
||||
"float32": dtype(float32),
|
||||
"float64": dtype(float64),
|
||||
"complex64": dtype(complex64),
|
||||
"complex128": dtype(complex128),
|
||||
}
|
||||
if isinstance(kind, tuple):
|
||||
res = {}
|
||||
for k in kind:
|
||||
res.update(self.dtypes(kind=k))
|
||||
return res
|
||||
raise ValueError(f"unsupported kind: {kind!r}")
|
||||
|
||||
def devices(self):
|
||||
"""
|
||||
The devices supported by NumPy.
|
||||
|
||||
For NumPy, this always returns ``['cpu']``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
devices : list of str
|
||||
The devices supported by NumPy.
|
||||
|
||||
See Also
|
||||
--------
|
||||
__array_namespace_info__.capabilities,
|
||||
__array_namespace_info__.default_device,
|
||||
__array_namespace_info__.default_dtypes,
|
||||
__array_namespace_info__.dtypes
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> info = np.__array_namespace_info__()
|
||||
>>> info.devices()
|
||||
['cpu']
|
||||
|
||||
"""
|
||||
return ["cpu"]
|
||||
207
lib/python3.11/site-packages/numpy/_array_api_info.pyi
Normal file
207
lib/python3.11/site-packages/numpy/_array_api_info.pyi
Normal file
@ -0,0 +1,207 @@
|
||||
from typing import (
|
||||
ClassVar,
|
||||
Literal,
|
||||
Never,
|
||||
TypeAlias,
|
||||
TypedDict,
|
||||
TypeVar,
|
||||
final,
|
||||
overload,
|
||||
type_check_only,
|
||||
)
|
||||
|
||||
import numpy as np
|
||||
|
||||
# NumPy supports exactly one array-API device.
_Device: TypeAlias = Literal["cpu"]
_DeviceLike: TypeAlias = _Device | None

# Functional TypedDict syntax is required: the keys contain spaces/hyphens
# and so are not valid Python identifiers.
_Capabilities = TypedDict(
    "_Capabilities",
    {
        "boolean indexing": Literal[True],
        "data-dependent shapes": Literal[True],
    },
)

_DefaultDTypes = TypedDict(
    "_DefaultDTypes",
    {
        "real floating": np.dtype[np.float64],
        "complex floating": np.dtype[np.complex128],
        "integral": np.dtype[np.intp],
        "indexing": np.dtype[np.intp],
    },
)

# One Literal alias per array-API dtype "kind" string, plus their union.
_KindBool: TypeAlias = Literal["bool"]
_KindInt: TypeAlias = Literal["signed integer"]
_KindUInt: TypeAlias = Literal["unsigned integer"]
_KindInteger: TypeAlias = Literal["integral"]
_KindFloat: TypeAlias = Literal["real floating"]
_KindComplex: TypeAlias = Literal["complex floating"]
_KindNumber: TypeAlias = Literal["numeric"]
_Kind: TypeAlias = (
    _KindBool
    | _KindInt
    | _KindUInt
    | _KindInteger
    | _KindFloat
    | _KindComplex
    | _KindNumber
)

# Helpers that accept a bare kind or any ordering of a 2-/3-tuple of kinds,
# so `dtypes(kind=...)` overloads match tuples in any permutation.
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
_Permute1: TypeAlias = _T1 | tuple[_T1]
_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1]
_Permute3: TypeAlias = (
    tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2]
    | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1]
    | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1]
)
|
||||
|
||||
# Result type of `dtypes(kind="bool")`.
@type_check_only
class _DTypesBool(TypedDict):
    bool: np.dtype[np.bool]
|
||||
|
||||
# Result type of `dtypes(kind="signed integer")`.
@type_check_only
class _DTypesInt(TypedDict):
    int8: np.dtype[np.int8]
    int16: np.dtype[np.int16]
    int32: np.dtype[np.int32]
    int64: np.dtype[np.int64]
|
||||
|
||||
# Result type of `dtypes(kind="unsigned integer")`.
@type_check_only
class _DTypesUInt(TypedDict):
    uint8: np.dtype[np.uint8]
    uint16: np.dtype[np.uint16]
    uint32: np.dtype[np.uint32]
    uint64: np.dtype[np.uint64]
|
||||
|
||||
# Result type of `dtypes(kind="integral")`: signed ∪ unsigned integers.
@type_check_only
class _DTypesInteger(_DTypesInt, _DTypesUInt): ...
|
||||
|
||||
# Result type of `dtypes(kind="real floating")`.
@type_check_only
class _DTypesFloat(TypedDict):
    float32: np.dtype[np.float32]
    float64: np.dtype[np.float64]
|
||||
|
||||
# Result type of `dtypes(kind="complex floating")`.
@type_check_only
class _DTypesComplex(TypedDict):
    complex64: np.dtype[np.complex64]
    complex128: np.dtype[np.complex128]
|
||||
|
||||
# Result type of `dtypes(kind="numeric")`: all integer and float dtypes.
@type_check_only
class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ...
|
||||
|
||||
# Result type of `dtypes(kind=None)`: every array-API dtype.
@type_check_only
class _DTypes(_DTypesBool, _DTypesNumber): ...
|
||||
|
||||
# Result type for an arbitrary tuple of kinds: any subset of the dtype keys
# may be present (hence ``total=False``).
@type_check_only
class _DTypesUnion(TypedDict, total=False):
    bool: np.dtype[np.bool]
    int8: np.dtype[np.int8]
    int16: np.dtype[np.int16]
    int32: np.dtype[np.int32]
    int64: np.dtype[np.int64]
    uint8: np.dtype[np.uint8]
    uint16: np.dtype[np.uint16]
    uint32: np.dtype[np.uint32]
    uint64: np.dtype[np.uint64]
    float32: np.dtype[np.float32]
    float64: np.dtype[np.float64]
    complex64: np.dtype[np.complex64]
    complex128: np.dtype[np.complex128]
|
||||
|
||||
# Result type of `dtypes(kind=())`: an empty dict.
_EmptyDict: TypeAlias = dict[Never, Never]
|
||||
|
||||
# Typing stub for the runtime class defined in `numpy/_array_api_info.py`.
@final
class __array_namespace_info__:
    # The runtime class sets `__module__ = 'numpy'`.
    __module__: ClassVar[Literal['numpy']]

    def capabilities(self) -> _Capabilities: ...
    def default_device(self) -> _Device: ...
    def default_dtypes(
        self,
        *,
        device: _DeviceLike = ...,
    ) -> _DefaultDTypes: ...
    def devices(self) -> list[_Device]: ...

    # `dtypes` is overloaded so that the precise TypedDict is inferred from
    # the `kind` argument, including any permutation of an equivalent tuple.
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: None = ...,
    ) -> _DTypes: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: _Permute1[_KindBool],
    ) -> _DTypesBool: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: _Permute1[_KindInt],
    ) -> _DTypesInt: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: _Permute1[_KindUInt],
    ) -> _DTypesUInt: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: _Permute1[_KindFloat],
    ) -> _DTypesFloat: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: _Permute1[_KindComplex],
    ) -> _DTypesComplex: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: (
            _Permute1[_KindInteger]
            | _Permute2[_KindInt, _KindUInt]
        ),
    ) -> _DTypesInteger: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: (
            _Permute1[_KindNumber]
            | _Permute3[_KindInteger, _KindFloat, _KindComplex]
        ),
    ) -> _DTypesNumber: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: tuple[()],
    ) -> _EmptyDict: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: tuple[_Kind, ...],
    ) -> _DTypesUnion: ...
|
||||
39
lib/python3.11/site-packages/numpy/_configtool.py
Normal file
39
lib/python3.11/site-packages/numpy/_configtool.py
Normal file
@ -0,0 +1,39 @@
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from .lib._utils_impl import get_include
|
||||
from .version import __version__
|
||||
|
||||
|
||||
def main() -> None:
    """Command-line entry point for ``numpy-config``.

    Supported flags: ``--version`` (print NumPy's version and exit),
    ``--cflags`` (print the ``-I`` flag for the NumPy headers) and
    ``--pkgconfigdir`` (print the directory holding ``numpy.pc``).
    With no arguments, the help text is printed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--version", action="version", version=__version__,
        help="Print the version and exit.")
    parser.add_argument(
        "--cflags", action="store_true",
        help="Compile flag needed when using the NumPy headers.")
    parser.add_argument(
        "--pkgconfigdir", action="store_true",
        help=("Print the pkgconfig directory in which `numpy.pc` is stored "
              "(useful for setting $PKG_CONFIG_PATH)."))
    args = parser.parse_args()

    # With no flags at all, show the help text (and fall through: no flag
    # below is set, so nothing else is printed).
    if not sys.argv[1:]:
        parser.print_help()
    if args.cflags:
        print("-I" + get_include())
    if args.pkgconfigdir:
        # numpy.pc lives in <prefix>/lib/pkgconfig, one level up from the
        # include directory.
        pkgconfig_dir = Path(get_include()) / '..' / 'lib' / 'pkgconfig'
        print(pkgconfig_dir.resolve())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
1
lib/python3.11/site-packages/numpy/_configtool.pyi
Normal file
1
lib/python3.11/site-packages/numpy/_configtool.pyi
Normal file
@ -0,0 +1 @@
|
||||
# Stub for the `numpy-config` command-line entry point (see _configtool.py).
def main() -> None: ...
|
||||
186
lib/python3.11/site-packages/numpy/_core/__init__.py
Normal file
186
lib/python3.11/site-packages/numpy/_core/__init__.py
Normal file
@ -0,0 +1,186 @@
|
||||
"""
|
||||
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
|
||||
|
||||
Please note that this module is private. All functions and objects
|
||||
are available in the main ``numpy`` namespace - use that instead.
|
||||
|
||||
"""
|
||||
|
||||
import os

from numpy.version import version as __version__

# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
    if envkey not in os.environ:
        os.environ[envkey] = '1'
        env_added.append(envkey)

# Importing the compiled extension is the step most likely to fail on a
# broken install; wrap it to produce an actionable error message.
try:
    from . import multiarray
except ImportError as exc:
    import sys
    msg = """

IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!

Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.

We have compiled some common reasons and troubleshooting tips at:

    https://numpy.org/devdocs/user/troubleshooting-importerror.html

Please note and check the following:

  * The Python version is: Python%d.%d from "%s"
  * The NumPy version is: "%s"

and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.

Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
       __version__, exc)
    raise ImportError(msg) from exc
finally:
    # Restore the environment exactly as we found it, whether or not the
    # import succeeded, then drop the temporaries from the namespace.
    for envkey in env_added:
        del os.environ[envkey]
    del envkey
    del env_added
    del os
|
||||
|
||||
from . import umath

# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
        hasattr(umath, '_multiarray_umath')):
    import sys
    path = sys.modules['numpy'].__path__
    msg = ("Something is wrong with the numpy installation. "
           "While importing we detected an older version of "
           "numpy in {}. One method of fixing this is to repeatedly uninstall "
           "numpy until none is found, then reinstall this version.")
    raise ImportError(msg.format(path))
|
||||
|
||||
from . import numerictypes as nt
from .numerictypes import sctypeDict, sctypes

# Register the scalar-type dictionary with the C extension before the
# submodules below are imported (they rely on it).
multiarray.set_typeDict(nt.sctypeDict)
from . import (
    _machar,
    einsumfunc,
    fromnumeric,
    function_base,
    getlimits,
    numeric,
    shape_base,
)
from .einsumfunc import *
from .fromnumeric import *
from .function_base import *
from .getlimits import *

# Note: module name memmap is overwritten by a class with same name
from .memmap import *
from .numeric import *
from .records import recarray, record
from .shape_base import *

del nt
|
||||
|
||||
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
# add these for module-freeze analysis (like PyInstaller)
from . import (
    _add_newdocs,
    _add_newdocs_scalars,
    _dtype,
    _dtype_ctypes,
    _internal,
    _methods,
)
from .numeric import absolute as abs

# Array API standard aliases for the historical NumPy ufunc names.
acos = numeric.arccos
acosh = numeric.arccosh
asin = numeric.arcsin
asinh = numeric.arcsinh
atan = numeric.arctan
atanh = numeric.arctanh
atan2 = numeric.arctan2
concat = numeric.concatenate
bitwise_left_shift = numeric.left_shift
bitwise_invert = numeric.invert
bitwise_right_shift = numeric.right_shift
permute_dims = numeric.transpose
pow = numeric.power

# Public API: the aliases above plus everything re-exported by the
# submodules imported with `*` earlier.
__all__ = [
    "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2",
    "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat",
    "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray"
]
__all__ += numeric.__all__
__all__ += function_base.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
||||
|
||||
|
||||
def _ufunc_reduce(func):
    """Pickle a ufunc by its name.

    pickle resolves a returned string against the object's module, and it
    also accepts a dotted ``__qualname__``-style string, which would allow
    ufuncs living as attributes (as in Numba).
    See also: https://github.com/dask/distributed/issues/3450
    """
    name = func.__name__
    return name
|
||||
|
||||
|
||||
def _DType_reconstruct(scalar_type):
    """Rebuild a DType class from its scalar type (pickle helper).

    Work-around so that ``type(np.dtype(np.float64))`` and friends can be
    pickled; to be replaced with a better solution once DTypes become
    HeapTypes.
    """
    concrete = dtype(scalar_type)
    return type(concrete)
|
||||
|
||||
|
||||
def _DType_reduce(DType):
|
||||
# As types/classes, most DTypes can simply be pickled by their name:
|
||||
if not DType._legacy or DType.__module__ == "numpy.dtypes":
|
||||
return DType.__name__
|
||||
|
||||
# However, user defined legacy dtypes (like rational) do not end up in
|
||||
# `numpy.dtypes` as module and do not have a public class at all.
|
||||
# For these, we pickle them by reconstructing them from the scalar type:
|
||||
scalar_type = DType.type
|
||||
return _DType_reconstruct, (scalar_type,)
|
||||
|
||||
|
||||
def __getattr__(name):
    """Module-level ``__getattr__``: deprecated access to ``MachAr`` only."""
    if name != "MachAr":
        raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
    # Deprecated 2022-11-22, NumPy 1.25.
    import warnings
    warnings.warn(
        "The `np._core.MachAr` is considered private API (NumPy 1.24)",
        DeprecationWarning, stacklevel=2,
    )
    return _machar.MachAr
|
||||
|
||||
|
||||
import copyreg

# Register pickle support for ufuncs and DType classes (both are C-level
# objects that cannot be pickled directly).
copyreg.pickle(ufunc, _ufunc_reduce)
copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)

# Unclutter namespace (must keep _*_reconstruct for unpickling)
del copyreg, _ufunc_reduce, _DType_reduce

from numpy._pytesttester import PytestTester

# `numpy._core.test()` runs this subpackage's test suite.
test = PytestTester(__name__)
del PytestTester
|
||||
2
lib/python3.11/site-packages/numpy/_core/__init__.pyi
Normal file
2
lib/python3.11/site-packages/numpy/_core/__init__.pyi
Normal file
@ -0,0 +1,2 @@
|
||||
# NOTE: The `np._core` namespace is deliberately kept empty due to it
|
||||
# being private
|
||||
6967
lib/python3.11/site-packages/numpy/_core/_add_newdocs.py
Normal file
6967
lib/python3.11/site-packages/numpy/_core/_add_newdocs.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,3 @@
|
||||
from .overrides import get_array_function_like_doc as get_array_function_like_doc
|
||||
|
||||
def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ...
|
||||
390
lib/python3.11/site-packages/numpy/_core/_add_newdocs_scalars.py
Normal file
390
lib/python3.11/site-packages/numpy/_core/_add_newdocs_scalars.py
Normal file
@ -0,0 +1,390 @@
|
||||
"""
|
||||
This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
|
||||
our sphinx ``conf.py`` during doc builds, where we want to avoid showing
|
||||
platform-dependent information.
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
|
||||
from numpy._core import dtype
|
||||
from numpy._core import numerictypes as _numerictypes
|
||||
from numpy._core.function_base import add_newdoc
|
||||
|
||||
##############################################################################
|
||||
#
|
||||
# Documentation for concrete scalar classes
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
def numeric_type_aliases(aliases):
|
||||
def type_aliases_gen():
|
||||
for alias, doc in aliases:
|
||||
try:
|
||||
alias_type = getattr(_numerictypes, alias)
|
||||
except AttributeError:
|
||||
# The set of aliases that actually exist varies between platforms
|
||||
pass
|
||||
else:
|
||||
yield (alias_type, alias, doc)
|
||||
return list(type_aliases_gen())
|
||||
|
||||
|
||||
# All platform-dependent scalar aliases with their docs; entries that do not
# exist on this platform (e.g. float96 vs float128) are filtered out by
# numeric_type_aliases().
possible_aliases = numeric_type_aliases([
    ('int8', '8-bit signed integer (``-128`` to ``127``)'),
    ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
    ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
    ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
    ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
    ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
    ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
    ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
    ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
    ('float96', '96-bit extended-precision floating-point number type'),
    ('float128', '128-bit extended-precision floating-point number type'),
    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
|
||||
|
||||
|
||||
def _get_platform_and_machine():
    """Return a ``(system, machine)`` pair describing this platform.

    Uses ``os.uname()`` where available (POSIX); on Windows the machine is
    taken from the processor environment variables, and elsewhere it falls
    back to ``'unknown'``.
    """
    if hasattr(os, 'uname'):
        uname = os.uname()
        # uname() fields: (sysname, nodename, release, version, machine)
        return uname.sysname, uname.machine

    system = sys.platform
    if system == 'win32':
        # WOW64: the 64-bit architecture is reported via ARCHITEW6432.
        machine = (os.environ.get('PROCESSOR_ARCHITEW6432', '')
                   or os.environ.get('PROCESSOR_ARCHITECTURE', ''))
    else:
        machine = 'unknown'
    return system, machine
|
||||
|
||||
|
||||
# Computed once at import; rendered into each scalar docstring below.
_system, _machine = _get_platform_and_machine()
_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
|
||||
|
||||
|
||||
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
    """Attach a docstring to the scalar type named *obj*.

    The final docstring is *doc* plus rST field-list lines for the character
    code, the canonical name (when *obj* is itself an alias), any
    *fixed_aliases*, and platform-dependent aliases from ``possible_aliases``.
    """
    # note: `:field: value` is rST syntax which renders as field lists.
    o = getattr(_numerictypes, obj)

    character_code = dtype(o).char
    # Only emit a canonical-name line when `obj` is not the type's own name.
    canonical_name_doc = "" if obj == o.__name__ else \
        f":Canonical name: `numpy.{obj}`\n    "
    if fixed_aliases:
        alias_doc = ''.join(f":Alias: `numpy.{alias}`\n    "
                            for alias in fixed_aliases)
    else:
        alias_doc = ''
    # Platform-dependent aliases: entries of `possible_aliases` whose type
    # matches `o` on this build.
    alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n    "
                         for (alias_type, alias, doc) in possible_aliases if alias_type is o)

    docstring = f"""
    {doc.strip()}

    :Character code: ``'{character_code}'``
    {canonical_name_doc}{alias_doc}
    """

    add_newdoc('numpy._core.numerictypes', obj, docstring)
|
||||
|
||||
|
||||
# Shared docstring for the `bool` and `bool_` names (the same scalar type).
_bool_docstring = (
    """
    Boolean type (True or False), stored as a byte.

    .. warning::

       The :class:`bool` type is not a subclass of the :class:`int_` type
       (the :class:`bool` is not even a number type). This is different
       than Python's default implementation of :class:`bool` as a
       sub-class of :class:`int`.
    """
)
|
||||
|
||||
# `bool` and `bool_` refer to the same scalar type; document both names.
add_newdoc_for_scalar_type('bool', [], _bool_docstring)

add_newdoc_for_scalar_type('bool_', [], _bool_docstring)

add_newdoc_for_scalar_type('byte', [],
    """
    Signed integer type, compatible with C ``char``.
    """)

add_newdoc_for_scalar_type('short', [],
    """
    Signed integer type, compatible with C ``short``.
    """)

add_newdoc_for_scalar_type('intc', [],
    """
    Signed integer type, compatible with C ``int``.
    """)

# TODO: These docs probably need an if to highlight the default rather than
# the C-types (and be correct).
add_newdoc_for_scalar_type('int_', [],
    """
    Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
    systems.
    """)

add_newdoc_for_scalar_type('longlong', [],
    """
    Signed integer type, compatible with C ``long long``.
    """)

add_newdoc_for_scalar_type('ubyte', [],
    """
    Unsigned integer type, compatible with C ``unsigned char``.
    """)

add_newdoc_for_scalar_type('ushort', [],
    """
    Unsigned integer type, compatible with C ``unsigned short``.
    """)

add_newdoc_for_scalar_type('uintc', [],
    """
    Unsigned integer type, compatible with C ``unsigned int``.
    """)
|
||||
|
||||
# Fix: the docstring previously read "Unsigned signed integer type", a
# self-contradiction; `uint` is unsigned.
add_newdoc_for_scalar_type('uint', [],
    """
    Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit
    systems.
    """)
|
||||
|
||||
# Fix: the docstring previously said "Signed integer type" although
# `ulonglong` maps to C `unsigned long long`.
add_newdoc_for_scalar_type('ulonglong', [],
    """
    Unsigned integer type, compatible with C ``unsigned long long``.
    """)
|
||||
|
||||
# Floating-point, complex and object scalar types.
add_newdoc_for_scalar_type('half', [],
    """
    Half-precision floating-point number type.
    """)

add_newdoc_for_scalar_type('single', [],
    """
    Single-precision floating-point number type, compatible with C ``float``.
    """)

add_newdoc_for_scalar_type('double', [],
    """
    Double-precision floating-point number type, compatible with Python
    :class:`float` and C ``double``.
    """)

add_newdoc_for_scalar_type('longdouble', [],
    """
    Extended-precision floating-point number type, compatible with C
    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
    """)

add_newdoc_for_scalar_type('csingle', [],
    """
    Complex number type composed of two single-precision floating-point
    numbers.
    """)

add_newdoc_for_scalar_type('cdouble', [],
    """
    Complex number type composed of two double-precision floating-point
    numbers, compatible with Python :class:`complex`.
    """)

add_newdoc_for_scalar_type('clongdouble', [],
    """
    Complex number type composed of two extended-precision floating-point
    numbers.
    """)

add_newdoc_for_scalar_type('object_', [],
    """
    Any Python object.
    """)
|
||||
|
||||
# Flexible (string) scalar types.  Raw strings: the docstrings embed \x00.
add_newdoc_for_scalar_type('str_', [],
    r"""
    A unicode string.

    This type strips trailing null codepoints.

    >>> s = np.str_("abc\x00")
    >>> s
    'abc'

    Unlike the builtin :class:`str`, this supports the
    :ref:`python:bufferobjects`, exposing its contents as UCS4:

    >>> m = memoryview(np.str_("abc"))
    >>> m.format
    '3w'
    >>> m.tobytes()
    b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
    """)

add_newdoc_for_scalar_type('bytes_', [],
    r"""
    A byte string.

    When used in arrays, this type strips trailing null bytes.
    """)
|
||||
|
||||
# The void scalar has an unusual constructor with three calling conventions,
# documented at length below.
add_newdoc_for_scalar_type('void', [],
    r"""
    np.void(length_or_data, /, dtype=None)

    Create a new structured or unstructured void scalar.

    Parameters
    ----------
    length_or_data : int, array-like, bytes-like, object
        One of multiple meanings (see notes). The length or
        bytes data of an unstructured void. Or alternatively,
        the data to be stored in the new scalar when `dtype`
        is provided.
        This can be an array-like, in which case an array may
        be returned.
    dtype : dtype, optional
        If provided the dtype of the new scalar. This dtype must
        be "void" dtype (i.e. a structured or unstructured void,
        see also :ref:`defining-structured-types`).

        .. versionadded:: 1.24

    Notes
    -----
    For historical reasons and because void scalars can represent both
    arbitrary byte data and structured dtypes, the void constructor
    has three calling conventions:

    1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
       ``\0`` bytes.  The 5 can be a Python or NumPy integer.
    2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
       The dtype itemsize will match the byte string length, here ``"V10"``.
    3. When a ``dtype=`` is passed the call is roughly the same as an
       array creation.  However, a void scalar rather than array is returned.

    Please see the examples which show all three different conventions.

    Examples
    --------
    >>> np.void(5)
    np.void(b'\x00\x00\x00\x00\x00')
    >>> np.void(b'abcd')
    np.void(b'\x61\x62\x63\x64')
    >>> np.void((3.2, b'eggs'), dtype="d,S5")
    np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
    >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
    np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])

    """)
|
||||
|
||||
# Temporal scalar types.
add_newdoc_for_scalar_type('datetime64', [],
    """
    If created from a 64-bit integer, it represents an offset from
    ``1970-01-01T00:00:00``.
    If created from string, the string can be in ISO 8601 date
    or datetime format.

    When parsing a string to create a datetime object, if the string contains
    a trailing timezone (A 'Z' or a timezone offset), the timezone will be
    dropped and a User Warning is given.

    Datetime64 objects should be considered to be UTC and therefore have an
    offset of +0000.

    >>> np.datetime64(10, 'Y')
    np.datetime64('1980')
    >>> np.datetime64('1980', 'Y')
    np.datetime64('1980')
    >>> np.datetime64(10, 'D')
    np.datetime64('1970-01-11')

    See :ref:`arrays.datetime` for more information.
    """)

add_newdoc_for_scalar_type('timedelta64', [],
    """
    A timedelta stored as a 64-bit integer.

    See :ref:`arrays.datetime` for more information.
    """)
|
||||
|
||||
# `is_integer` is documented once on the abstract `integer` base type and so
# applies to every concrete integer scalar type.
add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
    """
    integer.is_integer() -> bool

    Return ``True`` if the number is finite with integral value.

    .. versionadded:: 1.22

    Examples
    --------
    >>> import numpy as np
    >>> np.int64(-2).is_integer()
    True
    >>> np.uint32(5).is_integer()
    True
    """))
|
||||
|
||||
# TODO: work out how to put this on the base class, np.floating
for float_name in ('half', 'single', 'double', 'longdouble'):
    add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
        f"""
        {float_name}.as_integer_ratio() -> (int, int)

        Return a pair of integers, whose ratio is exactly equal to the original
        floating point number, and with a positive denominator.
        Raise `OverflowError` on infinities and a `ValueError` on NaNs.

        >>> np.{float_name}(10.0).as_integer_ratio()
        (10, 1)
        >>> np.{float_name}(0.0).as_integer_ratio()
        (0, 1)
        >>> np.{float_name}(-.25).as_integer_ratio()
        (-1, 4)
        """))

    # is_integer is documented per concrete floating type as well.
    add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
        f"""
        {float_name}.is_integer() -> bool

        Return ``True`` if the floating point number is finite with integral
        value, and ``False`` otherwise.

        .. versionadded:: 1.22

        Examples
        --------
        >>> np.{float_name}(-2.0).is_integer()
        True
        >>> np.{float_name}(3.2).is_integer()
        False
        """))
|
||||
|
||||
# Document `bit_count` on every sized integer scalar type.  Fix: the tuple
# previously listed 'int64'/'uint64' three times each, re-running the same
# add_newdoc calls redundantly; each type is now listed exactly once.
for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                 'int64', 'uint64'):
    # Add negative examples for signed cases by checking typecode
    add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
        f"""
        {int_name}.bit_count() -> int

        Computes the number of 1-bits in the absolute value of the input.
        Analogous to the builtin `int.bit_count` or ``popcount`` in C++.

        Examples
        --------
        >>> np.{int_name}(127).bit_count()
        7""" +
        (f"""
        >>> np.{int_name}(-127).bit_count()
        7
        """ if dtype(int_name).char.islower() else "")))
|
||||
@ -0,0 +1,16 @@
|
||||
from collections.abc import Iterable
from typing import Final

import numpy as np

# Type stub for the scalar-docstring helper module.
# Table of (scalar type, alias name, description) used to build
# platform-dependent alias documentation.
possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ...
# Platform identifiers captured at import time.
_system: Final[str] = ...
_machine: Final[str] = ...
_doc_alias_string: Final[str] = ...
_bool_docstring: Final[str] = ...
# Loop variables that leak to module scope in the implementation.
int_name: str = ...
float_name: str = ...

def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...
def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...
def _get_platform_and_machine() -> tuple[str, str]: ...
|
||||
134
lib/python3.11/site-packages/numpy/_core/_asarray.py
Normal file
134
lib/python3.11/site-packages/numpy/_core/_asarray.py
Normal file
@ -0,0 +1,134 @@
|
||||
"""
|
||||
Functions in the ``as*array`` family that promote array-likes into arrays.
|
||||
|
||||
`require` fits this category despite its name not matching this pattern.
|
||||
"""
|
||||
from .multiarray import array, asanyarray
|
||||
from .overrides import (
|
||||
array_function_dispatch,
|
||||
finalize_array_function_like,
|
||||
set_module,
|
||||
)
|
||||
|
||||
__all__ = ["require"]
|
||||
|
||||
|
||||
# Every accepted requirement spelling (single-letter and long form) mapped to
# its canonical single-letter flag, as consumed by `require` below.
POSSIBLE_FLAGS = {
    'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
    'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
    'A': 'A', 'ALIGNED': 'A',
    'W': 'W', 'WRITEABLE': 'W',
    'O': 'O', 'OWNDATA': 'O',
    'E': 'E', 'ENSUREARRAY': 'E'
}
|
||||
|
||||
|
||||
@finalize_array_function_like
@set_module('numpy')
def require(a, dtype=None, requirements=None, *, like=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    This function is useful to be sure that an array with the correct flags
    is returned for passing to compiled code (perhaps through ctypes).

    Parameters
    ----------
    a : array_like
       The object to be converted to a type-and-requirement-satisfying array.
    dtype : data-type
       The required data-type. If None preserve the current dtype. If your
       application requires the data to be in native byteorder, include
       a byteorder specification as a part of the dtype specification.
    requirements : str or sequence of str
       The requirements list can be any of the following

       * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
       * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
       * 'ALIGNED' ('A')      - ensure a data-type aligned array
       * 'WRITEABLE' ('W')    - ensure a writable array
       * 'OWNDATA' ('O')      - ensure an array that owns its own data
       * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Array with specified requirements and type if given.

    See Also
    --------
    asarray : Convert input to an ndarray.
    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    ndarray.flags : Information about the memory layout of the array.

    Notes
    -----
    The returned array will be guaranteed to have the listed requirements
    by making a copy if needed.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.arange(6).reshape(2,3)
    >>> x.flags
      C_CONTIGUOUS : True
      F_CONTIGUOUS : False
      OWNDATA : False
      WRITEABLE : True
      ALIGNED : True
      WRITEBACKIFCOPY : False

    >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    >>> y.flags
      C_CONTIGUOUS : False
      F_CONTIGUOUS : True
      OWNDATA : True
      WRITEABLE : True
      ALIGNED : True
      WRITEBACKIFCOPY : False

    """
    if like is not None:
        return _require_with_like(like, a, dtype=dtype,
                                  requirements=requirements)

    # No requirements at all: just coerce the dtype, preserving subclasses.
    if not requirements:
        return asanyarray(a, dtype=dtype)

    # Normalize every spelling to its canonical one-letter flag.
    flags = {POSSIBLE_FLAGS[req.upper()] for req in requirements}

    # 'E' demands a base-class ndarray, i.e. no subclass pass-through.
    subok = 'E' not in flags
    flags.discard('E')

    if {'C', 'F'} <= flags:
        raise ValueError('Cannot specify both "C" and "F" order')

    order = 'A'
    for letter in ('F', 'C'):
        if letter in flags:
            order = letter
            flags.remove(letter)
            break

    arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)

    # Any remaining flag not already satisfied forces one copy.
    for prop in flags:
        if not arr.flags[prop]:
            return arr.copy(order)
    return arr
|
||||
|
||||
|
||||
# Dispatcher used by `require` to honor `like=`; created after the function
# definition so it wraps the final, decorated implementation.
_require_with_like = array_function_dispatch()(require)
|
||||
41
lib/python3.11/site-packages/numpy/_core/_asarray.pyi
Normal file
41
lib/python3.11/site-packages/numpy/_core/_asarray.pyi
Normal file
@ -0,0 +1,41 @@
|
||||
from collections.abc import Iterable
from typing import Any, Literal, TypeAlias, TypeVar, overload

from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc

_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])

# All requirement spellings except 'E'/'ENSUREARRAY' (which changes the
# return-type guarantee and so gets its own overload below).
_Requirements: TypeAlias = Literal[
    "C", "C_CONTIGUOUS", "CONTIGUOUS",
    "F", "F_CONTIGUOUS", "FORTRAN",
    "A", "ALIGNED",
    "W", "WRITEABLE",
    "O", "OWNDATA"
]
_E: TypeAlias = Literal["E", "ENSUREARRAY"]
_RequirementsWithE: TypeAlias = _Requirements | _E

# Overload 1: ndarray in, no dtype change, no 'E' -> same array type out.
@overload
def require(
    a: _ArrayT,
    dtype: None = ...,
    requirements: _Requirements | Iterable[_Requirements] | None = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> _ArrayT: ...
# Overload 2: 'E' present -> a base ndarray is returned.
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: _E | Iterable[_RequirementsWithE] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
# Overload 3: general fallback.
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: _Requirements | Iterable[_Requirements] | None = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
|
||||
366
lib/python3.11/site-packages/numpy/_core/_dtype.py
Normal file
366
lib/python3.11/site-packages/numpy/_core/_dtype.py
Normal file
@ -0,0 +1,366 @@
|
||||
"""
|
||||
A place for code to be called from the implementation of np.dtype
|
||||
|
||||
String handling is much easier to do correctly in python.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
# Map a dtype kind character to the English stem used to build its name
# (e.g. 'f' + itemsize*8 -> 'float64').
_kind_to_stem = {
    'u': 'uint',
    'i': 'int',
    'c': 'complex',
    'f': 'float',
    'b': 'bool',
    'V': 'void',
    'O': 'object',
    'M': 'datetime',
    'm': 'timedelta',
    'S': 'bytes',
    'U': 'str',
}


def _kind_name(dtype):
    """Return the English name stem for *dtype*'s kind character.

    Raises
    ------
    RuntimeError
        If the kind is not in `_kind_to_stem` — an internal inconsistency,
        not a user error.
    """
    try:
        return _kind_to_stem[dtype.kind]
    except KeyError:
        # Fix: the previous `except KeyError as e` bound a name that was
        # never used (and `from None` discards the context anyway).
        raise RuntimeError(
            f"internal dtype error, unknown kind {dtype.kind!r}"
        ) from None
|
||||
|
||||
|
||||
def __str__(dtype):
    """Implements ``str(dtype)``: the shortest faithful spelling."""
    if dtype.fields is not None:
        # Structured dtypes render via their construction syntax.
        return _struct_str(dtype, include_align=True)
    if dtype.subdtype:
        return _subarray_str(dtype)
    if issubclass(dtype.type, np.flexible) or not dtype.isnative:
        # Flexible or byte-swapped dtypes need the explicit array protocol
        # string (e.g. '>i4', 'S5').
        return dtype.str
    return dtype.name
|
||||
|
||||
|
||||
def __repr__(dtype):
    """Implements ``repr(dtype)``, e.g. ``dtype('int32')``."""
    inner = _construction_repr(dtype, include_align=False)
    if dtype.isalignedstruct:
        # align=True is not encoded in the construction argument above, so
        # show it as an explicit second constructor argument.
        inner += ", align=True"
    return f"dtype({inner})"
|
||||
|
||||
|
||||
def _unpack_field(dtype, offset, title=None):
    """Normalize an entry of ``dtype.fields`` to a 3-tuple.

    Call as::

        dtype, offset, title = _unpack_field(*dtype.fields[name])

    Entries without a title unpack with ``title=None``.
    """
    return (dtype, offset, title)
|
||||
|
||||
|
||||
def _isunsized(dtype):
    """True for flexible dtypes whose size is still unspecified.

    Mirrors the C macro PyDataType_ISUNSIZED (itemsize of zero).
    """
    return not dtype.itemsize
|
||||
|
||||
|
||||
def _construction_repr(dtype, include_align=False, short=False):
    """Render the dtype as the string one would pass to the `dtype`
    constructor, without the surrounding ``dtype(...)``.

    If no further constructor parameters are given, the result reproduces
    the exact memory layout.

    Parameters
    ----------
    include_align : bool
        If true, emit ``'align': True`` inside the struct construction dict
        when needed — use this when you want a standalone repr.  If false,
        the sticky aligned-struct flag is not preserved here; that mode is
        for a full ``repr`` where ``align=True`` is shown separately.
    short : bool
        If true, prefer the compact kind+itemsize form (e.g. ``'<f8'``)
        over the long type name.
    """
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=include_align)
    if dtype.subdtype:
        return _subarray_str(dtype)
    return _scalar_str(dtype, short=short)
|
||||
|
||||
|
||||
def _scalar_str(dtype, short):
    """Render a non-structured, non-subarray dtype as a constructor string.

    Dispatches on ``dtype.type``/flags via an elif chain; earlier branches
    take precedence, so the ordering below is significant.
    """
    byteorder = _byte_order_str(dtype)

    if dtype.type == np.bool:
        if short:
            return "'?'"
        else:
            return "'bool'"

    elif dtype.type == np.object_:
        # The object reference may be different sizes on different
        # platforms, so it should never include the itemsize here.
        return "'O'"

    elif dtype.type == np.bytes_:
        if _isunsized(dtype):
            return "'S'"
        else:
            return "'S%d'" % dtype.itemsize

    elif dtype.type == np.str_:
        if _isunsized(dtype):
            return f"'{byteorder}U'"
        else:
            # itemsize is in bytes; unicode stores 4 bytes per code point,
            # so the U-count is itemsize/4 (%d truncates the float exactly).
            return "'%sU%d'" % (byteorder, dtype.itemsize / 4)

    elif dtype.type == str:
        # NOTE(review): presumably the variable-width string dtype — confirm.
        return "'T'"

    elif not type(dtype)._legacy:
        # Non-legacy (user-defined new-style) dtypes spell out their class
        # name plus the size in bits.
        return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'"

    # unlike the other types, subclasses of void are preserved - but
    # historically the repr does not actually reveal the subclass
    elif issubclass(dtype.type, np.void):
        if _isunsized(dtype):
            return "'V'"
        else:
            return "'V%d'" % dtype.itemsize

    elif dtype.type == np.datetime64:
        return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'"

    elif dtype.type == np.timedelta64:
        return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'"

    elif dtype.isbuiltin == 2:
        # User-registered dtypes just use their type's name.
        return dtype.type.__name__

    elif np.issubdtype(dtype, np.number):
        # Short repr with endianness, like '<f8'
        if short or dtype.byteorder not in ('=', '|'):
            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)

        # Longer repr, like 'float64'
        else:
            return "'%s%d'" % (_kind_name(dtype), 8 * dtype.itemsize)

    else:
        raise RuntimeError(
            "Internal error: NumPy dtype unrecognized type number")
|
||||
|
||||
|
||||
def _byte_order_str(dtype):
    """ Normalize byteorder to '<' or '>' """
    # Hack to obtain the native and swapped byte order characters: swapping
    # an int dtype once gives the non-native character, swapping again gives
    # the native one.
    swapped = np.dtype(int).newbyteorder('S')
    native = swapped.newbyteorder('S')

    order = dtype.byteorder
    if order == '=':
        return native.byteorder
    if order == 'S':
        # TODO: this path can never be reached
        return swapped.byteorder
    if order == '|':
        # Not applicable (single-byte / no byteorder).
        return ''
    return order
|
||||
|
||||
|
||||
def _datetime_metadata_str(dtype):
    """Render datetime64/timedelta64 unit metadata, e.g. '[ns]' or '[25s]'.

    Generic (unitless) dtypes render as the empty string.
    """
    # TODO: this duplicates the C metastr_to_unicode functionality
    unit, count = np.datetime_data(dtype)
    if unit == 'generic':
        return ''
    prefix = '' if count == 1 else str(count)
    return f'[{prefix}{unit}]'
|
||||
|
||||
|
||||
def _struct_dict_str(dtype, includealignedflag):
    """Render a structured dtype as its dict construction form.

    Parameters
    ----------
    dtype : np.dtype
        A structured dtype (``dtype.fields is not None``).
    includealignedflag : bool
        If true and the aligned-struct flag is set, append
        ``'aligned': True`` to the emitted dict.
    """
    # unpack the fields dictionary into ls
    names = dtype.names
    fld_dtypes = []
    offsets = []
    titles = []
    for name in names:
        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
        fld_dtypes.append(fld_dtype)
        offsets.append(offset)
        titles.append(title)

    # Build up a string to make the dictionary

    # Legacy print modes (values look like version numbers, 121 ≘ 1.21 —
    # see _get_legacy_print_mode) omit the spaces after ':' and ','.
    if np._core.arrayprint._get_legacy_print_mode() <= 121:
        colon = ":"
        fieldsep = ","
    else:
        colon = ": "
        fieldsep = ", "

    # First, the names
    ret = "{'names'%s[" % colon
    ret += fieldsep.join(repr(name) for name in names)

    # Second, the formats
    ret += f"], 'formats'{colon}["
    ret += fieldsep.join(
        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)

    # Third, the offsets
    ret += f"], 'offsets'{colon}["
    ret += fieldsep.join("%d" % offset for offset in offsets)

    # Fourth, the titles
    if any(title is not None for title in titles):
        ret += f"], 'titles'{colon}["
        ret += fieldsep.join(repr(title) for title in titles)

    # Fifth, the itemsize
    ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)

    if (includealignedflag and dtype.isalignedstruct):
        # Finally, the aligned flag
        ret += ", 'aligned'%sTrue}" % colon
    else:
        ret += "}"

    return ret
|
||||
|
||||
|
||||
def _aligned_offset(offset, alignment):
    """Round *offset* up to the next multiple of *alignment*."""
    # Classic ceiling-division formulation.
    return ((offset + alignment - 1) // alignment) * alignment
|
||||
|
||||
|
||||
def _is_packed(dtype):
    """Check whether the structured dtype has a simple layout: fields in
    order, each immediately following the previous (respecting natural
    alignment when the aligned flag is set), with no extra padding.

    When this returns true, the dtype can be reconstructed from just a list
    of (name, dtype) pairs, with no additional parameters.

    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
    """
    aligned = dtype.isalignedstruct
    max_align = 1
    cursor = 0
    for name in dtype.names:
        fld_dtype, fld_offset, _title = _unpack_field(*dtype.fields[name])

        if aligned:
            # The expected offset snaps up to the field's alignment.
            cursor = _aligned_offset(cursor, fld_dtype.alignment)
            max_align = max(max_align, fld_dtype.alignment)

        if fld_offset != cursor:
            return False
        cursor += fld_dtype.itemsize

    if aligned:
        # Trailing padding up to the struct's overall alignment.
        cursor = _aligned_offset(cursor, max_align)

    return cursor == dtype.itemsize
|
||||
|
||||
|
||||
def _struct_list_str(dtype):
    """Render a packed structured dtype as a list of field tuples."""
    entries = []
    for name in dtype.names:
        fld_dtype, _offset, title = _unpack_field(*dtype.fields[name])

        # Titled fields show as a ((title, name), ...) pair.
        if title is not None:
            head = f"({title!r}, {name!r}), "
        else:
            head = f"{name!r}, "

        # Special case subarray handling here: base format plus shape.
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            tail = f"{_construction_repr(base, short=True)}, {shape}"
        else:
            tail = _construction_repr(fld_dtype, short=True)

        entries.append(f"({head}{tail})")

    return "[" + ", ".join(entries) + "]"
|
||||
|
||||
|
||||
def _struct_str(dtype, include_align):
    """Render a structured dtype via list or dict construction syntax."""
    # The list form cannot carry the 'align=' flag, so when the flag is
    # requested and set — or the layout isn't simply packed — fall back to
    # the dict form.
    if (include_align and dtype.isalignedstruct) or not _is_packed(dtype):
        sub = _struct_dict_str(dtype, include_align)
    else:
        sub = _struct_list_str(dtype)

    # If the data type isn't the default, void, show it
    if dtype.type != np.void:
        return f"({dtype.type.__module__}.{dtype.type.__name__}, {sub})"
    return sub
|
||||
|
||||
|
||||
def _subarray_str(dtype):
    """Render a subarray dtype as ``(base_format, shape)``."""
    base, shape = dtype.subdtype
    base_repr = _construction_repr(base, short=True)
    return f"({base_repr}, {shape})"
|
||||
|
||||
|
||||
def _name_includes_bit_suffix(dtype):
    """Decide whether `_name_get` should append the size in bits."""
    scalar_type = dtype.type
    if scalar_type == np.object_:
        # pointer size varies by system, best to omit it
        return False
    if scalar_type == np.bool:
        # implied
        return False
    if scalar_type is None:
        # NOTE(review): presumably new-style dtypes without a fixed scalar
        # type — confirm.
        return True
    if np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
        # unspecified
        return False
    return True
|
||||
|
||||
|
||||
def _name_get(dtype):
    """Implements ``dtype.name.__get__``: a "bit name" such as 'float64'."""
    if dtype.isbuiltin == 2:
        # user dtypes don't promise to do anything special
        return dtype.type.__name__

    if not type(dtype)._legacy:
        base = type(dtype).__name__
    elif issubclass(dtype.type, np.void):
        # historically, void subclasses preserve their name, eg `record64`
        base = dtype.type.__name__
    else:
        base = _kind_name(dtype)

    # Append the size in bits where that is meaningful.
    if _name_includes_bit_suffix(dtype):
        base += str(dtype.itemsize * 8)

    # Datetimes additionally carry their unit metadata, e.g. '[ns]'.
    if dtype.type in (np.datetime64, np.timedelta64):
        base += _datetime_metadata_str(dtype)

    return base
|
||||
58
lib/python3.11/site-packages/numpy/_core/_dtype.pyi
Normal file
58
lib/python3.11/site-packages/numpy/_core/_dtype.pyi
Normal file
@ -0,0 +1,58 @@
|
||||
from typing import Final, TypeAlias, TypedDict, overload, type_check_only
from typing import Literal as L

from typing_extensions import ReadOnly, TypeVar

import numpy as np

# Type stub for _dtype.py (dtype string-rendering helpers).

###

_T = TypeVar("_T")

_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"]

@type_check_only
class _KindToStemType(TypedDict):
    u: ReadOnly[L["uint"]]
    i: ReadOnly[L["int"]]
    c: ReadOnly[L["complex"]]
    f: ReadOnly[L["float"]]
    b: ReadOnly[L["bool"]]
    V: ReadOnly[L["void"]]
    O: ReadOnly[L["object"]]
    M: ReadOnly[L["datetime"]]
    m: ReadOnly[L["timedelta"]]
    S: ReadOnly[L["bytes"]]
    U: ReadOnly[L["str"]]

###

_kind_to_stem: Final[_KindToStemType] = ...

#
def _kind_name(dtype: np.dtype) -> _Name: ...
def __str__(dtype: np.dtype) -> str: ...
def __repr__(dtype: np.dtype) -> str: ...

#
def _isunsized(dtype: np.dtype) -> bool: ...
def _is_packed(dtype: np.dtype) -> bool: ...
def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ...

#
def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ...
def _scalar_str(dtype: np.dtype, short: bool) -> str: ...
def _byte_order_str(dtype: np.dtype) -> str: ...
def _datetime_metadata_str(dtype: np.dtype) -> str: ...
def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ...
def _struct_list_str(dtype: np.dtype) -> str: ...
def _struct_str(dtype: np.dtype, include_align: bool) -> str: ...
def _subarray_str(dtype: np.dtype) -> str: ...
def _name_get(dtype: np.dtype) -> str: ...

# Overloads distinguish titled vs. untitled field entries.
@overload
def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ...
@overload
def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ...
def _aligned_offset(offset: int, alignment: int) -> int: ...
|
||||
120
lib/python3.11/site-packages/numpy/_core/_dtype_ctypes.py
Normal file
120
lib/python3.11/site-packages/numpy/_core/_dtype_ctypes.py
Normal file
@ -0,0 +1,120 @@
|
||||
"""
|
||||
Conversion from ctypes to dtype.
|
||||
|
||||
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
|
||||
something like::
|
||||
|
||||
def dtype_from_ctypes_type(t):
|
||||
# needed to ensure that the shape of `t` is within memoryview.format
|
||||
class DummyStruct(ctypes.Structure):
|
||||
_fields_ = [('a', t)]
|
||||
|
||||
# empty to avoid memory allocation
|
||||
ctype_0 = (DummyStruct * 0)()
|
||||
mv = memoryview(ctype_0)
|
||||
|
||||
# convert the struct, and slice back out the field
|
||||
return _dtype_from_pep3118(mv.format)['a']
|
||||
|
||||
Unfortunately, this fails because:
|
||||
|
||||
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
|
||||
* PEP3118 cannot represent unions, but both numpy and ctypes can
|
||||
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
|
||||
"""
|
||||
|
||||
# We delay-import ctypes for distributions that do not include it.
|
||||
# While this module is not used unless the user passes in ctypes
|
||||
# members, it is eagerly imported from numpy/_core/__init__.py.
|
||||
import numpy as np
|
||||
|
||||
|
||||
def _from_ctypes_array(t):
    """Convert a ctypes array type into a numpy subarray dtype."""
    element_dtype = dtype_from_ctypes_type(t._type_)
    shape = (t._length_,)
    return np.dtype((element_dtype, shape))
|
||||
|
||||
|
||||
def _from_ctypes_structure(t):
    """Convert a ctypes ``Structure`` subclass into a structured dtype.

    When the structure declares ``_pack_``, field offsets are computed
    explicitly from the packing limit; otherwise numpy's aligned-struct
    layout is used.  Bitfields (3-tuples in ``_fields_``) are rejected.
    """
    for item in t._fields_:
        if len(item) > 2:
            # a third tuple element is a bit width -> bitfield, which has
            # no dtype equivalent
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        import ctypes
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default offset, this is platform dependent
            # for some types.
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            # round the running offset up to the effective alignment
            current_offset = (
                (current_offset + effective_pack - 1) // effective_pack
            ) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)

        return np.dtype({
            "formats": formats,
            "offsets": offsets,
            "names": names,
            "itemsize": ctypes.sizeof(t)})
    else:
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))

        # by default, ctypes structs are aligned
        return np.dtype(fields, align=True)
|
||||
|
||||
|
||||
def _from_ctypes_scalar(t):
|
||||
"""
|
||||
Return the dtype type with endianness included if it's the case
|
||||
"""
|
||||
if getattr(t, '__ctype_be__', None) is t:
|
||||
return np.dtype('>' + t._type_)
|
||||
elif getattr(t, '__ctype_le__', None) is t:
|
||||
return np.dtype('<' + t._type_)
|
||||
else:
|
||||
return np.dtype(t._type_)
|
||||
|
||||
|
||||
def _from_ctypes_union(t):
    """Convert a ctypes ``Union`` subclass into an overlapping dtype where
    every field aliases offset 0.
    """
    import ctypes
    names = [fname for fname, _ in t._fields_]
    formats = [dtype_from_ctypes_type(ftyp) for _, ftyp in t._fields_]
    return np.dtype({
        "formats": formats,
        # union members all share the start of the memory block
        "offsets": [0] * len(names),
        "names": names,
        "itemsize": ctypes.sizeof(t)})
|
||||
|
||||
|
||||
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type
    """
    import _ctypes
    # dispatch on the ctypes category; pointers are checked before
    # structures/unions because they have no dtype equivalent
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    if issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    if issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    if issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    if isinstance(getattr(t, '_type_', None), str):
        return _from_ctypes_scalar(t)
    raise NotImplementedError(
        f"Unknown ctypes type {t.__name__}")
|
||||
83
lib/python3.11/site-packages/numpy/_core/_dtype_ctypes.pyi
Normal file
83
lib/python3.11/site-packages/numpy/_core/_dtype_ctypes.pyi
Normal file
@ -0,0 +1,83 @@
|
||||
import _ctypes
|
||||
import ctypes as ct
|
||||
from typing import Any, overload
|
||||
|
||||
import numpy as np
|
||||
|
||||
# Public dispatcher: one overload per concrete ctypes kind.
@overload
def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...

# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtime, see
# https://github.com/numpy/numpy/issues/28360

# Private helpers for the compound kinds; each produces a structured void dtype.
def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ...
def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ...
def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ...

# keep in sync with `dtype_from_ctypes_type` (minus the first overload)
@overload
def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
@overload
def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
|
||||
162
lib/python3.11/site-packages/numpy/_core/_exceptions.py
Normal file
162
lib/python3.11/site-packages/numpy/_core/_exceptions.py
Normal file
@ -0,0 +1,162 @@
|
||||
"""
|
||||
Various richly-typed exceptions, that also help us deal with string formatting
|
||||
in python where it's easier.
|
||||
|
||||
By putting the formatting in `__str__`, we also avoid paying the cost for
|
||||
users who silence the exceptions.
|
||||
"""
|
||||
|
||||
def _unpack_tuple(tup):
|
||||
if len(tup) == 1:
|
||||
return tup[0]
|
||||
else:
|
||||
return tup
|
||||
|
||||
|
||||
def _display_as_base(cls):
|
||||
"""
|
||||
A decorator that makes an exception class look like its base.
|
||||
|
||||
We use this to hide subclasses that are implementation details - the user
|
||||
should catch the base type, which is what the traceback will show them.
|
||||
|
||||
Classes decorated with this decorator are subject to removal without a
|
||||
deprecation warning.
|
||||
"""
|
||||
assert issubclass(cls, Exception)
|
||||
cls.__name__ = cls.__base__.__name__
|
||||
return cls
|
||||
|
||||
|
||||
class UFuncTypeError(TypeError):
    """Base class for all ufunc exceptions."""

    def __init__(self, ufunc):
        # the ufunc object the failure originated from
        self.ufunc = ufunc
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
    """Raised when no ufunc inner loop matches the operand types."""

    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        self.dtypes = tuple(dtypes)

    def __str__(self):
        nin = self.ufunc.nin
        in_types = _unpack_tuple(self.dtypes[:nin])
        out_types = _unpack_tuple(self.dtypes[nin:])
        return (
            f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature "
            f"matching types {in_types!r} -> {out_types!r}"
        )
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    """Raised when type resolution fails for a binary ufunc."""

    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc, dtypes)
        # binary resolution involves exactly two operand dtypes
        assert len(self.dtypes) == 2

    def __str__(self):
        dt1, dt2 = self.dtypes
        return (
            f"ufunc {self.ufunc.__name__!r} cannot use operands "
            f"with types {dt1!r} and {dt2!r}"
        )
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    """Common state for input/output casting failures."""

    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        # the casting rule that was violated, and the offending conversion
        self.casting = casting
        self.from_ = from_
        self.to = to
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
    """Raised when a ufunc input cannot be cast."""

    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.in_i = i

    def __str__(self):
        # the operand index is shown only for multi-input ufuncs
        which = "" if self.ufunc.nin == 1 else f"{self.in_i} "
        return (
            f"Cannot cast ufunc {self.ufunc.__name__!r} input {which}from "
            f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"
        )
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
    """Raised when a ufunc output cannot be cast."""

    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.out_i = i

    def __str__(self):
        # the operand index is shown only for multi-output ufuncs
        which = "" if self.ufunc.nout == 1 else f"{self.out_i} "
        return (
            f"Cannot cast ufunc {self.ufunc.__name__!r} output {which}from "
            f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"
        )
|
||||
|
||||
|
||||
@_display_as_base
class _ArrayMemoryError(MemoryError):
    """ Thrown when an array cannot be allocated"""
    def __init__(self, shape, dtype):
        # shape and dtype of the allocation that failed
        self.shape = shape
        self.dtype = dtype

    @property
    def _total_size(self):
        # requested allocation size in bytes: itemsize * prod(shape)
        num_bytes = self.dtype.itemsize
        for dim in self.shape:
            num_bytes *= dim
        return num_bytes

    @staticmethod
    def _size_to_string(num_bytes):
        """ Convert a number of bytes into a binary size string """

        # https://en.wikipedia.org/wiki/Binary_prefix
        LOG2_STEP = 10
        STEP = 1024
        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']

        # pick the largest unit such that the value is >= 1 in that unit
        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
        unit_val = 1 << (unit_i * LOG2_STEP)
        n_units = num_bytes / unit_val
        del unit_val

        # ensure we pick a unit that is correct after rounding
        if round(n_units) == STEP:
            unit_i += 1
            n_units /= STEP

        # deal with sizes so large that we don't have units for them
        if unit_i >= len(units):
            new_unit_i = len(units) - 1
            n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
            unit_i = new_unit_i

        unit_name = units[unit_i]
        # format with a sensible number of digits
        if unit_i == 0:
            # no decimal point on bytes
            return f'{n_units:.0f} {unit_name}'
        elif round(n_units) < 1000:
            # 3 significant figures, if none are dropped to the left of the .
            return f'{n_units:#.3g} {unit_name}'
        else:
            # just give all the digits otherwise
            return f'{n_units:#.0f} {unit_name}'

    def __str__(self):
        size_str = self._size_to_string(self._total_size)
        return (f"Unable to allocate {size_str} for an array with shape "
                f"{self.shape} and data type {self.dtype}")
|
||||
55
lib/python3.11/site-packages/numpy/_core/_exceptions.pyi
Normal file
55
lib/python3.11/site-packages/numpy/_core/_exceptions.pyi
Normal file
@ -0,0 +1,55 @@
|
||||
from collections.abc import Iterable
|
||||
from typing import Any, Final, TypeVar, overload
|
||||
|
||||
import numpy as np
|
||||
from numpy import _CastingKind
|
||||
from numpy._utils import set_module as set_module
|
||||
|
||||
###

_T = TypeVar("_T")
# tuples of length != 1 (empty, or two-plus elements)
_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
_ExceptionT = TypeVar("_ExceptionT", bound=Exception)

###

class UFuncTypeError(TypeError):
    ufunc: Final[np.ufunc]
    def __init__(self, /, ufunc: np.ufunc) -> None: ...

class _UFuncNoLoopError(UFuncTypeError):
    dtypes: tuple[np.dtype, ...]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...

class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    # narrowed: binary resolution always involves exactly two dtypes
    dtypes: tuple[np.dtype, np.dtype]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...

class _UFuncCastingError(UFuncTypeError):
    casting: Final[_CastingKind]
    from_: Final[np.dtype]
    to: Final[np.dtype]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...

class _UFuncInputCastingError(_UFuncCastingError):
    in_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...

class _UFuncOutputCastingError(_UFuncCastingError):
    out_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...

class _ArrayMemoryError(MemoryError):
    shape: tuple[int, ...]
    dtype: np.dtype
    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
    @property
    def _total_size(self) -> int: ...
    @staticmethod
    def _size_to_string(num_bytes: int) -> str: ...

# a 1-tuple unpacks to its element; any other tuple passes through
@overload
def _unpack_tuple(tup: tuple[_T]) -> _T: ...
@overload
def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
|
||||
958
lib/python3.11/site-packages/numpy/_core/_internal.py
Normal file
958
lib/python3.11/site-packages/numpy/_core/_internal.py
Normal file
@ -0,0 +1,958 @@
|
||||
"""
|
||||
A place for internal code
|
||||
|
||||
Some things are more easily handled Python.
|
||||
|
||||
"""
|
||||
import ast
|
||||
import math
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from numpy import _NoValue
|
||||
from numpy.exceptions import DTypePromotionError
|
||||
|
||||
from .multiarray import StringDType, array, dtype, promote_types
|
||||
|
||||
try:
|
||||
import ctypes
|
||||
except ImportError:
|
||||
ctypes = None
|
||||
|
||||
IS_PYPY = sys.implementation.name == 'pypy'
|
||||
|
||||
if sys.byteorder == 'little':
|
||||
_nbo = '<'
|
||||
else:
|
||||
_nbo = '>'
|
||||
|
||||
def _makenames_list(adict, align):
|
||||
allfields = []
|
||||
|
||||
for fname, obj in adict.items():
|
||||
n = len(obj)
|
||||
if not isinstance(obj, tuple) or n not in (2, 3):
|
||||
raise ValueError("entry not a 2- or 3- tuple")
|
||||
if n > 2 and obj[2] == fname:
|
||||
continue
|
||||
num = int(obj[1])
|
||||
if num < 0:
|
||||
raise ValueError("invalid offset.")
|
||||
format = dtype(obj[0], align=align)
|
||||
if n > 2:
|
||||
title = obj[2]
|
||||
else:
|
||||
title = None
|
||||
allfields.append((fname, format, num, title))
|
||||
# sort by offsets
|
||||
allfields.sort(key=lambda x: x[2])
|
||||
names = [x[0] for x in allfields]
|
||||
formats = [x[1] for x in allfields]
|
||||
offsets = [x[2] for x in allfields]
|
||||
titles = [x[3] for x in allfields]
|
||||
|
||||
return names, formats, offsets, titles
|
||||
|
||||
# Called in PyArray_DescrConverter function when
|
||||
# a dictionary without "names" and "formats"
|
||||
# fields is used as a data-type descriptor.
|
||||
def _usefields(adict, align):
|
||||
try:
|
||||
names = adict[-1]
|
||||
except KeyError:
|
||||
names = None
|
||||
if names is None:
|
||||
names, formats, offsets, titles = _makenames_list(adict, align)
|
||||
else:
|
||||
formats = []
|
||||
offsets = []
|
||||
titles = []
|
||||
for name in names:
|
||||
res = adict[name]
|
||||
formats.append(res[0])
|
||||
offsets.append(res[1])
|
||||
if len(res) > 2:
|
||||
titles.append(res[2])
|
||||
else:
|
||||
titles.append(None)
|
||||
|
||||
return dtype({"names": names,
|
||||
"formats": formats,
|
||||
"offsets": offsets,
|
||||
"titles": titles}, align)
|
||||
|
||||
|
||||
# construct an array_protocol descriptor list
|
||||
# from the fields attribute of a descriptor
|
||||
# This calls itself recursively but should eventually hit
|
||||
# a descriptor that has no fields and then return
|
||||
# a simple typestring
|
||||
|
||||
def _array_descr(descriptor):
    """Recursively build an array-protocol ``descr`` list from a dtype.

    Scalar dtypes return their typestring (paired with metadata when
    present); subarray dtypes recurse into their base.  For structured
    dtypes, ``'|V<n>'`` entries are inserted for padding gaps between
    fields, and overlapping/out-of-order fields raise ``ValueError``.
    """
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                # only attach metadata when the (copied) dict is non-empty
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    # each entry: (field dtype, offset[, title], name)
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            # gap before this field: emit an unnamed void padding entry
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            # field carries a title: the key becomes (title, name)
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    # trailing padding up to the declared itemsize
    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
|
||||
|
||||
|
||||
# format_re was originally from numarray by J. Todd Miller
#
# Matches one field of a comma-separated dtype string: an optional byte
# order, an optional (possibly parenthesized) repeat count, a second
# optional byte order, and the type code itself with optional [metadata].
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')   # field separator
space_re = re.compile(r'\s+$')    # trailing padding

# astr is a string (perhaps comma separated)

# '=' means native byte order; map it to the concrete '<' or '>' character
_convorder = {'=': _nbo}
|
||||
|
||||
def _commastring(astr):
    """Parse a comma-separated dtype string into dtype-constructor items.

    Each item is either a typestring or a ``(typestring, shape)`` tuple.
    Returns a list when more than one field was parsed (a separator was
    seen), otherwise the single item itself.
    """
    startindex = 0
    result = []
    islist = False
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None (no match) -> report which field failed
            raise ValueError(
                f'format number {len(result) + 1} of "{astr}" is not recognized'
                ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result) + 1, astr))
                startindex = mo.end()
                islist = True

        # reconcile the two possible byte-order positions
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    f'inconsistent byte-order specification {order1} and {order2}')
            order = order1

        # native/irrelevant byte orders are dropped from the typestring
        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if repeats == '':
            newitem = dtype
        else:
            if (repeats[0] == "(" and repeats[-1] == ")"
                    and repeats[1:-1].strip() != ""
                    and "," not in repeats):
                warnings.warn(
                    'Passing in a parenthesized single number for repeats '
                    'is deprecated; pass either a single number or indicate '
                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
                    stacklevel=2)
            newitem = (dtype, ast.literal_eval(repeats))

        result.append(newitem)

    return result if islist else result[0]
|
||||
|
||||
class dummy_ctype:
    """Minimal stand-in used when the ctypes module is unavailable.

    Mimics the small subset of the ctypes type interface that numpy
    relies on: ``typ * n`` array construction and calling to build a
    value, plus equality on the wrapped class.
    """

    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        # ctypes types build array types via multiplication; the dummy
        # just returns itself
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls
|
||||
|
||||
def _getintp_ctype():
|
||||
val = _getintp_ctype.cache
|
||||
if val is not None:
|
||||
return val
|
||||
if ctypes is None:
|
||||
import numpy as np
|
||||
val = dummy_ctype(np.intp)
|
||||
else:
|
||||
char = dtype('n').char
|
||||
if char == 'i':
|
||||
val = ctypes.c_int
|
||||
elif char == 'l':
|
||||
val = ctypes.c_long
|
||||
elif char == 'q':
|
||||
val = ctypes.c_longlong
|
||||
else:
|
||||
val = ctypes.c_long
|
||||
_getintp_ctype.cache = val
|
||||
return val
|
||||
|
||||
|
||||
_getintp_ctype.cache = None
|
||||
|
||||
# Used for .ctypes attribute of ndarray
|
||||
|
||||
class _missing_ctypes:
|
||||
def cast(self, num, obj):
|
||||
return num.value
|
||||
|
||||
class c_void_p:
|
||||
def __init__(self, ptr):
|
||||
self.value = ptr
|
||||
|
||||
|
||||
class _ctypes:
    """Backs the ``.ctypes`` attribute of ndarray: ctypes views of the
    array's data pointer, shape and strides.

    ``array`` is the owning ndarray and ``ptr`` its data address; when
    the ctypes module is unavailable, ``_missing_ctypes`` provides a
    minimal substitute that still keeps the array alive.
    """

    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        # 0-d arrays have no shape/strides to expose
        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
        the data as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then
        # let it hold the array reference. This is a workaround to circumvent
        # the CPython bug https://bugs.python.org/issue12836.
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj * self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj * self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in
        correct byte-order. The memory area may not even be writeable.
        The array flags and data-type of this array should be respected
        when passing this attribute to arbitrary C-code to avoid trouble
        that can include Python crashing. User Beware! The value of this
        attribute is exactly the same as:
        ``self._array_interface_['data'][0]``.

        Note that unlike ``data_as``, a reference won't be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes
        array contains the strides information from the underlying array.
        This strides information is important for showing how many bytes
        must be jumped to get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
|
||||
|
||||
|
||||
def _newnames(datatype, order):
|
||||
"""
|
||||
Given a datatype and an order object, return a new names tuple, with the
|
||||
order indicated
|
||||
"""
|
||||
oldnames = datatype.names
|
||||
nameslist = list(oldnames)
|
||||
if isinstance(order, str):
|
||||
order = [order]
|
||||
seen = set()
|
||||
if isinstance(order, (list, tuple)):
|
||||
for name in order:
|
||||
try:
|
||||
nameslist.remove(name)
|
||||
except ValueError:
|
||||
if name in seen:
|
||||
raise ValueError(f"duplicate field name: {name}") from None
|
||||
else:
|
||||
raise ValueError(f"unknown field name: {name}") from None
|
||||
seen.add(name)
|
||||
return tuple(list(order) + nameslist)
|
||||
raise ValueError(f"unsupported order value: {order}")
|
||||
|
||||
def _copy_fields(ary):
|
||||
"""Return copy of structured array with padding between fields removed.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ary : ndarray
|
||||
Structured array from which to remove padding bytes
|
||||
|
||||
Returns
|
||||
-------
|
||||
ary_copy : ndarray
|
||||
Copy of ary with padding bytes removed
|
||||
"""
|
||||
dt = ary.dtype
|
||||
copy_dtype = {'names': dt.names,
|
||||
'formats': [dt.fields[name][0] for name in dt.names]}
|
||||
return array(ary, dtype=copy_dtype, copy=True)
|
||||
|
||||
def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be. The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            # preserve the field's title by using a (title, name) key
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res
|
||||
|
||||
|
||||
def _getfield_is_safe(oldtype, newtype, offset):
|
||||
""" Checks safety of getfield for object arrays.
|
||||
|
||||
As in _view_is_safe, we need to check that memory containing objects is not
|
||||
reinterpreted as a non-object datatype and vice versa.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
oldtype : data-type
|
||||
Data type of the original ndarray.
|
||||
newtype : data-type
|
||||
Data type of the field being accessed by ndarray.getfield
|
||||
offset : int
|
||||
Offset of the field being accessed by ndarray.getfield
|
||||
|
||||
Raises
|
||||
------
|
||||
TypeError
|
||||
If the field access is invalid
|
||||
|
||||
"""
|
||||
if newtype.hasobject or oldtype.hasobject:
|
||||
if offset == 0 and newtype == oldtype:
|
||||
return
|
||||
if oldtype.names is not None:
|
||||
for name in oldtype.names:
|
||||
if (oldtype.fields[name][1] == offset and
|
||||
oldtype.fields[name][0] == newtype):
|
||||
return
|
||||
raise TypeError("Cannot get/set field of an object array")
|
||||
return
|
||||
|
||||
def _view_is_safe(oldtype, newtype):
|
||||
""" Checks safety of a view involving object arrays, for example when
|
||||
doing::
|
||||
|
||||
np.zeros(10, dtype=oldtype).view(newtype)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
oldtype : data-type
|
||||
Data type of original ndarray
|
||||
newtype : data-type
|
||||
Data type of the view
|
||||
|
||||
Raises
|
||||
------
|
||||
TypeError
|
||||
If the new type is incompatible with the old type.
|
||||
|
||||
"""
|
||||
|
||||
# if the types are equivalent, there is no problem.
|
||||
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
|
||||
if oldtype == newtype:
|
||||
return
|
||||
|
||||
if newtype.hasobject or oldtype.hasobject:
|
||||
raise TypeError("Cannot change data-type for array of references.")
|
||||
return
|
||||
|
||||
|
||||
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

# Native mode ('@'/'^'): format characters map to the platform's native
# NumPy type codes (sizes follow the local C ABI).
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
# All characters that may start a native-mode type code ('Z' prefixes appear
# via the joined keys as well).
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# Standard mode ('=', '<', '>', '!'): fixed, platform-independent sizes.
# Note: no 'g'/'Zg' here — long double has no standard-size equivalent.
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# PEP 3118 codes that have no NumPy representation; parsing raises
# NotImplementedError with the human-readable description below.
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}
|
||||
|
||||
class _Stream:
|
||||
def __init__(self, s):
|
||||
self.s = s
|
||||
self.byteorder = '@'
|
||||
|
||||
def advance(self, n):
|
||||
res = self.s[:n]
|
||||
self.s = self.s[n:]
|
||||
return res
|
||||
|
||||
def consume(self, c):
|
||||
if self.s[:len(c)] == c:
|
||||
self.advance(len(c))
|
||||
return True
|
||||
return False
|
||||
|
||||
def consume_until(self, c):
|
||||
if callable(c):
|
||||
i = 0
|
||||
while i < len(self.s) and not c(self.s[i]):
|
||||
i = i + 1
|
||||
return self.advance(i)
|
||||
else:
|
||||
i = self.s.index(c)
|
||||
res = self.advance(i)
|
||||
self.advance(len(c))
|
||||
return res
|
||||
|
||||
@property
|
||||
def next(self):
|
||||
return self.s[0]
|
||||
|
||||
def __bool__(self):
|
||||
return bool(self.s)
|
||||
|
||||
|
||||
def _dtype_from_pep3118(spec):
    """Construct a NumPy dtype from a complete PEP 3118 format string."""
    stream = _Stream(spec)
    # Top-level parse: the computed common alignment is only needed for
    # nested sub-structs, so it is discarded here.
    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
    return dtype
|
||||
|
||||
def __dtype_from_pep3118(stream, is_subdtype):
    """Recursive worker for `_dtype_from_pep3118`.

    Consumes fields from *stream* until it is exhausted or a closing ``'}'``
    ends the current ``T{...}`` sub-struct.  Returns ``(dtype,
    common_alignment)`` where the alignment is the LCM of all native-mode
    field alignments seen (1 when no native fields occurred).
    """
    field_spec = {
        'names': [],
        'formats': [],
        'offsets': [],
        'itemsize': 0
    }
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1): a '(d1,d2,...)' shape prefix applies to the
        # following item.
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order ('!' is network order, i.e. big-endian)
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes: an optional decimal repeat count before the type code
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            # Nested struct: recurse; the matching '}' is consumed by the
            # recursive call.
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            # 'Z' prefixes complex type codes and spans two characters.
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # For strings/void the repeat count is a length, not a
                # sub-array shape.
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})")
        else:
            raise ValueError(
                f"Unknown PEP 3118 data type specifier {stream.s!r}"
            )

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name: ':name:' follows the type code
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        # Anonymous padding ('x') produces no field; named padding does.
        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format"
                )
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
|
||||
|
||||
def _fix_names(field_spec):
|
||||
""" Replace names which are None with the next unused f%d name """
|
||||
names = field_spec['names']
|
||||
for i, name in enumerate(names):
|
||||
if name is not None:
|
||||
continue
|
||||
|
||||
j = 0
|
||||
while True:
|
||||
name = f'f{j}'
|
||||
if name not in names:
|
||||
break
|
||||
j = j + 1
|
||||
names[i] = name
|
||||
|
||||
def _add_trailing_padding(value, padding):
|
||||
"""Inject the specified number of padding bytes at the end of a dtype"""
|
||||
if value.fields is None:
|
||||
field_spec = {
|
||||
'names': ['f0'],
|
||||
'formats': [value],
|
||||
'offsets': [0],
|
||||
'itemsize': value.itemsize
|
||||
}
|
||||
else:
|
||||
fields = value.fields
|
||||
names = value.names
|
||||
field_spec = {
|
||||
'names': names,
|
||||
'formats': [fields[name][0] for name in names],
|
||||
'offsets': [fields[name][1] for name in names],
|
||||
'itemsize': value.itemsize
|
||||
}
|
||||
|
||||
field_spec['itemsize'] += padding
|
||||
return dtype(field_spec)
|
||||
|
||||
def _prod(a):
|
||||
p = 1
|
||||
for x in a:
|
||||
p *= x
|
||||
return p
|
||||
|
||||
def _gcd(a, b):
|
||||
"""Calculate the greatest common divisor of a and b"""
|
||||
if not (math.isfinite(a) and math.isfinite(b)):
|
||||
raise ValueError('Can only find greatest common divisor of '
|
||||
f'finite arguments, found "{a}" and "{b}"')
|
||||
while b:
|
||||
a, b = b, a % b
|
||||
return a
|
||||
|
||||
def _lcm(a, b):
    """Least common multiple of a and b (divide before multiply)."""
    divisor = _gcd(a, b)
    return a // divisor * b
|
||||
|
||||
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    # Positional reprs first, then keyword arguments, comma separated.
    pieces = [repr(arg) for arg in inputs]
    pieces.extend(f'{key}={val!r}' for key, val in kwargs.items())
    args_string = ', '.join(pieces)
    # Any explicit `out` operands also contribute to the listed types.
    all_args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in all_args)
    return ('operand type(s) all returned NotImplemented from '
            f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}'
            )
|
||||
|
||||
|
||||
def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_ufunc__ gives up. """
    qualified = '.'.join((public_api.__module__, public_api.__name__))
    return (f"no implementation found for '{qualified}' on types that implement "
            f'__array_function__: {list(types)}')
|
||||
|
||||
|
||||
def _ufunc_doc_signature_formatter(ufunc):
|
||||
"""
|
||||
Builds a signature string which resembles PEP 457
|
||||
|
||||
This is used to construct the first line of the docstring
|
||||
"""
|
||||
|
||||
# input arguments are simple
|
||||
if ufunc.nin == 1:
|
||||
in_args = 'x'
|
||||
else:
|
||||
in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin))
|
||||
|
||||
# output arguments are both keyword or positional
|
||||
if ufunc.nout == 0:
|
||||
out_args = ', /, out=()'
|
||||
elif ufunc.nout == 1:
|
||||
out_args = ', /, out=None'
|
||||
else:
|
||||
out_args = '[, {positional}], / [, out={default}]'.format(
|
||||
positional=', '.join(
|
||||
f'out{i + 1}' for i in range(ufunc.nout)),
|
||||
default=repr((None,) * ufunc.nout)
|
||||
)
|
||||
|
||||
# keyword only args depend on whether this is a gufunc
|
||||
kwargs = (
|
||||
", casting='same_kind'"
|
||||
", order='K'"
|
||||
", dtype=None"
|
||||
", subok=True"
|
||||
)
|
||||
|
||||
# NOTE: gufuncs may or may not support the `axis` parameter
|
||||
if ufunc.signature is None:
|
||||
kwargs = f", where=True{kwargs}[, signature]"
|
||||
else:
|
||||
kwargs += "[, signature, axes, axis]"
|
||||
|
||||
# join all the parts together
|
||||
return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'
|
||||
|
||||
|
||||
def npy_ctypes_check(cls):
    # determine if a class comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746
    try:
        # ctypes classes are new-style, so have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
        # PyPy MRO tail:    (..., _ctypes.basics._CData, Bufferable, object)
        # CPython MRO tail: (..., _ctypes._CData, object)
        base_index = -3 if IS_PYPY else -2
        ctype_base = cls.__mro__[base_index]
        # right now, they're part of the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        # Anything without the expected MRO shape is not a ctypes class.
        return False
|
||||
|
||||
# used to handle the _NoValue default argument for na_object
|
||||
# in the C implementation of the __reduce__ method for stringdtype
|
||||
def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
|
||||
if na_object is _NoValue:
|
||||
return StringDType(coerce=coerce)
|
||||
return StringDType(coerce=coerce, na_object=na_object)
|
||||
72
lib/python3.11/site-packages/numpy/_core/_internal.pyi
Normal file
72
lib/python3.11/site-packages/numpy/_core/_internal.pyi
Normal file
@ -0,0 +1,72 @@
|
||||
import ctypes as ct
|
||||
import re
|
||||
from collections.abc import Callable, Iterable
|
||||
from typing import Any, Final, Generic, Self, overload
|
||||
|
||||
from typing_extensions import TypeVar, deprecated
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
from numpy.ctypeslib import c_intp
|
||||
|
||||
_CastT = TypeVar("_CastT", bound=ct._CanCastTo)
|
||||
_T_co = TypeVar("_T_co", covariant=True)
|
||||
_CT = TypeVar("_CT", bound=ct._CData)
|
||||
_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True)
|
||||
|
||||
###
|
||||
|
||||
IS_PYPY: Final[bool] = ...
|
||||
|
||||
format_re: Final[re.Pattern[str]] = ...
|
||||
sep_re: Final[re.Pattern[str]] = ...
|
||||
space_re: Final[re.Pattern[str]] = ...
|
||||
|
||||
###
|
||||
|
||||
# TODO: Let the likes of `shape_as` and `strides_as` return `None`
|
||||
# for 0D arrays once we've got shape-support
|
||||
|
||||
class _ctypes(Generic[_PT_co]):
    """Stub for the ``ndarray.ctypes`` accessor object."""
    @overload
    def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ...
    @overload
    def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ...

    # Read-only views of the array's buffer address and layout.
    @property
    def data(self) -> _PT_co: ...
    @property
    def shape(self) -> ct.Array[c_intp]: ...
    @property
    def strides(self) -> ct.Array[c_intp]: ...
    @property
    def _as_parameter_(self) -> ct.c_void_p: ...

    # Conversions into caller-chosen ctypes types.
    def data_as(self, /, obj: type[_CastT]) -> _CastT: ...
    def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...
    def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...

    # Deprecated getter aliases for the properties above.
    @deprecated('"get_data" is deprecated. Use "data" instead')
    def get_data(self, /) -> _PT_co: ...
    @deprecated('"get_shape" is deprecated. Use "shape" instead')
    def get_shape(self, /) -> ct.Array[c_intp]: ...
    @deprecated('"get_strides" is deprecated. Use "strides" instead')
    def get_strides(self, /) -> ct.Array[c_intp]: ...
    @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead')
    def get_as_parameter(self, /) -> ct.c_void_p: ...
|
||||
|
||||
class dummy_ctype(Generic[_T_co]):
    """Stub for the ctypes-like stand-in used when ctypes is unavailable."""
    # Wrapped scalar type; calling the instance constructs a _T_co.
    _cls: type[_T_co]

    def __init__(self, /, cls: type[_T_co]) -> None: ...
    def __eq__(self, other: Self, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
    def __ne__(self, other: Self, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
    def __mul__(self, other: object, /) -> Self: ...
    def __call__(self, /, *other: object) -> _T_co: ...
|
||||
|
||||
# Pure error-message formatters and the ctypes-class probe; implementations
# live in the corresponding .py module.
def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ...
def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ...
def npy_ctypes_check(cls: type) -> bool: ...
|
||||
355
lib/python3.11/site-packages/numpy/_core/_machar.py
Normal file
355
lib/python3.11/site-packages/numpy/_core/_machar.py
Normal file
@ -0,0 +1,355 @@
|
||||
"""
|
||||
Machine arithmetic - determine the parameters of the
|
||||
floating-point arithmetic system
|
||||
|
||||
Author: Pearu Peterson, September 2003
|
||||
|
||||
"""
|
||||
__all__ = ['MachAr']
|
||||
|
||||
from ._ufunc_config import errstate
|
||||
from .fromnumeric import any
|
||||
|
||||
# Need to speed this up...especially for longdouble

# Deprecated 2021-10-20, NumPy 1.22
class MachAr:
    """
    Diagnosing machine parameters.

    Attributes
    ----------
    ibeta : int
        Radix in which numbers are represented.
    it : int
        Number of base-`ibeta` digits in the floating point mantissa M.
    machep : int
        Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0
    eps : float
        Floating-point number ``beta**machep`` (floating point precision)
    negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
        from 1.0, gives something different from 1.0.
    epsneg : float
        Floating-point number ``beta**negep``.
    iexp : int
        Number of bits in the exponent (including its sign and bias).
    minexp : int
        Smallest (most negative) power of `ibeta` consistent with there
        being no leading zeros in the mantissa.
    xmin : float
        Floating-point number ``beta**minexp`` (the smallest [in
        magnitude] positive floating point number with full precision).
    maxexp : int
        Smallest (positive) power of `ibeta` that causes overflow.
    xmax : float
        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
        usable floating value).
    irnd : int
        In ``range(6)``, information on what kind of rounding is done
        in addition, and on how underflow is handled.
    ngrd : int
        Number of 'guard digits' used when truncating the product
        of two mantissas to fit the representation.
    epsilon : float
        Same as `eps`.
    tiny : float
        An alias for `smallest_normal`, kept for backwards compatibility.
    huge : float
        Same as `xmax`.
    precision : float
        ``- int(-log10(eps))``
    resolution : float
        ``- 10**(-precision)``
    smallest_normal : float
        The smallest positive floating point number with 1 as leading bit in
        the mantissa following IEEE-754. Same as `xmin`.
    smallest_subnormal : float
        The smallest positive floating point number with 0 as leading bit in
        the mantissa following IEEE-754.

    Parameters
    ----------
    float_conv : function, optional
        Function that converts an integer or integer array to a float
        or float array. Default is `float`.
    int_conv : function, optional
        Function that converts a float or float array to an integer or
        integer array. Default is `int`.
    float_to_float : function, optional
        Function that converts a float array to float. Default is `float`.
        Note that this does not seem to do anything useful in the current
        implementation.
    float_to_str : function, optional
        Function that converts a single float to a string. Default is
        ``lambda v:'%24.16e' %v``.
    title : str, optional
        Title that is printed in the string representation of `MachAr`.

    See Also
    --------
    finfo : Machine limits for floating point types.
    iinfo : Machine limits for integer types.

    References
    ----------
    .. [1] Press, Teukolsky, Vetterling and Flannery,
           "Numerical Recipes in C++," 2nd ed,
           Cambridge University Press, 2002, p. 31.

    """

    def __init__(self, float_conv=float, int_conv=int,
                 float_to_float=float,
                 float_to_str=lambda v: f'{v:24.16e}',
                 title='Python floating point number'):
        """

        float_conv - convert integer to float (array)
        int_conv - convert float (array) to integer
        float_to_float - convert float array to float
        float_to_str - convert array float to str
        title - description of used floating point numbers

        """
        # We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
        with errstate(under='ignore'):
            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)

    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        # Empirically probes the floating-point system following the
        # Numerical Recipes `machar` routine; each probe loop is bounded
        # by max_iterN and raises if it does not converge.
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one

        # NOTE: `any` below is numpy's fromnumeric.any (module import), so
        # the probes also work when float_conv produces arrays.

        # Do we really need to do this?  Aren't they 2 and 2.0?
        # Determine ibeta and beta
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp - a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp
        beta = float_conv(ibeta)

        # Determine it and irnd
        it = -1
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))

        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        temp = a + betah
        irnd = 0
        if any(temp - a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd == 0 and any(temp - tempa != zero):
            irnd = 2

        # Determine negep and epsneg
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp - one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a

        # Determine machep and eps
        machep = - it - 3
        a = b

        for _ in range(max_iterN):
            temp = one + a
            if any(temp - one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a

        # Determine ngrd
        ngrd = 0
        temp = one + eps
        if irnd == 0 and any(temp * one - one != zero):
            ngrd = 1

        # Determine iexp
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        for _ in range(max_iterN):
            y = z
            z = y * y
            a = z * one  # Check here for underflow
            temp = z * t
            if any(a + a == zero) or any(abs(z) >= y):
                break
            temp1 = temp * betain
            if any(temp1 * beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1

        # Determine minexp and xmin
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any((a + a) != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1 * beta == y) and any(temp != y):
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k

        # Determine maxexp, xmax
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        xmax = one - epsneg
        if any(xmax * one != xmax):
            xmax = one - beta * epsneg
        xmax = xmax / (xmin * beta * beta * beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta == 2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta

        smallest_subnormal = abs(xmin / beta ** (it))

        # Publish the probed quantities on the instance.
        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd

        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax
        self.smallest_normal = self.xmin
        self._str_smallest_normal = float_to_str(self.xmin)
        self.smallest_subnormal = float_to_float(smallest_subnormal)
        self._str_smallest_subnormal = float_to_str(smallest_subnormal)

        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)

    def __str__(self):
        # %-style mapping format driven directly by the instance __dict__.
        fmt = (
            'Machine parameters for %(title)s\n'
            '---------------------------------------------------------------------\n'
            'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
            'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
            'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
            'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
            'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
            'smallest_normal=%(smallest_normal)s '
            'smallest_subnormal=%(smallest_subnormal)s\n'
            '---------------------------------------------------------------------\n'
        )
        return fmt % self.__dict__
|
||||
|
||||
|
||||
# Manual diagnostic: print the probed parameters for Python's float.
if __name__ == '__main__':
    print(MachAr())
|
||||
55
lib/python3.11/site-packages/numpy/_core/_machar.pyi
Normal file
55
lib/python3.11/site-packages/numpy/_core/_machar.pyi
Normal file
@ -0,0 +1,55 @@
|
||||
from collections.abc import Iterable
|
||||
from typing import Any, Final, TypeVar, overload
|
||||
|
||||
import numpy as np
|
||||
from numpy import _CastingKind
|
||||
from numpy._utils import set_module as set_module
|
||||
|
||||
###
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
|
||||
_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
|
||||
|
||||
###
|
||||
|
||||
class UFuncTypeError(TypeError):
    """Base stub for all ufunc type-resolution errors."""
    ufunc: Final[np.ufunc]
    def __init__(self, /, ufunc: np.ufunc) -> None: ...
|
||||
|
||||
class _UFuncNoLoopError(UFuncTypeError):
    # Raised when no inner loop matches the operand dtypes.
    dtypes: tuple[np.dtype, ...]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
|
||||
|
||||
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    # Binary-ufunc specialisation: exactly two operand dtypes.
    dtypes: tuple[np.dtype, np.dtype]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
|
||||
|
||||
class _UFuncCastingError(UFuncTypeError):
    # casting rule that was violated, plus the offending source/target dtypes
    casting: Final[_CastingKind]
    from_: Final[np.dtype]
    to: Final[np.dtype]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...
|
||||
|
||||
class _UFuncInputCastingError(_UFuncCastingError):
    # index of the input operand that failed to cast
    in_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
|
||||
|
||||
class _UFuncOutputCastingError(_UFuncCastingError):
    # index of the output operand that failed to cast
    out_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
|
||||
|
||||
class _ArrayMemoryError(MemoryError):
    """Stub for the allocation-failure error carrying shape and dtype."""
    shape: tuple[int, ...]
    dtype: np.dtype
    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
    @property
    def _total_size(self) -> int: ...
    @staticmethod
    def _size_to_string(num_bytes: int) -> str: ...
|
||||
|
||||
# 1-tuples unwrap to their single element; longer tuples pass through.
@overload
def _unpack_tuple(tup: tuple[_T]) -> _T: ...
@overload
def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
# Class decorator that makes an exception display under its base's name.
def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
|
||||
255
lib/python3.11/site-packages/numpy/_core/_methods.py
Normal file
255
lib/python3.11/site-packages/numpy/_core/_methods.py
Normal file
@ -0,0 +1,255 @@
|
||||
"""
|
||||
Array methods which are called by both the C-code for the method
|
||||
and the Python code for the NumPy-namespace function
|
||||
|
||||
"""
|
||||
import os
|
||||
import pickle
|
||||
import warnings
|
||||
from contextlib import nullcontext
|
||||
|
||||
import numpy as np
|
||||
from numpy._core import multiarray as mu
|
||||
from numpy._core import numerictypes as nt
|
||||
from numpy._core import umath as um
|
||||
from numpy._core.multiarray import asanyarray
|
||||
from numpy._globals import _NoValue
|
||||
|
||||
# save those O(100) nanoseconds!
# (pre-bound reduce methods avoid repeated attribute lookups per call)
bool_dt = mu.dtype("bool")
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_bitwise_count = um.bitwise_count
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce

# Complex types to -> (2,)float view for fast-path computation in _var()
_complex_to_float = {
    nt.dtype(nt.csingle): nt.dtype(nt.single),
    nt.dtype(nt.cdouble): nt.dtype(nt.double),
}
# Special case for windows: ensure double takes precedence
# (only add the clongdouble entry where longdouble is a distinct type)
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
    _complex_to_float.update({
        nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble),
    })
||||
|
||||
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Thin positional-argument wrapper over maximum.reduce (ndarray.max).
    return umr_maximum(a, axis, None, out, keepdims, initial, where)
||||
|
||||
def _amin(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Thin positional-argument wrapper over minimum.reduce (ndarray.min).
    return umr_minimum(a, axis, None, out, keepdims, initial, where)
||||
|
||||
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
         initial=_NoValue, where=True):
    # Thin positional-argument wrapper over add.reduce (ndarray.sum).
    return umr_sum(a, axis, dtype, out, keepdims, initial, where)
||||
|
||||
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Thin positional-argument wrapper over multiply.reduce (ndarray.prod).
    return umr_prod(a, axis, dtype, out, keepdims, initial, where)
||||
|
||||
def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    # By default, return a boolean for any and all
    if dtype is None:
        dtype = bool_dt
    # Parsing keyword arguments is currently fairly slow, so avoid it for now
    # (only pass `where` through when the caller gave a real mask).
    if where is True:
        return umr_any(a, axis, dtype, out, keepdims)
    return umr_any(a, axis, dtype, out, keepdims, where=where)
|
||||
|
||||
def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
|
||||
# By default, return a boolean for any and all
|
||||
if dtype is None:
|
||||
dtype = bool_dt
|
||||
# Parsing keyword arguments is currently fairly slow, so avoid it for now
|
||||
if where is True:
|
||||
return umr_all(a, axis, dtype, out, keepdims)
|
||||
return umr_all(a, axis, dtype, out, keepdims, where=where)
|
||||
|
||||
def _count_reduce_items(arr, axis, keepdims=False, where=True):
|
||||
# fast-path for the default case
|
||||
if where is True:
|
||||
# no boolean mask given, calculate items according to axis
|
||||
if axis is None:
|
||||
axis = tuple(range(arr.ndim))
|
||||
elif not isinstance(axis, tuple):
|
||||
axis = (axis,)
|
||||
items = 1
|
||||
for ax in axis:
|
||||
items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
|
||||
items = nt.intp(items)
|
||||
else:
|
||||
# TODO: Optimize case when `where` is broadcast along a non-reduction
|
||||
# axis and full sum is more excessive than needed.
|
||||
|
||||
# guarded to protect circular imports
|
||||
from numpy.lib._stride_tricks_impl import broadcast_to
|
||||
# count True values in (potentially broadcasted) boolean mask
|
||||
items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
|
||||
keepdims)
|
||||
return items
|
||||
|
||||
def _clip(a, min=None, max=None, out=None, **kwargs):
|
||||
if a.dtype.kind in "iu":
|
||||
# If min/max is a Python integer, deal with out-of-bound values here.
|
||||
# (This enforces NEP 50 rules as no value based promotion is done.)
|
||||
if type(min) is int and min <= np.iinfo(a.dtype).min:
|
||||
min = None
|
||||
if type(max) is int and max >= np.iinfo(a.dtype).max:
|
||||
max = None
|
||||
|
||||
if min is None and max is None:
|
||||
# return identity
|
||||
return um.positive(a, out=out, **kwargs)
|
||||
elif min is None:
|
||||
return um.minimum(a, max, out=out, **kwargs)
|
||||
elif max is None:
|
||||
return um.maximum(a, min, out=out, **kwargs)
|
||||
else:
|
||||
return um.clip(a, min, max, out=out, **kwargs)
|
||||
|
||||
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
|
||||
arr = asanyarray(a)
|
||||
|
||||
is_float16_result = False
|
||||
|
||||
rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
|
||||
if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
|
||||
warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
|
||||
|
||||
# Cast bool, unsigned int, and int to float64 by default
|
||||
if dtype is None:
|
||||
if issubclass(arr.dtype.type, (nt.integer, nt.bool)):
|
||||
dtype = mu.dtype('f8')
|
||||
elif issubclass(arr.dtype.type, nt.float16):
|
||||
dtype = mu.dtype('f4')
|
||||
is_float16_result = True
|
||||
|
||||
ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
|
||||
if isinstance(ret, mu.ndarray):
|
||||
ret = um.true_divide(
|
||||
ret, rcount, out=ret, casting='unsafe', subok=False)
|
||||
if is_float16_result and out is None:
|
||||
ret = arr.dtype.type(ret)
|
||||
elif hasattr(ret, 'dtype'):
|
||||
if is_float16_result:
|
||||
ret = arr.dtype.type(ret / rcount)
|
||||
else:
|
||||
ret = ret.dtype.type(ret / rcount)
|
||||
else:
|
||||
ret = ret / rcount
|
||||
|
||||
return ret
|
||||
|
||||
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
|
||||
where=True, mean=None):
|
||||
arr = asanyarray(a)
|
||||
|
||||
rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
|
||||
# Make this warning show up on top.
|
||||
if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
|
||||
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
|
||||
stacklevel=2)
|
||||
|
||||
# Cast bool, unsigned int, and int to float64 by default
|
||||
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)):
|
||||
dtype = mu.dtype('f8')
|
||||
|
||||
if mean is not None:
|
||||
arrmean = mean
|
||||
else:
|
||||
# Compute the mean.
|
||||
# Note that if dtype is not of inexact type then arraymean will
|
||||
# not be either.
|
||||
arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
|
||||
# The shape of rcount has to match arrmean to not change the shape of
|
||||
# out in broadcasting. Otherwise, it cannot be stored back to arrmean.
|
||||
if rcount.ndim == 0:
|
||||
# fast-path for default case when where is True
|
||||
div = rcount
|
||||
else:
|
||||
# matching rcount to arrmean when where is specified as array
|
||||
div = rcount.reshape(arrmean.shape)
|
||||
if isinstance(arrmean, mu.ndarray):
|
||||
arrmean = um.true_divide(arrmean, div, out=arrmean,
|
||||
casting='unsafe', subok=False)
|
||||
elif hasattr(arrmean, "dtype"):
|
||||
arrmean = arrmean.dtype.type(arrmean / rcount)
|
||||
else:
|
||||
arrmean = arrmean / rcount
|
||||
|
||||
# Compute sum of squared deviations from mean
|
||||
# Note that x may not be inexact and that we need it to be an array,
|
||||
# not a scalar.
|
||||
x = asanyarray(arr - arrmean)
|
||||
|
||||
if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
|
||||
x = um.multiply(x, x, out=x)
|
||||
# Fast-paths for built-in complex types
|
||||
elif x.dtype in _complex_to_float:
|
||||
xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
|
||||
um.multiply(xv, xv, out=xv)
|
||||
x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
|
||||
# Most general case; includes handling object arrays containing imaginary
|
||||
# numbers and complex types with non-native byteorder
|
||||
else:
|
||||
x = um.multiply(x, um.conjugate(x), out=x).real
|
||||
|
||||
ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)
|
||||
|
||||
# Compute degrees of freedom and make sure it is not negative.
|
||||
rcount = um.maximum(rcount - ddof, 0)
|
||||
|
||||
# divide by degrees of freedom
|
||||
if isinstance(ret, mu.ndarray):
|
||||
ret = um.true_divide(
|
||||
ret, rcount, out=ret, casting='unsafe', subok=False)
|
||||
elif hasattr(ret, 'dtype'):
|
||||
ret = ret.dtype.type(ret / rcount)
|
||||
else:
|
||||
ret = ret / rcount
|
||||
|
||||
return ret
|
||||
|
||||
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
|
||||
where=True, mean=None):
|
||||
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
|
||||
keepdims=keepdims, where=where, mean=mean)
|
||||
|
||||
if isinstance(ret, mu.ndarray):
|
||||
ret = um.sqrt(ret, out=ret)
|
||||
elif hasattr(ret, 'dtype'):
|
||||
ret = ret.dtype.type(um.sqrt(ret))
|
||||
else:
|
||||
ret = um.sqrt(ret)
|
||||
|
||||
return ret
|
||||
|
||||
def _ptp(a, axis=None, out=None, keepdims=False):
|
||||
return um.subtract(
|
||||
umr_maximum(a, axis, None, out, keepdims),
|
||||
umr_minimum(a, axis, None, None, keepdims),
|
||||
out
|
||||
)
|
||||
|
||||
def _dump(self, file, protocol=2):
|
||||
if hasattr(file, 'write'):
|
||||
ctx = nullcontext(file)
|
||||
else:
|
||||
ctx = open(os.fspath(file), "wb")
|
||||
with ctx as f:
|
||||
pickle.dump(self, f, protocol=protocol)
|
||||
|
||||
def _dumps(self, protocol=2):
|
||||
return pickle.dumps(self, protocol=protocol)
|
||||
|
||||
def _bitwise_count(a, out=None, *, where=True, casting='same_kind',
|
||||
order='K', dtype=None, subok=True):
|
||||
return umr_bitwise_count(a, out, where=where, casting=casting,
|
||||
order=order, dtype=dtype, subok=subok)
|
||||
22
lib/python3.11/site-packages/numpy/_core/_methods.pyi
Normal file
22
lib/python3.11/site-packages/numpy/_core/_methods.pyi
Normal file
@ -0,0 +1,22 @@
|
||||
# Type stubs for numpy._core._methods: bound ufunc reductions plus the
# complex->float dtype table consumed by _var()'s fast path.
from collections.abc import Callable
from typing import Any, Concatenate, TypeAlias

import numpy as np

from . import _exceptions as _exceptions

###

# Shape of any bound ufunc ``.reduce``: one positional array, then anything.
_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any]

###

bool_dt: np.dtype[np.bool] = ...
umr_maximum: _Reduce2 = ...
umr_minimum: _Reduce2 = ...
umr_sum: _Reduce2 = ...
umr_prod: _Reduce2 = ...
umr_bitwise_count = np.bitwise_count
umr_any: _Reduce2 = ...
umr_all: _Reduce2 = ...
_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ...
|
||||
BIN
lib/python3.11/site-packages/numpy/_core/_multiarray_tests.cpython-311-darwin.so
Executable file
BIN
lib/python3.11/site-packages/numpy/_core/_multiarray_tests.cpython-311-darwin.so
Executable file
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/_core/_multiarray_umath.cpython-311-darwin.so
Executable file
BIN
lib/python3.11/site-packages/numpy/_core/_multiarray_umath.cpython-311-darwin.so
Executable file
Binary file not shown.
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/_core/_rational_tests.cpython-311-darwin.so
Executable file
BIN
lib/python3.11/site-packages/numpy/_core/_rational_tests.cpython-311-darwin.so
Executable file
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/_core/_simd.cpython-311-darwin.so
Executable file
BIN
lib/python3.11/site-packages/numpy/_core/_simd.cpython-311-darwin.so
Executable file
Binary file not shown.
25
lib/python3.11/site-packages/numpy/_core/_simd.pyi
Normal file
25
lib/python3.11/site-packages/numpy/_core/_simd.pyi
Normal file
@ -0,0 +1,25 @@
|
||||
# Type stubs for numpy._core._simd: optional per-target SIMD test modules.
from types import ModuleType
from typing import TypedDict, type_check_only

# NOTE: these 5 are only defined on systems with an intel processor
SSE42: ModuleType | None = ...
FMA3: ModuleType | None = ...
AVX2: ModuleType | None = ...
AVX512F: ModuleType | None = ...
AVX512_SKX: ModuleType | None = ...

baseline: ModuleType | None = ...

@type_check_only
class SimdTargets(TypedDict):
    SSE42: ModuleType | None
    AVX2: ModuleType | None
    FMA3: ModuleType | None
    AVX512F: ModuleType | None
    AVX512_SKX: ModuleType | None
    baseline: ModuleType | None

targets: SimdTargets = ...

def clear_floatstatus() -> None: ...
def get_floatstatus() -> int: ...
|
||||
100
lib/python3.11/site-packages/numpy/_core/_string_helpers.py
Normal file
100
lib/python3.11/site-packages/numpy/_core/_string_helpers.py
Normal file
@ -0,0 +1,100 @@
|
||||
"""
|
||||
String-handling utilities to avoid locale-dependence.
|
||||
|
||||
Used primarily to generate type name aliases.
|
||||
"""
|
||||
# "import string" is costly to import!
|
||||
# Construct the translation tables directly
|
||||
# "A" = chr(65), "a" = chr(97)
|
||||
_all_chars = tuple(map(chr, range(256)))
|
||||
_ascii_upper = _all_chars[65:65 + 26]
|
||||
_ascii_lower = _all_chars[97:97 + 26]
|
||||
LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:]
|
||||
UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:]
|
||||
|
||||
|
||||
def english_lower(s):
|
||||
""" Apply English case rules to convert ASCII strings to all lower case.
|
||||
|
||||
This is an internal utility function to replace calls to str.lower() such
|
||||
that we can avoid changing behavior with changing locales. In particular,
|
||||
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
|
||||
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
s : str
|
||||
|
||||
Returns
|
||||
-------
|
||||
lowered : str
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy._core.numerictypes import english_lower
|
||||
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
|
||||
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
|
||||
>>> english_lower('')
|
||||
''
|
||||
"""
|
||||
lowered = s.translate(LOWER_TABLE)
|
||||
return lowered
|
||||
|
||||
|
||||
def english_upper(s):
|
||||
""" Apply English case rules to convert ASCII strings to all upper case.
|
||||
|
||||
This is an internal utility function to replace calls to str.upper() such
|
||||
that we can avoid changing behavior with changing locales. In particular,
|
||||
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
|
||||
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
s : str
|
||||
|
||||
Returns
|
||||
-------
|
||||
uppered : str
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy._core.numerictypes import english_upper
|
||||
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
|
||||
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
|
||||
>>> english_upper('')
|
||||
''
|
||||
"""
|
||||
uppered = s.translate(UPPER_TABLE)
|
||||
return uppered
|
||||
|
||||
|
||||
def english_capitalize(s):
|
||||
""" Apply English case rules to convert the first character of an ASCII
|
||||
string to upper case.
|
||||
|
||||
This is an internal utility function to replace calls to str.capitalize()
|
||||
such that we can avoid changing behavior with changing locales.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
s : str
|
||||
|
||||
Returns
|
||||
-------
|
||||
capitalized : str
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy._core.numerictypes import english_capitalize
|
||||
>>> english_capitalize('int8')
|
||||
'Int8'
|
||||
>>> english_capitalize('Int8')
|
||||
'Int8'
|
||||
>>> english_capitalize('')
|
||||
''
|
||||
"""
|
||||
if s:
|
||||
return english_upper(s[0]) + s[1:]
|
||||
else:
|
||||
return s
|
||||
12
lib/python3.11/site-packages/numpy/_core/_string_helpers.pyi
Normal file
12
lib/python3.11/site-packages/numpy/_core/_string_helpers.pyi
Normal file
@ -0,0 +1,12 @@
|
||||
# Type stubs for numpy._core._string_helpers.
from typing import Final

_all_chars: Final[tuple[str, ...]] = ...
_ascii_upper: Final[tuple[str, ...]] = ...
_ascii_lower: Final[tuple[str, ...]] = ...

LOWER_TABLE: Final[tuple[str, ...]] = ...
UPPER_TABLE: Final[tuple[str, ...]] = ...

def english_lower(s: str) -> str: ...
def english_upper(s: str) -> str: ...
def english_capitalize(s: str) -> str: ...
|
||||
Binary file not shown.
119
lib/python3.11/site-packages/numpy/_core/_type_aliases.py
Normal file
119
lib/python3.11/site-packages/numpy/_core/_type_aliases.py
Normal file
@ -0,0 +1,119 @@
|
||||
"""
|
||||
Due to compatibility, numpy has a very large number of different naming
|
||||
conventions for the scalar types (those subclassing from `numpy.generic`).
|
||||
This file produces a convoluted set of dictionaries mapping names to types,
|
||||
and sometimes other mappings too.
|
||||
|
||||
.. data:: allTypes
|
||||
A dictionary of names to types that will be exposed as attributes through
|
||||
``np._core.numerictypes.*``
|
||||
|
||||
.. data:: sctypeDict
|
||||
Similar to `allTypes`, but maps a broader set of aliases to their types.
|
||||
|
||||
.. data:: sctypes
|
||||
A dictionary keyed by a "type group" string, providing a list of types
|
||||
under that group.
|
||||
|
||||
"""
|
||||
|
||||
import numpy._core.multiarray as ma
|
||||
from numpy._core.multiarray import dtype, typeinfo
|
||||
|
||||
######################################
|
||||
# Building `sctypeDict` and `allTypes`
|
||||
######################################
|
||||
|
||||
sctypeDict = {}
|
||||
allTypes = {}
|
||||
c_names_dict = {}
|
||||
|
||||
_abstract_type_names = {
|
||||
"generic", "integer", "inexact", "floating", "number",
|
||||
"flexible", "character", "complexfloating", "unsignedinteger",
|
||||
"signedinteger"
|
||||
}
|
||||
|
||||
for _abstract_type_name in _abstract_type_names:
|
||||
allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name)
|
||||
|
||||
for k, v in typeinfo.items():
|
||||
if k.startswith("NPY_") and v not in c_names_dict:
|
||||
c_names_dict[k[4:]] = v
|
||||
else:
|
||||
concrete_type = v.type
|
||||
allTypes[k] = concrete_type
|
||||
sctypeDict[k] = concrete_type
|
||||
|
||||
_aliases = {
|
||||
"double": "float64",
|
||||
"cdouble": "complex128",
|
||||
"single": "float32",
|
||||
"csingle": "complex64",
|
||||
"half": "float16",
|
||||
"bool_": "bool",
|
||||
# Default integer:
|
||||
"int_": "intp",
|
||||
"uint": "uintp",
|
||||
}
|
||||
|
||||
for k, v in _aliases.items():
|
||||
sctypeDict[k] = allTypes[v]
|
||||
allTypes[k] = allTypes[v]
|
||||
|
||||
# extra aliases are added only to `sctypeDict`
|
||||
# to support dtype name access, such as`np.dtype("float")`
|
||||
_extra_aliases = {
|
||||
"float": "float64",
|
||||
"complex": "complex128",
|
||||
"object": "object_",
|
||||
"bytes": "bytes_",
|
||||
"a": "bytes_",
|
||||
"int": "int_",
|
||||
"str": "str_",
|
||||
"unicode": "str_",
|
||||
}
|
||||
|
||||
for k, v in _extra_aliases.items():
|
||||
sctypeDict[k] = allTypes[v]
|
||||
|
||||
# include extended precision sized aliases
|
||||
for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]:
|
||||
longdouble_type: type = allTypes[full_name]
|
||||
|
||||
bits: int = dtype(longdouble_type).itemsize * 8
|
||||
base_name: str = "complex" if is_complex else "float"
|
||||
extended_prec_name: str = f"{base_name}{bits}"
|
||||
if extended_prec_name not in allTypes:
|
||||
sctypeDict[extended_prec_name] = longdouble_type
|
||||
allTypes[extended_prec_name] = longdouble_type
|
||||
|
||||
|
||||
####################
|
||||
# Building `sctypes`
|
||||
####################
|
||||
|
||||
sctypes = {"int": set(), "uint": set(), "float": set(),
|
||||
"complex": set(), "others": set()}
|
||||
|
||||
for type_info in typeinfo.values():
|
||||
if type_info.kind in ["M", "m"]: # exclude timedelta and datetime
|
||||
continue
|
||||
|
||||
concrete_type = type_info.type
|
||||
|
||||
# find proper group for each concrete type
|
||||
for type_group, abstract_type in [
|
||||
("int", ma.signedinteger), ("uint", ma.unsignedinteger),
|
||||
("float", ma.floating), ("complex", ma.complexfloating),
|
||||
("others", ma.generic)
|
||||
]:
|
||||
if issubclass(concrete_type, abstract_type):
|
||||
sctypes[type_group].add(concrete_type)
|
||||
break
|
||||
|
||||
# sort sctype groups by bitsize
|
||||
for sctype_key in sctypes.keys():
|
||||
sctype_list = list(sctypes[sctype_key])
|
||||
sctype_list.sort(key=lambda x: dtype(x).itemsize)
|
||||
sctypes[sctype_key] = sctype_list
|
||||
97
lib/python3.11/site-packages/numpy/_core/_type_aliases.pyi
Normal file
97
lib/python3.11/site-packages/numpy/_core/_type_aliases.pyi
Normal file
@ -0,0 +1,97 @@
|
||||
from collections.abc import Collection
|
||||
from typing import Final, TypeAlias, TypedDict, type_check_only
|
||||
from typing import Literal as L
|
||||
|
||||
import numpy as np
|
||||
|
||||
__all__ = (
    "_abstract_type_names",
    "_aliases",
    "_extra_aliases",
    "allTypes",
    "c_names_dict",
    "sctypeDict",
    "sctypes",
)

sctypeDict: Final[dict[str, type[np.generic]]]
allTypes: Final[dict[str, type[np.generic]]]

# Maps NPY_* C type names (minus the prefix) to their dtypes.
@type_check_only
class _CNamesDict(TypedDict):
    BOOL: np.dtype[np.bool]
    HALF: np.dtype[np.half]
    FLOAT: np.dtype[np.single]
    DOUBLE: np.dtype[np.double]
    LONGDOUBLE: np.dtype[np.longdouble]
    CFLOAT: np.dtype[np.csingle]
    CDOUBLE: np.dtype[np.cdouble]
    CLONGDOUBLE: np.dtype[np.clongdouble]
    STRING: np.dtype[np.bytes_]
    UNICODE: np.dtype[np.str_]
    VOID: np.dtype[np.void]
    OBJECT: np.dtype[np.object_]
    DATETIME: np.dtype[np.datetime64]
    TIMEDELTA: np.dtype[np.timedelta64]
    BYTE: np.dtype[np.byte]
    UBYTE: np.dtype[np.ubyte]
    SHORT: np.dtype[np.short]
    USHORT: np.dtype[np.ushort]
    INT: np.dtype[np.intc]
    UINT: np.dtype[np.uintc]
    LONG: np.dtype[np.long]
    ULONG: np.dtype[np.ulong]
    LONGLONG: np.dtype[np.longlong]
    ULONGLONG: np.dtype[np.ulonglong]

c_names_dict: Final[_CNamesDict]

# Names of the abstract scalar base classes exposed via numerictypes.
_AbstractTypeName: TypeAlias = L[
    "generic",
    "flexible",
    "character",
    "number",
    "integer",
    "inexact",
    "unsignedinteger",
    "signedinteger",
    "floating",
    "complexfloating",
]
_abstract_type_names: Final[set[_AbstractTypeName]]
|
||||
|
||||
@type_check_only
|
||||
class _AliasesType(TypedDict):
|
||||
double: L["float64"]
|
||||
cdouble: L["complex128"]
|
||||
single: L["float32"]
|
||||
csingle: L["complex64"]
|
||||
half: L["float16"]
|
||||
bool_: L["bool"]
|
||||
int_: L["intp"]
|
||||
uint: L["intp"]
|
||||
|
||||
_aliases: Final[_AliasesType]
|
||||
|
||||
# dtype-name shortcuts accepted by ``np.dtype(...)`` but not exported as
# module attributes.
@type_check_only
class _ExtraAliasesType(TypedDict):
    float: L["float64"]
    complex: L["complex128"]
    object: L["object_"]
    bytes: L["bytes_"]
    a: L["bytes_"]
    int: L["int_"]
    str: L["str_"]
    unicode: L["str_"]

_extra_aliases: Final[_ExtraAliasesType]

# Concrete scalar types grouped by kind, ordered by item size.
@type_check_only
class _SCTypes(TypedDict):
    int: Collection[type[np.signedinteger]]
    uint: Collection[type[np.unsignedinteger]]
    float: Collection[type[np.floating]]
    complex: Collection[type[np.complexfloating]]
    others: Collection[type[np.flexible | np.bool | np.object_]]

sctypes: Final[_SCTypes]
|
||||
489
lib/python3.11/site-packages/numpy/_core/_ufunc_config.py
Normal file
489
lib/python3.11/site-packages/numpy/_core/_ufunc_config.py
Normal file
@ -0,0 +1,489 @@
|
||||
"""
|
||||
Functions for changing global ufunc configuration
|
||||
|
||||
This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and
|
||||
`_extobj_contextvar` from umath.
|
||||
"""
|
||||
import functools
|
||||
|
||||
from numpy._utils import set_module
|
||||
|
||||
from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj
|
||||
|
||||
__all__ = [
|
||||
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
|
||||
"errstate"
|
||||
]
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
|
||||
"""
|
||||
Set how floating-point errors are handled.
|
||||
|
||||
Note that operations on integer scalar types (such as `int16`) are
|
||||
handled like floating point, and are affected by these settings.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||
Set treatment for all types of floating-point errors at once:
|
||||
|
||||
- ignore: Take no action when the exception occurs.
|
||||
- warn: Print a :exc:`RuntimeWarning` (via the Python `warnings`
|
||||
module).
|
||||
- raise: Raise a :exc:`FloatingPointError`.
|
||||
- call: Call a function specified using the `seterrcall` function.
|
||||
- print: Print a warning directly to ``stdout``.
|
||||
- log: Record error in a Log object specified by `seterrcall`.
|
||||
|
||||
The default is not to change the current behavior.
|
||||
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||
Treatment for division by zero.
|
||||
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||
Treatment for floating-point overflow.
|
||||
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||
Treatment for floating-point underflow.
|
||||
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||
Treatment for invalid floating-point operation.
|
||||
|
||||
Returns
|
||||
-------
|
||||
old_settings : dict
|
||||
Dictionary containing the old settings.
|
||||
|
||||
See also
|
||||
--------
|
||||
seterrcall : Set a callback function for the 'call' mode.
|
||||
geterr, geterrcall, errstate
|
||||
|
||||
Notes
|
||||
-----
|
||||
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
|
||||
|
||||
- Division by zero: infinite result obtained from finite numbers.
|
||||
- Overflow: result too large to be expressed.
|
||||
- Underflow: result so close to zero that some precision
|
||||
was lost.
|
||||
- Invalid operation: result is not an expressible number, typically
|
||||
indicates that a NaN was produced.
|
||||
|
||||
.. [1] https://en.wikipedia.org/wiki/IEEE_754
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> orig_settings = np.seterr(all='ignore') # seterr to known value
|
||||
>>> np.int16(32000) * np.int16(3)
|
||||
np.int16(30464)
|
||||
>>> np.seterr(over='raise')
|
||||
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
|
||||
>>> old_settings = np.seterr(all='warn', over='raise')
|
||||
>>> np.int16(32000) * np.int16(3)
|
||||
Traceback (most recent call last):
|
||||
File "<stdin>", line 1, in <module>
|
||||
FloatingPointError: overflow encountered in scalar multiply
|
||||
|
||||
>>> old_settings = np.seterr(all='print')
|
||||
>>> np.geterr()
|
||||
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
|
||||
>>> np.int16(32000) * np.int16(3)
|
||||
np.int16(30464)
|
||||
>>> np.seterr(**orig_settings) # restore original
|
||||
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
|
||||
|
||||
"""
|
||||
|
||||
old = _get_extobj_dict()
|
||||
# The errstate doesn't include call and bufsize, so pop them:
|
||||
old.pop("call", None)
|
||||
old.pop("bufsize", None)
|
||||
|
||||
extobj = _make_extobj(
|
||||
all=all, divide=divide, over=over, under=under, invalid=invalid)
|
||||
_extobj_contextvar.set(extobj)
|
||||
return old
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def geterr():
|
||||
"""
|
||||
Get the current way of handling floating-point errors.
|
||||
|
||||
Returns
|
||||
-------
|
||||
res : dict
|
||||
A dictionary with keys "divide", "over", "under", and "invalid",
|
||||
whose values are from the strings "ignore", "print", "log", "warn",
|
||||
"raise", and "call". The keys represent possible floating-point
|
||||
exceptions, and the values define how these exceptions are handled.
|
||||
|
||||
See Also
|
||||
--------
|
||||
geterrcall, seterr, seterrcall
|
||||
|
||||
Notes
|
||||
-----
|
||||
For complete documentation of the types of floating-point exceptions and
|
||||
treatment options, see `seterr`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> np.geterr()
|
||||
{'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
|
||||
>>> np.arange(3.) / np.arange(3.) # doctest: +SKIP
|
||||
array([nan, 1., 1.])
|
||||
RuntimeWarning: invalid value encountered in divide
|
||||
|
||||
>>> oldsettings = np.seterr(all='warn', invalid='raise')
|
||||
>>> np.geterr()
|
||||
{'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'}
|
||||
>>> np.arange(3.) / np.arange(3.)
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
FloatingPointError: invalid value encountered in divide
|
||||
>>> oldsettings = np.seterr(**oldsettings) # restore original
|
||||
|
||||
"""
|
||||
res = _get_extobj_dict()
|
||||
# The "geterr" doesn't include call and bufsize,:
|
||||
res.pop("call", None)
|
||||
res.pop("bufsize", None)
|
||||
return res
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def setbufsize(size):
|
||||
"""
|
||||
Set the size of the buffer used in ufuncs.
|
||||
|
||||
.. versionchanged:: 2.0
|
||||
The scope of setting the buffer is tied to the `numpy.errstate`
|
||||
context. Exiting a ``with errstate():`` will also restore the bufsize.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
size : int
|
||||
Size of buffer.
|
||||
|
||||
Returns
|
||||
-------
|
||||
bufsize : int
|
||||
Previous size of ufunc buffer in bytes.
|
||||
|
||||
Examples
|
||||
--------
|
||||
When exiting a `numpy.errstate` context manager the bufsize is restored:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> with np.errstate():
|
||||
... np.setbufsize(4096)
|
||||
... print(np.getbufsize())
|
||||
...
|
||||
8192
|
||||
4096
|
||||
>>> np.getbufsize()
|
||||
8192
|
||||
|
||||
"""
|
||||
old = _get_extobj_dict()["bufsize"]
|
||||
extobj = _make_extobj(bufsize=size)
|
||||
_extobj_contextvar.set(extobj)
|
||||
return old
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def getbufsize():
|
||||
"""
|
||||
Return the size of the buffer used in ufuncs.
|
||||
|
||||
Returns
|
||||
-------
|
||||
getbufsize : int
|
||||
Size of ufunc buffer in bytes.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> np.getbufsize()
|
||||
8192
|
||||
|
||||
"""
|
||||
return _get_extobj_dict()["bufsize"]
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def seterrcall(func):
|
||||
"""
|
||||
Set the floating-point error callback function or log object.
|
||||
|
||||
There are two ways to capture floating-point error messages. The first
|
||||
is to set the error-handler to 'call', using `seterr`. Then, set
|
||||
the function to call using this function.
|
||||
|
||||
The second is to set the error-handler to 'log', using `seterr`.
|
||||
Floating-point errors then trigger a call to the 'write' method of
|
||||
the provided object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
func : callable f(err, flag) or object with write method
|
||||
Function to call upon floating-point errors ('call'-mode) or
|
||||
object whose 'write' method is used to log such message ('log'-mode).
|
||||
|
||||
The call function takes two arguments. The first is a string describing
|
||||
the type of error (such as "divide by zero", "overflow", "underflow",
|
||||
or "invalid value"), and the second is the status flag. The flag is a
|
||||
byte, whose four least-significant bits indicate the type of error, one
|
||||
of "divide", "over", "under", "invalid"::
|
||||
|
||||
[0 0 0 0 divide over under invalid]
|
||||
|
||||
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
|
||||
|
||||
If an object is provided, its write method should take one argument,
|
||||
a string.
|
||||
|
||||
Returns
|
||||
-------
|
||||
h : callable, log instance or None
|
||||
The old error handler.
|
||||
|
||||
See Also
|
||||
--------
|
||||
seterr, geterr, geterrcall
|
||||
|
||||
Examples
|
||||
--------
|
||||
Callback upon error:
|
||||
|
||||
>>> def err_handler(type, flag):
|
||||
... print("Floating point error (%s), with flag %s" % (type, flag))
|
||||
...
|
||||
|
||||
>>> import numpy as np
|
||||
|
||||
>>> orig_handler = np.seterrcall(err_handler)
|
||||
>>> orig_err = np.seterr(all='call')
|
||||
|
||||
>>> np.array([1, 2, 3]) / 0.0
|
||||
Floating point error (divide by zero), with flag 1
|
||||
array([inf, inf, inf])
|
||||
|
||||
>>> np.seterrcall(orig_handler)
|
||||
<function err_handler at 0x...>
|
||||
>>> np.seterr(**orig_err)
|
||||
{'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}
|
||||
|
||||
Log error message:
|
||||
|
||||
>>> class Log:
|
||||
... def write(self, msg):
|
||||
... print("LOG: %s" % msg)
|
||||
...
|
||||
|
||||
>>> log = Log()
|
||||
>>> saved_handler = np.seterrcall(log)
|
||||
>>> save_err = np.seterr(all='log')
|
||||
|
||||
>>> np.array([1, 2, 3]) / 0.0
|
||||
LOG: Warning: divide by zero encountered in divide
|
||||
array([inf, inf, inf])
|
||||
|
||||
>>> np.seterrcall(orig_handler)
|
||||
<numpy.Log object at 0x...>
|
||||
>>> np.seterr(**orig_err)
|
||||
{'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
|
||||
|
||||
"""
|
||||
old = _get_extobj_dict()["call"]
|
||||
extobj = _make_extobj(call=func)
|
||||
_extobj_contextvar.set(extobj)
|
||||
return old
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def geterrcall():
|
||||
"""
|
||||
Return the current callback function used on floating-point errors.
|
||||
|
||||
When the error handling for a floating-point error (one of "divide",
|
||||
"over", "under", or "invalid") is set to 'call' or 'log', the function
|
||||
that is called or the log instance that is written to is returned by
|
||||
`geterrcall`. This function or log instance has been set with
|
||||
`seterrcall`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
errobj : callable, log instance or None
|
||||
The current error handler. If no handler was set through `seterrcall`,
|
||||
``None`` is returned.
|
||||
|
||||
See Also
|
||||
--------
|
||||
seterrcall, seterr, geterr
|
||||
|
||||
Notes
|
||||
-----
|
||||
For complete documentation of the types of floating-point exceptions and
|
||||
treatment options, see `seterr`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> np.geterrcall() # we did not yet set a handler, returns None
|
||||
|
||||
>>> orig_settings = np.seterr(all='call')
|
||||
>>> def err_handler(type, flag):
|
||||
... print("Floating point error (%s), with flag %s" % (type, flag))
|
||||
>>> old_handler = np.seterrcall(err_handler)
|
||||
>>> np.array([1, 2, 3]) / 0.0
|
||||
Floating point error (divide by zero), with flag 1
|
||||
array([inf, inf, inf])
|
||||
|
||||
>>> cur_handler = np.geterrcall()
|
||||
>>> cur_handler is err_handler
|
||||
True
|
||||
>>> old_settings = np.seterr(**orig_settings) # restore original
|
||||
>>> old_handler = np.seterrcall(None) # restore original
|
||||
|
||||
"""
|
||||
return _get_extobj_dict()["call"]
|
||||
|
||||
|
||||
class _unspecified:
|
||||
pass
|
||||
|
||||
|
||||
_Unspecified = _unspecified()
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
class errstate:
|
||||
"""
|
||||
errstate(**kwargs)
|
||||
|
||||
Context manager for floating-point error handling.
|
||||
|
||||
Using an instance of `errstate` as a context manager allows statements in
|
||||
that context to execute with a known error handling behavior. Upon entering
|
||||
the context the error handling is set with `seterr` and `seterrcall`, and
|
||||
upon exiting it is reset to what it was before.
|
||||
|
||||
.. versionchanged:: 1.17.0
|
||||
`errstate` is also usable as a function decorator, saving
|
||||
a level of indentation if an entire function is wrapped.
|
||||
|
||||
.. versionchanged:: 2.0
|
||||
`errstate` is now fully thread and asyncio safe, but may not be
|
||||
entered more than once.
|
||||
It is not safe to decorate async functions using ``errstate``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
kwargs : {divide, over, under, invalid}
|
||||
Keyword arguments. The valid keywords are the possible floating-point
|
||||
exceptions. Each keyword should have a string value that defines the
|
||||
treatment for the particular error. Possible values are
|
||||
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
|
||||
|
||||
See Also
|
||||
--------
|
||||
seterr, geterr, seterrcall, geterrcall
|
||||
|
||||
Notes
|
||||
-----
|
||||
For complete documentation of the types of floating-point exceptions and
|
||||
treatment options, see `seterr`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
|
||||
|
||||
>>> np.arange(3) / 0.
|
||||
array([nan, inf, inf])
|
||||
>>> with np.errstate(divide='ignore'):
|
||||
... np.arange(3) / 0.
|
||||
array([nan, inf, inf])
|
||||
|
||||
>>> np.sqrt(-1)
|
||||
np.float64(nan)
|
||||
>>> with np.errstate(invalid='raise'):
|
||||
... np.sqrt(-1)
|
||||
Traceback (most recent call last):
|
||||
File "<stdin>", line 2, in <module>
|
||||
FloatingPointError: invalid value encountered in sqrt
|
||||
|
||||
Outside the context the error handling behavior has not changed:
|
||||
|
||||
>>> np.geterr()
|
||||
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
|
||||
>>> olderr = np.seterr(**olderr) # restore original state
|
||||
|
||||
"""
|
||||
__slots__ = (
|
||||
"_all",
|
||||
"_call",
|
||||
"_divide",
|
||||
"_invalid",
|
||||
"_over",
|
||||
"_token",
|
||||
"_under",
|
||||
)
|
||||
|
||||
def __init__(self, *, call=_Unspecified,
|
||||
all=None, divide=None, over=None, under=None, invalid=None):
|
||||
self._token = None
|
||||
self._call = call
|
||||
self._all = all
|
||||
self._divide = divide
|
||||
self._over = over
|
||||
self._under = under
|
||||
self._invalid = invalid
|
||||
|
||||
def __enter__(self):
|
||||
# Note that __call__ duplicates much of this logic
|
||||
if self._token is not None:
|
||||
raise TypeError("Cannot enter `np.errstate` twice.")
|
||||
if self._call is _Unspecified:
|
||||
extobj = _make_extobj(
|
||||
all=self._all, divide=self._divide, over=self._over,
|
||||
under=self._under, invalid=self._invalid)
|
||||
else:
|
||||
extobj = _make_extobj(
|
||||
call=self._call,
|
||||
all=self._all, divide=self._divide, over=self._over,
|
||||
under=self._under, invalid=self._invalid)
|
||||
|
||||
self._token = _extobj_contextvar.set(extobj)
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
_extobj_contextvar.reset(self._token)
|
||||
|
||||
def __call__(self, func):
|
||||
# We need to customize `__call__` compared to `ContextDecorator`
|
||||
# because we must store the token per-thread so cannot store it on
|
||||
# the instance (we could create a new instance for this).
|
||||
# This duplicates the code from `__enter__`.
|
||||
@functools.wraps(func)
|
||||
def inner(*args, **kwargs):
|
||||
if self._call is _Unspecified:
|
||||
extobj = _make_extobj(
|
||||
all=self._all, divide=self._divide, over=self._over,
|
||||
under=self._under, invalid=self._invalid)
|
||||
else:
|
||||
extobj = _make_extobj(
|
||||
call=self._call,
|
||||
all=self._all, divide=self._divide, over=self._over,
|
||||
under=self._under, invalid=self._invalid)
|
||||
|
||||
_token = _extobj_contextvar.set(extobj)
|
||||
try:
|
||||
# Call the original, decorated, function:
|
||||
return func(*args, **kwargs)
|
||||
finally:
|
||||
_extobj_contextvar.reset(_token)
|
||||
|
||||
return inner
|
||||
32
lib/python3.11/site-packages/numpy/_core/_ufunc_config.pyi
Normal file
32
lib/python3.11/site-packages/numpy/_core/_ufunc_config.pyi
Normal file
@ -0,0 +1,32 @@
|
||||
from collections.abc import Callable
|
||||
from typing import Any, Literal, TypeAlias, TypedDict, type_check_only
|
||||
|
||||
from _typeshed import SupportsWrite
|
||||
|
||||
from numpy import errstate as errstate
|
||||
|
||||
_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"]
|
||||
_ErrFunc: TypeAlias = Callable[[str, int], Any]
|
||||
_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str]
|
||||
|
||||
@type_check_only
|
||||
class _ErrDict(TypedDict):
|
||||
divide: _ErrKind
|
||||
over: _ErrKind
|
||||
under: _ErrKind
|
||||
invalid: _ErrKind
|
||||
|
||||
def seterr(
|
||||
all: _ErrKind | None = ...,
|
||||
divide: _ErrKind | None = ...,
|
||||
over: _ErrKind | None = ...,
|
||||
under: _ErrKind | None = ...,
|
||||
invalid: _ErrKind | None = ...,
|
||||
) -> _ErrDict: ...
|
||||
def geterr() -> _ErrDict: ...
|
||||
def setbufsize(size: int) -> int: ...
|
||||
def getbufsize() -> int: ...
|
||||
def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ...
|
||||
def geterrcall() -> _ErrCall | None: ...
|
||||
|
||||
# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings`
|
||||
BIN
lib/python3.11/site-packages/numpy/_core/_umath_tests.cpython-311-darwin.so
Executable file
BIN
lib/python3.11/site-packages/numpy/_core/_umath_tests.cpython-311-darwin.so
Executable file
Binary file not shown.
1775
lib/python3.11/site-packages/numpy/_core/arrayprint.py
Normal file
1775
lib/python3.11/site-packages/numpy/_core/arrayprint.py
Normal file
File diff suppressed because it is too large
Load Diff
238
lib/python3.11/site-packages/numpy/_core/arrayprint.pyi
Normal file
238
lib/python3.11/site-packages/numpy/_core/arrayprint.pyi
Normal file
@ -0,0 +1,238 @@
|
||||
from collections.abc import Callable
|
||||
|
||||
# Using a private class is by no means ideal, but it is simply a consequence
|
||||
# of a `contextlib.context` returning an instance of aforementioned class
|
||||
from contextlib import _GeneratorContextManager
|
||||
from typing import (
|
||||
Any,
|
||||
Final,
|
||||
Literal,
|
||||
SupportsIndex,
|
||||
TypeAlias,
|
||||
TypedDict,
|
||||
overload,
|
||||
type_check_only,
|
||||
)
|
||||
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import numpy as np
|
||||
from numpy._globals import _NoValueType
|
||||
from numpy._typing import NDArray, _CharLike_co, _FloatLike_co
|
||||
|
||||
__all__ = [
|
||||
"array2string",
|
||||
"array_repr",
|
||||
"array_str",
|
||||
"format_float_positional",
|
||||
"format_float_scientific",
|
||||
"get_printoptions",
|
||||
"printoptions",
|
||||
"set_printoptions",
|
||||
]
|
||||
|
||||
###
|
||||
|
||||
_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
|
||||
_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False]
|
||||
_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle]
|
||||
_Sign: TypeAlias = Literal["-", "+", " "]
|
||||
_Trim: TypeAlias = Literal["k", ".", "0", "-"]
|
||||
_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str]
|
||||
|
||||
@type_check_only
|
||||
class _FormatDict(TypedDict, total=False):
|
||||
bool: Callable[[np.bool], str]
|
||||
int: Callable[[np.integer], str]
|
||||
timedelta: Callable[[np.timedelta64], str]
|
||||
datetime: Callable[[np.datetime64], str]
|
||||
float: Callable[[np.floating], str]
|
||||
longfloat: Callable[[np.longdouble], str]
|
||||
complexfloat: Callable[[np.complexfloating], str]
|
||||
longcomplexfloat: Callable[[np.clongdouble], str]
|
||||
void: Callable[[np.void], str]
|
||||
numpystr: Callable[[_CharLike_co], str]
|
||||
object: Callable[[object], str]
|
||||
all: Callable[[object], str]
|
||||
int_kind: Callable[[np.integer], str]
|
||||
float_kind: Callable[[np.floating], str]
|
||||
complex_kind: Callable[[np.complexfloating], str]
|
||||
str_kind: Callable[[_CharLike_co], str]
|
||||
|
||||
@type_check_only
|
||||
class _FormatOptions(TypedDict):
|
||||
precision: int
|
||||
threshold: int
|
||||
edgeitems: int
|
||||
linewidth: int
|
||||
suppress: bool
|
||||
nanstr: str
|
||||
infstr: str
|
||||
formatter: _FormatDict | None
|
||||
sign: _Sign
|
||||
floatmode: _FloatMode
|
||||
legacy: _Legacy
|
||||
|
||||
###
|
||||
|
||||
__docformat__: Final = "restructuredtext" # undocumented
|
||||
|
||||
def set_printoptions(
|
||||
precision: SupportsIndex | None = ...,
|
||||
threshold: int | None = ...,
|
||||
edgeitems: int | None = ...,
|
||||
linewidth: int | None = ...,
|
||||
suppress: bool | None = ...,
|
||||
nanstr: str | None = ...,
|
||||
infstr: str | None = ...,
|
||||
formatter: _FormatDict | None = ...,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
*,
|
||||
legacy: _Legacy | None = None,
|
||||
override_repr: _ReprFunc | None = None,
|
||||
) -> None: ...
|
||||
def get_printoptions() -> _FormatOptions: ...
|
||||
|
||||
# public numpy export
|
||||
@overload # no style
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = None,
|
||||
precision: SupportsIndex | None = None,
|
||||
suppress_small: bool | None = None,
|
||||
separator: str = " ",
|
||||
prefix: str = "",
|
||||
style: _NoValueType = ...,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
*,
|
||||
legacy: _Legacy | None = None,
|
||||
) -> str: ...
|
||||
@overload # style=<given> (positional), legacy="1.13"
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None,
|
||||
precision: SupportsIndex | None,
|
||||
suppress_small: bool | None,
|
||||
separator: str,
|
||||
prefix: str,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
*,
|
||||
legacy: Literal["1.13"],
|
||||
) -> str: ...
|
||||
@overload # style=<given> (keyword), legacy="1.13"
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = None,
|
||||
precision: SupportsIndex | None = None,
|
||||
suppress_small: bool | None = None,
|
||||
separator: str = " ",
|
||||
prefix: str = "",
|
||||
*,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
legacy: Literal["1.13"],
|
||||
) -> str: ...
|
||||
@overload # style=<given> (positional), legacy!="1.13"
|
||||
@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None,
|
||||
precision: SupportsIndex | None,
|
||||
suppress_small: bool | None,
|
||||
separator: str,
|
||||
prefix: str,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
*,
|
||||
legacy: _LegacyNoStyle | None = None,
|
||||
) -> str: ...
|
||||
@overload # style=<given> (keyword), legacy="1.13"
|
||||
@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = None,
|
||||
precision: SupportsIndex | None = None,
|
||||
suppress_small: bool | None = None,
|
||||
separator: str = " ",
|
||||
prefix: str = "",
|
||||
*,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
legacy: _LegacyNoStyle | None = None,
|
||||
) -> str: ...
|
||||
|
||||
def format_float_scientific(
|
||||
x: _FloatLike_co,
|
||||
precision: int | None = ...,
|
||||
unique: bool = ...,
|
||||
trim: _Trim = "k",
|
||||
sign: bool = ...,
|
||||
pad_left: int | None = ...,
|
||||
exp_digits: int | None = ...,
|
||||
min_digits: int | None = ...,
|
||||
) -> str: ...
|
||||
def format_float_positional(
|
||||
x: _FloatLike_co,
|
||||
precision: int | None = ...,
|
||||
unique: bool = ...,
|
||||
fractional: bool = ...,
|
||||
trim: _Trim = "k",
|
||||
sign: bool = ...,
|
||||
pad_left: int | None = ...,
|
||||
pad_right: int | None = ...,
|
||||
min_digits: int | None = ...,
|
||||
) -> str: ...
|
||||
def array_repr(
|
||||
arr: NDArray[Any],
|
||||
max_line_width: int | None = ...,
|
||||
precision: SupportsIndex | None = ...,
|
||||
suppress_small: bool | None = ...,
|
||||
) -> str: ...
|
||||
def array_str(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = ...,
|
||||
precision: SupportsIndex | None = ...,
|
||||
suppress_small: bool | None = ...,
|
||||
) -> str: ...
|
||||
def printoptions(
|
||||
precision: SupportsIndex | None = ...,
|
||||
threshold: int | None = ...,
|
||||
edgeitems: int | None = ...,
|
||||
linewidth: int | None = ...,
|
||||
suppress: bool | None = ...,
|
||||
nanstr: str | None = ...,
|
||||
infstr: str | None = ...,
|
||||
formatter: _FormatDict | None = ...,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
*,
|
||||
legacy: _Legacy | None = None,
|
||||
override_repr: _ReprFunc | None = None,
|
||||
) -> _GeneratorContextManager[_FormatOptions]: ...
|
||||
13
lib/python3.11/site-packages/numpy/_core/cversions.py
Normal file
13
lib/python3.11/site-packages/numpy/_core/cversions.py
Normal file
@ -0,0 +1,13 @@
|
||||
"""Simple script to compute the api hash of the current API.
|
||||
|
||||
The API has is defined by numpy_api_order and ufunc_api_order.
|
||||
|
||||
"""
|
||||
from os.path import dirname
|
||||
|
||||
from code_generators.genapi import fullapi_hash
|
||||
from code_generators.numpy_api import full_api
|
||||
|
||||
if __name__ == '__main__':
|
||||
curdir = dirname(__file__)
|
||||
print(fullapi_hash(full_api))
|
||||
1427
lib/python3.11/site-packages/numpy/_core/defchararray.py
Normal file
1427
lib/python3.11/site-packages/numpy/_core/defchararray.py
Normal file
File diff suppressed because it is too large
Load Diff
1135
lib/python3.11/site-packages/numpy/_core/defchararray.pyi
Normal file
1135
lib/python3.11/site-packages/numpy/_core/defchararray.pyi
Normal file
File diff suppressed because it is too large
Load Diff
1498
lib/python3.11/site-packages/numpy/_core/einsumfunc.py
Normal file
1498
lib/python3.11/site-packages/numpy/_core/einsumfunc.py
Normal file
File diff suppressed because it is too large
Load Diff
184
lib/python3.11/site-packages/numpy/_core/einsumfunc.pyi
Normal file
184
lib/python3.11/site-packages/numpy/_core/einsumfunc.pyi
Normal file
@ -0,0 +1,184 @@
|
||||
from collections.abc import Sequence
|
||||
from typing import Any, Literal, TypeAlias, TypeVar, overload
|
||||
|
||||
import numpy as np
|
||||
from numpy import _OrderKACF, number
|
||||
from numpy._typing import (
|
||||
NDArray,
|
||||
_ArrayLikeBool_co,
|
||||
_ArrayLikeComplex_co,
|
||||
_ArrayLikeFloat_co,
|
||||
_ArrayLikeInt_co,
|
||||
_ArrayLikeObject_co,
|
||||
_ArrayLikeUInt_co,
|
||||
_DTypeLikeBool,
|
||||
_DTypeLikeComplex,
|
||||
_DTypeLikeComplex_co,
|
||||
_DTypeLikeFloat,
|
||||
_DTypeLikeInt,
|
||||
_DTypeLikeObject,
|
||||
_DTypeLikeUInt,
|
||||
)
|
||||
|
||||
__all__ = ["einsum", "einsum_path"]
|
||||
|
||||
_ArrayT = TypeVar(
|
||||
"_ArrayT",
|
||||
bound=NDArray[np.bool | number],
|
||||
)
|
||||
|
||||
_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None
|
||||
_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"]
|
||||
_CastingUnsafe: TypeAlias = Literal["unsafe"]
|
||||
|
||||
# TODO: Properly handle the `casting`-based combinatorics
|
||||
# TODO: We need to evaluate the content `__subscripts` in order
|
||||
# to identify whether or an array or scalar is returned. At a cursory
|
||||
# glance this seems like something that can quite easily be done with
|
||||
# a mypy plugin.
|
||||
# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeBool_co,
|
||||
out: None = ...,
|
||||
dtype: _DTypeLikeBool | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeUInt_co,
|
||||
out: None = ...,
|
||||
dtype: _DTypeLikeUInt | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeInt_co,
|
||||
out: None = ...,
|
||||
dtype: _DTypeLikeInt | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeFloat_co,
|
||||
out: None = ...,
|
||||
dtype: _DTypeLikeFloat | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeComplex_co,
|
||||
out: None = ...,
|
||||
dtype: _DTypeLikeComplex | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: Any,
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeComplex_co | None = ...,
|
||||
out: None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeComplex_co,
|
||||
out: _ArrayT,
|
||||
dtype: _DTypeLikeComplex_co | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> _ArrayT: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: Any,
|
||||
out: _ArrayT,
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeComplex_co | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> _ArrayT: ...
|
||||
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeObject_co,
|
||||
out: None = ...,
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: Any,
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
out: None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeObject_co,
|
||||
out: _ArrayT,
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> _ArrayT: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: Any,
|
||||
out: _ArrayT,
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
) -> _ArrayT: ...
|
||||
|
||||
# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
|
||||
# It is therefore excluded from the signatures below.
|
||||
# NOTE: In practice the list consists of a `str` (first element)
|
||||
# and a variable number of integer tuples.
|
||||
def einsum_path(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeComplex_co | _DTypeLikeObject,
|
||||
optimize: _OptimizeKind = "greedy",
|
||||
einsum_call: Literal[False] = False,
|
||||
) -> tuple[list[Any], str]: ...
|
||||
4269
lib/python3.11/site-packages/numpy/_core/fromnumeric.py
Normal file
4269
lib/python3.11/site-packages/numpy/_core/fromnumeric.py
Normal file
File diff suppressed because it is too large
Load Diff
1750
lib/python3.11/site-packages/numpy/_core/fromnumeric.pyi
Normal file
1750
lib/python3.11/site-packages/numpy/_core/fromnumeric.pyi
Normal file
File diff suppressed because it is too large
Load Diff
545
lib/python3.11/site-packages/numpy/_core/function_base.py
Normal file
545
lib/python3.11/site-packages/numpy/_core/function_base.py
Normal file
@ -0,0 +1,545 @@
|
||||
import functools
|
||||
import operator
|
||||
import types
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
from numpy._core import overrides
|
||||
from numpy._core._multiarray_umath import _array_converter
|
||||
from numpy._core.multiarray import add_docstring
|
||||
|
||||
from . import numeric as _nx
|
||||
from .numeric import asanyarray, nan, ndim, result_type
|
||||
|
||||
__all__ = ['logspace', 'linspace', 'geomspace']
|
||||
|
||||
|
||||
array_function_dispatch = functools.partial(
|
||||
overrides.array_function_dispatch, module='numpy')
|
||||
|
||||
|
||||
def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
|
||||
dtype=None, axis=None, *, device=None):
|
||||
return (start, stop)
|
||||
|
||||
|
||||
@array_function_dispatch(_linspace_dispatcher)
|
||||
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
|
||||
axis=0, *, device=None):
|
||||
"""
|
||||
Return evenly spaced numbers over a specified interval.
|
||||
|
||||
Returns `num` evenly spaced samples, calculated over the
|
||||
interval [`start`, `stop`].
|
||||
|
||||
The endpoint of the interval can optionally be excluded.
|
||||
|
||||
.. versionchanged:: 1.20.0
|
||||
Values are rounded towards ``-inf`` instead of ``0`` when an
|
||||
integer ``dtype`` is specified. The old behavior can
|
||||
still be obtained with ``np.linspace(start, stop, num).astype(int)``
|
||||
|
||||
Parameters
|
||||
----------
|
||||
start : array_like
|
||||
The starting value of the sequence.
|
||||
stop : array_like
|
||||
The end value of the sequence, unless `endpoint` is set to False.
|
||||
In that case, the sequence consists of all but the last of ``num + 1``
|
||||
evenly spaced samples, so that `stop` is excluded. Note that the step
|
||||
size changes when `endpoint` is False.
|
||||
num : int, optional
|
||||
Number of samples to generate. Default is 50. Must be non-negative.
|
||||
endpoint : bool, optional
|
||||
If True, `stop` is the last sample. Otherwise, it is not included.
|
||||
Default is True.
|
||||
retstep : bool, optional
|
||||
If True, return (`samples`, `step`), where `step` is the spacing
|
||||
between samples.
|
||||
dtype : dtype, optional
|
||||
The type of the output array. If `dtype` is not given, the data type
|
||||
is inferred from `start` and `stop`. The inferred dtype will never be
|
||||
an integer; `float` is chosen even if the arguments would produce an
|
||||
array of integers.
|
||||
axis : int, optional
|
||||
The axis in the result to store the samples. Relevant only if start
|
||||
or stop are array-like. By default (0), the samples will be along a
|
||||
new axis inserted at the beginning. Use -1 to get an axis at the end.
|
||||
device : str, optional
|
||||
The device on which to place the created array. Default: None.
|
||||
For Array-API interoperability only, so must be ``"cpu"`` if passed.
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
Returns
|
||||
-------
|
||||
samples : ndarray
|
||||
There are `num` equally spaced samples in the closed interval
|
||||
``[start, stop]`` or the half-open interval ``[start, stop)``
|
||||
(depending on whether `endpoint` is True or False).
|
||||
step : float, optional
|
||||
Only returned if `retstep` is True
|
||||
|
||||
Size of spacing between samples.
|
||||
|
||||
|
||||
See Also
|
||||
--------
|
||||
arange : Similar to `linspace`, but uses a step size (instead of the
|
||||
number of samples).
|
||||
geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
|
||||
scale (a geometric progression).
|
||||
logspace : Similar to `geomspace`, but with the end points specified as
|
||||
logarithms.
|
||||
:ref:`how-to-partition`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> np.linspace(2.0, 3.0, num=5)
|
||||
array([2. , 2.25, 2.5 , 2.75, 3. ])
|
||||
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
|
||||
array([2. , 2.2, 2.4, 2.6, 2.8])
|
||||
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
|
||||
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
|
||||
|
||||
Graphical illustration:
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> N = 8
|
||||
>>> y = np.zeros(N)
|
||||
>>> x1 = np.linspace(0, 10, N, endpoint=True)
|
||||
>>> x2 = np.linspace(0, 10, N, endpoint=False)
|
||||
>>> plt.plot(x1, y, 'o')
|
||||
[<matplotlib.lines.Line2D object at 0x...>]
|
||||
>>> plt.plot(x2, y + 0.5, 'o')
|
||||
[<matplotlib.lines.Line2D object at 0x...>]
|
||||
>>> plt.ylim([-0.5, 1])
|
||||
(-0.5, 1)
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
num = operator.index(num)
|
||||
if num < 0:
|
||||
raise ValueError(
|
||||
f"Number of samples, {num}, must be non-negative."
|
||||
)
|
||||
div = (num - 1) if endpoint else num
|
||||
|
||||
conv = _array_converter(start, stop)
|
||||
start, stop = conv.as_arrays()
|
||||
dt = conv.result_type(ensure_inexact=True)
|
||||
|
||||
if dtype is None:
|
||||
dtype = dt
|
||||
integer_dtype = False
|
||||
else:
|
||||
integer_dtype = _nx.issubdtype(dtype, _nx.integer)
|
||||
|
||||
# Use `dtype=type(dt)` to enforce a floating point evaluation:
|
||||
delta = np.subtract(stop, start, dtype=type(dt))
|
||||
y = _nx.arange(
|
||||
0, num, dtype=dt, device=device
|
||||
).reshape((-1,) + (1,) * ndim(delta))
|
||||
|
||||
# In-place multiplication y *= delta/div is faster, but prevents
|
||||
# the multiplicant from overriding what class is produced, and thus
|
||||
# prevents, e.g. use of Quantities, see gh-7142. Hence, we multiply
|
||||
# in place only for standard scalar types.
|
||||
if div > 0:
|
||||
_mult_inplace = _nx.isscalar(delta)
|
||||
step = delta / div
|
||||
any_step_zero = (
|
||||
step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any())
|
||||
if any_step_zero:
|
||||
# Special handling for denormal numbers, gh-5437
|
||||
y /= div
|
||||
if _mult_inplace:
|
||||
y *= delta
|
||||
else:
|
||||
y = y * delta
|
||||
elif _mult_inplace:
|
||||
y *= step
|
||||
else:
|
||||
y = y * step
|
||||
else:
|
||||
# sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
|
||||
# have an undefined step
|
||||
step = nan
|
||||
# Multiply with delta to allow possible override of output class.
|
||||
y = y * delta
|
||||
|
||||
y += start
|
||||
|
||||
if endpoint and num > 1:
|
||||
y[-1, ...] = stop
|
||||
|
||||
if axis != 0:
|
||||
y = _nx.moveaxis(y, 0, axis)
|
||||
|
||||
if integer_dtype:
|
||||
_nx.floor(y, out=y)
|
||||
|
||||
y = conv.wrap(y.astype(dtype, copy=False))
|
||||
if retstep:
|
||||
return y, step
|
||||
else:
|
||||
return y
|
||||
|
||||
|
||||
def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
|
||||
dtype=None, axis=None):
|
||||
return (start, stop, base)
|
||||
|
||||
|
||||
@array_function_dispatch(_logspace_dispatcher)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
             axis=0):
    """
    Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start``
    (`base` to the power of `start`) and ends with ``base ** stop``
    (see `endpoint` below).

    .. versionchanged:: 1.25.0
        Non-scalar ``base`` is now supported

    Parameters
    ----------
    start : array_like
        ``base ** start`` is the starting value of the sequence.
    stop : array_like
        ``base ** stop`` is the final value of the sequence, unless `endpoint`
        is False.  In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : array_like, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, the data type
        is inferred from `start` and `stop`. The inferred type will never be
        an integer; `float` is chosen even if the arguments would produce an
        array of integers.
    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start,
        stop, or base are array-like.  By default (0), the samples will be
        along a new axis inserted at the beginning. Use -1 to get an axis at
        the end.

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of the
             number of samples. Note that, when used with a float endpoint, the
             endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly distributed
               in linear space, instead of log space.
    geomspace : Similar to logspace, but with endpoints specified directly.
    :ref:`how-to-partition`

    Notes
    -----
    If base is a scalar, logspace is equivalent to the code

    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ... # doctest: +SKIP
    >>> power(base, y).astype(dtype)
    ... # doctest: +SKIP

    Examples
    --------
    >>> import numpy as np
    >>> np.logspace(2.0, 3.0, num=4)
    array([ 100.        ,  215.443469  ,  464.15888336, 1000.        ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([100.        , 177.827941  , 316.22776602, 562.34132519])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([4.        , 5.0396842 , 6.34960421, 8.        ])
    >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)
    array([[ 4.        ,  5.0396842 ,  6.34960421,  8.        ],
           [ 9.        , 12.98024613, 18.72075441, 27.        ]])

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
    >>> y = np.zeros(N)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    """
    if not isinstance(base, (float, int)) and np.ndim(base):
        # If base is non-scalar, broadcast it with the others, since it
        # may influence how axis is interpreted.
        ndmax = np.broadcast(start, stop, base).ndim
        start, stop, base = (
            np.array(a, copy=None, subok=True, ndmin=ndmax)
            for a in (start, stop, base)
        )
        # Give base the same new samples axis that linspace will insert,
        # so the power() below broadcasts elementwise against y.
        base = np.expand_dims(base, axis=axis)
    y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
    if dtype is None:
        # Let power() pick the result dtype (preserves subclasses too).
        return _nx.power(base, y)
    return _nx.power(base, y).astype(dtype, copy=False)
|
||||
|
||||
|
||||
def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
                          axis=None):
    # __array_function__ dispatcher: only start and stop can carry an
    # override.
    return (start, stop)
|
||||
|
||||
|
||||
@array_function_dispatch(_geomspace_dispatcher)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
    """
    Return numbers spaced evenly on a log scale (a geometric progression).

    This is similar to `logspace`, but with endpoints specified directly.
    Each output sample is a constant multiple of the previous.

    Parameters
    ----------
    start : array_like
        The starting value of the sequence.
    stop : array_like
        The final value of the sequence, unless `endpoint` is False.
        In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, the data type
        is inferred from `start` and `stop`. The inferred dtype will never be
        an integer; `float` is chosen even if the arguments would produce an
        array of integers.
    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start
        or stop are array-like.  By default (0), the samples will be along a
        new axis inserted at the beginning. Use -1 to get an axis at the end.

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    logspace : Similar to geomspace, but with endpoints specified using log
               and base.
    linspace : Similar to geomspace, but with arithmetic instead of geometric
               progression.
    arange : Similar to linspace, with the step size specified instead of the
             number of samples.
    :ref:`how-to-partition`

    Notes
    -----
    If the inputs or dtype are complex, the output will follow a logarithmic
    spiral in the complex plane.  (There are an infinite number of spirals
    passing through two points; the output will follow the shortest such
    path.)

    Examples
    --------
    >>> import numpy as np
    >>> np.geomspace(1, 1000, num=4)
    array([   1.,   10.,  100., 1000.])
    >>> np.geomspace(1, 1000, num=3, endpoint=False)
    array([  1.,  10., 100.])
    >>> np.geomspace(1, 1000, num=4, endpoint=False)
    array([  1.        ,   5.62341325,  31.6227766 , 177.827941  ])
    >>> np.geomspace(1, 256, num=9)
    array([  1.,   2.,   4.,   8.,  16.,  32.,  64., 128., 256.])

    Note that the above may not produce exact integers:

    >>> np.geomspace(1, 256, num=9, dtype=int)
    array([  1,   2,   4,   7,  16,  32,  63, 127, 256])
    >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
    array([  1,   2,   4,   8,  16,  32,  64, 128, 256])

    Negative, decreasing, and complex inputs are allowed:

    >>> np.geomspace(1000, 1, num=4)
    array([1000.,  100.,   10.,    1.])
    >>> np.geomspace(-1000, -1, num=4)
    array([-1000.,  -100.,   -10.,    -1.])
    >>> np.geomspace(1j, 1000j, num=4)  # Straight line
    array([0.   +1.j, 0.  +10.j, 0. +100.j, 0.+1000.j])
    >>> np.geomspace(-1+0j, 1+0j, num=5)  # Circle
    array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
            6.12323400e-17+1.00000000e+00j,  7.07106781e-01+7.07106781e-01j,
            1.00000000e+00+0.00000000e+00j])

    Graphical illustration of `endpoint` parameter:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> y = np.zeros(N)
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.axis([0.5, 2000, 0, 3])
    [0.5, 2000, 0, 3]
    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
    >>> plt.show()

    """
    start = asanyarray(start)
    stop = asanyarray(stop)
    # log of zero is undefined, so a geometric progression cannot pass
    # through the origin.
    if _nx.any(start == 0) or _nx.any(stop == 0):
        raise ValueError('Geometric sequence cannot include zero')

    # float(num) forces an inexact result type even for integer endpoints.
    dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
    if dtype is None:
        dtype = dt
    else:
        # complex to dtype('complex128'), for instance
        dtype = _nx.dtype(dtype)

    # Promote both arguments to the same dtype in case, for instance, one is
    # complex and another is negative and log would produce NaN otherwise.
    # Copy since we may change things in-place further down.
    start = start.astype(dt, copy=True)
    stop = stop.astype(dt, copy=True)

    # Allow negative real values and ensure a consistent result for complex
    # (including avoiding negligible real or imaginary parts in output) by
    # rotating start to positive real, calculating, then undoing rotation.
    out_sign = _nx.sign(start)
    start /= out_sign
    stop = stop / out_sign

    log_start = _nx.log10(start)
    log_stop = _nx.log10(stop)
    result = logspace(log_start, log_stop, num=num,
                      endpoint=endpoint, base=10.0, dtype=dt)

    # Make sure the endpoints match the start and stop arguments. This is
    # necessary because np.exp(np.log(x)) is not necessarily equal to x.
    if num > 0:
        result[0] = start
        if num > 1 and endpoint:
            result[-1] = stop

    # Undo the rotation applied above.
    result *= out_sign

    if axis != 0:
        result = _nx.moveaxis(result, 0, axis)

    return result.astype(dtype, copy=False)
|
||||
|
||||
|
||||
def _needs_add_docstring(obj):
|
||||
"""
|
||||
Returns true if the only way to set the docstring of `obj` from python is
|
||||
via add_docstring.
|
||||
|
||||
This function errs on the side of being overly conservative.
|
||||
"""
|
||||
Py_TPFLAGS_HEAPTYPE = 1 << 9
|
||||
|
||||
if isinstance(obj, (types.FunctionType, types.MethodType, property)):
|
||||
return False
|
||||
|
||||
if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def _add_docstring(obj, doc, warn_on_python):
|
||||
if warn_on_python and not _needs_add_docstring(obj):
|
||||
warnings.warn(
|
||||
f"add_newdoc was used on a pure-python object {obj}. "
|
||||
"Prefer to attach it directly to the source.",
|
||||
UserWarning,
|
||||
stacklevel=3)
|
||||
try:
|
||||
add_docstring(obj, doc)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def add_newdoc(place, obj, doc, warn_on_python=True):
    """
    Add documentation to an existing object, typically one defined in C

    Allows editing docstrings without recompiling; intended primarily
    for internal use within numpy itself.

    Parameters
    ----------
    place : str
        The absolute name of the module to import from
    obj : str or None
        The name of the object to add documentation to, typically a class
        or function name.
    doc : {str, Tuple[str, str], List[Tuple[str, str]]}
        If a string, the documentation to apply to `obj`.

        If a tuple ``(method, docstring)``, the first element names an
        attribute of `obj` and the second is the docstring to apply.

        If a list, each element must be such a ``(method, docstring)``
        tuple.
    warn_on_python : bool
        If True, the default, emit `UserWarning` if this is used to attach
        documentation to a pure-python object.

    Notes
    -----
    Never raises if the docstring cannot be written, but does raise if the
    object being documented does not exist.  Read-only docstrings (new-style
    classes, built-in functions) cannot be modified, and no error is
    reported for them — callers must verify the change themselves.

    Since this grabs the ``char *`` from a c-level str and stores it in the
    ``tp_doc`` slot of ``type(obj)``, it violates C-API best practices by
    modifying a `PyTypeObject` after `PyType_Ready` and by leaking a
    `Py_INCREF`'d str.  Avoid it when possible.
    """
    target = getattr(__import__(place, globals(), {}, [obj]), obj)

    if isinstance(doc, str):
        if "${ARRAY_FUNCTION_LIKE}" in doc:
            doc = overrides.get_array_function_like_doc(target, doc)
        _add_docstring(target, doc.strip(), warn_on_python)
        return

    # A single (attribute, docstring) pair is just the one-element case of
    # the list form; normalize and handle both uniformly.
    if isinstance(doc, tuple):
        doc = [doc]
    if isinstance(doc, list):
        for attr, docstring in doc:
            _add_docstring(
                getattr(target, attr), docstring.strip(), warn_on_python
            )
|
||||
278
lib/python3.11/site-packages/numpy/_core/function_base.pyi
Normal file
278
lib/python3.11/site-packages/numpy/_core/function_base.pyi
Normal file
@ -0,0 +1,278 @@
|
||||
from typing import Literal as L
|
||||
from typing import SupportsIndex, TypeAlias, TypeVar, overload
|
||||
|
||||
from _typeshed import Incomplete
|
||||
|
||||
import numpy as np
|
||||
from numpy._typing import (
|
||||
DTypeLike,
|
||||
NDArray,
|
||||
_ArrayLikeComplex_co,
|
||||
_ArrayLikeFloat_co,
|
||||
_DTypeLike,
|
||||
)
|
||||
from numpy._typing._array_like import _DualArrayLike
|
||||
|
||||
__all__ = ["geomspace", "linspace", "logspace"]
|
||||
|
||||
_ScalarT = TypeVar("_ScalarT", bound=np.generic)
|
||||
|
||||
_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float]
|
||||
|
||||
# linspace overloads: float64-like inputs -> float64; float-likes ->
# floating; complex-likes -> complexfloating; an explicit dtype pins the
# scalar type; retstep=True variants return a (samples, step) tuple.
@overload
def linspace(
    start: _ToArrayFloat64,
    stop: _ToArrayFloat64,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    retstep: L[False] = False,
    dtype: None = None,
    axis: SupportsIndex = 0,
    *,
    device: L["cpu"] | None = None,
) -> NDArray[np.float64]: ...
@overload
def linspace(
    start: _ArrayLikeFloat_co,
    stop: _ArrayLikeFloat_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    retstep: L[False] = False,
    dtype: None = None,
    axis: SupportsIndex = 0,
    *,
    device: L["cpu"] | None = None,
) -> NDArray[np.floating]: ...
@overload
def linspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    retstep: L[False] = False,
    dtype: None = None,
    axis: SupportsIndex = 0,
    *,
    device: L["cpu"] | None = None,
) -> NDArray[np.complexfloating]: ...
@overload
def linspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex,
    endpoint: bool,
    retstep: L[False],
    dtype: _DTypeLike[_ScalarT],
    axis: SupportsIndex = 0,
    *,
    device: L["cpu"] | None = None,
) -> NDArray[_ScalarT]: ...
@overload
def linspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    retstep: L[False] = False,
    *,
    dtype: _DTypeLike[_ScalarT],
    axis: SupportsIndex = 0,
    device: L["cpu"] | None = None,
) -> NDArray[_ScalarT]: ...
@overload
def linspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    retstep: L[False] = False,
    dtype: DTypeLike | None = None,
    axis: SupportsIndex = 0,
    *,
    device: L["cpu"] | None = None,
) -> NDArray[Incomplete]: ...
@overload
def linspace(
    start: _ToArrayFloat64,
    stop: _ToArrayFloat64,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    *,
    retstep: L[True],
    dtype: None = None,
    axis: SupportsIndex = 0,
    device: L["cpu"] | None = None,
) -> tuple[NDArray[np.float64], np.float64]: ...
@overload
def linspace(
    start: _ArrayLikeFloat_co,
    stop: _ArrayLikeFloat_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    *,
    retstep: L[True],
    dtype: None = None,
    axis: SupportsIndex = 0,
    device: L["cpu"] | None = None,
) -> tuple[NDArray[np.floating], np.floating]: ...
@overload
def linspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    *,
    retstep: L[True],
    dtype: None = None,
    axis: SupportsIndex = 0,
    device: L["cpu"] | None = None,
) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ...
@overload
def linspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    *,
    retstep: L[True],
    dtype: _DTypeLike[_ScalarT],
    axis: SupportsIndex = 0,
    device: L["cpu"] | None = None,
) -> tuple[NDArray[_ScalarT], _ScalarT]: ...
@overload
def linspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    *,
    retstep: L[True],
    dtype: DTypeLike | None = None,
    axis: SupportsIndex = 0,
    device: L["cpu"] | None = None,
) -> tuple[NDArray[Incomplete], Incomplete]: ...
|
||||
|
||||
# logspace overloads: mirror the linspace dtype logic (no retstep or
# device parameters; base participates in dtype inference).
@overload
def logspace(
    start: _ToArrayFloat64,
    stop: _ToArrayFloat64,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    base: _ToArrayFloat64 = 10.0,
    dtype: None = None,
    axis: SupportsIndex = 0,
) -> NDArray[np.float64]: ...
@overload
def logspace(
    start: _ArrayLikeFloat_co,
    stop: _ArrayLikeFloat_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    base: _ArrayLikeFloat_co = 10.0,
    dtype: None = None,
    axis: SupportsIndex = 0,
) -> NDArray[np.floating]: ...
@overload
def logspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    base: _ArrayLikeComplex_co = 10.0,
    dtype: None = None,
    axis: SupportsIndex = 0,
) -> NDArray[np.complexfloating]: ...
@overload
def logspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex,
    endpoint: bool,
    base: _ArrayLikeComplex_co,
    dtype: _DTypeLike[_ScalarT],
    axis: SupportsIndex = 0,
) -> NDArray[_ScalarT]: ...
@overload
def logspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    base: _ArrayLikeComplex_co = 10.0,
    *,
    dtype: _DTypeLike[_ScalarT],
    axis: SupportsIndex = 0,
) -> NDArray[_ScalarT]: ...
@overload
def logspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    base: _ArrayLikeComplex_co = 10.0,
    dtype: DTypeLike | None = None,
    axis: SupportsIndex = 0,
) -> NDArray[Incomplete]: ...
|
||||
|
||||
# geomspace overloads: same dtype-inference ladder as linspace, without
# retstep/base/device parameters.
@overload
def geomspace(
    start: _ToArrayFloat64,
    stop: _ToArrayFloat64,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    dtype: None = None,
    axis: SupportsIndex = 0,
) -> NDArray[np.float64]: ...
@overload
def geomspace(
    start: _ArrayLikeFloat_co,
    stop: _ArrayLikeFloat_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    dtype: None = None,
    axis: SupportsIndex = 0,
) -> NDArray[np.floating]: ...
@overload
def geomspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    dtype: None = None,
    axis: SupportsIndex = 0,
) -> NDArray[np.complexfloating]: ...
@overload
def geomspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex,
    endpoint: bool,
    dtype: _DTypeLike[_ScalarT],
    axis: SupportsIndex = 0,
) -> NDArray[_ScalarT]: ...
@overload
def geomspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    *,
    dtype: _DTypeLike[_ScalarT],
    axis: SupportsIndex = 0,
) -> NDArray[_ScalarT]: ...
@overload
def geomspace(
    start: _ArrayLikeComplex_co,
    stop: _ArrayLikeComplex_co,
    num: SupportsIndex = 50,
    endpoint: bool = True,
    dtype: DTypeLike | None = None,
    axis: SupportsIndex = 0,
) -> NDArray[Incomplete]: ...
|
||||
|
||||
# Stub for the runtime docstring-patching helper in function_base.py.
def add_newdoc(
    place: str,
    obj: str,
    doc: str | tuple[str, str] | list[tuple[str, str]],
    warn_on_python: bool = True,
) -> None: ...
|
||||
748
lib/python3.11/site-packages/numpy/_core/getlimits.py
Normal file
748
lib/python3.11/site-packages/numpy/_core/getlimits.py
Normal file
@ -0,0 +1,748 @@
|
||||
"""Machine limits for Float32 and Float64 and (long double) if available...
|
||||
|
||||
"""
|
||||
__all__ = ['finfo', 'iinfo']
|
||||
|
||||
import types
|
||||
import warnings
|
||||
|
||||
from numpy._utils import set_module
|
||||
|
||||
from . import numeric
|
||||
from . import numerictypes as ntypes
|
||||
from ._machar import MachAr
|
||||
from .numeric import array, inf, nan
|
||||
from .umath import exp2, isnan, log10, nextafter
|
||||
|
||||
|
||||
def _fr0(a):
|
||||
"""fix rank-0 --> rank-1"""
|
||||
if a.ndim == 0:
|
||||
a = a.copy()
|
||||
a.shape = (1,)
|
||||
return a
|
||||
|
||||
|
||||
def _fr1(a):
|
||||
"""fix rank > 0 --> rank-0"""
|
||||
if a.size == 1:
|
||||
a = a.copy()
|
||||
a.shape = ()
|
||||
return a
|
||||
|
||||
|
||||
class MachArLike:
    """ Object to simulate MachAr instance """
    def __init__(self, ftype, *, eps, epsneg, huge, tiny,
                 ibeta, smallest_subnormal=None, **kwargs):
        # Per-type formatting/itype parameters (see _MACHAR_PARAMS).
        self.params = _MACHAR_PARAMS[ftype]
        self.ftype = ftype
        self.title = self.params['title']
        # Parameter types same as for discovered MachAr object.
        # NOTE: falsy check — a missing (None) smallest_subnormal is
        # computed as the first float above zero for this type.
        if not smallest_subnormal:
            self._smallest_subnormal = nextafter(
                self.ftype(0), self.ftype(1), dtype=self.ftype)
        else:
            self._smallest_subnormal = smallest_subnormal
        # Aliases mirror the attribute names of a discovered MachAr:
        # epsilon/eps, xmax/huge, smallest_normal/tiny.
        self.epsilon = self.eps = self._float_to_float(eps)
        self.epsneg = self._float_to_float(epsneg)
        self.xmax = self.huge = self._float_to_float(huge)
        self.xmin = self._float_to_float(tiny)
        self.smallest_normal = self.tiny = self._float_to_float(tiny)
        self.ibeta = self.params['itype'](ibeta)
        # Remaining MachAr fields (machep, negep, it, iexp, ...) are
        # passed straight through as attributes.
        self.__dict__.update(kwargs)
        self.precision = int(-log10(self.eps))
        self.resolution = self._float_to_float(
            self._float_conv(10) ** (-self.precision))
        # Pre-rendered string forms used by finfo's repr machinery.
        self._str_eps = self._float_to_str(self.eps)
        self._str_epsneg = self._float_to_str(self.epsneg)
        self._str_xmin = self._float_to_str(self.xmin)
        self._str_xmax = self._float_to_str(self.xmax)
        self._str_resolution = self._float_to_str(self.resolution)
        self._str_smallest_normal = self._float_to_str(self.xmin)

    @property
    def smallest_subnormal(self):
        """Return the value for the smallest subnormal.

        Returns
        -------
        smallest_subnormal : float
            value for the smallest subnormal.

        Warns
        -----
        UserWarning
            If the calculated value for the smallest subnormal is zero.
        """
        # Check that the calculated value is not zero, in case it raises a
        # warning.
        value = self._smallest_subnormal
        if self.ftype(0) == value:
            warnings.warn(
                f'The value of the smallest subnormal for {self.ftype} type is zero.',
                UserWarning, stacklevel=2)

        return self._float_to_float(value)

    @property
    def _str_smallest_subnormal(self):
        """Return the string representation of the smallest subnormal."""
        return self._float_to_str(self.smallest_subnormal)

    def _float_to_float(self, value):
        """Converts float to a rank-0 array of self.ftype.

        Parameters
        ----------
        value : float
            value to be converted.
        """
        return _fr1(self._float_conv(value))

    def _float_conv(self, value):
        """Converts float to a length-1 array of self.ftype.

        Parameters
        ----------
        value : float
            value to be converted.
        """
        return array([value], self.ftype)

    def _float_to_str(self, value):
        """Converts float to str using this type's format string.

        Parameters
        ----------
        value : float
            value to be converted.
        """
        return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
|
||||
|
||||
|
||||
# Map each complex type to the real type of its components; finfo uses
# this to report component precision for complex dtypes.
_convert_to_float = {
    ntypes.csingle: ntypes.single,
    ntypes.complex128: ntypes.float64,
    ntypes.clongdouble: ntypes.longdouble
    }

# Parameters for creating MachAr / MachAr-like objects
_title_fmt = 'numpy {} precision floating point number'
# Per-type integer dtype ('itype'), printf format ('fmt'), and display
# title used by MachArLike.
_MACHAR_PARAMS = {
    ntypes.double: {
        'itype': ntypes.int64,
        'fmt': '%24.16e',
        'title': _title_fmt.format('double')},
    ntypes.single: {
        'itype': ntypes.int32,
        'fmt': '%15.7e',
        'title': _title_fmt.format('single')},
    ntypes.longdouble: {
        'itype': ntypes.longlong,
        'fmt': '%s',
        'title': _title_fmt.format('long double')},
    ntypes.half: {
        'itype': ntypes.int16,
        'fmt': '%12.5e',
        'title': _title_fmt.format('half')}}
|
||||
|
||||
# Key to identify the floating point type. Key is result of
|
||||
#
|
||||
# ftype = np.longdouble # or float64, float32, etc.
|
||||
# v = (ftype(-1.0) / ftype(10.0))
|
||||
# v.view(v.dtype.newbyteorder('<')).tobytes()
|
||||
#
|
||||
# Uses division to work around deficiencies in strtold on some platforms.
|
||||
# See:
|
||||
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
|
||||
|
||||
# Registry of MachAr-like objects keyed by the byte pattern of
# ftype(-1.0)/ftype(10.0) (see the comment block above for why).
_KNOWN_TYPES = {}
def _register_type(machar, bytepat):
    # Associate a float format's identifying byte pattern with its
    # precomputed MachAr-like parameter object.
    _KNOWN_TYPES[bytepat] = machar


# MachAr-like objects indexed by bit-width (16/32/64/80/128) or 'dd'.
_float_ma = {}
|
||||
|
||||
|
||||
def _register_known_types():
    """Populate _KNOWN_TYPES and _float_ma with precomputed parameters
    for the standard IEEE formats (plus x87 float80 and double-double),
    avoiding runtime MachAr discovery for them."""
    # Known parameters for float16
    # See docstring of MachAr class for description of parameters.
    f16 = ntypes.float16
    float16_ma = MachArLike(f16,
                            machep=-10,
                            negep=-11,
                            minexp=-14,
                            maxexp=16,
                            it=10,
                            iexp=5,
                            ibeta=2,
                            irnd=5,
                            ngrd=0,
                            eps=exp2(f16(-10)),
                            epsneg=exp2(f16(-11)),
                            huge=f16(65504),
                            tiny=f16(2 ** -14))
    _register_type(float16_ma, b'f\xae')
    _float_ma[16] = float16_ma

    # Known parameters for float32
    f32 = ntypes.float32
    float32_ma = MachArLike(f32,
                            machep=-23,
                            negep=-24,
                            minexp=-126,
                            maxexp=128,
                            it=23,
                            iexp=8,
                            ibeta=2,
                            irnd=5,
                            ngrd=0,
                            eps=exp2(f32(-23)),
                            epsneg=exp2(f32(-24)),
                            huge=f32((1 - 2 ** -24) * 2**128),
                            tiny=exp2(f32(-126)))
    _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
    _float_ma[32] = float32_ma

    # Known parameters for float64
    f64 = ntypes.float64
    epsneg_f64 = 2.0 ** -53.0
    tiny_f64 = 2.0 ** -1022.0
    float64_ma = MachArLike(f64,
                            machep=-52,
                            negep=-53,
                            minexp=-1022,
                            maxexp=1024,
                            it=52,
                            iexp=11,
                            ibeta=2,
                            irnd=5,
                            ngrd=0,
                            eps=2.0 ** -52.0,
                            epsneg=epsneg_f64,
                            # max finite value, expressed without overflow
                            huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
                            tiny=tiny_f64)
    _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
    _float_ma[64] = float64_ma

    # Known parameters for IEEE 754 128-bit binary float
    ld = ntypes.longdouble
    epsneg_f128 = exp2(ld(-113))
    tiny_f128 = exp2(ld(-16382))
    # Ignore runtime error when this is not f128
    with numeric.errstate(all='ignore'):
        huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
    float128_ma = MachArLike(ld,
                             machep=-112,
                             negep=-113,
                             minexp=-16382,
                             maxexp=16384,
                             it=112,
                             iexp=15,
                             ibeta=2,
                             irnd=5,
                             ngrd=0,
                             eps=exp2(ld(-112)),
                             epsneg=epsneg_f128,
                             huge=huge_f128,
                             tiny=tiny_f128)
    # IEEE 754 128-bit binary float
    _register_type(float128_ma,
        b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
    _float_ma[128] = float128_ma

    # Known parameters for float80 (Intel 80-bit extended precision)
    epsneg_f80 = exp2(ld(-64))
    tiny_f80 = exp2(ld(-16382))
    # Ignore runtime error when this is not f80
    with numeric.errstate(all='ignore'):
        huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
    float80_ma = MachArLike(ld,
                            machep=-63,
                            negep=-64,
                            minexp=-16382,
                            maxexp=16384,
                            it=63,
                            iexp=15,
                            ibeta=2,
                            irnd=5,
                            ngrd=0,
                            eps=exp2(ld(-63)),
                            epsneg=epsneg_f80,
                            huge=huge_f80,
                            tiny=tiny_f80)
    # float80, first 10 bytes containing actual storage
    _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
    _float_ma[80] = float80_ma

    # Guessed / known parameters for double double; see:
    # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
    # These numbers have the same exponent range as float64, but extended
    # number of digits in the significand.
    huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
    # As the smallest_normal in double double is so hard to calculate we set
    # it to NaN.
    smallest_normal_dd = nan
    # Leave the same value for the smallest subnormal as double
    smallest_subnormal_dd = ld(nextafter(0., 1.))
    float_dd_ma = MachArLike(ld,
                             machep=-105,
                             negep=-106,
                             minexp=-1022,
                             maxexp=1024,
                             it=105,
                             iexp=11,
                             ibeta=2,
                             irnd=5,
                             ngrd=0,
                             eps=exp2(ld(-105)),
                             epsneg=exp2(ld(-106)),
                             huge=huge_dd,
                             tiny=smallest_normal_dd,
                             smallest_subnormal=smallest_subnormal_dd)
    # double double; low, high order (e.g. PPC 64)
    _register_type(float_dd_ma,
        b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
    # double double; high, low order (e.g. PPC 64 le)
    _register_type(float_dd_ma,
        b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
    _float_ma['dd'] = float_dd_ma
|
||||
|
||||
|
||||
def _get_machar(ftype):
    """ Get MachAr instance or MachAr-like instance

    Get parameters for floating point type, by first trying signatures of
    various known floating point types, then, if none match, attempting to
    identify parameters by analysis.

    Parameters
    ----------
    ftype : class
        Numpy floating point type class (e.g. ``np.float64``)

    Returns
    -------
    ma_like : instance of :class:`MachAr` or :class:`MachArLike`
        Object giving floating point parameters for `ftype`.

    Raises
    ------
    ValueError
        If `ftype` is not registered in ``_MACHAR_PARAMS``.

    Warns
    -----
    UserWarning
        If the binary signature of the float type is not in the dictionary of
        known float types.
    """
    params = _MACHAR_PARAMS.get(ftype)
    if params is None:
        raise ValueError(repr(ftype))
    # Detect known / suspected types
    # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold
    # may be deficient
    key = (ftype(-1.0) / ftype(10.))
    # Canonicalize to little-endian bytes so the signature lookup is
    # independent of the host byte order.
    key = key.view(key.dtype.newbyteorder("<")).tobytes()
    ma_like = None
    if ftype == ntypes.longdouble:
        # Could be 80 bit == 10 byte extended precision, where last bytes can
        # be random garbage.
        # Comparing first 10 bytes to pattern first to avoid branching on the
        # random garbage.
        ma_like = _KNOWN_TYPES.get(key[:10])
    if ma_like is None:
        # see if the full key is known.
        ma_like = _KNOWN_TYPES.get(key)
    if ma_like is None and len(key) == 16:
        # machine limits could be f80 masquerading as np.float128,
        # find all keys with length 16 and make new dict, but make the keys
        # only 10 bytes long, the last bytes can be random garbage
        _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
        ma_like = _kt.get(key[:10])
    if ma_like is not None:
        return ma_like
    # Fall back to parameter discovery
    # (bug fix: "This warnings indicates" -> "This warning indicates")
    warnings.warn(
        f'Signature {key} for {ftype} does not match any known type: '
        'falling back to type probe function.\n'
        'This warning indicates broken support for the dtype!',
        UserWarning, stacklevel=2)
    return _discovered_machar(ftype)
|
||||
|
||||
|
||||
def _discovered_machar(ftype):
    """ Create MachAr instance with found information on float types

    TODO: MachAr should be retired completely ideally.  It is currently
    only ever used on systems with a broken longdouble (valgrind, WSL).
    """
    params = _MACHAR_PARAMS[ftype]

    # Named conversion helpers (instead of inline lambdas); MachAr probes the
    # float type by round-tripping values through these callables.
    def float_conv(value):
        # Wrap a scalar into a 1-element array of the probed type.
        return array([value], ftype)

    def int_conv(value):
        # Convert a float array to the matching integer scalar.
        return _fr0(value.astype(params['itype']))[0]

    def float_to_float(value):
        # Collapse back down to a 0-d array of the probed type.
        return array(_fr0(value)[0], ftype)

    def float_to_str(value):
        # Render via the format string registered for this type.
        return params['fmt'] % array(_fr0(value)[0], ftype)

    return MachAr(float_conv, int_conv, float_to_float,
                  float_to_str, params['title'])
|
||||
|
||||
|
||||
@set_module('numpy')
class finfo:
    """
    finfo(dtype)

    Machine limits for floating point types.

    Attributes
    ----------
    bits : int
        The number of bits occupied by the type.
    dtype : dtype
        Returns the dtype for which `finfo` returns information. For complex
        input, the returned dtype is the associated ``float*`` dtype for its
        real and complex components.
    eps : float
        The difference between 1.0 and the next smallest representable float
        larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
        standard, ``eps = 2**-52``, approximately 2.22e-16.
    epsneg : float
        The difference between 1.0 and the next smallest representable float
        less than 1.0. For example, for 64-bit binary floats in the IEEE-754
        standard, ``epsneg = 2**-53``, approximately 1.11e-16.
    iexp : int
        The number of bits in the exponent portion of the floating point
        representation.
    machep : int
        The exponent that yields `eps`.
    max : floating point number of the appropriate type
        The largest representable number.
    maxexp : int
        The smallest positive power of the base (2) that causes overflow.
    min : floating point number of the appropriate type
        The smallest representable number, typically ``-max``.
    minexp : int
        The most negative power of the base (2) consistent with there
        being no leading 0's in the mantissa.
    negep : int
        The exponent that yields `epsneg`.
    nexp : int
        The number of bits in the exponent including its sign and bias.
    nmant : int
        The number of bits in the mantissa.
    precision : int
        The approximate number of decimal digits to which this kind of
        float is precise.
    resolution : floating point number of the appropriate type
        The approximate decimal resolution of this type, i.e.,
        ``10**-precision``.
    tiny : float
        An alias for `smallest_normal`, kept for backwards compatibility.
    smallest_normal : float
        The smallest positive floating point number with 1 as leading bit in
        the mantissa following IEEE-754 (see Notes).
    smallest_subnormal : float
        The smallest positive floating point number with 0 as leading bit in
        the mantissa following IEEE-754.

    Parameters
    ----------
    dtype : float, dtype, or instance
        Kind of floating point or complex floating point
        data-type about which to get information.

    See Also
    --------
    iinfo : The equivalent for integer data types.
    spacing : The distance between a value and the nearest adjacent number
    nextafter : The next floating point value after x1 towards x2

    Notes
    -----
    For developers of NumPy: do not instantiate this at the module level.
    The initial calculation of these parameters is expensive and negatively
    impacts import times. These objects are cached, so calling ``finfo()``
    repeatedly inside your functions is not a problem.

    Note that ``smallest_normal`` is not actually the smallest positive
    representable value in a NumPy floating point type. As in the IEEE-754
    standard [1]_, NumPy floating point types make use of subnormal numbers to
    fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
    may have significantly reduced precision [2]_.

    This class can also be used for complex data types. If used, the output
    is the same as for the corresponding real float type
    (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single));
    the reported values describe each of the real and imaginary components.

    References
    ----------
    .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
           pp.1-70, 2008, https://doi.org/10.1109/IEEESTD.2008.4610935
    .. [2] Wikipedia, "Denormal Numbers",
           https://en.wikipedia.org/wiki/Denormal_number

    Examples
    --------
    >>> import numpy as np
    >>> np.finfo(np.float64).dtype
    dtype('float64')
    >>> np.finfo(np.complex64).dtype
    dtype('float32')

    """

    # Instance cache; every dtype spelling that resolves to the same float
    # type is inserted as a key so later lookups hit on the first try.
    _finfo_cache = {}

    # Allow subscription like ``finfo[np.float64]`` (PEP 585 generic alias).
    __class_getitem__ = classmethod(types.GenericAlias)

    def __new__(cls, dtype):
        # Fast path: exact cache hit on whatever object the caller passed.
        # Unhashable arguments fall through to normalization below.
        try:
            obj = cls._finfo_cache.get(dtype)  # most common path
            if obj is not None:
                return obj
        except TypeError:
            pass

        if dtype is None:
            # Deprecated in NumPy 1.25, 2023-01-16
            warnings.warn(
                "finfo() dtype cannot be None. This behavior will "
                "raise an error in the future. (Deprecated in NumPy 1.25)",
                DeprecationWarning,
                stacklevel=2
            )

        try:
            dtype = numeric.dtype(dtype)
        except TypeError:
            # In case a float instance was given
            dtype = numeric.dtype(type(dtype))

        # Second lookup after dtype normalization.
        obj = cls._finfo_cache.get(dtype)
        if obj is not None:
            return obj
        # Track every spelling seen so all of them get cached at the end.
        dtypes = [dtype]
        newdtype = ntypes.obj2sctype(dtype)
        if newdtype is not dtype:
            dtypes.append(newdtype)
            dtype = newdtype
        if not issubclass(dtype, numeric.inexact):
            raise ValueError(f"data type {dtype!r} not inexact")
        obj = cls._finfo_cache.get(dtype)
        if obj is not None:
            return obj
        if not issubclass(dtype, numeric.floating):
            newdtype = _convert_to_float[dtype]
            if newdtype is not dtype:
                # dtype changed, for example from complex128 to float64
                dtypes.append(newdtype)
                dtype = newdtype

        obj = cls._finfo_cache.get(dtype, None)
        if obj is not None:
            # the original dtype was not in the cache, but the new
            # dtype is in the cache. we add the original dtypes to
            # the cache and return the result
            for dt in dtypes:
                cls._finfo_cache[dt] = obj
            return obj
        # Cache miss everywhere: do the expensive parameter discovery once.
        obj = object.__new__(cls)._init(dtype)
        for dt in dtypes:
            cls._finfo_cache[dt] = obj
        return obj

    def _init(self, dtype):
        # Populate all public attributes from the (possibly discovered)
        # MachAr / MachArLike parameters for this dtype.
        self.dtype = numeric.dtype(dtype)
        machar = _get_machar(dtype)

        # Integer-valued parameters copied verbatim.
        for word in ['precision', 'iexp',
                     'maxexp', 'minexp', 'negep',
                     'machep']:
            setattr(self, word, getattr(machar, word))
        # Array-valued parameters: .flat[0] unwraps to a scalar.
        for word in ['resolution', 'epsneg', 'smallest_subnormal']:
            setattr(self, word, getattr(machar, word).flat[0])
        self.bits = self.dtype.itemsize * 8
        self.max = machar.huge.flat[0]
        self.min = -self.max
        self.eps = machar.eps.flat[0]
        self.nexp = machar.iexp
        self.nmant = machar.it
        self._machar = machar
        # Pre-stripped string renderings used by __str__/__repr__.
        self._str_tiny = machar._str_xmin.strip()
        self._str_max = machar._str_xmax.strip()
        self._str_epsneg = machar._str_epsneg.strip()
        self._str_eps = machar._str_eps.strip()
        self._str_resolution = machar._str_resolution.strip()
        self._str_smallest_normal = machar._str_smallest_normal.strip()
        self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
        return self

    def __str__(self):
        fmt = (
            'Machine parameters for %(dtype)s\n'
            '---------------------------------------------------------------\n'
            'precision = %(precision)3s resolution = %(_str_resolution)s\n'
            'machep = %(machep)6s eps = %(_str_eps)s\n'
            'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
            'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
            'maxexp = %(maxexp)6s max = %(_str_max)s\n'
            'nexp = %(nexp)6s min = -max\n'
            'smallest_normal = %(_str_smallest_normal)s '
            'smallest_subnormal = %(_str_smallest_subnormal)s\n'
            '---------------------------------------------------------------\n'
        )
        return fmt % self.__dict__

    def __repr__(self):
        c = self.__class__.__name__
        d = self.__dict__.copy()
        d['klass'] = c
        return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
                 " max=%(_str_max)s, dtype=%(dtype)s)") % d)

    @property
    def smallest_normal(self):
        """Return the value for the smallest normal.

        Returns
        -------
        smallest_normal : float
            Value for the smallest normal.

        Warns
        -----
        UserWarning
            If the calculated value for the smallest normal is requested for
            double-double.
        """
        # This check is necessary because the value for smallest_normal is
        # platform dependent for longdouble types.
        if isnan(self._machar.smallest_normal.flat[0]):
            warnings.warn(
                'The value of smallest normal is undefined for double double',
                UserWarning, stacklevel=2)
        return self._machar.smallest_normal.flat[0]

    @property
    def tiny(self):
        """Return the value for tiny, alias of smallest_normal.

        Returns
        -------
        tiny : float
            Value for the smallest normal, alias of smallest_normal.

        Warns
        -----
        UserWarning
            If the calculated value for the smallest normal is requested for
            double-double.
        """
        return self.smallest_normal
||||
|
||||
|
||||
@set_module('numpy')
|
||||
class iinfo:
|
||||
"""
|
||||
iinfo(type)
|
||||
|
||||
Machine limits for integer types.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
bits : int
|
||||
The number of bits occupied by the type.
|
||||
dtype : dtype
|
||||
Returns the dtype for which `iinfo` returns information.
|
||||
min : int
|
||||
The smallest integer expressible by the type.
|
||||
max : int
|
||||
The largest integer expressible by the type.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
int_type : integer type, dtype, or instance
|
||||
The kind of integer data type to get information about.
|
||||
|
||||
See Also
|
||||
--------
|
||||
finfo : The equivalent for floating point data types.
|
||||
|
||||
Examples
|
||||
--------
|
||||
With types:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> ii16 = np.iinfo(np.int16)
|
||||
>>> ii16.min
|
||||
-32768
|
||||
>>> ii16.max
|
||||
32767
|
||||
>>> ii32 = np.iinfo(np.int32)
|
||||
>>> ii32.min
|
||||
-2147483648
|
||||
>>> ii32.max
|
||||
2147483647
|
||||
|
||||
With instances:
|
||||
|
||||
>>> ii32 = np.iinfo(np.int32(10))
|
||||
>>> ii32.min
|
||||
-2147483648
|
||||
>>> ii32.max
|
||||
2147483647
|
||||
|
||||
"""
|
||||
|
||||
_min_vals = {}
|
||||
_max_vals = {}
|
||||
|
||||
__class_getitem__ = classmethod(types.GenericAlias)
|
||||
|
||||
def __init__(self, int_type):
|
||||
try:
|
||||
self.dtype = numeric.dtype(int_type)
|
||||
except TypeError:
|
||||
self.dtype = numeric.dtype(type(int_type))
|
||||
self.kind = self.dtype.kind
|
||||
self.bits = self.dtype.itemsize * 8
|
||||
self.key = "%s%d" % (self.kind, self.bits)
|
||||
if self.kind not in 'iu':
|
||||
raise ValueError(f"Invalid integer data type {self.kind!r}.")
|
||||
|
||||
@property
|
||||
def min(self):
|
||||
"""Minimum value of given dtype."""
|
||||
if self.kind == 'u':
|
||||
return 0
|
||||
else:
|
||||
try:
|
||||
val = iinfo._min_vals[self.key]
|
||||
except KeyError:
|
||||
val = int(-(1 << (self.bits - 1)))
|
||||
iinfo._min_vals[self.key] = val
|
||||
return val
|
||||
|
||||
@property
|
||||
def max(self):
|
||||
"""Maximum value of given dtype."""
|
||||
try:
|
||||
val = iinfo._max_vals[self.key]
|
||||
except KeyError:
|
||||
if self.kind == 'u':
|
||||
val = int((1 << self.bits) - 1)
|
||||
else:
|
||||
val = int((1 << (self.bits - 1)) - 1)
|
||||
iinfo._max_vals[self.key] = val
|
||||
return val
|
||||
|
||||
def __str__(self):
|
||||
"""String representation."""
|
||||
fmt = (
|
||||
'Machine parameters for %(dtype)s\n'
|
||||
'---------------------------------------------------------------\n'
|
||||
'min = %(min)s\n'
|
||||
'max = %(max)s\n'
|
||||
'---------------------------------------------------------------\n'
|
||||
)
|
||||
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
|
||||
|
||||
def __repr__(self):
|
||||
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
|
||||
self.min, self.max, self.dtype)
|
||||
3
lib/python3.11/site-packages/numpy/_core/getlimits.pyi
Normal file
3
lib/python3.11/site-packages/numpy/_core/getlimits.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from numpy import finfo, iinfo
|
||||
|
||||
__all__ = ["finfo", "iinfo"]
|
||||
@ -0,0 +1,376 @@
|
||||
|
||||
/* These pointers will be stored in the C-object for use in other
   extension modules
*/

/*
 * NOTE(review): generated API table.  Consumers index this array by slot
 * number (see the PyArray_* / NpyIter_* accessor macros elsewhere in the
 * generated headers), so positions are part of the binary interface.
 * NULL entries are retired or reserved slots.  Never reorder, remove, or
 * compact entries by hand.
 */
void *PyArray_API[] = {
        (void *) PyArray_GetNDArrayCVersion,
        NULL,
        (void *) &PyArray_Type,
        (void *) &PyArrayDescr_Type,
        NULL,
        (void *) &PyArrayIter_Type,
        (void *) &PyArrayMultiIter_Type,
        (int *) &NPY_NUMUSERTYPES,
        (void *) &PyBoolArrType_Type,
        (void *) &_PyArrayScalar_BoolValues,
        (void *) &PyGenericArrType_Type,
        (void *) &PyNumberArrType_Type,
        (void *) &PyIntegerArrType_Type,
        (void *) &PySignedIntegerArrType_Type,
        (void *) &PyUnsignedIntegerArrType_Type,
        (void *) &PyInexactArrType_Type,
        (void *) &PyFloatingArrType_Type,
        (void *) &PyComplexFloatingArrType_Type,
        (void *) &PyFlexibleArrType_Type,
        (void *) &PyCharacterArrType_Type,
        (void *) &PyByteArrType_Type,
        (void *) &PyShortArrType_Type,
        (void *) &PyIntArrType_Type,
        (void *) &PyLongArrType_Type,
        (void *) &PyLongLongArrType_Type,
        (void *) &PyUByteArrType_Type,
        (void *) &PyUShortArrType_Type,
        (void *) &PyUIntArrType_Type,
        (void *) &PyULongArrType_Type,
        (void *) &PyULongLongArrType_Type,
        (void *) &PyFloatArrType_Type,
        (void *) &PyDoubleArrType_Type,
        (void *) &PyLongDoubleArrType_Type,
        (void *) &PyCFloatArrType_Type,
        (void *) &PyCDoubleArrType_Type,
        (void *) &PyCLongDoubleArrType_Type,
        (void *) &PyObjectArrType_Type,
        (void *) &PyStringArrType_Type,
        (void *) &PyUnicodeArrType_Type,
        (void *) &PyVoidArrType_Type,
        NULL,
        NULL,
        (void *) PyArray_INCREF,
        (void *) PyArray_XDECREF,
        (void *) PyArray_SetStringFunction,
        (void *) PyArray_DescrFromType,
        (void *) PyArray_TypeObjectFromType,
        (void *) PyArray_Zero,
        (void *) PyArray_One,
        (void *) PyArray_CastToType,
        (void *) PyArray_CopyInto,
        (void *) PyArray_CopyAnyInto,
        (void *) PyArray_CanCastSafely,
        (void *) PyArray_CanCastTo,
        (void *) PyArray_ObjectType,
        (void *) PyArray_DescrFromObject,
        (void *) PyArray_ConvertToCommonType,
        (void *) PyArray_DescrFromScalar,
        (void *) PyArray_DescrFromTypeObject,
        (void *) PyArray_Size,
        (void *) PyArray_Scalar,
        (void *) PyArray_FromScalar,
        (void *) PyArray_ScalarAsCtype,
        (void *) PyArray_CastScalarToCtype,
        (void *) PyArray_CastScalarDirect,
        (void *) PyArray_Pack,
        NULL,
        NULL,
        NULL,
        (void *) PyArray_FromAny,
        (void *) PyArray_EnsureArray,
        (void *) PyArray_EnsureAnyArray,
        (void *) PyArray_FromFile,
        (void *) PyArray_FromString,
        (void *) PyArray_FromBuffer,
        (void *) PyArray_FromIter,
        (void *) PyArray_Return,
        (void *) PyArray_GetField,
        (void *) PyArray_SetField,
        (void *) PyArray_Byteswap,
        (void *) PyArray_Resize,
        NULL,
        NULL,
        NULL,
        (void *) PyArray_CopyObject,
        (void *) PyArray_NewCopy,
        (void *) PyArray_ToList,
        (void *) PyArray_ToString,
        (void *) PyArray_ToFile,
        (void *) PyArray_Dump,
        (void *) PyArray_Dumps,
        (void *) PyArray_ValidType,
        (void *) PyArray_UpdateFlags,
        (void *) PyArray_New,
        (void *) PyArray_NewFromDescr,
        (void *) PyArray_DescrNew,
        (void *) PyArray_DescrNewFromType,
        (void *) PyArray_GetPriority,
        (void *) PyArray_IterNew,
        (void *) PyArray_MultiIterNew,
        (void *) PyArray_PyIntAsInt,
        (void *) PyArray_PyIntAsIntp,
        (void *) PyArray_Broadcast,
        NULL,
        (void *) PyArray_FillWithScalar,
        (void *) PyArray_CheckStrides,
        (void *) PyArray_DescrNewByteorder,
        (void *) PyArray_IterAllButAxis,
        (void *) PyArray_CheckFromAny,
        (void *) PyArray_FromArray,
        (void *) PyArray_FromInterface,
        (void *) PyArray_FromStructInterface,
        (void *) PyArray_FromArrayAttr,
        (void *) PyArray_ScalarKind,
        (void *) PyArray_CanCoerceScalar,
        NULL,
        (void *) PyArray_CanCastScalar,
        NULL,
        (void *) PyArray_RemoveSmallest,
        (void *) PyArray_ElementStrides,
        (void *) PyArray_Item_INCREF,
        (void *) PyArray_Item_XDECREF,
        NULL,
        (void *) PyArray_Transpose,
        (void *) PyArray_TakeFrom,
        (void *) PyArray_PutTo,
        (void *) PyArray_PutMask,
        (void *) PyArray_Repeat,
        (void *) PyArray_Choose,
        (void *) PyArray_Sort,
        (void *) PyArray_ArgSort,
        (void *) PyArray_SearchSorted,
        (void *) PyArray_ArgMax,
        (void *) PyArray_ArgMin,
        (void *) PyArray_Reshape,
        (void *) PyArray_Newshape,
        (void *) PyArray_Squeeze,
        (void *) PyArray_View,
        (void *) PyArray_SwapAxes,
        (void *) PyArray_Max,
        (void *) PyArray_Min,
        (void *) PyArray_Ptp,
        (void *) PyArray_Mean,
        (void *) PyArray_Trace,
        (void *) PyArray_Diagonal,
        (void *) PyArray_Clip,
        (void *) PyArray_Conjugate,
        (void *) PyArray_Nonzero,
        (void *) PyArray_Std,
        (void *) PyArray_Sum,
        (void *) PyArray_CumSum,
        (void *) PyArray_Prod,
        (void *) PyArray_CumProd,
        (void *) PyArray_All,
        (void *) PyArray_Any,
        (void *) PyArray_Compress,
        (void *) PyArray_Flatten,
        (void *) PyArray_Ravel,
        (void *) PyArray_MultiplyList,
        (void *) PyArray_MultiplyIntList,
        (void *) PyArray_GetPtr,
        (void *) PyArray_CompareLists,
        (void *) PyArray_AsCArray,
        NULL,
        NULL,
        (void *) PyArray_Free,
        (void *) PyArray_Converter,
        (void *) PyArray_IntpFromSequence,
        (void *) PyArray_Concatenate,
        (void *) PyArray_InnerProduct,
        (void *) PyArray_MatrixProduct,
        NULL,
        (void *) PyArray_Correlate,
        NULL,
        (void *) PyArray_DescrConverter,
        (void *) PyArray_DescrConverter2,
        (void *) PyArray_IntpConverter,
        (void *) PyArray_BufferConverter,
        (void *) PyArray_AxisConverter,
        (void *) PyArray_BoolConverter,
        (void *) PyArray_ByteorderConverter,
        (void *) PyArray_OrderConverter,
        (void *) PyArray_EquivTypes,
        (void *) PyArray_Zeros,
        (void *) PyArray_Empty,
        (void *) PyArray_Where,
        (void *) PyArray_Arange,
        (void *) PyArray_ArangeObj,
        (void *) PyArray_SortkindConverter,
        (void *) PyArray_LexSort,
        (void *) PyArray_Round,
        (void *) PyArray_EquivTypenums,
        (void *) PyArray_RegisterDataType,
        (void *) PyArray_RegisterCastFunc,
        (void *) PyArray_RegisterCanCast,
        (void *) PyArray_InitArrFuncs,
        (void *) PyArray_IntTupleFromIntp,
        NULL,
        (void *) PyArray_ClipmodeConverter,
        (void *) PyArray_OutputConverter,
        (void *) PyArray_BroadcastToShape,
        NULL,
        NULL,
        (void *) PyArray_DescrAlignConverter,
        (void *) PyArray_DescrAlignConverter2,
        (void *) PyArray_SearchsideConverter,
        (void *) PyArray_CheckAxis,
        (void *) PyArray_OverflowMultiplyList,
        NULL,
        (void *) PyArray_MultiIterFromObjects,
        (void *) PyArray_GetEndianness,
        (void *) PyArray_GetNDArrayCFeatureVersion,
        (void *) PyArray_Correlate2,
        (void *) PyArray_NeighborhoodIterNew,
        (void *) &PyTimeIntegerArrType_Type,
        (void *) &PyDatetimeArrType_Type,
        (void *) &PyTimedeltaArrType_Type,
        (void *) &PyHalfArrType_Type,
        (void *) &NpyIter_Type,
        NULL,
        NULL,
        NULL,
        NULL,
        (void *) NpyIter_GetTransferFlags,
        (void *) NpyIter_New,
        (void *) NpyIter_MultiNew,
        (void *) NpyIter_AdvancedNew,
        (void *) NpyIter_Copy,
        (void *) NpyIter_Deallocate,
        (void *) NpyIter_HasDelayedBufAlloc,
        (void *) NpyIter_HasExternalLoop,
        (void *) NpyIter_EnableExternalLoop,
        (void *) NpyIter_GetInnerStrideArray,
        (void *) NpyIter_GetInnerLoopSizePtr,
        (void *) NpyIter_Reset,
        (void *) NpyIter_ResetBasePointers,
        (void *) NpyIter_ResetToIterIndexRange,
        (void *) NpyIter_GetNDim,
        (void *) NpyIter_GetNOp,
        (void *) NpyIter_GetIterNext,
        (void *) NpyIter_GetIterSize,
        (void *) NpyIter_GetIterIndexRange,
        (void *) NpyIter_GetIterIndex,
        (void *) NpyIter_GotoIterIndex,
        (void *) NpyIter_HasMultiIndex,
        (void *) NpyIter_GetShape,
        (void *) NpyIter_GetGetMultiIndex,
        (void *) NpyIter_GotoMultiIndex,
        (void *) NpyIter_RemoveMultiIndex,
        (void *) NpyIter_HasIndex,
        (void *) NpyIter_IsBuffered,
        (void *) NpyIter_IsGrowInner,
        (void *) NpyIter_GetBufferSize,
        (void *) NpyIter_GetIndexPtr,
        (void *) NpyIter_GotoIndex,
        (void *) NpyIter_GetDataPtrArray,
        (void *) NpyIter_GetDescrArray,
        (void *) NpyIter_GetOperandArray,
        (void *) NpyIter_GetIterView,
        (void *) NpyIter_GetReadFlags,
        (void *) NpyIter_GetWriteFlags,
        (void *) NpyIter_DebugPrint,
        (void *) NpyIter_IterationNeedsAPI,
        (void *) NpyIter_GetInnerFixedStrideArray,
        (void *) NpyIter_RemoveAxis,
        (void *) NpyIter_GetAxisStrideArray,
        (void *) NpyIter_RequiresBuffering,
        (void *) NpyIter_GetInitialDataPtrArray,
        (void *) NpyIter_CreateCompatibleStrides,
        (void *) PyArray_CastingConverter,
        (void *) PyArray_CountNonzero,
        (void *) PyArray_PromoteTypes,
        (void *) PyArray_MinScalarType,
        (void *) PyArray_ResultType,
        (void *) PyArray_CanCastArrayTo,
        (void *) PyArray_CanCastTypeTo,
        (void *) PyArray_EinsteinSum,
        (void *) PyArray_NewLikeArray,
        NULL,
        (void *) PyArray_ConvertClipmodeSequence,
        (void *) PyArray_MatrixProduct2,
        (void *) NpyIter_IsFirstVisit,
        (void *) PyArray_SetBaseObject,
        (void *) PyArray_CreateSortedStridePerm,
        (void *) PyArray_RemoveAxesInPlace,
        (void *) PyArray_DebugPrint,
        (void *) PyArray_FailUnlessWriteable,
        (void *) PyArray_SetUpdateIfCopyBase,
        (void *) PyDataMem_NEW,
        (void *) PyDataMem_FREE,
        (void *) PyDataMem_RENEW,
        NULL,
        (NPY_CASTING *) &NPY_DEFAULT_ASSIGN_CASTING,
        NULL,
        NULL,
        NULL,
        (void *) PyArray_Partition,
        (void *) PyArray_ArgPartition,
        (void *) PyArray_SelectkindConverter,
        (void *) PyDataMem_NEW_ZEROED,
        (void *) PyArray_CheckAnyScalarExact,
        NULL,
        (void *) PyArray_ResolveWritebackIfCopy,
        (void *) PyArray_SetWritebackIfCopyBase,
        (void *) PyDataMem_SetHandler,
        (void *) PyDataMem_GetHandler,
        (PyObject* *) &PyDataMem_DefaultHandler,
        (void *) NpyDatetime_ConvertDatetime64ToDatetimeStruct,
        (void *) NpyDatetime_ConvertDatetimeStructToDatetime64,
        (void *) NpyDatetime_ConvertPyDateTimeToDatetimeStruct,
        (void *) NpyDatetime_GetDatetimeISO8601StrLen,
        (void *) NpyDatetime_MakeISO8601Datetime,
        (void *) NpyDatetime_ParseISO8601Datetime,
        (void *) NpyString_load,
        (void *) NpyString_pack,
        (void *) NpyString_pack_null,
        (void *) NpyString_acquire_allocator,
        (void *) NpyString_acquire_allocators,
        (void *) NpyString_release_allocator,
        (void *) NpyString_release_allocators,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        (void *) PyArray_GetDefaultDescr,
        (void *) PyArrayInitDTypeMeta_FromSpec,
        (void *) PyArray_CommonDType,
        (void *) PyArray_PromoteDTypeSequence,
        (void *) _PyDataType_GetArrFuncs,
        NULL,
        NULL,
        NULL
};
|
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,54 @@
|
||||
|
||||
/* These pointers will be stored in the C-object for use in other
   extension modules
*/

/*
 * NOTE(review): generated API table.  The companion header accesses these
 * entries positionally (e.g. ``PyUFunc_API[1]`` for PyUFunc_FromFuncAndData),
 * so slot order is part of the binary interface.  NULL entries are retired
 * or reserved slots.  Never reorder, remove, or compact entries by hand.
 */
void *PyUFunc_API[] = {
        (void *) &PyUFunc_Type,
        (void *) PyUFunc_FromFuncAndData,
        (void *) PyUFunc_RegisterLoopForType,
        NULL,
        (void *) PyUFunc_f_f_As_d_d,
        (void *) PyUFunc_d_d,
        (void *) PyUFunc_f_f,
        (void *) PyUFunc_g_g,
        (void *) PyUFunc_F_F_As_D_D,
        (void *) PyUFunc_F_F,
        (void *) PyUFunc_D_D,
        (void *) PyUFunc_G_G,
        (void *) PyUFunc_O_O,
        (void *) PyUFunc_ff_f_As_dd_d,
        (void *) PyUFunc_ff_f,
        (void *) PyUFunc_dd_d,
        (void *) PyUFunc_gg_g,
        (void *) PyUFunc_FF_F_As_DD_D,
        (void *) PyUFunc_DD_D,
        (void *) PyUFunc_FF_F,
        (void *) PyUFunc_GG_G,
        (void *) PyUFunc_OO_O,
        (void *) PyUFunc_O_O_method,
        (void *) PyUFunc_OO_O_method,
        (void *) PyUFunc_On_Om,
        NULL,
        NULL,
        (void *) PyUFunc_clearfperr,
        (void *) PyUFunc_getfperr,
        NULL,
        (void *) PyUFunc_ReplaceLoopBySignature,
        (void *) PyUFunc_FromFuncAndDataAndSignature,
        NULL,
        (void *) PyUFunc_e_e,
        (void *) PyUFunc_e_e_As_f_f,
        (void *) PyUFunc_e_e_As_d_d,
        (void *) PyUFunc_ee_e,
        (void *) PyUFunc_ee_e_As_ff_f,
        (void *) PyUFunc_ee_e_As_dd_d,
        (void *) PyUFunc_DefaultTypeResolver,
        (void *) PyUFunc_ValidateCasting,
        (void *) PyUFunc_RegisterLoopForDescr,
        (void *) PyUFunc_FromFuncAndDataAndSignatureAndIdentity,
        (void *) PyUFunc_AddLoopFromSpec,
        (void *) PyUFunc_AddPromoter,
        (void *) PyUFunc_AddWrappingLoop,
        (void *) PyUFunc_GiveFloatingpointErrors
};
|
||||
@ -0,0 +1,341 @@
|
||||
|
||||
#ifdef _UMATHMODULE
|
||||
|
||||
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
|
||||
|
||||
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
|
||||
|
||||
NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \
|
||||
(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int);
|
||||
NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \
|
||||
(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_d_d \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_f_f \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_g_g \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_F_F \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_D_D \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_G_G \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_O_O \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_ff_f \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_dd_d \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_gg_g \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_DD_D \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_FF_F \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_GG_G \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_OO_O \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_O_O_method \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_OO_O_method \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_On_Om \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_clearfperr \
|
||||
(void);
|
||||
NPY_NO_EXPORT int PyUFunc_getfperr \
|
||||
(void);
|
||||
NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \
|
||||
(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *);
|
||||
NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \
|
||||
(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *);
|
||||
NPY_NO_EXPORT void PyUFunc_e_e \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_ee_e \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \
|
||||
(char **, npy_intp const *, npy_intp const *, void *);
|
||||
NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \
|
||||
(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **);
|
||||
NPY_NO_EXPORT int PyUFunc_ValidateCasting \
|
||||
(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *);
|
||||
NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \
|
||||
(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *);
|
||||
NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
|
||||
(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *);
|
||||
NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec \
|
||||
(PyObject *, PyArrayMethod_Spec *);
|
||||
NPY_NO_EXPORT int PyUFunc_AddPromoter \
|
||||
(PyObject *, PyObject *, PyObject *);
|
||||
NPY_NO_EXPORT int PyUFunc_AddWrappingLoop \
|
||||
(PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *);
|
||||
NPY_NO_EXPORT int PyUFunc_GiveFloatingpointErrors \
|
||||
(const char *, int);
|
||||
|
||||
#else

/*
 * External-consumer branch: the ufunc C-API is reached through the
 * PyUFunc_API function-pointer table (filled in by _import_umath() below).
 * PY_UFUNC_UNIQUE_SYMBOL lets multiple translation units share one table.
 */
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
#endif

/* By default do not export API in an .so (was never the case on windows) */
#ifndef NPY_API_SYMBOL_ATTRIBUTE
    #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN
#endif

#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
extern NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API;
#else
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API;
#else
static void **PyUFunc_API=NULL;
#endif
#endif

/*
 * Each macro casts a fixed table slot to the proper function-pointer type.
 * The slot numbers are ABI — note that some indices (e.g. 3, 25-26, 29, 32)
 * are not exposed here; presumably reserved/retired slots, do not reuse.
 */
#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
#define PyUFunc_FromFuncAndData \
        (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int)) \
        PyUFunc_API[1])
#define PyUFunc_RegisterLoopForType \
        (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \
        PyUFunc_API[2])
#define PyUFunc_f_f_As_d_d \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[4])
#define PyUFunc_d_d \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[5])
#define PyUFunc_f_f \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[6])
#define PyUFunc_g_g \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[7])
#define PyUFunc_F_F_As_D_D \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[8])
#define PyUFunc_F_F \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[9])
#define PyUFunc_D_D \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[10])
#define PyUFunc_G_G \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[11])
#define PyUFunc_O_O \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[12])
#define PyUFunc_ff_f_As_dd_d \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[13])
#define PyUFunc_ff_f \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[14])
#define PyUFunc_dd_d \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[15])
#define PyUFunc_gg_g \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[16])
#define PyUFunc_FF_F_As_DD_D \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[17])
#define PyUFunc_DD_D \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[18])
#define PyUFunc_FF_F \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[19])
#define PyUFunc_GG_G \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[20])
#define PyUFunc_OO_O \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[21])
#define PyUFunc_O_O_method \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[22])
#define PyUFunc_OO_O_method \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[23])
#define PyUFunc_On_Om \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[24])
/* Floating-point status helpers. */
#define PyUFunc_clearfperr \
        (*(void (*)(void)) \
        PyUFunc_API[27])
#define PyUFunc_getfperr \
        (*(int (*)(void)) \
        PyUFunc_API[28])
#define PyUFunc_ReplaceLoopBySignature \
        (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \
        PyUFunc_API[30])
#define PyUFunc_FromFuncAndDataAndSignature \
        (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *)) \
        PyUFunc_API[31])
#define PyUFunc_e_e \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[33])
#define PyUFunc_e_e_As_f_f \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[34])
#define PyUFunc_e_e_As_d_d \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[35])
#define PyUFunc_ee_e \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[36])
#define PyUFunc_ee_e_As_ff_f \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[37])
#define PyUFunc_ee_e_As_dd_d \
        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
        PyUFunc_API[38])
#define PyUFunc_DefaultTypeResolver \
        (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \
        PyUFunc_API[39])
#define PyUFunc_ValidateCasting \
        (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *)) \
        PyUFunc_API[40])
#define PyUFunc_RegisterLoopForDescr \
        (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \
        PyUFunc_API[41])
|
||||
/*
 * Feature-gated API slots: only exposed when the consumer targets a recent
 * enough NumPy API level (NPY_FEATURE_VERSION), so extensions compiled
 * against older levels never reference slots their runtime may lack.
 */
#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
        (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \
        PyUFunc_API[42])
#endif

#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
#define PyUFunc_AddLoopFromSpec \
        (*(int (*)(PyObject *, PyArrayMethod_Spec *)) \
        PyUFunc_API[43])
#endif

#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
#define PyUFunc_AddPromoter \
        (*(int (*)(PyObject *, PyObject *, PyObject *)) \
        PyUFunc_API[44])
#endif

#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
#define PyUFunc_AddWrappingLoop \
        (*(int (*)(PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *)) \
        PyUFunc_API[45])
#endif

#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
#define PyUFunc_GiveFloatingpointErrors \
        (*(int (*)(const char *, int)) \
        PyUFunc_API[46])
#endif
|
||||
/*
 * Load the ufunc C-API table from the running NumPy.
 *
 * Imports numpy._core._multiarray_umath, fetches its "_UFUNC_API" capsule and
 * stores the contained function-pointer table in PyUFunc_API.
 * Returns 0 on success, -1 with a Python exception set on failure.
 */
static inline int
_import_umath(void)
{
  PyObject *c_api;
  PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath");
  /*
   * Fall back to the pre-NumPy-2.0 module path ("numpy.core") when the
   * new-style module does not exist; only ModuleNotFoundError is cleared so
   * other import failures still propagate.
   */
  if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) {
      PyErr_Clear();
      numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
  }

  if (numpy == NULL) {
      PyErr_SetString(PyExc_ImportError,
                      "_multiarray_umath failed to import");
      return -1;
  }

  c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
  Py_DECREF(numpy);
  if (c_api == NULL) {
      PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
      return -1;
  }

  if (!PyCapsule_CheckExact(c_api)) {
      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
      Py_DECREF(c_api);
      return -1;
  }
  /* The capsule was created with a NULL name, so NULL must be passed here. */
  PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
  Py_DECREF(c_api);
  if (PyUFunc_API == NULL) {
      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
      return -1;
  }
  return 0;
}
|
||||
/*
 * Convenience wrappers around _import_umath() for module init functions.
 * They differ only in what happens on failure:
 *   import_umath()          - sets ImportError and `return NULL` (PEP-489
 *                             style module init returning PyObject *).
 *   import_umath1(ret)      - same, but `return ret`.
 *   import_umath2(ret, msg) - custom error message, `return ret`.
 *   import_ufunc()          - reports the error but does not return.
 * UFUNC_NOFPE is expanded first in each (platform FP-exception setup;
 * defined elsewhere — intentionally no trailing semicolon here).
 */
#define import_umath() \
        do {\
            UFUNC_NOFPE\
            if (_import_umath() < 0) {\
                PyErr_Print();\
                PyErr_SetString(PyExc_ImportError,\
                    "numpy._core.umath failed to import");\
                return NULL;\
            }\
        } while(0)

#define import_umath1(ret) \
        do {\
            UFUNC_NOFPE\
            if (_import_umath() < 0) {\
                PyErr_Print();\
                PyErr_SetString(PyExc_ImportError,\
                    "numpy._core.umath failed to import");\
                return ret;\
            }\
        } while(0)

#define import_umath2(ret, msg) \
        do {\
            UFUNC_NOFPE\
            if (_import_umath() < 0) {\
                PyErr_Print();\
                PyErr_SetString(PyExc_ImportError, msg);\
                return ret;\
            }\
        } while(0)

#define import_ufunc() \
        do {\
            UFUNC_NOFPE\
            if (_import_umath() < 0) {\
                PyErr_Print();\
                PyErr_SetString(PyExc_ImportError,\
                    "numpy._core.umath failed to import");\
            }\
        } while(0)
|
||||
|
||||
/*
 * Lazily ensure the ufunc C-API table is loaded.
 * Returns 0 if PyUFunc_API is (already or now) available; on import failure
 * the import_umath1 macro makes this function return -1 with an exception set.
 */
static inline int
PyUFunc_ImportUFuncAPI()
{
    if (NPY_UNLIKELY(PyUFunc_API == NULL)) {
        import_umath1(-1);
    }
    return 0;
}

#endif
@ -0,0 +1,90 @@
|
||||
/* Inline implementation of the neighborhood-iterator API (NumPy header). */
#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
#error You should not include this header directly
#endif
/*
 * Private API (here for inline)
 */
static inline int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);

/*
 * Update to next item of the iterator
 *
 * Note: this simply increment the coordinates vector, last dimension
 * incremented first , i.e, for dimension 3
 * ...
 * -1, -1, -1
 * -1, -1, 0
 * -1, -1, 1
 *  ....
 * -1, 0, -1
 * -1, 0, 0
 *  ....
 * 0, -1, -1
 * 0, -1, 0
 *  ....
 */
/*
 * Advance coordinate `c`; `return 0` from the *enclosing* function once a
 * dimension could be incremented, otherwise wrap it to its lower bound and
 * fall through to the next (more significant) dimension.
 * NOTE: relies on `wb` being declared by the caller.
 */
#define _UPDATE_COORD_ITER(c) \
    wb = iter->coordinates[c] < iter->bounds[c][1]; \
    if (wb) { \
        iter->coordinates[c] += 1; \
        return 0; \
    } \
    else { \
        iter->coordinates[c] = iter->bounds[c][0]; \
    }

static inline int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
{
    npy_intp i, wb;

    /* Last dimension varies fastest (C order). */
    for (i = iter->nd - 1; i >= 0; --i) {
        _UPDATE_COORD_ITER(i)
    }

    return 0;
}

/*
 * Version optimized for 2d arrays, manual loop unrolling
 */
static inline int
_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
{
    npy_intp wb;

    _UPDATE_COORD_ITER(1)
    _UPDATE_COORD_ITER(0)

    return 0;
}
#undef _UPDATE_COORD_ITER

/*
 * Advance to the next neighbour
 */
static inline int
PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
{
    _PyArrayNeighborhoodIter_IncrCoord (iter);
    /* translate() maps the (possibly out-of-bounds) coords to a data pointer */
    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);

    return 0;
}

/*
 * Reset functions
 */
static inline int
PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
{
    npy_intp i;

    /* Start every dimension at its lower bound. */
    for (i = 0; i < iter->nd; ++i) {
        iter->coordinates[i] = iter->bounds[i][0];
    }
    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);

    return 0;
}
@ -0,0 +1,33 @@
|
||||
/*
 * Build-time platform configuration (generated; values here match an
 * aarch64/darwin build: 8-byte long, long double == double).
 */
/* #undef NPY_HAVE_ENDIAN_H */

#define NPY_SIZEOF_SHORT 2
#define NPY_SIZEOF_INT 4
#define NPY_SIZEOF_LONG 8
#define NPY_SIZEOF_FLOAT 4
#define NPY_SIZEOF_COMPLEX_FLOAT 8
#define NPY_SIZEOF_DOUBLE 8
#define NPY_SIZEOF_COMPLEX_DOUBLE 16
#define NPY_SIZEOF_LONGDOUBLE 8
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
#define NPY_SIZEOF_PY_INTPTR_T 8
#define NPY_SIZEOF_INTP 8
#define NPY_SIZEOF_UINTP 8
#define NPY_SIZEOF_WCHAR_T 4
#define NPY_SIZEOF_OFF_T 8
#define NPY_SIZEOF_PY_LONG_LONG 8
#define NPY_SIZEOF_LONGLONG 8

/*
 * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines
 * in this header) for better cross-compilation, so don't rename them without a
 * good reason.
 */
#define NPY_NO_SMP 0

#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#define NPY_ABI_VERSION 0x02000000
#define NPY_API_VERSION 0x00000014

/* Needed so <inttypes.h> exposes PRI*/SCN* macros under C++ too. */
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
@ -0,0 +1,86 @@
|
||||
/*
 * Public exposure of the DType Classes. These are tricky to expose
 * via the Python API, so they are exposed through this header for now.
 *
 * These definitions are only relevant for the public API and we reserve
 * the slots 320-360 in the API table generation for this (currently).
 *
 * TODO: This file should be consolidated with the API table generation
 * (although not sure the current generation is worth preserving).
 */
#ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_
#define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_

#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)

/* All of these require NumPy 2.0 support */
#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION

/*
 * The type of the DType metaclass
 * (note: `(PyArray_API + 320)[k]` is `PyArray_API[320 + k]`)
 */
#define PyArrayDTypeMeta_Type (*(PyTypeObject *)(PyArray_API + 320)[0])
/*
 * NumPy's builtin DTypes:
 */
#define PyArray_BoolDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[1])
/* Integers */
#define PyArray_ByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[2])
#define PyArray_UByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[3])
#define PyArray_ShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[4])
#define PyArray_UShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[5])
#define PyArray_IntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[6])
#define PyArray_UIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[7])
#define PyArray_LongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[8])
#define PyArray_ULongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[9])
#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[10])
#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[11])
/* Integer aliases */
#define PyArray_Int8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[12])
#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[13])
#define PyArray_Int16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[14])
#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[15])
#define PyArray_Int32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[16])
#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[17])
#define PyArray_Int64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[18])
#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[19])
#define PyArray_IntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[20])
#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[21])
/* Floats */
#define PyArray_HalfDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[22])
#define PyArray_FloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[23])
#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[24])
#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[25])
/* Complex */
#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[26])
#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[27])
#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[28])
/* String/Bytes */
#define PyArray_BytesDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[29])
#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[30])
/* Datetime/Timedelta */
#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[31])
#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[32])
/* Object/Void */
#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33])
#define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34])
/* Python types (used as markers for scalars) */
#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35])
#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36])
#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37])
/* Default integer type */
#define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38])
/* New non-legacy DTypes follow in the order they were added */
#define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39])

/* NOTE: offset 40 is free */

/* Need to start with a larger offset again for the abstract classes: */
#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366])
#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367])
#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368])

#endif  /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */

#endif  /* NPY_INTERNAL_BUILD */
#endif  /* NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ */
@ -0,0 +1,7 @@
|
||||
/*
 * Thin compatibility header: defines the historical Py_ARRAYOBJECT_H guard
 * and forwards to ndarrayobject.h, which provides the actual definitions.
 */
#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
#define Py_ARRAYOBJECT_H

#include "ndarrayobject.h"

#endif  /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */
@ -0,0 +1,196 @@
|
||||
/*
 * C-level layouts of NumPy's array-scalar objects: each wraps one C value
 * (`obval`) in a Python object, plus the scalar convenience macros.
 */
#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_
#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_

/* Inside the multiarray module a different definition is used elsewhere. */
#ifndef _MULTIARRAYMODULE
typedef struct {
        PyObject_HEAD
        npy_bool obval;
} PyBoolScalarObject;
#endif


typedef struct {
        PyObject_HEAD
        signed char obval;
} PyByteScalarObject;


typedef struct {
        PyObject_HEAD
        short obval;
} PyShortScalarObject;


typedef struct {
        PyObject_HEAD
        int obval;
} PyIntScalarObject;


typedef struct {
        PyObject_HEAD
        long obval;
} PyLongScalarObject;


typedef struct {
        PyObject_HEAD
        npy_longlong obval;
} PyLongLongScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned char obval;
} PyUByteScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned short obval;
} PyUShortScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned int obval;
} PyUIntScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned long obval;
} PyULongScalarObject;


typedef struct {
        PyObject_HEAD
        npy_ulonglong obval;
} PyULongLongScalarObject;


typedef struct {
        PyObject_HEAD
        npy_half obval;
} PyHalfScalarObject;


typedef struct {
        PyObject_HEAD
        float obval;
} PyFloatScalarObject;


typedef struct {
        PyObject_HEAD
        double obval;
} PyDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        npy_longdouble obval;
} PyLongDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        npy_cfloat obval;
} PyCFloatScalarObject;


typedef struct {
        PyObject_HEAD
        npy_cdouble obval;
} PyCDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        npy_clongdouble obval;
} PyCLongDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        PyObject * obval;
} PyObjectScalarObject;

/* Datetime scalars also carry their unit metadata alongside the value. */
typedef struct {
        PyObject_HEAD
        npy_datetime obval;
        PyArray_DatetimeMetaData obmeta;
} PyDatetimeScalarObject;

typedef struct {
        PyObject_HEAD
        npy_timedelta obval;
        PyArray_DatetimeMetaData obmeta;
} PyTimedeltaScalarObject;


typedef struct {
        PyObject_HEAD
        char obval;
} PyScalarObject;

#define PyStringScalarObject PyBytesObject
#ifndef Py_LIMITED_API
typedef struct {
        /* note that the PyObject_HEAD macro lives right here */
        PyUnicodeObject base;
        Py_UCS4 *obval;
#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
        char *buffer_fmt;
#endif
} PyUnicodeScalarObject;
#endif


typedef struct {
        PyObject_VAR_HEAD
        char *obval;
#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
        /* Internally use the subclass to allow accessing names/fields */
        _PyArray_LegacyDescr *descr;
#else
        PyArray_Descr *descr;
#endif
        int flags;
        PyObject *base;
#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
        void *_buffer_info;  /* private buffer info, tagged to allow warning */
#endif
} PyVoidScalarObject;

/* Macros
     Py<Cls><bitsize>ScalarObject
     Py<Cls><bitsize>ArrType_Type
   are defined in ndarrayobject.h
*/

/* Shared singleton bool scalars; the RETURN_* macros incref before return. */
#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))
#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
#define PyArrayScalar_FromLong(i) \
        ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
        return Py_INCREF(PyArrayScalar_FromLong(i)), \
                PyArrayScalar_FromLong(i)
#define PyArrayScalar_RETURN_FALSE \
        return Py_INCREF(PyArrayScalar_False), \
                PyArrayScalar_False
#define PyArrayScalar_RETURN_TRUE \
        return Py_INCREF(PyArrayScalar_True), \
                PyArrayScalar_True

#define PyArrayScalar_New(cls) \
        Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)
#ifndef Py_LIMITED_API
/* For the limited API, use PyArray_ScalarAsCtype instead */
#define PyArrayScalar_VAL(obj, cls) \
        ((Py##cls##ScalarObject *)obj)->obval
#define PyArrayScalar_ASSIGN(obj, cls, val) \
        PyArrayScalar_VAL(obj, cls) = val
#endif

#endif  /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */
@ -0,0 +1,480 @@
|
||||
/*
 * The public DType API
 */

#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_

struct PyArrayMethodObject_tag;

/*
 * Largely opaque struct for DType classes (i.e. metaclass instances).
 * The internal definition is currently in `ndarraytypes.h` (export is a bit
 * more complex because `PyArray_Descr` is a DTypeMeta internally but not
 * externally).
 */
#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)

#ifndef Py_LIMITED_API

typedef struct PyArray_DTypeMeta_tag {
    PyHeapTypeObject super;

    /*
     * Most DTypes will have a singleton default instance, for the
     * parametric legacy DTypes (bytes, string, void, datetime) this
     * may be a pointer to the *prototype* instance?
     */
    PyArray_Descr *singleton;
    /* Copy of the legacy DTypes type number, usually invalid. */
    int type_num;

    /* The type object of the scalar instances (may be NULL?) */
    PyTypeObject *scalar_type;
    /*
     * DType flags to signal legacy, parametric, or
     * abstract.  But plenty of space for additional information/flags.
     */
    npy_uint64 flags;

    /*
     * Use indirection in order to allow a fixed size for this struct.
     * A stable ABI size makes creating a static DType less painful
     * while also ensuring flexibility for all opaque API (with one
     * indirection due the pointer lookup).
     */
    void *dt_slots;
    /* Allow growing (at the moment also beyond this) */
    void *reserved[3];
} PyArray_DTypeMeta;

#else

/* Under the limited API the layout is fully opaque. */
typedef PyTypeObject PyArray_DTypeMeta;

#endif /* Py_LIMITED_API */

#endif  /* not internal build */

/*
 * ******************************************************
 *         ArrayMethod API (Casting and UFuncs)
 * ******************************************************
 */


typedef enum {
    /* Flag for whether the GIL is required */
    NPY_METH_REQUIRES_PYAPI = 1 << 0,
    /*
     * Some functions cannot set floating point error flags, this flag
     * gives us the option (not requirement) to skip floating point error
     * setup/check. No function should set error flags and ignore them
     * since it would interfere with chaining operations (e.g. casting).
     */
    NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1,
    /* Whether the method supports unaligned access (not runtime) */
    NPY_METH_SUPPORTS_UNALIGNED = 1 << 2,
    /*
     * Used for reductions to allow reordering the operation.  At this point
     * assume that if set, it also applies to normal operations though!
     */
    NPY_METH_IS_REORDERABLE = 1 << 3,
    /*
     * Private flag for now for *logic* functions.  The logical functions
     * `logical_or` and `logical_and` can always cast the inputs to booleans
     * "safely" (because that is how the cast to bool is defined).
     * @seberg: I am not sure this is the best way to handle this, so its
     * private for now (also it is very limited anyway).
     * There is one "exception". NA aware dtypes cannot cast to bool
     * (hopefully), so the `??->?` loop should error even with this flag.
     * But a second NA fallback loop will be necessary.
     */
    _NPY_METH_FORCE_CAST_INPUTS = 1 << 17,

    /* All flags which can change at runtime */
    NPY_METH_RUNTIME_FLAGS = (
            NPY_METH_REQUIRES_PYAPI |
            NPY_METH_NO_FLOATINGPOINT_ERRORS),
} NPY_ARRAYMETHOD_FLAGS;


typedef struct PyArrayMethod_Context_tag {
    /* The caller, which is typically the original ufunc.  May be NULL */
    PyObject *caller;
    /* The method "self".  Currently an opaque object. */
    struct PyArrayMethodObject_tag *method;

    /* Operand descriptors, filled in by resolve_descriptors */
    PyArray_Descr *const *descriptors;
    /* Structure may grow (this is harmless for DType authors) */
} PyArrayMethod_Context;


/*
 * The main object for creating a new ArrayMethod. We use the typical `slots`
 * mechanism used by the Python limited API (see below for the slot defs).
 */
typedef struct {
    const char *name;
    int nin, nout;
    NPY_CASTING casting;
    NPY_ARRAYMETHOD_FLAGS flags;
    PyArray_DTypeMeta **dtypes;
    PyType_Slot *slots;
} PyArrayMethod_Spec;


/*
 * ArrayMethod slots
 * -----------------
 *
 * SLOTS IDs For the ArrayMethod creation, once fully public, IDs are fixed
 * but can be deprecated and arbitrarily extended.
 */
#define _NPY_METH_resolve_descriptors_with_scalars 1
#define NPY_METH_resolve_descriptors 2
#define NPY_METH_get_loop 3
#define NPY_METH_get_reduction_initial 4
/* specific loops for constructions/default get_loop: */
#define NPY_METH_strided_loop 5
#define NPY_METH_contiguous_loop 6
#define NPY_METH_unaligned_strided_loop 7
#define NPY_METH_unaligned_contiguous_loop 8
#define NPY_METH_contiguous_indexed_loop 9
#define _NPY_METH_static_data 10


/*
 * The resolve descriptors function, must be able to handle NULL values for
 * all output (but not input) `given_descrs` and fill `loop_descrs`.
 * Return -1 on error or 0 if the operation is not possible without an error
 * set.  (This may still be in flux.)
 * Otherwise must return the "casting safety", for normal functions, this is
 * almost always "safe" (or even "equivalent"?).
 *
 * `resolve_descriptors` is optional if all output DTypes are non-parametric.
 */
typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)(
        /* "method" is currently opaque (necessary e.g. to wrap Python) */
        struct PyArrayMethodObject_tag *method,
        /* DTypes the method was created for */
        PyArray_DTypeMeta *const *dtypes,
        /* Input descriptors (instances).  Outputs may be NULL. */
        PyArray_Descr *const *given_descrs,
        /* Exact loop descriptors to use, must not hold references on error */
        PyArray_Descr **loop_descrs,
        npy_intp *view_offset);


/*
 * Rarely needed, slightly more powerful version of `resolve_descriptors`.
 * See also `PyArrayMethod_ResolveDescriptors` for details on shared arguments.
 *
 * NOTE: This function is private now as it is unclear how and what to pass
 * exactly as additional information to allow dealing with the scalars.
 * See also gh-24915.
 */
typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)(
        struct PyArrayMethodObject_tag *method,
        PyArray_DTypeMeta *const *dtypes,
        /* Unlike above, these can have any DType and we may allow NULL. */
        PyArray_Descr *const *given_descrs,
        /*
         * Input scalars or NULL.  Only ever passed for python scalars.
         * WARNING: In some cases, a loop may be explicitly selected and the
         *     value passed is not available (NULL) or does not have the
         *     expected type.
         */
        PyObject *const *input_scalars,
        PyArray_Descr **loop_descrs,
        npy_intp *view_offset);


/* The inner-loop signature shared by all strided ArrayMethod loops. */
typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,
        char *const *data, const npy_intp *dimensions, const npy_intp *strides,
        NpyAuxData *transferdata);


typedef int (PyArrayMethod_GetLoop)(
        PyArrayMethod_Context *context,
        int aligned, int move_references,
        const npy_intp *strides,
        PyArrayMethod_StridedLoop **out_loop,
        NpyAuxData **out_transferdata,
        NPY_ARRAYMETHOD_FLAGS *flags);

/**
 * Query an ArrayMethod for the initial value for use in reduction.
 *
 * @param context The arraymethod context, mainly to access the descriptors.
 * @param reduction_is_empty Whether the reduction is empty. When it is, the
 *     value returned may differ.  In this case it is a "default" value that
 *     may differ from the "identity" value normally used.  For example:
 *     - `0.0` is the default for `sum([])`.  But `-0.0` is the correct
 *       identity otherwise as it preserves the sign for `sum([-0.0])`.
 *     - We use no identity for object, but return the default of `0` and `1`
 *       for the empty `sum([], dtype=object)` and `prod([], dtype=object)`.
 *       This allows `np.sum(np.array(["a", "b"], dtype=object))` to work.
 *     - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN`
 *       not a good *default* when there are no items.
 * @param initial Pointer to initial data to be filled (if possible)
 *
 * @returns -1, 0, or 1 indicating error, no initial value, and initial being
 *     successfully filled.  Errors must not be given where 0 is correct, NumPy
 *     may call this even when not strictly necessary.
 */
typedef int (PyArrayMethod_GetReductionInitial)(
        PyArrayMethod_Context *context, npy_bool reduction_is_empty,
        void *initial);

/*
 * The following functions are only used by the wrapping array method defined
 * in umath/wrapping_array_method.c
 */


/*
 * The function to convert the given descriptors (passed in to
 * `resolve_descriptors`) and translates them for the wrapped loop.
 * The new descriptors MUST be viewable with the old ones, `NULL` must be
 * supported (for outputs) and should normally be forwarded.
 *
 * The function must clean up on error.
 *
 * NOTE: We currently assume that this translation gives "viewable" results.
 *       I.e. there is no additional casting related to the wrapping process.
 *       In principle that could be supported, but not sure it is useful.
 *       This currently also means that e.g. alignment must apply identically
 *       to the new dtypes.
 *
 * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast`
 *       there is no way to "pass out" the result of this function.  This means
 *       it will be called twice for every ufunc call.
 *       (I am considering including `auxdata` as an "optional" parameter to
 *       `resolve_descriptors`, so that it can be filled there if not NULL.)
 */
typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout,
        PyArray_DTypeMeta *const wrapped_dtypes[],
        PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]);
|
||||
/**
|
||||
* The function to convert the actual loop descriptors (as returned by the
|
||||
* original `resolve_descriptors` function) to the ones the output array
|
||||
* should use.
|
||||
* This function must return "viewable" types, it must not mutate them in any
|
||||
* form that would break the inner-loop logic. Does not need to support NULL.
|
||||
*
|
||||
* The function must clean up on error.
|
||||
*
|
||||
* @param nin Number of input arguments
|
||||
* @param nout Number of output arguments
|
||||
* @param new_dtypes The DTypes of the output (usually probably not needed)
|
||||
* @param given_descrs Original given_descrs to the resolver, necessary to
|
||||
* fetch any information related to the new dtypes from the original.
|
||||
* @param original_descrs The `loop_descrs` returned by the wrapped loop.
|
||||
* @param loop_descrs The output descriptors, compatible to `original_descrs`.
|
||||
*
|
||||
* @returns 0 on success, -1 on failure.
|
||||
*/
|
||||
typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout,
|
||||
PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[],
|
||||
PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* A traverse loop working on a single array. This is similar to the general
|
||||
* strided-loop function. This is designed for loops that need to visit every
|
||||
* element of a single array.
|
||||
*
|
||||
* Currently this is used for array clearing, via the NPY_DT_get_clear_loop
|
||||
* API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.
|
||||
* These are most useful for handling arrays storing embedded references to
|
||||
* python objects or heap-allocated data.
|
||||
*
|
||||
* The `void *traverse_context` is passed in because we may need to pass in
|
||||
* Interpreter state or similar in the future, but we don't want to pass in
|
||||
* a full context (with pointers to dtypes, method, caller which all make
|
||||
* no sense for a traverse function).
|
||||
*
|
||||
* We assume for now that this context can be just passed through in the
|
||||
* the future (for structured dtypes).
|
||||
*
|
||||
*/
|
||||
typedef int (PyArrayMethod_TraverseLoop)(
|
||||
void *traverse_context, const PyArray_Descr *descr, char *data,
|
||||
npy_intp size, npy_intp stride, NpyAuxData *auxdata);
|
||||
|
||||
|
||||
/*
|
||||
* Simplified get_loop function specific to dtype traversal
|
||||
*
|
||||
* It should set the flags needed for the traversal loop and set out_loop to the
|
||||
* loop function, which must be a valid PyArrayMethod_TraverseLoop
|
||||
* pointer. Currently this is used for zero-filling and clearing arrays storing
|
||||
* embedded references.
|
||||
*
|
||||
*/
|
||||
typedef int (PyArrayMethod_GetTraverseLoop)(
|
||||
void *traverse_context, const PyArray_Descr *descr,
|
||||
int aligned, npy_intp fixed_stride,
|
||||
PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata,
|
||||
NPY_ARRAYMETHOD_FLAGS *flags);
|
||||
|
||||
|
||||
/*
|
||||
* Type of the C promoter function, which must be wrapped into a
|
||||
* PyCapsule with name "numpy._ufunc_promoter".
|
||||
*
|
||||
* Note that currently the output dtypes are always NULL unless they are
|
||||
* also part of the signature. This is an implementation detail and could
|
||||
* change in the future. However, in general promoters should not have a
|
||||
* need for output dtypes.
|
||||
* (There are potential use-cases, these are currently unsupported.)
|
||||
*/
|
||||
typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,
|
||||
PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[],
|
||||
PyArray_DTypeMeta *new_op_dtypes[]);
|
||||
|
||||
/*
|
||||
* ****************************
|
||||
* DTYPE API
|
||||
* ****************************
|
||||
*/
|
||||
|
||||
#define NPY_DT_ABSTRACT 1 << 1
|
||||
#define NPY_DT_PARAMETRIC 1 << 2
|
||||
#define NPY_DT_NUMERIC 1 << 3
|
||||
|
||||
/*
|
||||
* These correspond to slots in the NPY_DType_Slots struct and must
|
||||
* be in the same order as the members of that struct. If new slots
|
||||
* get added or old slots get removed NPY_NUM_DTYPE_SLOTS must also
|
||||
* be updated
|
||||
*/
|
||||
|
||||
#define NPY_DT_discover_descr_from_pyobject 1
|
||||
// this slot is considered private because its API hasn't been decided
|
||||
#define _NPY_DT_is_known_scalar_type 2
|
||||
#define NPY_DT_default_descr 3
|
||||
#define NPY_DT_common_dtype 4
|
||||
#define NPY_DT_common_instance 5
|
||||
#define NPY_DT_ensure_canonical 6
|
||||
#define NPY_DT_setitem 7
|
||||
#define NPY_DT_getitem 8
|
||||
#define NPY_DT_get_clear_loop 9
|
||||
#define NPY_DT_get_fill_zero_loop 10
|
||||
#define NPY_DT_finalize_descr 11
|
||||
|
||||
// These PyArray_ArrFunc slots will be deprecated and replaced eventually
|
||||
// getitem and setitem can be defined as a performance optimization;
|
||||
// by default the user dtypes call `legacy_getitem_using_DType` and
|
||||
// `legacy_setitem_using_DType`, respectively. This functionality is
|
||||
// only supported for basic NumPy DTypes.
|
||||
|
||||
|
||||
// used to separate dtype slots from arrfuncs slots
|
||||
// intended only for internal use but defined here for clarity
|
||||
#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)
|
||||
|
||||
// Cast is disabled
|
||||
// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
|
||||
#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
|
||||
// Copyswap is disabled
|
||||
// #define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
// #define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
|
||||
// Casting related slots are disabled. See
|
||||
// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163
|
||||
// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
|
||||
// These are deprecated in NumPy 1.19, so are disabled here.
|
||||
// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
|
||||
|
||||
// TODO: These slots probably still need some thought, and/or a way to "grow"?
|
||||
typedef struct {
|
||||
PyTypeObject *typeobj; /* type of python scalar or NULL */
|
||||
int flags; /* flags, including parametric and abstract */
|
||||
/* NULL terminated cast definitions. Use NULL for the newly created DType */
|
||||
PyArrayMethod_Spec **casts;
|
||||
PyType_Slot *slots;
|
||||
/* Baseclass or NULL (will always subclass `np.dtype`) */
|
||||
PyTypeObject *baseclass;
|
||||
} PyArrayDTypeMeta_Spec;
|
||||
|
||||
|
||||
typedef PyArray_Descr *(PyArrayDTypeMeta_DiscoverDescrFromPyobject)(
|
||||
PyArray_DTypeMeta *cls, PyObject *obj);
|
||||
|
||||
/*
|
||||
* Before making this public, we should decide whether it should pass
|
||||
* the type, or allow looking at the object. A possible use-case:
|
||||
* `np.array(np.array([0]), dtype=np.ndarray)`
|
||||
* Could consider arrays that are not `dtype=ndarray` "scalars".
|
||||
*/
|
||||
typedef int (PyArrayDTypeMeta_IsKnownScalarType)(
|
||||
PyArray_DTypeMeta *cls, PyTypeObject *obj);
|
||||
|
||||
typedef PyArray_Descr *(PyArrayDTypeMeta_DefaultDescriptor)(PyArray_DTypeMeta *cls);
|
||||
typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)(
|
||||
PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
|
||||
|
||||
|
||||
/*
|
||||
* Convenience utility for getting a reference to the DType metaclass associated
|
||||
* with a dtype instance.
|
||||
*/
|
||||
#define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr))
|
||||
|
||||
static inline PyArray_DTypeMeta *
|
||||
NPY_DT_NewRef(PyArray_DTypeMeta *o) {
|
||||
Py_INCREF((PyObject *)o);
|
||||
return o;
|
||||
}
|
||||
|
||||
|
||||
typedef PyArray_Descr *(PyArrayDTypeMeta_CommonInstance)(
|
||||
PyArray_Descr *dtype1, PyArray_Descr *dtype2);
|
||||
typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype);
|
||||
/*
|
||||
* Returns either a new reference to *dtype* or a new descriptor instance
|
||||
* initialized with the same parameters as *dtype*. The caller cannot know
|
||||
* which choice a dtype will make. This function is called just before the
|
||||
* array buffer is created for a newly created array, it is not called for
|
||||
* views and the descriptor returned by this function is attached to the array.
|
||||
*/
|
||||
typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype);
|
||||
|
||||
/*
|
||||
* TODO: These two functions are currently only used for experimental DType
|
||||
* API support. Their relation should be "reversed": NumPy should
|
||||
* always use them internally.
|
||||
* There are open points about "casting safety" though, e.g. setting
|
||||
* elements is currently always unsafe.
|
||||
*/
|
||||
typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *);
|
||||
typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *);
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */
|
||||
@ -0,0 +1,70 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
|
||||
|
||||
#include <Python.h>
|
||||
#include <numpy/npy_math.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Half-precision routines
|
||||
*/
|
||||
|
||||
/* Conversions */
|
||||
float npy_half_to_float(npy_half h);
|
||||
double npy_half_to_double(npy_half h);
|
||||
npy_half npy_float_to_half(float f);
|
||||
npy_half npy_double_to_half(double d);
|
||||
/* Comparisons */
|
||||
int npy_half_eq(npy_half h1, npy_half h2);
|
||||
int npy_half_ne(npy_half h1, npy_half h2);
|
||||
int npy_half_le(npy_half h1, npy_half h2);
|
||||
int npy_half_lt(npy_half h1, npy_half h2);
|
||||
int npy_half_ge(npy_half h1, npy_half h2);
|
||||
int npy_half_gt(npy_half h1, npy_half h2);
|
||||
/* faster *_nonan variants for when you know h1 and h2 are not NaN */
|
||||
int npy_half_eq_nonan(npy_half h1, npy_half h2);
|
||||
int npy_half_lt_nonan(npy_half h1, npy_half h2);
|
||||
int npy_half_le_nonan(npy_half h1, npy_half h2);
|
||||
/* Miscellaneous functions */
|
||||
int npy_half_iszero(npy_half h);
|
||||
int npy_half_isnan(npy_half h);
|
||||
int npy_half_isinf(npy_half h);
|
||||
int npy_half_isfinite(npy_half h);
|
||||
int npy_half_signbit(npy_half h);
|
||||
npy_half npy_half_copysign(npy_half x, npy_half y);
|
||||
npy_half npy_half_spacing(npy_half h);
|
||||
npy_half npy_half_nextafter(npy_half x, npy_half y);
|
||||
npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
|
||||
|
||||
/*
|
||||
* Half-precision constants
|
||||
*/
|
||||
|
||||
#define NPY_HALF_ZERO (0x0000u)
|
||||
#define NPY_HALF_PZERO (0x0000u)
|
||||
#define NPY_HALF_NZERO (0x8000u)
|
||||
#define NPY_HALF_ONE (0x3c00u)
|
||||
#define NPY_HALF_NEGONE (0xbc00u)
|
||||
#define NPY_HALF_PINF (0x7c00u)
|
||||
#define NPY_HALF_NINF (0xfc00u)
|
||||
#define NPY_HALF_NAN (0x7e00u)
|
||||
|
||||
#define NPY_MAX_HALF (0x7bffu)
|
||||
|
||||
/*
|
||||
* Bit-level conversions
|
||||
*/
|
||||
|
||||
npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
|
||||
npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
|
||||
npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
|
||||
npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */
|
||||
@ -0,0 +1,304 @@
|
||||
/*
|
||||
* DON'T INCLUDE THIS DIRECTLY.
|
||||
*/
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <Python.h>
|
||||
#include "ndarraytypes.h"
|
||||
#include "dtype_api.h"
|
||||
|
||||
/* Includes the "function" C-API -- these are all stored in a
|
||||
list of pointers --- one for each file
|
||||
The two lists are concatenated into one in multiarray.
|
||||
|
||||
They are available as import_array()
|
||||
*/
|
||||
|
||||
#include "__multiarray_api.h"
|
||||
|
||||
/*
|
||||
* Include any definitions which are defined differently for 1.x and 2.x
|
||||
* (Symbols only available on 2.x are not there, but rather guarded.)
|
||||
*/
|
||||
#include "npy_2_compat.h"
|
||||
|
||||
/* C-API that requires previous API to be defined */
|
||||
|
||||
#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
|
||||
|
||||
#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
|
||||
#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
|
||||
|
||||
#define PyArray_HasArrayInterfaceType(op, type, context, out) \
|
||||
((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \
|
||||
(((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \
|
||||
(((out)=PyArray_FromArrayAttr(op, type, context)) != \
|
||||
Py_NotImplemented))
|
||||
|
||||
#define PyArray_HasArrayInterface(op, out) \
|
||||
PyArray_HasArrayInterfaceType(op, NULL, NULL, out)
|
||||
|
||||
#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \
|
||||
(PyArray_NDIM((PyArrayObject *)op) == 0))
|
||||
|
||||
#define PyArray_IsScalar(obj, cls) \
|
||||
(PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))
|
||||
|
||||
#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
|
||||
PyArray_IsZeroDim(m))
|
||||
#define PyArray_IsPythonNumber(obj) \
|
||||
(PyFloat_Check(obj) || PyComplex_Check(obj) || \
|
||||
PyLong_Check(obj) || PyBool_Check(obj))
|
||||
#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \
|
||||
|| PyArray_IsScalar((obj), Integer))
|
||||
#define PyArray_IsPythonScalar(obj) \
|
||||
(PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \
|
||||
PyUnicode_Check(obj))
|
||||
|
||||
#define PyArray_IsAnyScalar(obj) \
|
||||
(PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
|
||||
|
||||
#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \
|
||||
PyArray_CheckScalar(obj))
|
||||
|
||||
|
||||
#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \
|
||||
Py_INCREF(m), (m) : \
|
||||
(PyArrayObject *)(PyArray_Copy(m)))
|
||||
|
||||
#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \
|
||||
PyArray_CompareLists(PyArray_DIMS(a1), \
|
||||
PyArray_DIMS(a2), \
|
||||
PyArray_NDIM(a1)))
|
||||
|
||||
#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
|
||||
#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
|
||||
#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)
|
||||
|
||||
#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \
|
||||
NULL)
|
||||
|
||||
#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
|
||||
PyArray_DescrFromType(type), 0, 0, 0, NULL)
|
||||
|
||||
#define PyArray_FROM_OTF(m, type, flags) \
|
||||
PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
|
||||
(((flags) & NPY_ARRAY_ENSURECOPY) ? \
|
||||
((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)
|
||||
|
||||
#define PyArray_FROMANY(m, type, min, max, flags) \
|
||||
PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \
|
||||
(((flags) & NPY_ARRAY_ENSURECOPY) ? \
|
||||
(flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)
|
||||
|
||||
#define PyArray_ZEROS(m, dims, type, is_f_order) \
|
||||
PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)
|
||||
|
||||
#define PyArray_EMPTY(m, dims, type, is_f_order) \
|
||||
PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)
|
||||
|
||||
#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \
|
||||
PyArray_NBYTES(obj))
|
||||
|
||||
#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
|
||||
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||
max_depth, NPY_ARRAY_DEFAULT, NULL)
|
||||
|
||||
#define PyArray_EquivArrTypes(a1, a2) \
|
||||
PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))
|
||||
|
||||
#define PyArray_EquivByteorders(b1, b2) \
|
||||
(((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))
|
||||
|
||||
#define PyArray_SimpleNew(nd, dims, typenum) \
|
||||
PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)
|
||||
|
||||
#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
|
||||
PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \
|
||||
data, 0, NPY_ARRAY_CARRAY, NULL)
|
||||
|
||||
#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
|
||||
PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \
|
||||
NULL, NULL, 0, NULL)
|
||||
|
||||
#define PyArray_ToScalar(data, arr) \
|
||||
PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)
|
||||
|
||||
|
||||
/* These might be faster without the dereferencing of obj
|
||||
going on inside -- of course an optimizing compiler should
|
||||
inline the constants inside a for loop making it a moot point
|
||||
*/
|
||||
|
||||
#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
|
||||
(i)*PyArray_STRIDES(obj)[0]))
|
||||
|
||||
#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
|
||||
(i)*PyArray_STRIDES(obj)[0] + \
|
||||
(j)*PyArray_STRIDES(obj)[1]))
|
||||
|
||||
#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
|
||||
(i)*PyArray_STRIDES(obj)[0] + \
|
||||
(j)*PyArray_STRIDES(obj)[1] + \
|
||||
(k)*PyArray_STRIDES(obj)[2]))
|
||||
|
||||
#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
|
||||
(i)*PyArray_STRIDES(obj)[0] + \
|
||||
(j)*PyArray_STRIDES(obj)[1] + \
|
||||
(k)*PyArray_STRIDES(obj)[2] + \
|
||||
(l)*PyArray_STRIDES(obj)[3]))
|
||||
|
||||
static inline void
|
||||
PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
|
||||
{
|
||||
PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
|
||||
if (fa && fa->base) {
|
||||
if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) {
|
||||
PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
|
||||
Py_DECREF(fa->base);
|
||||
fa->base = NULL;
|
||||
PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#define PyArray_DESCR_REPLACE(descr) do { \
|
||||
PyArray_Descr *_new_; \
|
||||
_new_ = PyArray_DescrNew(descr); \
|
||||
Py_XDECREF(descr); \
|
||||
descr = _new_; \
|
||||
} while(0)
|
||||
|
||||
/* Copy should always return contiguous array */
|
||||
#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)
|
||||
|
||||
#define PyArray_FromObject(op, type, min_depth, max_depth) \
|
||||
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||
max_depth, NPY_ARRAY_BEHAVED | \
|
||||
NPY_ARRAY_ENSUREARRAY, NULL)
|
||||
|
||||
#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
|
||||
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||
max_depth, NPY_ARRAY_DEFAULT | \
|
||||
NPY_ARRAY_ENSUREARRAY, NULL)
|
||||
|
||||
#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
|
||||
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||
max_depth, NPY_ARRAY_ENSURECOPY | \
|
||||
NPY_ARRAY_DEFAULT | \
|
||||
NPY_ARRAY_ENSUREARRAY, NULL)
|
||||
|
||||
#define PyArray_Cast(mp, type_num) \
|
||||
PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)
|
||||
|
||||
#define PyArray_Take(ap, items, axis) \
|
||||
PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)
|
||||
|
||||
#define PyArray_Put(ap, items, values) \
|
||||
PyArray_PutTo(ap, items, values, NPY_RAISE)
|
||||
|
||||
|
||||
/*
|
||||
Check to see if this key in the dictionary is the "title"
|
||||
entry of the tuple (i.e. a duplicate dictionary entry in the fields
|
||||
dict).
|
||||
*/
|
||||
|
||||
static inline int
|
||||
NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
|
||||
{
|
||||
PyObject *title;
|
||||
if (PyTuple_Size(value) != 3) {
|
||||
return 0;
|
||||
}
|
||||
title = PyTuple_GetItem(value, 2);
|
||||
if (key == title) {
|
||||
return 1;
|
||||
}
|
||||
#ifdef PYPY_VERSION
|
||||
/*
|
||||
* On PyPy, dictionary keys do not always preserve object identity.
|
||||
* Fall back to comparison by value.
|
||||
*/
|
||||
if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
|
||||
return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
|
||||
#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
|
||||
|
||||
#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
|
||||
#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
|
||||
|
||||
|
||||
/*
|
||||
* These macros and functions unfortunately require runtime version checks
|
||||
* that are only defined in `npy_2_compat.h`. For that reasons they cannot be
|
||||
* part of `ndarraytypes.h` which tries to be self contained.
|
||||
*/
|
||||
|
||||
static inline npy_intp
|
||||
PyArray_ITEMSIZE(const PyArrayObject *arr)
|
||||
{
|
||||
return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr);
|
||||
}
|
||||
|
||||
#define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL)
|
||||
#define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL)
|
||||
#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
|
||||
!PyDataType_HASFIELDS(dtype))
|
||||
|
||||
#define PyDataType_FLAGCHK(dtype, flag) \
|
||||
((PyDataType_FLAGS(dtype) & (flag)) == (flag))
|
||||
|
||||
#define PyDataType_REFCHK(dtype) \
|
||||
PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)
|
||||
|
||||
#define NPY_BEGIN_THREADS_DESCR(dtype) \
|
||||
do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
|
||||
NPY_BEGIN_THREADS;} while (0);
|
||||
|
||||
#define NPY_END_THREADS_DESCR(dtype) \
|
||||
do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
|
||||
NPY_END_THREADS; } while (0);
|
||||
|
||||
#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
|
||||
/* The internal copy of this is now defined in `dtypemeta.h` */
|
||||
/*
|
||||
* `PyArray_Scalar` is the same as this function but converts will convert
|
||||
* most NumPy types to Python scalars.
|
||||
*/
|
||||
static inline PyObject *
|
||||
PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
|
||||
{
|
||||
return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem(
|
||||
(void *)itemptr, (PyArrayObject *)arr);
|
||||
}
|
||||
|
||||
/*
|
||||
* SETITEM should only be used if it is known that the value is a scalar
|
||||
* and of a type understood by the arrays dtype.
|
||||
* Use `PyArray_Pack` if the value may be of a different dtype.
|
||||
*/
|
||||
static inline int
|
||||
PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
|
||||
{
|
||||
return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr);
|
||||
}
|
||||
#endif /* not internal */
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */
|
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,249 @@
|
||||
/*
|
||||
* This header file defines relevant features which:
|
||||
* - Require runtime inspection depending on the NumPy version.
|
||||
* - May be needed when compiling with an older version of NumPy to allow
|
||||
* a smooth transition.
|
||||
*
|
||||
* As such, it is shipped with NumPy 2.0, but designed to be vendored in full
|
||||
* or parts by downstream projects.
|
||||
*
|
||||
* It must be included after any other includes. `import_array()` must have
|
||||
* been called in the scope or version dependency will misbehave, even when
|
||||
* only `PyUFunc_` API is used.
|
||||
*
|
||||
* If required complicated defs (with inline functions) should be written as:
|
||||
*
|
||||
* #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
|
||||
* Simple definition when NumPy 2.0 API is guaranteed.
|
||||
* #else
|
||||
* static inline definition of a 1.x compatibility shim
|
||||
* #if NPY_ABI_VERSION < 0x02000000
|
||||
* Make 1.x compatibility shim the public API (1.x only branch)
|
||||
* #else
|
||||
* Runtime dispatched version (1.x or 2.x)
|
||||
* #endif
|
||||
* #endif
|
||||
*
|
||||
* An internal build always passes NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
|
||||
*/
|
||||
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_
|
||||
|
||||
/*
|
||||
* New macros for accessing real and complex part of a complex number can be
|
||||
* found in "npy_2_complexcompat.h".
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* This header is meant to be included by downstream directly for 1.x compat.
|
||||
* In that case we need to ensure that users first included the full headers
|
||||
* and not just `ndarraytypes.h`.
|
||||
*/
|
||||
|
||||
#ifndef NPY_FEATURE_VERSION
|
||||
#error "The NumPy 2 compat header requires `import_array()` for which " \
|
||||
"the `ndarraytypes.h` header include is not sufficient. Please " \
|
||||
"include it after `numpy/ndarrayobject.h` or similar.\n" \
|
||||
"To simplify inclusion, you may use `PyArray_ImportNumPy()` " \
|
||||
"which is defined in the compat header and is lightweight (can be)."
|
||||
#endif
|
||||
|
||||
#if NPY_ABI_VERSION < 0x02000000
|
||||
/*
|
||||
* Define 2.0 feature version as it is needed below to decide whether we
|
||||
* compile for both 1.x and 2.x (defining it guarantees 1.x only).
|
||||
*/
|
||||
#define NPY_2_0_API_VERSION 0x00000012
|
||||
/*
|
||||
* If we are compiling with NumPy 1.x, PyArray_RUNTIME_VERSION so we
|
||||
* pretend the `PyArray_RUNTIME_VERSION` is `NPY_FEATURE_VERSION`.
|
||||
* This allows downstream to use `PyArray_RUNTIME_VERSION` if they need to.
|
||||
*/
|
||||
#define PyArray_RUNTIME_VERSION NPY_FEATURE_VERSION
|
||||
/* Compiling on NumPy 1.x where these are the same: */
|
||||
#define PyArray_DescrProto PyArray_Descr
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Define a better way to call `_import_array()` to simplify backporting as
|
||||
* we now require imports more often (necessary to make ABI flexible).
|
||||
*/
|
||||
#ifdef import_array1
|
||||
|
||||
static inline int
|
||||
PyArray_ImportNumPyAPI(void)
|
||||
{
|
||||
if (NPY_UNLIKELY(PyArray_API == NULL)) {
|
||||
import_array1(-1);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* import_array1 */
|
||||
|
||||
|
||||
/*
|
||||
* NPY_DEFAULT_INT
|
||||
*
|
||||
* The default integer has changed, `NPY_DEFAULT_INT` is available at runtime
|
||||
* for use as type number, e.g. `PyArray_DescrFromType(NPY_DEFAULT_INT)`.
|
||||
*
|
||||
* NPY_RAVEL_AXIS
|
||||
*
|
||||
* This was introduced in NumPy 2.0 to allow indicating that an axis should be
|
||||
* raveled in an operation. Before NumPy 2.0, NPY_MAXDIMS was used for this purpose.
|
||||
*
|
||||
* NPY_MAXDIMS
|
||||
*
|
||||
* A constant indicating the maximum number dimensions allowed when creating
|
||||
* an ndarray.
|
||||
*
|
||||
* NPY_NTYPES_LEGACY
|
||||
*
|
||||
* The number of built-in NumPy dtypes.
|
||||
*/
|
||||
#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
|
||||
#define NPY_DEFAULT_INT NPY_INTP
|
||||
#define NPY_RAVEL_AXIS NPY_MIN_INT
|
||||
#define NPY_MAXARGS 64
|
||||
|
||||
#elif NPY_ABI_VERSION < 0x02000000
|
||||
#define NPY_DEFAULT_INT NPY_LONG
|
||||
#define NPY_RAVEL_AXIS 32
|
||||
#define NPY_MAXARGS 32
|
||||
|
||||
/* Aliases of 2.x names to 1.x only equivalent names */
|
||||
#define NPY_NTYPES NPY_NTYPES_LEGACY
|
||||
#define PyArray_DescrProto PyArray_Descr
|
||||
#define _PyArray_LegacyDescr PyArray_Descr
|
||||
/* NumPy 2 definition always works, but add it for 1.x only */
|
||||
#define PyDataType_ISLEGACY(dtype) (1)
|
||||
#else
|
||||
#define NPY_DEFAULT_INT \
|
||||
(PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG)
|
||||
#define NPY_RAVEL_AXIS \
|
||||
(PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32)
|
||||
#define NPY_MAXARGS \
|
||||
(PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 64 : 32)
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Access inline functions for descriptor fields. Except for the first
|
||||
* few fields, these needed to be moved (elsize, alignment) for
|
||||
* additional space. Or they are descriptor specific and are not generally
|
||||
* available anymore (metadata, c_metadata, subarray, names, fields).
|
||||
*
|
||||
* Most of these are defined via the `DESCR_ACCESSOR` macro helper.
|
||||
*/
|
||||
#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION || NPY_ABI_VERSION < 0x02000000
|
||||
/* Compiling for 1.x or 2.x only, direct field access is OK: */
|
||||
|
||||
static inline void
|
||||
PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size)
|
||||
{
|
||||
dtype->elsize = size;
|
||||
}
|
||||
|
||||
/* Return the descriptor flags widened to npy_uint64.  On 1.x the flags
 * field is a (possibly signed) char, so an unsigned cast is required to
 * avoid sign-extending values with the high bit set. */
static inline npy_uint64
PyDataType_FLAGS(const PyArray_Descr *dtype)
{
#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
    return dtype->flags;
#else
    return (unsigned char)dtype->flags;  /* Need unsigned cast on 1.x */
#endif
}
|
||||
|
||||
/* Define PyDataType_<FIELD>() returning `field` from the legacy
 * descriptor struct.  Fields marked `legacy_only` read as 0/NULL for
 * non-legacy dtypes (PyDataType_ISLEGACY is false). */
#define DESCR_ACCESSOR(FIELD, field, type, legacy_only)      \
    static inline type                                       \
    PyDataType_##FIELD(const PyArray_Descr *dtype) {         \
        if (legacy_only && !PyDataType_ISLEGACY(dtype)) {    \
            return (type)0;                                  \
        }                                                    \
        return ((_PyArray_LegacyDescr *)dtype)->field;       \
    }
|
||||
#else /* compiling for both 1.x and 2.x */
|
||||
|
||||
/* Set the element size when compiling for both 1.x and 2.x ABIs:
 * the field's location and width differ between the struct layouts,
 * so dispatch on the NumPy version detected at runtime. */
static inline void
PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size)
{
    if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {
        ((_PyArray_DescrNumPy2 *)dtype)->elsize = size;
    }
    else {
        /* 1.x stores elsize as a plain int */
        ((PyArray_DescrProto *)dtype)->elsize = (int)size;
    }
}
|
||||
|
||||
/* Read descriptor flags when compiling for both 1.x and 2.x ABIs;
 * dispatch on the runtime NumPy version since the struct layout (and
 * the flags field's signedness) differ between the two. */
static inline npy_uint64
PyDataType_FLAGS(const PyArray_Descr *dtype)
{
    if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {
        return ((_PyArray_DescrNumPy2 *)dtype)->flags;
    }
    else {
        /* unsigned cast prevents sign extension of the 1.x char field */
        return (unsigned char)((PyArray_DescrProto *)dtype)->flags;
    }
}
|
||||
|
||||
/* Cast to LegacyDescr always fine but needed when `legacy_only` */
/* Dual-ABI variant of the accessor generator: after the legacy check,
 * pick the struct layout matching the runtime NumPy version. */
#define DESCR_ACCESSOR(FIELD, field, type, legacy_only)          \
    static inline type                                           \
    PyDataType_##FIELD(const PyArray_Descr *dtype) {             \
        if (legacy_only && !PyDataType_ISLEGACY(dtype)) {        \
            return (type)0;                                      \
        }                                                        \
        if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {    \
            return ((_PyArray_LegacyDescr *)dtype)->field;       \
        }                                                        \
        else {                                                   \
            return ((PyArray_DescrProto *)dtype)->field;         \
        }                                                        \
    }
|
||||
#endif
|
||||
|
||||
DESCR_ACCESSOR(ELSIZE, elsize, npy_intp, 0)
|
||||
DESCR_ACCESSOR(ALIGNMENT, alignment, npy_intp, 0)
|
||||
DESCR_ACCESSOR(METADATA, metadata, PyObject *, 1)
|
||||
DESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1)
|
||||
DESCR_ACCESSOR(NAMES, names, PyObject *, 1)
|
||||
DESCR_ACCESSOR(FIELDS, fields, PyObject *, 1)
|
||||
DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1)
|
||||
|
||||
#undef DESCR_ACCESSOR
|
||||
|
||||
|
||||
#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
|
||||
#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
|
||||
/* 2.x-only build: the ArrFuncs table is reached via the accessor
 * exported by NumPy rather than a struct field. */
static inline PyArray_ArrFuncs *
PyDataType_GetArrFuncs(const PyArray_Descr *descr)
{
    return _PyDataType_GetArrFuncs(descr);
}
|
||||
#elif NPY_ABI_VERSION < 0x02000000
|
||||
/* 1.x-only build: the ArrFuncs table is stored directly on the
 * descriptor as the `f` field. */
static inline PyArray_ArrFuncs *
PyDataType_GetArrFuncs(const PyArray_Descr *descr)
{
    return descr->f;
}
|
||||
#else
|
||||
/* Dual-ABI build: choose the ArrFuncs access path from the NumPy
 * version detected at runtime. */
static inline PyArray_ArrFuncs *
PyDataType_GetArrFuncs(const PyArray_Descr *descr)
{
    if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {
        return _PyDataType_GetArrFuncs(descr);
    }
    else {
        return ((PyArray_DescrProto *)descr)->f;
    }
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* not internal build */
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ */
|
||||
@ -0,0 +1,28 @@
|
||||
/* This header is designed to be copy-pasted into downstream packages, since it provides
|
||||
a compatibility layer between the old C struct complex types and the new native C99
|
||||
complex types. The new macros are in numpy/npy_math.h, which is why it is included here. */
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_
|
||||
|
||||
#include <numpy/npy_math.h>
|
||||
|
||||
/*
 * Fallback definitions: per the file header, NumPy 2.x's npy_math.h
 * already provides these setter macros for the native complex types.
 * When a macro is not already defined (older NumPy with struct-based
 * complex types), write the struct fields directly.
 * F suffix = float, no suffix = double, L suffix = long double.
 */
#ifndef NPY_CSETREALF
#define NPY_CSETREALF(c, r) (c)->real = (r)
#endif
#ifndef NPY_CSETIMAGF
#define NPY_CSETIMAGF(c, i) (c)->imag = (i)
#endif
#ifndef NPY_CSETREAL
#define NPY_CSETREAL(c, r)  (c)->real = (r)
#endif
#ifndef NPY_CSETIMAG
#define NPY_CSETIMAG(c, i)  (c)->imag = (i)
#endif
#ifndef NPY_CSETREALL
#define NPY_CSETREALL(c, r) (c)->real = (r)
#endif
#ifndef NPY_CSETIMAGL
#define NPY_CSETIMAGL(c, i) (c)->imag = (i)
#endif
|
||||
|
||||
#endif
|
||||
@ -0,0 +1,374 @@
|
||||
/*
|
||||
* This is a convenience header file providing compatibility utilities
|
||||
* for supporting different minor versions of Python 3.
|
||||
* It was originally used to support the transition from Python 2,
|
||||
* hence the "3k" naming.
|
||||
*
|
||||
* If you want to use this for your own projects, it's recommended to make a
|
||||
* copy of it. We don't provide backwards compatibility guarantees.
|
||||
*/
|
||||
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
|
||||
|
||||
#include <Python.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "npy_common.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Python 3.13 removed _PyLong_AsInt */
|
||||
static inline int
|
||||
Npy__PyLong_AsInt(PyObject *obj)
|
||||
{
|
||||
int overflow;
|
||||
long result = PyLong_AsLongAndOverflow(obj, &overflow);
|
||||
|
||||
/* INT_MAX and INT_MIN are defined in Python.h */
|
||||
if (overflow || result > INT_MAX || result < INT_MIN) {
|
||||
/* XXX: could be cute and give a different
|
||||
message for overflow == -1 */
|
||||
PyErr_SetString(PyExc_OverflowError,
|
||||
"Python int too large to convert to C int");
|
||||
return -1;
|
||||
}
|
||||
return (int)result;
|
||||
}
|
||||
|
||||
#if defined _MSC_VER && _MSC_VER >= 1900
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
/*
|
||||
* Macros to protect CRT calls against instant termination when passed an
|
||||
* invalid parameter (https://bugs.python.org/issue23524).
|
||||
*/
|
||||
extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
|
||||
#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \
|
||||
_set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
|
||||
#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); }
|
||||
|
||||
#else
|
||||
|
||||
#define NPY_BEGIN_SUPPRESS_IPH
|
||||
#define NPY_END_SUPPRESS_IPH
|
||||
|
||||
#endif /* _MSC_VER >= 1900 */
|
||||
|
||||
/*
|
||||
* PyFile_* compatibility
|
||||
*/
|
||||
|
||||
/*
 * Get a FILE* handle to the file represented by the Python object.
 *
 * The descriptor is dup'd so the returned FILE* can later be fclose'd
 * (by npy_PyFile_DupClose2) without closing the Python-side file.  On
 * success *orig_pos receives the raw handle's position so it can be
 * restored on close.  Returns NULL with a Python error set on failure.
 */
static inline FILE*
npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
{
    int fd, fd2, unbuf;
    Py_ssize_t fd2_tmp;
    PyObject *ret, *os, *io, *io_raw;
    npy_off_t pos;
    FILE *handle;

    /* Flush first to ensure things end up in the file in the correct order */
    ret = PyObject_CallMethod(file, "flush", "");
    if (ret == NULL) {
        return NULL;
    }
    Py_DECREF(ret);
    fd = PyObject_AsFileDescriptor(file);
    if (fd == -1) {
        return NULL;
    }

    /*
     * The handle needs to be dup'd because we have to call fclose
     * at the end
     */
    os = PyImport_ImportModule("os");
    if (os == NULL) {
        return NULL;
    }
    ret = PyObject_CallMethod(os, "dup", "i", fd);
    Py_DECREF(os);
    if (ret == NULL) {
        return NULL;
    }
    fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError);
    Py_DECREF(ret);
    /* -1 is ambiguous: only an error if the exception is actually set */
    if (fd2_tmp == -1 && PyErr_Occurred()) {
        return NULL;
    }
    if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) {
        PyErr_SetString(PyExc_IOError,
                        "Getting an 'int' from os.dup() failed");
        return NULL;
    }
    fd2 = (int)fd2_tmp;

    /* Convert to FILE* handle */
#ifdef _WIN32
    /* Suppress the CRT invalid-parameter handler around _fdopen */
    NPY_BEGIN_SUPPRESS_IPH
    handle = _fdopen(fd2, mode);
    NPY_END_SUPPRESS_IPH
#else
    handle = fdopen(fd2, mode);
#endif
    if (handle == NULL) {
        PyErr_SetString(PyExc_IOError,
                        "Getting a FILE* from a Python file object via "
                        "_fdopen failed. If you built NumPy, you probably "
                        "linked with the wrong debug/release runtime");
        return NULL;
    }

    /* Record the original raw file handle position */
    *orig_pos = npy_ftell(handle);
    if (*orig_pos == -1) {
        /* The io module is needed to determine if buffering is used */
        io = PyImport_ImportModule("io");
        if (io == NULL) {
            fclose(handle);
            return NULL;
        }
        /* File object instances of RawIOBase are unbuffered */
        io_raw = PyObject_GetAttrString(io, "RawIOBase");
        Py_DECREF(io);
        if (io_raw == NULL) {
            fclose(handle);
            return NULL;
        }
        unbuf = PyObject_IsInstance(file, io_raw);
        Py_DECREF(io_raw);
        if (unbuf == 1) {
            /* Succeed if the IO is unbuffered: no position to restore */
            return handle;
        }
        else {
            PyErr_SetString(PyExc_IOError, "obtaining file position failed");
            fclose(handle);
            return NULL;
        }
    }

    /* Seek raw handle to the Python-side position */
    ret = PyObject_CallMethod(file, "tell", "");
    if (ret == NULL) {
        fclose(handle);
        return NULL;
    }
    pos = PyLong_AsLongLong(ret);
    Py_DECREF(ret);
    if (PyErr_Occurred()) {
        fclose(handle);
        return NULL;
    }
    if (npy_fseek(handle, pos, SEEK_SET) == -1) {
        PyErr_SetString(PyExc_IOError, "seeking file failed");
        fclose(handle);
        return NULL;
    }
    return handle;
}
|
||||
|
||||
/*
 * Close the dup-ed file handle, and seek the Python one to the current position
 *
 * Counterpart of npy_PyFile_Dup2: `orig_pos` is the value recorded by
 * that call.  Returns 0 on success, -1 with a Python error set on
 * failure.  The FILE* is closed in all cases.
 */
static inline int
npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
{
    int fd, unbuf;
    PyObject *ret, *io, *io_raw;
    npy_off_t position;

    /* Capture where C-level I/O left off before the handle goes away */
    position = npy_ftell(handle);

    /* Close the FILE* handle */
    fclose(handle);

    /*
     * Restore original file handle position, in order to not confuse
     * Python-side data structures
     */
    fd = PyObject_AsFileDescriptor(file);
    if (fd == -1) {
        return -1;
    }

    if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {

        /* The io module is needed to determine if buffering is used */
        io = PyImport_ImportModule("io");
        if (io == NULL) {
            return -1;
        }
        /* File object instances of RawIOBase are unbuffered */
        io_raw = PyObject_GetAttrString(io, "RawIOBase");
        Py_DECREF(io);
        if (io_raw == NULL) {
            return -1;
        }
        unbuf = PyObject_IsInstance(file, io_raw);
        Py_DECREF(io_raw);
        if (unbuf == 1) {
            /* Succeed if the IO is unbuffered: nothing to restore */
            return 0;
        }
        else {
            PyErr_SetString(PyExc_IOError, "seeking file failed");
            return -1;
        }
    }

    if (position == -1) {
        PyErr_SetString(PyExc_IOError, "obtaining file position failed");
        return -1;
    }

    /* Seek Python-side handle to the FILE* handle position */
    ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);
    if (ret == NULL) {
        return -1;
    }
    Py_DECREF(ret);
    return 0;
}
|
||||
|
||||
/*
 * Open *filename* with the builtin open() in the given *mode*.
 * Returns the new file object, or NULL with a Python error set.
 */
static inline PyObject*
npy_PyFile_OpenFile(PyObject *filename, const char *mode)
{
    /* Borrowed reference to the builtin open() */
    PyObject *builtin_open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
    if (builtin_open == NULL) {
        return NULL;
    }
    return PyObject_CallFunction(builtin_open, "Os", filename, mode);
}
|
||||
|
||||
static inline int
|
||||
npy_PyFile_CloseFile(PyObject *file)
|
||||
{
|
||||
PyObject *ret;
|
||||
|
||||
ret = PyObject_CallMethod(file, "close", NULL);
|
||||
if (ret == NULL) {
|
||||
return -1;
|
||||
}
|
||||
Py_DECREF(ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This is a copy of _PyErr_ChainExceptions, which
 * is no longer exported from Python 3.12.
 *
 * Sets (exc, val, tb) as the current exception; if another exception is
 * already set, the given one becomes its __context__.  Steals the
 * references to exc, val and tb; no-op when exc is NULL.
 */
static inline void
npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
{
    if (exc == NULL)
        return;

    if (PyErr_Occurred()) {
        PyObject *exc2, *val2, *tb2;
        /* Take ownership of the already-set exception */
        PyErr_Fetch(&exc2, &val2, &tb2);
        PyErr_NormalizeException(&exc, &val, &tb);
        if (tb != NULL) {
            PyException_SetTraceback(val, tb);
            Py_DECREF(tb);
        }
        Py_DECREF(exc);
        PyErr_NormalizeException(&exc2, &val2, &tb2);
        /* The already-set exception wins; ours becomes its context */
        PyException_SetContext(val2, val);
        PyErr_Restore(exc2, val2, tb2);
    }
    else {
        PyErr_Restore(exc, val, tb);
    }
}
|
||||
|
||||
/* This is a copy of _PyErr_ChainExceptions, with:
 * __cause__ used instead of __context__
 *
 * Equivalent to an explicit "raise ... from ...": the given exception
 * is attached as the cause of the one already set.  Steals the
 * references to exc, val and tb; no-op when exc is NULL.
 */
static inline void
npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
{
    if (exc == NULL)
        return;

    if (PyErr_Occurred()) {
        PyObject *exc2, *val2, *tb2;
        /* Take ownership of the already-set exception */
        PyErr_Fetch(&exc2, &val2, &tb2);
        PyErr_NormalizeException(&exc, &val, &tb);
        if (tb != NULL) {
            PyException_SetTraceback(val, tb);
            Py_DECREF(tb);
        }
        Py_DECREF(exc);
        PyErr_NormalizeException(&exc2, &val2, &tb2);
        /* The already-set exception wins; ours becomes its cause */
        PyException_SetCause(val2, val);
        PyErr_Restore(exc2, val2, tb2);
    }
    else {
        PyErr_Restore(exc, val, tb);
    }
}
|
||||
|
||||
/*
|
||||
* PyCObject functions adapted to PyCapsules.
|
||||
*
|
||||
* The main job here is to get rid of the improved error handling
|
||||
* of PyCapsules. It's a shame...
|
||||
*/
|
||||
/* PyCObject-style constructor on top of PyCapsule_New: failure is
 * reported only by a NULL return, with the error indicator cleared. */
static inline PyObject *
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
{
    PyObject *capsule = PyCapsule_New(ptr, NULL, dtor);
    if (capsule != NULL) {
        return capsule;
    }
    PyErr_Clear();
    return NULL;
}
|
||||
|
||||
/* Like NpyCapsule_FromVoidPtr, additionally storing *context* as the
 * capsule context ("desc").  NULL return, error cleared, on failure. */
static inline PyObject *
NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
{
    PyObject *capsule = NpyCapsule_FromVoidPtr(ptr, dtor);
    if (capsule == NULL) {
        return NULL;
    }
    if (PyCapsule_SetContext(capsule, context) != 0) {
        PyErr_Clear();
        Py_DECREF(capsule);
        return NULL;
    }
    return capsule;
}
|
||||
|
||||
static inline void *
|
||||
NpyCapsule_AsVoidPtr(PyObject *obj)
|
||||
{
|
||||
void *ret = PyCapsule_GetPointer(obj, NULL);
|
||||
if (ret == NULL) {
|
||||
PyErr_Clear();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* PyCObject-style "desc" access: returns the capsule's context
 * pointer as stored by NpyCapsule_FromVoidPtrAndDesc. */
static inline void *
NpyCapsule_GetDesc(PyObject *obj)
{
    return PyCapsule_GetContext(obj);
}
|
||||
|
||||
/* Nonzero when *ptr* is exactly a PyCapsule instance. */
static inline int
NpyCapsule_Check(PyObject *ptr)
{
    return PyCapsule_CheckExact(ptr);
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */
|
||||
@ -0,0 +1,977 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
|
||||
|
||||
/* need Python.h for npy_intp, npy_uintp */
|
||||
#include <Python.h>
|
||||
|
||||
/* numpyconfig.h is auto-generated */
|
||||
#include "numpyconfig.h"
|
||||
#ifdef HAVE_NPY_CONFIG_H
|
||||
#include <npy_config.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* using static inline modifiers when defining npy_math functions
|
||||
* allows the compiler to make optimizations when possible
|
||||
*/
|
||||
#ifndef NPY_INLINE_MATH
|
||||
#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
|
||||
#define NPY_INLINE_MATH 1
|
||||
#else
|
||||
#define NPY_INLINE_MATH 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* gcc does not unroll even with -O3
|
||||
* use with care, unrolling on modern cpus rarely speeds things up
|
||||
*/
|
||||
#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
|
||||
#define NPY_GCC_UNROLL_LOOPS \
|
||||
__attribute__((optimize("unroll-loops")))
|
||||
#else
|
||||
#define NPY_GCC_UNROLL_LOOPS
|
||||
#endif
|
||||
|
||||
/* highest gcc optimization level, enabled autovectorizer */
|
||||
#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3
|
||||
#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))
|
||||
#else
|
||||
#define NPY_GCC_OPT_3
|
||||
#endif
|
||||
|
||||
/*
|
||||
* mark an argument (starting from 1) that must not be NULL and is not checked
|
||||
* DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check
|
||||
*/
|
||||
#ifdef HAVE_ATTRIBUTE_NONNULL
|
||||
#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n)))
|
||||
#else
|
||||
#define NPY_GCC_NONNULL(n)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* give a hint to the compiler which branch is more likely or unlikely
|
||||
* to occur, e.g. rare error cases:
|
||||
*
|
||||
* if (NPY_UNLIKELY(failure == 0))
|
||||
* return NULL;
|
||||
*
|
||||
* the double !! is to cast the expression (e.g. NULL) to a boolean required by
|
||||
* the intrinsic
|
||||
*/
|
||||
#ifdef HAVE___BUILTIN_EXPECT
|
||||
#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)
|
||||
#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)
|
||||
#else
|
||||
#define NPY_LIKELY(x) (x)
|
||||
#define NPY_UNLIKELY(x) (x)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE___BUILTIN_PREFETCH
|
||||
/* unlike _mm_prefetch also works on non-x86 */
|
||||
#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
|
||||
#else
|
||||
#ifdef NPY_HAVE_SSE
|
||||
/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
|
||||
#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
|
||||
(loc == 1 ? _MM_HINT_T2 : \
|
||||
(loc == 2 ? _MM_HINT_T1 : \
|
||||
(loc == 3 ? _MM_HINT_T0 : -1))))
|
||||
#else
|
||||
#define NPY_PREFETCH(x, rw,loc)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */
|
||||
#if defined(_MSC_VER) && !defined(__clang__)
|
||||
#define NPY_INLINE __inline
|
||||
/* clang included here to handle clang-cl on Windows */
|
||||
#elif defined(__GNUC__) || defined(__clang__)
|
||||
#if defined(__STRICT_ANSI__)
|
||||
#define NPY_INLINE __inline__
|
||||
#else
|
||||
#define NPY_INLINE inline
|
||||
#endif
|
||||
#else
|
||||
#define NPY_INLINE
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#define NPY_FINLINE static __forceinline
|
||||
#elif defined(__GNUC__)
|
||||
#define NPY_FINLINE static inline __attribute__((always_inline))
|
||||
#else
|
||||
#define NPY_FINLINE static
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#define NPY_NOINLINE static __declspec(noinline)
|
||||
#elif defined(__GNUC__) || defined(__clang__)
|
||||
#define NPY_NOINLINE static __attribute__((noinline))
|
||||
#else
|
||||
#define NPY_NOINLINE static
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
#define NPY_TLS thread_local
|
||||
#elif defined(HAVE_THREAD_LOCAL)
|
||||
#define NPY_TLS thread_local
|
||||
#elif defined(HAVE__THREAD_LOCAL)
|
||||
#define NPY_TLS _Thread_local
|
||||
#elif defined(HAVE___THREAD)
|
||||
#define NPY_TLS __thread
|
||||
#elif defined(HAVE___DECLSPEC_THREAD_)
|
||||
#define NPY_TLS __declspec(thread)
|
||||
#else
|
||||
#define NPY_TLS
|
||||
#endif
|
||||
|
||||
#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE
|
||||
#define NPY_RETURNS_BORROWED_REF \
|
||||
__attribute__((cpychecker_returns_borrowed_ref))
|
||||
#else
|
||||
#define NPY_RETURNS_BORROWED_REF
|
||||
#endif
|
||||
|
||||
#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE
|
||||
#define NPY_STEALS_REF_TO_ARG(n) \
|
||||
__attribute__((cpychecker_steals_reference_to_arg(n)))
|
||||
#else
|
||||
#define NPY_STEALS_REF_TO_ARG(n)
|
||||
#endif
|
||||
|
||||
/* 64 bit file position support, also on win-amd64. Issue gh-2256 */
|
||||
#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \
|
||||
defined(__MINGW32__) || defined(__MINGW64__)
|
||||
#include <io.h>
|
||||
|
||||
#define npy_fseek _fseeki64
|
||||
#define npy_ftell _ftelli64
|
||||
#define npy_lseek _lseeki64
|
||||
#define npy_off_t npy_int64
|
||||
|
||||
#if NPY_SIZEOF_INT == 8
|
||||
#define NPY_OFF_T_PYFMT "i"
|
||||
#elif NPY_SIZEOF_LONG == 8
|
||||
#define NPY_OFF_T_PYFMT "l"
|
||||
#elif NPY_SIZEOF_LONGLONG == 8
|
||||
#define NPY_OFF_T_PYFMT "L"
|
||||
#else
|
||||
#error Unsupported size for type off_t
|
||||
#endif
|
||||
#else
|
||||
#ifdef HAVE_FSEEKO
|
||||
#define npy_fseek fseeko
|
||||
#else
|
||||
#define npy_fseek fseek
|
||||
#endif
|
||||
#ifdef HAVE_FTELLO
|
||||
#define npy_ftell ftello
|
||||
#else
|
||||
#define npy_ftell ftell
|
||||
#endif
|
||||
#include <sys/types.h>
|
||||
#ifndef _WIN32
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
#define npy_lseek lseek
|
||||
#define npy_off_t off_t
|
||||
|
||||
#if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT
|
||||
#define NPY_OFF_T_PYFMT "h"
|
||||
#elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT
|
||||
#define NPY_OFF_T_PYFMT "i"
|
||||
#elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG
|
||||
#define NPY_OFF_T_PYFMT "l"
|
||||
#elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG
|
||||
#define NPY_OFF_T_PYFMT "L"
|
||||
#else
|
||||
#error Unsupported size for type off_t
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* enums for detected endianness */
|
||||
enum {
|
||||
NPY_CPU_UNKNOWN_ENDIAN,
|
||||
NPY_CPU_LITTLE,
|
||||
NPY_CPU_BIG
|
||||
};
|
||||
|
||||
/*
|
||||
* This is to typedef npy_intp to the appropriate size for Py_ssize_t.
|
||||
* (Before NumPy 2.0 we used Py_intptr_t and Py_uintptr_t from `pyport.h`.)
|
||||
*/
|
||||
typedef Py_ssize_t npy_intp;
|
||||
typedef size_t npy_uintp;
|
||||
|
||||
/*
|
||||
* Define sizes that were not defined in numpyconfig.h.
|
||||
*/
|
||||
#define NPY_SIZEOF_CHAR 1
|
||||
#define NPY_SIZEOF_BYTE 1
|
||||
#define NPY_SIZEOF_DATETIME 8
|
||||
#define NPY_SIZEOF_TIMEDELTA 8
|
||||
#define NPY_SIZEOF_HALF 2
|
||||
#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
|
||||
#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
|
||||
#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
|
||||
|
||||
#ifdef constchar
|
||||
#undef constchar
|
||||
#endif
|
||||
|
||||
#define NPY_SSIZE_T_PYFMT "n"
|
||||
#define constchar char
|
||||
|
||||
/* NPY_INTP_FMT Note:
|
||||
* Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf,
|
||||
* NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those
|
||||
* functions use different formatting codes that are portably specified
|
||||
* according to the Python documentation. See issue gh-2388.
|
||||
*/
|
||||
#if NPY_SIZEOF_INTP == NPY_SIZEOF_LONG
|
||||
#define NPY_INTP NPY_LONG
|
||||
#define NPY_UINTP NPY_ULONG
|
||||
#define PyIntpArrType_Type PyLongArrType_Type
|
||||
#define PyUIntpArrType_Type PyULongArrType_Type
|
||||
#define NPY_MAX_INTP NPY_MAX_LONG
|
||||
#define NPY_MIN_INTP NPY_MIN_LONG
|
||||
#define NPY_MAX_UINTP NPY_MAX_ULONG
|
||||
#define NPY_INTP_FMT "ld"
|
||||
#elif NPY_SIZEOF_INTP == NPY_SIZEOF_INT
|
||||
#define NPY_INTP NPY_INT
|
||||
#define NPY_UINTP NPY_UINT
|
||||
#define PyIntpArrType_Type PyIntArrType_Type
|
||||
#define PyUIntpArrType_Type PyUIntArrType_Type
|
||||
#define NPY_MAX_INTP NPY_MAX_INT
|
||||
#define NPY_MIN_INTP NPY_MIN_INT
|
||||
#define NPY_MAX_UINTP NPY_MAX_UINT
|
||||
#define NPY_INTP_FMT "d"
|
||||
#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_INTP == NPY_SIZEOF_LONGLONG)
|
||||
#define NPY_INTP NPY_LONGLONG
|
||||
#define NPY_UINTP NPY_ULONGLONG
|
||||
#define PyIntpArrType_Type PyLongLongArrType_Type
|
||||
#define PyUIntpArrType_Type PyULongLongArrType_Type
|
||||
#define NPY_MAX_INTP NPY_MAX_LONGLONG
|
||||
#define NPY_MIN_INTP NPY_MIN_LONGLONG
|
||||
#define NPY_MAX_UINTP NPY_MAX_ULONGLONG
|
||||
#define NPY_INTP_FMT "lld"
|
||||
#else
|
||||
#error "Failed to correctly define NPY_INTP and NPY_UINTP"
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Some platforms don't define bool, long long, or long double.
|
||||
* Handle that here.
|
||||
*/
|
||||
#define NPY_BYTE_FMT "hhd"
|
||||
#define NPY_UBYTE_FMT "hhu"
|
||||
#define NPY_SHORT_FMT "hd"
|
||||
#define NPY_USHORT_FMT "hu"
|
||||
#define NPY_INT_FMT "d"
|
||||
#define NPY_UINT_FMT "u"
|
||||
#define NPY_LONG_FMT "ld"
|
||||
#define NPY_ULONG_FMT "lu"
|
||||
#define NPY_HALF_FMT "g"
|
||||
#define NPY_FLOAT_FMT "g"
|
||||
#define NPY_DOUBLE_FMT "g"
|
||||
|
||||
|
||||
#ifdef PY_LONG_LONG
|
||||
typedef PY_LONG_LONG npy_longlong;
|
||||
typedef unsigned PY_LONG_LONG npy_ulonglong;
|
||||
# ifdef _MSC_VER
|
||||
# define NPY_LONGLONG_FMT "I64d"
|
||||
# define NPY_ULONGLONG_FMT "I64u"
|
||||
# else
|
||||
# define NPY_LONGLONG_FMT "lld"
|
||||
# define NPY_ULONGLONG_FMT "llu"
|
||||
# endif
|
||||
# ifdef _MSC_VER
|
||||
# define NPY_LONGLONG_SUFFIX(x) (x##i64)
|
||||
# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64)
|
||||
# else
|
||||
# define NPY_LONGLONG_SUFFIX(x) (x##LL)
|
||||
# define NPY_ULONGLONG_SUFFIX(x) (x##ULL)
|
||||
# endif
|
||||
#else
|
||||
typedef long npy_longlong;
|
||||
typedef unsigned long npy_ulonglong;
|
||||
# define NPY_LONGLONG_SUFFIX(x) (x##L)
|
||||
# define NPY_ULONGLONG_SUFFIX(x) (x##UL)
|
||||
#endif
|
||||
|
||||
|
||||
typedef unsigned char npy_bool;
|
||||
#define NPY_FALSE 0
|
||||
#define NPY_TRUE 1
|
||||
/*
|
||||
* `NPY_SIZEOF_LONGDOUBLE` isn't usually equal to sizeof(long double).
|
||||
 * In certain cases, it may be forced to be equal to sizeof(double)
|
||||
* even against the compiler implementation and the same goes for
|
||||
* `complex long double`.
|
||||
*
|
||||
* Therefore, avoid `long double`, use `npy_longdouble` instead,
|
||||
* and when it comes to standard math functions make sure of using
|
||||
* the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`.
|
||||
* For example:
|
||||
* npy_longdouble *ptr, x;
|
||||
* #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
|
||||
* npy_longdouble r = modf(x, ptr);
|
||||
* #else
|
||||
* npy_longdouble r = modfl(x, ptr);
|
||||
* #endif
|
||||
*
|
||||
* See https://github.com/numpy/numpy/issues/20348
|
||||
*/
|
||||
#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
|
||||
#define NPY_LONGDOUBLE_FMT "g"
|
||||
#define longdouble_t double
|
||||
typedef double npy_longdouble;
|
||||
#else
|
||||
#define NPY_LONGDOUBLE_FMT "Lg"
|
||||
#define longdouble_t long double
|
||||
typedef long double npy_longdouble;
|
||||
#endif
|
||||
|
||||
#ifndef Py_USING_UNICODE
|
||||
#error Must use Python with unicode enabled.
|
||||
#endif
|
||||
|
||||
|
||||
typedef signed char npy_byte;
|
||||
typedef unsigned char npy_ubyte;
|
||||
typedef unsigned short npy_ushort;
|
||||
typedef unsigned int npy_uint;
|
||||
typedef unsigned long npy_ulong;
|
||||
|
||||
/* These are for completeness */
|
||||
typedef char npy_char;
|
||||
typedef short npy_short;
|
||||
typedef int npy_int;
|
||||
typedef long npy_long;
|
||||
typedef float npy_float;
|
||||
typedef double npy_double;
|
||||
|
||||
typedef Py_hash_t npy_hash_t;
|
||||
#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
|
||||
|
||||
#if defined(__cplusplus)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
double _Val[2];
|
||||
} npy_cdouble;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
float _Val[2];
|
||||
} npy_cfloat;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
long double _Val[2];
|
||||
} npy_clongdouble;
|
||||
|
||||
#else
|
||||
|
||||
#include <complex.h>
|
||||
|
||||
|
||||
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
|
||||
typedef _Dcomplex npy_cdouble;
|
||||
typedef _Fcomplex npy_cfloat;
|
||||
typedef _Lcomplex npy_clongdouble;
|
||||
#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) */
|
||||
typedef double _Complex npy_cdouble;
|
||||
typedef float _Complex npy_cfloat;
|
||||
typedef longdouble_t _Complex npy_clongdouble;
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* numarray-style bit-width typedefs
|
||||
*/
|
||||
#define NPY_MAX_INT8 127
|
||||
#define NPY_MIN_INT8 -128
|
||||
#define NPY_MAX_UINT8 255
|
||||
#define NPY_MAX_INT16 32767
|
||||
#define NPY_MIN_INT16 -32768
|
||||
#define NPY_MAX_UINT16 65535
|
||||
#define NPY_MAX_INT32 2147483647
|
||||
#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
|
||||
#define NPY_MAX_UINT32 4294967295U
|
||||
#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
|
||||
#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
|
||||
#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
|
||||
#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
|
||||
#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
|
||||
#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
|
||||
#define NPY_MIN_DATETIME NPY_MIN_INT64
|
||||
#define NPY_MAX_DATETIME NPY_MAX_INT64
|
||||
#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
|
||||
#define NPY_MAX_TIMEDELTA NPY_MAX_INT64
|
||||
|
||||
/* Need to find the number of bits for each type and
|
||||
make definitions accordingly.
|
||||
|
||||
C states that sizeof(char) == 1 by definition
|
||||
|
||||
So, just using the sizeof keyword won't help.
|
||||
|
||||
It also looks like Python itself uses sizeof(char) quite a
|
||||
bit, which by definition should be 1 all the time.
|
||||
|
||||
Idea: Make Use of CHAR_BIT which should tell us how many
|
||||
BITS per CHARACTER
|
||||
*/
|
||||
|
||||
/* Include platform definitions -- These are in the C89/90 standard */
|
||||
#include <limits.h>
|
||||
#define NPY_MAX_BYTE SCHAR_MAX
|
||||
#define NPY_MIN_BYTE SCHAR_MIN
|
||||
#define NPY_MAX_UBYTE UCHAR_MAX
|
||||
#define NPY_MAX_SHORT SHRT_MAX
|
||||
#define NPY_MIN_SHORT SHRT_MIN
|
||||
#define NPY_MAX_USHORT USHRT_MAX
|
||||
#define NPY_MAX_INT INT_MAX
|
||||
#ifndef INT_MIN
|
||||
#define INT_MIN (-INT_MAX - 1)
|
||||
#endif
|
||||
#define NPY_MIN_INT INT_MIN
|
||||
#define NPY_MAX_UINT UINT_MAX
|
||||
#define NPY_MAX_LONG LONG_MAX
|
||||
#define NPY_MIN_LONG LONG_MIN
|
||||
#define NPY_MAX_ULONG ULONG_MAX
|
||||
|
||||
#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
|
||||
#define NPY_BITSOF_CHAR CHAR_BIT
|
||||
#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
|
||||
#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
|
||||
#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
|
||||
#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
|
||||
#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
|
||||
#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
|
||||
#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
|
||||
#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
|
||||
#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
|
||||
#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
|
||||
#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
|
||||
#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
|
||||
#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
|
||||
#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
|
||||
#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
|
||||
|
||||
#if NPY_BITSOF_LONG == 8
|
||||
#define NPY_INT8 NPY_LONG
|
||||
#define NPY_UINT8 NPY_ULONG
|
||||
typedef long npy_int8;
|
||||
typedef unsigned long npy_uint8;
|
||||
#define PyInt8ScalarObject PyLongScalarObject
|
||||
#define PyInt8ArrType_Type PyLongArrType_Type
|
||||
#define PyUInt8ScalarObject PyULongScalarObject
|
||||
#define PyUInt8ArrType_Type PyULongArrType_Type
|
||||
#define NPY_INT8_FMT NPY_LONG_FMT
|
||||
#define NPY_UINT8_FMT NPY_ULONG_FMT
|
||||
#elif NPY_BITSOF_LONG == 16
|
||||
#define NPY_INT16 NPY_LONG
|
||||
#define NPY_UINT16 NPY_ULONG
|
||||
typedef long npy_int16;
|
||||
typedef unsigned long npy_uint16;
|
||||
#define PyInt16ScalarObject PyLongScalarObject
|
||||
#define PyInt16ArrType_Type PyLongArrType_Type
|
||||
#define PyUInt16ScalarObject PyULongScalarObject
|
||||
#define PyUInt16ArrType_Type PyULongArrType_Type
|
||||
#define NPY_INT16_FMT NPY_LONG_FMT
|
||||
#define NPY_UINT16_FMT NPY_ULONG_FMT
|
||||
#elif NPY_BITSOF_LONG == 32
|
||||
#define NPY_INT32 NPY_LONG
|
||||
#define NPY_UINT32 NPY_ULONG
|
||||
typedef long npy_int32;
|
||||
typedef unsigned long npy_uint32;
|
||||
typedef unsigned long npy_ucs4;
|
||||
#define PyInt32ScalarObject PyLongScalarObject
|
||||
#define PyInt32ArrType_Type PyLongArrType_Type
|
||||
#define PyUInt32ScalarObject PyULongScalarObject
|
||||
#define PyUInt32ArrType_Type PyULongArrType_Type
|
||||
#define NPY_INT32_FMT NPY_LONG_FMT
|
||||
#define NPY_UINT32_FMT NPY_ULONG_FMT
|
||||
#elif NPY_BITSOF_LONG == 64
|
||||
#define NPY_INT64 NPY_LONG
|
||||
#define NPY_UINT64 NPY_ULONG
|
||||
typedef long npy_int64;
|
||||
typedef unsigned long npy_uint64;
|
||||
#define PyInt64ScalarObject PyLongScalarObject
|
||||
#define PyInt64ArrType_Type PyLongArrType_Type
|
||||
#define PyUInt64ScalarObject PyULongScalarObject
|
||||
#define PyUInt64ArrType_Type PyULongArrType_Type
|
||||
#define NPY_INT64_FMT NPY_LONG_FMT
|
||||
#define NPY_UINT64_FMT NPY_ULONG_FMT
|
||||
#define MyPyLong_FromInt64 PyLong_FromLong
|
||||
#define MyPyLong_AsInt64 PyLong_AsLong
|
||||
#endif
|
||||
|
||||
#if NPY_BITSOF_LONGLONG == 8
|
||||
# ifndef NPY_INT8
|
||||
# define NPY_INT8 NPY_LONGLONG
|
||||
# define NPY_UINT8 NPY_ULONGLONG
|
||||
typedef npy_longlong npy_int8;
|
||||
typedef npy_ulonglong npy_uint8;
|
||||
# define PyInt8ScalarObject PyLongLongScalarObject
|
||||
# define PyInt8ArrType_Type PyLongLongArrType_Type
|
||||
# define PyUInt8ScalarObject PyULongLongScalarObject
|
||||
# define PyUInt8ArrType_Type PyULongLongArrType_Type
|
||||
#define NPY_INT8_FMT NPY_LONGLONG_FMT
|
||||
#define NPY_UINT8_FMT NPY_ULONGLONG_FMT
|
||||
# endif
|
||||
# define NPY_MAX_LONGLONG NPY_MAX_INT8
|
||||
# define NPY_MIN_LONGLONG NPY_MIN_INT8
|
||||
# define NPY_MAX_ULONGLONG NPY_MAX_UINT8
|
||||
#elif NPY_BITSOF_LONGLONG == 16
|
||||
# ifndef NPY_INT16
|
||||
# define NPY_INT16 NPY_LONGLONG
|
||||
# define NPY_UINT16 NPY_ULONGLONG
|
||||
typedef npy_longlong npy_int16;
|
||||
typedef npy_ulonglong npy_uint16;
|
||||
# define PyInt16ScalarObject PyLongLongScalarObject
|
||||
# define PyInt16ArrType_Type PyLongLongArrType_Type
|
||||
# define PyUInt16ScalarObject PyULongLongScalarObject
|
||||
# define PyUInt16ArrType_Type PyULongLongArrType_Type
|
||||
#define NPY_INT16_FMT NPY_LONGLONG_FMT
|
||||
#define NPY_UINT16_FMT NPY_ULONGLONG_FMT
|
||||
# endif
|
||||
# define NPY_MAX_LONGLONG NPY_MAX_INT16
|
||||
# define NPY_MIN_LONGLONG NPY_MIN_INT16
|
||||
# define NPY_MAX_ULONGLONG NPY_MAX_UINT16
|
||||
#elif NPY_BITSOF_LONGLONG == 32
|
||||
# ifndef NPY_INT32
|
||||
# define NPY_INT32 NPY_LONGLONG
|
||||
# define NPY_UINT32 NPY_ULONGLONG
|
||||
typedef npy_longlong npy_int32;
|
||||
typedef npy_ulonglong npy_uint32;
|
||||
typedef npy_ulonglong npy_ucs4;
|
||||
# define PyInt32ScalarObject PyLongLongScalarObject
|
||||
# define PyInt32ArrType_Type PyLongLongArrType_Type
|
||||
# define PyUInt32ScalarObject PyULongLongScalarObject
|
||||
# define PyUInt32ArrType_Type PyULongLongArrType_Type
|
||||
#define NPY_INT32_FMT NPY_LONGLONG_FMT
|
||||
#define NPY_UINT32_FMT NPY_ULONGLONG_FMT
|
||||
# endif
|
||||
# define NPY_MAX_LONGLONG NPY_MAX_INT32
|
||||
# define NPY_MIN_LONGLONG NPY_MIN_INT32
|
||||
# define NPY_MAX_ULONGLONG NPY_MAX_UINT32
|
||||
#elif NPY_BITSOF_LONGLONG == 64
|
||||
# ifndef NPY_INT64
|
||||
# define NPY_INT64 NPY_LONGLONG
|
||||
# define NPY_UINT64 NPY_ULONGLONG
|
||||
typedef npy_longlong npy_int64;
|
||||
typedef npy_ulonglong npy_uint64;
|
||||
# define PyInt64ScalarObject PyLongLongScalarObject
|
||||
# define PyInt64ArrType_Type PyLongLongArrType_Type
|
||||
# define PyUInt64ScalarObject PyULongLongScalarObject
|
||||
# define PyUInt64ArrType_Type PyULongLongArrType_Type
|
||||
#define NPY_INT64_FMT NPY_LONGLONG_FMT
|
||||
#define NPY_UINT64_FMT NPY_ULONGLONG_FMT
|
||||
# define MyPyLong_FromInt64 PyLong_FromLongLong
|
||||
# define MyPyLong_AsInt64 PyLong_AsLongLong
|
||||
# endif
|
||||
# define NPY_MAX_LONGLONG NPY_MAX_INT64
|
||||
# define NPY_MIN_LONGLONG NPY_MIN_INT64
|
||||
# define NPY_MAX_ULONGLONG NPY_MAX_UINT64
|
||||
#endif
|
||||
|
||||
#if NPY_BITSOF_INT == 8
|
||||
#ifndef NPY_INT8
|
||||
#define NPY_INT8 NPY_INT
|
||||
#define NPY_UINT8 NPY_UINT
|
||||
typedef int npy_int8;
|
||||
typedef unsigned int npy_uint8;
|
||||
# define PyInt8ScalarObject PyIntScalarObject
|
||||
# define PyInt8ArrType_Type PyIntArrType_Type
|
||||
# define PyUInt8ScalarObject PyUIntScalarObject
|
||||
# define PyUInt8ArrType_Type PyUIntArrType_Type
|
||||
#define NPY_INT8_FMT NPY_INT_FMT
|
||||
#define NPY_UINT8_FMT NPY_UINT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_INT == 16
|
||||
#ifndef NPY_INT16
|
||||
#define NPY_INT16 NPY_INT
|
||||
#define NPY_UINT16 NPY_UINT
|
||||
typedef int npy_int16;
|
||||
typedef unsigned int npy_uint16;
|
||||
# define PyInt16ScalarObject PyIntScalarObject
|
||||
# define PyInt16ArrType_Type PyIntArrType_Type
|
||||
# define PyUInt16ScalarObject PyIntUScalarObject
|
||||
# define PyUInt16ArrType_Type PyIntUArrType_Type
|
||||
#define NPY_INT16_FMT NPY_INT_FMT
|
||||
#define NPY_UINT16_FMT NPY_UINT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_INT == 32
|
||||
#ifndef NPY_INT32
|
||||
#define NPY_INT32 NPY_INT
|
||||
#define NPY_UINT32 NPY_UINT
|
||||
typedef int npy_int32;
|
||||
typedef unsigned int npy_uint32;
|
||||
typedef unsigned int npy_ucs4;
|
||||
# define PyInt32ScalarObject PyIntScalarObject
|
||||
# define PyInt32ArrType_Type PyIntArrType_Type
|
||||
# define PyUInt32ScalarObject PyUIntScalarObject
|
||||
# define PyUInt32ArrType_Type PyUIntArrType_Type
|
||||
#define NPY_INT32_FMT NPY_INT_FMT
|
||||
#define NPY_UINT32_FMT NPY_UINT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_INT == 64
|
||||
#ifndef NPY_INT64
|
||||
#define NPY_INT64 NPY_INT
|
||||
#define NPY_UINT64 NPY_UINT
|
||||
typedef int npy_int64;
|
||||
typedef unsigned int npy_uint64;
|
||||
# define PyInt64ScalarObject PyIntScalarObject
|
||||
# define PyInt64ArrType_Type PyIntArrType_Type
|
||||
# define PyUInt64ScalarObject PyUIntScalarObject
|
||||
# define PyUInt64ArrType_Type PyUIntArrType_Type
|
||||
#define NPY_INT64_FMT NPY_INT_FMT
|
||||
#define NPY_UINT64_FMT NPY_UINT_FMT
|
||||
# define MyPyLong_FromInt64 PyLong_FromLong
|
||||
# define MyPyLong_AsInt64 PyLong_AsLong
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if NPY_BITSOF_SHORT == 8
|
||||
#ifndef NPY_INT8
|
||||
#define NPY_INT8 NPY_SHORT
|
||||
#define NPY_UINT8 NPY_USHORT
|
||||
typedef short npy_int8;
|
||||
typedef unsigned short npy_uint8;
|
||||
# define PyInt8ScalarObject PyShortScalarObject
|
||||
# define PyInt8ArrType_Type PyShortArrType_Type
|
||||
# define PyUInt8ScalarObject PyUShortScalarObject
|
||||
# define PyUInt8ArrType_Type PyUShortArrType_Type
|
||||
#define NPY_INT8_FMT NPY_SHORT_FMT
|
||||
#define NPY_UINT8_FMT NPY_USHORT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_SHORT == 16
|
||||
#ifndef NPY_INT16
|
||||
#define NPY_INT16 NPY_SHORT
|
||||
#define NPY_UINT16 NPY_USHORT
|
||||
typedef short npy_int16;
|
||||
typedef unsigned short npy_uint16;
|
||||
# define PyInt16ScalarObject PyShortScalarObject
|
||||
# define PyInt16ArrType_Type PyShortArrType_Type
|
||||
# define PyUInt16ScalarObject PyUShortScalarObject
|
||||
# define PyUInt16ArrType_Type PyUShortArrType_Type
|
||||
#define NPY_INT16_FMT NPY_SHORT_FMT
|
||||
#define NPY_UINT16_FMT NPY_USHORT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_SHORT == 32
|
||||
#ifndef NPY_INT32
|
||||
#define NPY_INT32 NPY_SHORT
|
||||
#define NPY_UINT32 NPY_USHORT
|
||||
typedef short npy_int32;
|
||||
typedef unsigned short npy_uint32;
|
||||
typedef unsigned short npy_ucs4;
|
||||
# define PyInt32ScalarObject PyShortScalarObject
|
||||
# define PyInt32ArrType_Type PyShortArrType_Type
|
||||
# define PyUInt32ScalarObject PyUShortScalarObject
|
||||
# define PyUInt32ArrType_Type PyUShortArrType_Type
|
||||
#define NPY_INT32_FMT NPY_SHORT_FMT
|
||||
#define NPY_UINT32_FMT NPY_USHORT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_SHORT == 64
|
||||
#ifndef NPY_INT64
|
||||
#define NPY_INT64 NPY_SHORT
|
||||
#define NPY_UINT64 NPY_USHORT
|
||||
typedef short npy_int64;
|
||||
typedef unsigned short npy_uint64;
|
||||
# define PyInt64ScalarObject PyShortScalarObject
|
||||
# define PyInt64ArrType_Type PyShortArrType_Type
|
||||
# define PyUInt64ScalarObject PyUShortScalarObject
|
||||
# define PyUInt64ArrType_Type PyUShortArrType_Type
|
||||
#define NPY_INT64_FMT NPY_SHORT_FMT
|
||||
#define NPY_UINT64_FMT NPY_USHORT_FMT
|
||||
# define MyPyLong_FromInt64 PyLong_FromLong
|
||||
# define MyPyLong_AsInt64 PyLong_AsLong
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
#if NPY_BITSOF_CHAR == 8
|
||||
#ifndef NPY_INT8
|
||||
#define NPY_INT8 NPY_BYTE
|
||||
#define NPY_UINT8 NPY_UBYTE
|
||||
typedef signed char npy_int8;
|
||||
typedef unsigned char npy_uint8;
|
||||
# define PyInt8ScalarObject PyByteScalarObject
|
||||
# define PyInt8ArrType_Type PyByteArrType_Type
|
||||
# define PyUInt8ScalarObject PyUByteScalarObject
|
||||
# define PyUInt8ArrType_Type PyUByteArrType_Type
|
||||
#define NPY_INT8_FMT NPY_BYTE_FMT
|
||||
#define NPY_UINT8_FMT NPY_UBYTE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_CHAR == 16
|
||||
#ifndef NPY_INT16
|
||||
#define NPY_INT16 NPY_BYTE
|
||||
#define NPY_UINT16 NPY_UBYTE
|
||||
typedef signed char npy_int16;
|
||||
typedef unsigned char npy_uint16;
|
||||
# define PyInt16ScalarObject PyByteScalarObject
|
||||
# define PyInt16ArrType_Type PyByteArrType_Type
|
||||
# define PyUInt16ScalarObject PyUByteScalarObject
|
||||
# define PyUInt16ArrType_Type PyUByteArrType_Type
|
||||
#define NPY_INT16_FMT NPY_BYTE_FMT
|
||||
#define NPY_UINT16_FMT NPY_UBYTE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_CHAR == 32
|
||||
#ifndef NPY_INT32
|
||||
#define NPY_INT32 NPY_BYTE
|
||||
#define NPY_UINT32 NPY_UBYTE
|
||||
typedef signed char npy_int32;
|
||||
typedef unsigned char npy_uint32;
|
||||
typedef unsigned char npy_ucs4;
|
||||
# define PyInt32ScalarObject PyByteScalarObject
|
||||
# define PyInt32ArrType_Type PyByteArrType_Type
|
||||
# define PyUInt32ScalarObject PyUByteScalarObject
|
||||
# define PyUInt32ArrType_Type PyUByteArrType_Type
|
||||
#define NPY_INT32_FMT NPY_BYTE_FMT
|
||||
#define NPY_UINT32_FMT NPY_UBYTE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_CHAR == 64
|
||||
#ifndef NPY_INT64
|
||||
#define NPY_INT64 NPY_BYTE
|
||||
#define NPY_UINT64 NPY_UBYTE
|
||||
typedef signed char npy_int64;
|
||||
typedef unsigned char npy_uint64;
|
||||
# define PyInt64ScalarObject PyByteScalarObject
|
||||
# define PyInt64ArrType_Type PyByteArrType_Type
|
||||
# define PyUInt64ScalarObject PyUByteScalarObject
|
||||
# define PyUInt64ArrType_Type PyUByteArrType_Type
|
||||
#define NPY_INT64_FMT NPY_BYTE_FMT
|
||||
#define NPY_UINT64_FMT NPY_UBYTE_FMT
|
||||
# define MyPyLong_FromInt64 PyLong_FromLong
|
||||
# define MyPyLong_AsInt64 PyLong_AsLong
|
||||
#endif
|
||||
#elif NPY_BITSOF_CHAR == 128
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
#if NPY_BITSOF_DOUBLE == 32
|
||||
#ifndef NPY_FLOAT32
|
||||
#define NPY_FLOAT32 NPY_DOUBLE
|
||||
#define NPY_COMPLEX64 NPY_CDOUBLE
|
||||
typedef double npy_float32;
|
||||
typedef npy_cdouble npy_complex64;
|
||||
# define PyFloat32ScalarObject PyDoubleScalarObject
|
||||
# define PyComplex64ScalarObject PyCDoubleScalarObject
|
||||
# define PyFloat32ArrType_Type PyDoubleArrType_Type
|
||||
# define PyComplex64ArrType_Type PyCDoubleArrType_Type
|
||||
#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT
|
||||
#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_DOUBLE == 64
|
||||
#ifndef NPY_FLOAT64
|
||||
#define NPY_FLOAT64 NPY_DOUBLE
|
||||
#define NPY_COMPLEX128 NPY_CDOUBLE
|
||||
typedef double npy_float64;
|
||||
typedef npy_cdouble npy_complex128;
|
||||
# define PyFloat64ScalarObject PyDoubleScalarObject
|
||||
# define PyComplex128ScalarObject PyCDoubleScalarObject
|
||||
# define PyFloat64ArrType_Type PyDoubleArrType_Type
|
||||
# define PyComplex128ArrType_Type PyCDoubleArrType_Type
|
||||
#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT
|
||||
#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_DOUBLE == 80
|
||||
#ifndef NPY_FLOAT80
|
||||
#define NPY_FLOAT80 NPY_DOUBLE
|
||||
#define NPY_COMPLEX160 NPY_CDOUBLE
|
||||
typedef double npy_float80;
|
||||
typedef npy_cdouble npy_complex160;
|
||||
# define PyFloat80ScalarObject PyDoubleScalarObject
|
||||
# define PyComplex160ScalarObject PyCDoubleScalarObject
|
||||
# define PyFloat80ArrType_Type PyDoubleArrType_Type
|
||||
# define PyComplex160ArrType_Type PyCDoubleArrType_Type
|
||||
#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT
|
||||
#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_DOUBLE == 96
|
||||
#ifndef NPY_FLOAT96
|
||||
#define NPY_FLOAT96 NPY_DOUBLE
|
||||
#define NPY_COMPLEX192 NPY_CDOUBLE
|
||||
typedef double npy_float96;
|
||||
typedef npy_cdouble npy_complex192;
|
||||
# define PyFloat96ScalarObject PyDoubleScalarObject
|
||||
# define PyComplex192ScalarObject PyCDoubleScalarObject
|
||||
# define PyFloat96ArrType_Type PyDoubleArrType_Type
|
||||
# define PyComplex192ArrType_Type PyCDoubleArrType_Type
|
||||
#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT
|
||||
#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_DOUBLE == 128
|
||||
#ifndef NPY_FLOAT128
|
||||
#define NPY_FLOAT128 NPY_DOUBLE
|
||||
#define NPY_COMPLEX256 NPY_CDOUBLE
|
||||
typedef double npy_float128;
|
||||
typedef npy_cdouble npy_complex256;
|
||||
# define PyFloat128ScalarObject PyDoubleScalarObject
|
||||
# define PyComplex256ScalarObject PyCDoubleScalarObject
|
||||
# define PyFloat128ArrType_Type PyDoubleArrType_Type
|
||||
# define PyComplex256ArrType_Type PyCDoubleArrType_Type
|
||||
#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT
|
||||
#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
#if NPY_BITSOF_FLOAT == 32
|
||||
#ifndef NPY_FLOAT32
|
||||
#define NPY_FLOAT32 NPY_FLOAT
|
||||
#define NPY_COMPLEX64 NPY_CFLOAT
|
||||
typedef float npy_float32;
|
||||
typedef npy_cfloat npy_complex64;
|
||||
# define PyFloat32ScalarObject PyFloatScalarObject
|
||||
# define PyComplex64ScalarObject PyCFloatScalarObject
|
||||
# define PyFloat32ArrType_Type PyFloatArrType_Type
|
||||
# define PyComplex64ArrType_Type PyCFloatArrType_Type
|
||||
#define NPY_FLOAT32_FMT NPY_FLOAT_FMT
|
||||
#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_FLOAT == 64
|
||||
#ifndef NPY_FLOAT64
|
||||
#define NPY_FLOAT64 NPY_FLOAT
|
||||
#define NPY_COMPLEX128 NPY_CFLOAT
|
||||
typedef float npy_float64;
|
||||
typedef npy_cfloat npy_complex128;
|
||||
# define PyFloat64ScalarObject PyFloatScalarObject
|
||||
# define PyComplex128ScalarObject PyCFloatScalarObject
|
||||
# define PyFloat64ArrType_Type PyFloatArrType_Type
|
||||
# define PyComplex128ArrType_Type PyCFloatArrType_Type
|
||||
#define NPY_FLOAT64_FMT NPY_FLOAT_FMT
|
||||
#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_FLOAT == 80
|
||||
#ifndef NPY_FLOAT80
|
||||
#define NPY_FLOAT80 NPY_FLOAT
|
||||
#define NPY_COMPLEX160 NPY_CFLOAT
|
||||
typedef float npy_float80;
|
||||
typedef npy_cfloat npy_complex160;
|
||||
# define PyFloat80ScalarObject PyFloatScalarObject
|
||||
# define PyComplex160ScalarObject PyCFloatScalarObject
|
||||
# define PyFloat80ArrType_Type PyFloatArrType_Type
|
||||
# define PyComplex160ArrType_Type PyCFloatArrType_Type
|
||||
#define NPY_FLOAT80_FMT NPY_FLOAT_FMT
|
||||
#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_FLOAT == 96
|
||||
#ifndef NPY_FLOAT96
|
||||
#define NPY_FLOAT96 NPY_FLOAT
|
||||
#define NPY_COMPLEX192 NPY_CFLOAT
|
||||
typedef float npy_float96;
|
||||
typedef npy_cfloat npy_complex192;
|
||||
# define PyFloat96ScalarObject PyFloatScalarObject
|
||||
# define PyComplex192ScalarObject PyCFloatScalarObject
|
||||
# define PyFloat96ArrType_Type PyFloatArrType_Type
|
||||
# define PyComplex192ArrType_Type PyCFloatArrType_Type
|
||||
#define NPY_FLOAT96_FMT NPY_FLOAT_FMT
|
||||
#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_FLOAT == 128
|
||||
#ifndef NPY_FLOAT128
|
||||
#define NPY_FLOAT128 NPY_FLOAT
|
||||
#define NPY_COMPLEX256 NPY_CFLOAT
|
||||
typedef float npy_float128;
|
||||
typedef npy_cfloat npy_complex256;
|
||||
# define PyFloat128ScalarObject PyFloatScalarObject
|
||||
# define PyComplex256ScalarObject PyCFloatScalarObject
|
||||
# define PyFloat128ArrType_Type PyFloatArrType_Type
|
||||
# define PyComplex256ArrType_Type PyCFloatArrType_Type
|
||||
#define NPY_FLOAT128_FMT NPY_FLOAT_FMT
|
||||
#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* half/float16 isn't a floating-point type in C */
|
||||
#define NPY_FLOAT16 NPY_HALF
|
||||
typedef npy_uint16 npy_half;
|
||||
typedef npy_half npy_float16;
|
||||
|
||||
#if NPY_BITSOF_LONGDOUBLE == 32
|
||||
#ifndef NPY_FLOAT32
|
||||
#define NPY_FLOAT32 NPY_LONGDOUBLE
|
||||
#define NPY_COMPLEX64 NPY_CLONGDOUBLE
|
||||
typedef npy_longdouble npy_float32;
|
||||
typedef npy_clongdouble npy_complex64;
|
||||
# define PyFloat32ScalarObject PyLongDoubleScalarObject
|
||||
# define PyComplex64ScalarObject PyCLongDoubleScalarObject
|
||||
# define PyFloat32ArrType_Type PyLongDoubleArrType_Type
|
||||
# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type
|
||||
#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT
|
||||
#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_LONGDOUBLE == 64
|
||||
#ifndef NPY_FLOAT64
|
||||
#define NPY_FLOAT64 NPY_LONGDOUBLE
|
||||
#define NPY_COMPLEX128 NPY_CLONGDOUBLE
|
||||
typedef npy_longdouble npy_float64;
|
||||
typedef npy_clongdouble npy_complex128;
|
||||
# define PyFloat64ScalarObject PyLongDoubleScalarObject
|
||||
# define PyComplex128ScalarObject PyCLongDoubleScalarObject
|
||||
# define PyFloat64ArrType_Type PyLongDoubleArrType_Type
|
||||
# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type
|
||||
#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT
|
||||
#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_LONGDOUBLE == 80
|
||||
#ifndef NPY_FLOAT80
|
||||
#define NPY_FLOAT80 NPY_LONGDOUBLE
|
||||
#define NPY_COMPLEX160 NPY_CLONGDOUBLE
|
||||
typedef npy_longdouble npy_float80;
|
||||
typedef npy_clongdouble npy_complex160;
|
||||
# define PyFloat80ScalarObject PyLongDoubleScalarObject
|
||||
# define PyComplex160ScalarObject PyCLongDoubleScalarObject
|
||||
# define PyFloat80ArrType_Type PyLongDoubleArrType_Type
|
||||
# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type
|
||||
#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT
|
||||
#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_LONGDOUBLE == 96
|
||||
#ifndef NPY_FLOAT96
|
||||
#define NPY_FLOAT96 NPY_LONGDOUBLE
|
||||
#define NPY_COMPLEX192 NPY_CLONGDOUBLE
|
||||
typedef npy_longdouble npy_float96;
|
||||
typedef npy_clongdouble npy_complex192;
|
||||
# define PyFloat96ScalarObject PyLongDoubleScalarObject
|
||||
# define PyComplex192ScalarObject PyCLongDoubleScalarObject
|
||||
# define PyFloat96ArrType_Type PyLongDoubleArrType_Type
|
||||
# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type
|
||||
#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT
|
||||
#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT
|
||||
#endif
|
||||
#elif NPY_BITSOF_LONGDOUBLE == 128
|
||||
#ifndef NPY_FLOAT128
|
||||
#define NPY_FLOAT128 NPY_LONGDOUBLE
|
||||
#define NPY_COMPLEX256 NPY_CLONGDOUBLE
|
||||
typedef npy_longdouble npy_float128;
|
||||
typedef npy_clongdouble npy_complex256;
|
||||
# define PyFloat128ScalarObject PyLongDoubleScalarObject
|
||||
# define PyComplex256ScalarObject PyCLongDoubleScalarObject
|
||||
# define PyFloat128ArrType_Type PyLongDoubleArrType_Type
|
||||
# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type
|
||||
#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT
|
||||
#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* datetime typedefs */
|
||||
typedef npy_int64 npy_timedelta;
|
||||
typedef npy_int64 npy_datetime;
|
||||
#define NPY_DATETIME_FMT NPY_INT64_FMT
|
||||
#define NPY_TIMEDELTA_FMT NPY_INT64_FMT
|
||||
|
||||
/* End of typedefs for numarray style bit-width names */
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */
|
||||
124
lib/python3.11/site-packages/numpy/_core/include/numpy/npy_cpu.h
Normal file
124
lib/python3.11/site-packages/numpy/_core/include/numpy/npy_cpu.h
Normal file
@ -0,0 +1,124 @@
|
||||
/*
|
||||
* This set (target) cpu specific macros:
|
||||
* - Possible values:
|
||||
* NPY_CPU_X86
|
||||
* NPY_CPU_AMD64
|
||||
* NPY_CPU_PPC
|
||||
* NPY_CPU_PPC64
|
||||
* NPY_CPU_PPC64LE
|
||||
* NPY_CPU_SPARC
|
||||
* NPY_CPU_S390
|
||||
* NPY_CPU_IA64
|
||||
* NPY_CPU_HPPA
|
||||
* NPY_CPU_ALPHA
|
||||
* NPY_CPU_ARMEL
|
||||
* NPY_CPU_ARMEB
|
||||
* NPY_CPU_SH_LE
|
||||
* NPY_CPU_SH_BE
|
||||
* NPY_CPU_ARCEL
|
||||
* NPY_CPU_ARCEB
|
||||
* NPY_CPU_RISCV64
|
||||
* NPY_CPU_RISCV32
|
||||
* NPY_CPU_LOONGARCH
|
||||
* NPY_CPU_WASM
|
||||
*/
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
|
||||
|
||||
#include "numpyconfig.h"
|
||||
|
||||
#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
|
||||
/*
|
||||
* __i386__ is defined by gcc and Intel compiler on Linux,
|
||||
* _M_IX86 by VS compiler,
|
||||
* i386 by Sun compilers on opensolaris at least
|
||||
*/
|
||||
#define NPY_CPU_X86
|
||||
#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
|
||||
/*
|
||||
* both __x86_64__ and __amd64__ are defined by gcc
|
||||
* __x86_64 defined by sun compiler on opensolaris at least
|
||||
* _M_AMD64 defined by MS compiler
|
||||
*/
|
||||
#define NPY_CPU_AMD64
|
||||
#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
|
||||
#define NPY_CPU_PPC64LE
|
||||
#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
|
||||
#define NPY_CPU_PPC64
|
||||
#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
|
||||
/*
|
||||
* __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
|
||||
* but can't find it ATM
|
||||
* _ARCH_PPC is used by at least gcc on AIX
|
||||
* As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
|
||||
* for those specifically first before defaulting to ppc
|
||||
*/
|
||||
#define NPY_CPU_PPC
|
||||
#elif defined(__sparc__) || defined(__sparc)
|
||||
/* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
|
||||
#define NPY_CPU_SPARC
|
||||
#elif defined(__s390__)
|
||||
#define NPY_CPU_S390
|
||||
#elif defined(__ia64)
|
||||
#define NPY_CPU_IA64
|
||||
#elif defined(__hppa)
|
||||
#define NPY_CPU_HPPA
|
||||
#elif defined(__alpha__)
|
||||
#define NPY_CPU_ALPHA
|
||||
#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
|
||||
/* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */
|
||||
#if defined(__ARMEB__) || defined(__AARCH64EB__)
|
||||
#if defined(__ARM_32BIT_STATE)
|
||||
#define NPY_CPU_ARMEB_AARCH32
|
||||
#elif defined(__ARM_64BIT_STATE)
|
||||
#define NPY_CPU_ARMEB_AARCH64
|
||||
#else
|
||||
#define NPY_CPU_ARMEB
|
||||
#endif
|
||||
#elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
|
||||
#if defined(__ARM_32BIT_STATE)
|
||||
#define NPY_CPU_ARMEL_AARCH32
|
||||
#elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
|
||||
#define NPY_CPU_ARMEL_AARCH64
|
||||
#else
|
||||
#define NPY_CPU_ARMEL
|
||||
#endif
|
||||
#else
|
||||
# error Unknown ARM CPU, please report this to numpy maintainers with \
|
||||
information about your platform (OS, CPU and compiler)
|
||||
#endif
|
||||
#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
|
||||
#define NPY_CPU_SH_LE
|
||||
#elif defined(__sh__) && defined(__BIG_ENDIAN__)
|
||||
#define NPY_CPU_SH_BE
|
||||
#elif defined(__MIPSEL__)
|
||||
#define NPY_CPU_MIPSEL
|
||||
#elif defined(__MIPSEB__)
|
||||
#define NPY_CPU_MIPSEB
|
||||
#elif defined(__or1k__)
|
||||
#define NPY_CPU_OR1K
|
||||
#elif defined(__mc68000__)
|
||||
#define NPY_CPU_M68K
|
||||
#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
|
||||
#define NPY_CPU_ARCEL
|
||||
#elif defined(__arc__) && defined(__BIG_ENDIAN__)
|
||||
#define NPY_CPU_ARCEB
|
||||
#elif defined(__riscv)
|
||||
#if __riscv_xlen == 64
|
||||
#define NPY_CPU_RISCV64
|
||||
#elif __riscv_xlen == 32
|
||||
#define NPY_CPU_RISCV32
|
||||
#endif
|
||||
#elif defined(__loongarch_lp64)
|
||||
#define NPY_CPU_LOONGARCH64
|
||||
#elif defined(__EMSCRIPTEN__)
|
||||
/* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
|
||||
#define NPY_CPU_WASM
|
||||
#else
|
||||
#error Unknown CPU, please report this to numpy maintainers with \
|
||||
information about your platform (OS, CPU and compiler)
|
||||
#endif
|
||||
|
||||
#define NPY_ALIGNMENT_REQUIRED 1
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
|
||||
@ -0,0 +1,78 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
|
||||
|
||||
/*
|
||||
* NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
|
||||
* endian.h
|
||||
*/
|
||||
|
||||
#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)
|
||||
/* Use endian.h if available */
|
||||
|
||||
#if defined(NPY_HAVE_ENDIAN_H)
|
||||
#include <endian.h>
|
||||
#elif defined(NPY_HAVE_SYS_ENDIAN_H)
|
||||
#include <sys/endian.h>
|
||||
#endif
|
||||
|
||||
#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)
|
||||
#define NPY_BYTE_ORDER BYTE_ORDER
|
||||
#define NPY_LITTLE_ENDIAN LITTLE_ENDIAN
|
||||
#define NPY_BIG_ENDIAN BIG_ENDIAN
|
||||
#elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
|
||||
#define NPY_BYTE_ORDER _BYTE_ORDER
|
||||
#define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN
|
||||
#define NPY_BIG_ENDIAN _BIG_ENDIAN
|
||||
#elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
|
||||
#define NPY_BYTE_ORDER __BYTE_ORDER
|
||||
#define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
|
||||
#define NPY_BIG_ENDIAN __BIG_ENDIAN
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef NPY_BYTE_ORDER
|
||||
/* Set endianness info using target CPU */
|
||||
#include "npy_cpu.h"
|
||||
|
||||
#define NPY_LITTLE_ENDIAN 1234
|
||||
#define NPY_BIG_ENDIAN 4321
|
||||
|
||||
#if defined(NPY_CPU_X86) \
|
||||
|| defined(NPY_CPU_AMD64) \
|
||||
|| defined(NPY_CPU_IA64) \
|
||||
|| defined(NPY_CPU_ALPHA) \
|
||||
|| defined(NPY_CPU_ARMEL) \
|
||||
|| defined(NPY_CPU_ARMEL_AARCH32) \
|
||||
|| defined(NPY_CPU_ARMEL_AARCH64) \
|
||||
|| defined(NPY_CPU_SH_LE) \
|
||||
|| defined(NPY_CPU_MIPSEL) \
|
||||
|| defined(NPY_CPU_PPC64LE) \
|
||||
|| defined(NPY_CPU_ARCEL) \
|
||||
|| defined(NPY_CPU_RISCV64) \
|
||||
|| defined(NPY_CPU_RISCV32) \
|
||||
|| defined(NPY_CPU_LOONGARCH) \
|
||||
|| defined(NPY_CPU_WASM)
|
||||
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
|
||||
|
||||
#elif defined(NPY_CPU_PPC) \
|
||||
|| defined(NPY_CPU_SPARC) \
|
||||
|| defined(NPY_CPU_S390) \
|
||||
|| defined(NPY_CPU_HPPA) \
|
||||
|| defined(NPY_CPU_PPC64) \
|
||||
|| defined(NPY_CPU_ARMEB) \
|
||||
|| defined(NPY_CPU_ARMEB_AARCH32) \
|
||||
|| defined(NPY_CPU_ARMEB_AARCH64) \
|
||||
|| defined(NPY_CPU_SH_BE) \
|
||||
|| defined(NPY_CPU_MIPSEB) \
|
||||
|| defined(NPY_CPU_OR1K) \
|
||||
|| defined(NPY_CPU_M68K) \
|
||||
|| defined(NPY_CPU_ARCEB)
|
||||
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
|
||||
|
||||
#else
|
||||
#error Unknown CPU: can not set endianness
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */
|
||||
@ -0,0 +1,602 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
|
||||
|
||||
#include <numpy/npy_common.h>
|
||||
|
||||
#include <math.h>
|
||||
|
||||
/* By adding static inline specifiers to npy_math function definitions when
|
||||
appropriate, compiler is given the opportunity to optimize */
|
||||
#if NPY_INLINE_MATH
|
||||
#define NPY_INPLACE static inline
|
||||
#else
|
||||
#define NPY_INPLACE
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))
|
||||
#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))
|
||||
|
||||
/*
|
||||
* NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99
|
||||
* for INFINITY)
|
||||
*
|
||||
* XXX: I should test whether INFINITY and NAN are available on the platform
|
||||
*/
|
||||
static inline float __npy_inff(void)
|
||||
{
|
||||
const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
|
||||
return __bint.__f;
|
||||
}
|
||||
|
||||
static inline float __npy_nanf(void)
|
||||
{
|
||||
const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
|
||||
return __bint.__f;
|
||||
}
|
||||
|
||||
static inline float __npy_pzerof(void)
|
||||
{
|
||||
const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
|
||||
return __bint.__f;
|
||||
}
|
||||
|
||||
static inline float __npy_nzerof(void)
|
||||
{
|
||||
const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
|
||||
return __bint.__f;
|
||||
}
|
||||
|
||||
#define NPY_INFINITYF __npy_inff()
|
||||
#define NPY_NANF __npy_nanf()
|
||||
#define NPY_PZEROF __npy_pzerof()
|
||||
#define NPY_NZEROF __npy_nzerof()
|
||||
|
||||
#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
|
||||
#define NPY_NAN ((npy_double)NPY_NANF)
|
||||
#define NPY_PZERO ((npy_double)NPY_PZEROF)
|
||||
#define NPY_NZERO ((npy_double)NPY_NZEROF)
|
||||
|
||||
#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
|
||||
#define NPY_NANL ((npy_longdouble)NPY_NANF)
|
||||
#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
|
||||
#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)
|
||||
|
||||
/*
|
||||
* Useful constants
|
||||
*/
|
||||
#define NPY_E 2.718281828459045235360287471352662498 /* e */
|
||||
#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
|
||||
#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
|
||||
#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
|
||||
#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
|
||||
#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
|
||||
#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
|
||||
#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
|
||||
#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
|
||||
#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
|
||||
#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
|
||||
#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
|
||||
#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
|
||||
|
||||
#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
|
||||
#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
|
||||
#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
|
||||
#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
|
||||
#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
|
||||
#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
|
||||
#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
|
||||
#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
|
||||
#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
|
||||
#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
|
||||
#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
|
||||
#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
|
||||
#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */
|
||||
|
||||
#define NPY_El 2.718281828459045235360287471352662498L /* e */
|
||||
#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
|
||||
#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
|
||||
#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
|
||||
#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
|
||||
#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
|
||||
#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
|
||||
#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
|
||||
#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
|
||||
#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
|
||||
#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
|
||||
#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
|
||||
#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
|
||||
|
||||
/*
|
||||
* Integer functions.
|
||||
*/
|
||||
NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);
|
||||
NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);
|
||||
NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);
|
||||
NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);
|
||||
NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);
|
||||
NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);
|
||||
|
||||
NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);
|
||||
NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);
|
||||
NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);
|
||||
NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);
|
||||
NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
|
||||
NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
|
||||
|
||||
NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
|
||||
NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
|
||||
NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
|
||||
NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
|
||||
NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
|
||||
NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
|
||||
NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
|
||||
NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
|
||||
NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
|
||||
NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
|
||||
|
||||
NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
|
||||
NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
|
||||
NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
|
||||
NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
|
||||
NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
|
||||
NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
|
||||
NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
|
||||
NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
|
||||
NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
|
||||
NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
|
||||
|
||||
NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a);
|
||||
NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a);
|
||||
NPY_INPLACE uint8_t npy_popcountu(npy_uint a);
|
||||
NPY_INPLACE uint8_t npy_popcountul(npy_ulong a);
|
||||
NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a);
|
||||
NPY_INPLACE uint8_t npy_popcounthh(npy_byte a);
|
||||
NPY_INPLACE uint8_t npy_popcounth(npy_short a);
|
||||
NPY_INPLACE uint8_t npy_popcount(npy_int a);
|
||||
NPY_INPLACE uint8_t npy_popcountl(npy_long a);
|
||||
NPY_INPLACE uint8_t npy_popcountll(npy_longlong a);
|
||||
|
||||
/*
|
||||
* C99 double math funcs that need fixups or are blocklist-able
|
||||
*/
|
||||
NPY_INPLACE double npy_sin(double x);
|
||||
NPY_INPLACE double npy_cos(double x);
|
||||
NPY_INPLACE double npy_tan(double x);
|
||||
NPY_INPLACE double npy_hypot(double x, double y);
|
||||
NPY_INPLACE double npy_log2(double x);
|
||||
NPY_INPLACE double npy_atan2(double x, double y);
|
||||
|
||||
/* Mandatory C99 double math funcs, no blocklisting or fixups */
|
||||
/* defined for legacy reasons, should be deprecated at some point */
|
||||
#define npy_sinh sinh
|
||||
#define npy_cosh cosh
|
||||
#define npy_tanh tanh
|
||||
#define npy_asin asin
|
||||
#define npy_acos acos
|
||||
#define npy_atan atan
|
||||
#define npy_log log
|
||||
#define npy_log10 log10
|
||||
#define npy_cbrt cbrt
|
||||
#define npy_fabs fabs
|
||||
#define npy_ceil ceil
|
||||
#define npy_fmod fmod
|
||||
#define npy_floor floor
|
||||
#define npy_expm1 expm1
|
||||
#define npy_log1p log1p
|
||||
#define npy_acosh acosh
|
||||
#define npy_asinh asinh
|
||||
#define npy_atanh atanh
|
||||
#define npy_rint rint
|
||||
#define npy_trunc trunc
|
||||
#define npy_exp2 exp2
|
||||
#define npy_frexp frexp
|
||||
#define npy_ldexp ldexp
|
||||
#define npy_copysign copysign
|
||||
#define npy_exp exp
|
||||
#define npy_sqrt sqrt
|
||||
#define npy_pow pow
|
||||
#define npy_modf modf
|
||||
#define npy_nextafter nextafter
|
||||
|
||||
double npy_spacing(double x);
|
||||
|
||||
/*
|
||||
* IEEE 754 fpu handling
|
||||
*/
|
||||
|
||||
/* use builtins to avoid function calls in tight loops
|
||||
* only available if npy_config.h is available (= numpys own build) */
|
||||
#ifdef HAVE___BUILTIN_ISNAN
|
||||
#define npy_isnan(x) __builtin_isnan(x)
|
||||
#else
|
||||
#define npy_isnan(x) isnan(x)
|
||||
#endif
|
||||
|
||||
|
||||
/* only available if npy_config.h is available (= numpys own build) */
|
||||
#ifdef HAVE___BUILTIN_ISFINITE
|
||||
#define npy_isfinite(x) __builtin_isfinite(x)
|
||||
#else
|
||||
#define npy_isfinite(x) isfinite((x))
|
||||
#endif
|
||||
|
||||
/* only available if npy_config.h is available (= numpys own build) */
|
||||
#ifdef HAVE___BUILTIN_ISINF
|
||||
#define npy_isinf(x) __builtin_isinf(x)
|
||||
#else
|
||||
#define npy_isinf(x) isinf((x))
|
||||
#endif
|
||||
|
||||
#define npy_signbit(x) signbit((x))
|
||||
|
||||
/*
|
||||
* float C99 math funcs that need fixups or are blocklist-able
|
||||
*/
|
||||
NPY_INPLACE float npy_sinf(float x);
|
||||
NPY_INPLACE float npy_cosf(float x);
|
||||
NPY_INPLACE float npy_tanf(float x);
|
||||
NPY_INPLACE float npy_expf(float x);
|
||||
NPY_INPLACE float npy_sqrtf(float x);
|
||||
NPY_INPLACE float npy_hypotf(float x, float y);
|
||||
NPY_INPLACE float npy_log2f(float x);
|
||||
NPY_INPLACE float npy_atan2f(float x, float y);
|
||||
NPY_INPLACE float npy_powf(float x, float y);
|
||||
NPY_INPLACE float npy_modff(float x, float* y);
|
||||
|
||||
/* Mandatory C99 float math funcs, no blocklisting or fixups */
|
||||
/* defined for legacy reasons, should be deprecated at some point */
|
||||
|
||||
#define npy_sinhf sinhf
|
||||
#define npy_coshf coshf
|
||||
#define npy_tanhf tanhf
|
||||
#define npy_asinf asinf
|
||||
#define npy_acosf acosf
|
||||
#define npy_atanf atanf
|
||||
#define npy_logf logf
|
||||
#define npy_log10f log10f
|
||||
#define npy_cbrtf cbrtf
|
||||
#define npy_fabsf fabsf
|
||||
#define npy_ceilf ceilf
|
||||
#define npy_fmodf fmodf
|
||||
#define npy_floorf floorf
|
||||
#define npy_expm1f expm1f
|
||||
#define npy_log1pf log1pf
|
||||
#define npy_asinhf asinhf
|
||||
#define npy_acoshf acoshf
|
||||
#define npy_atanhf atanhf
|
||||
#define npy_rintf rintf
|
||||
#define npy_truncf truncf
|
||||
#define npy_exp2f exp2f
|
||||
#define npy_frexpf frexpf
|
||||
#define npy_ldexpf ldexpf
|
||||
#define npy_copysignf copysignf
|
||||
#define npy_nextafterf nextafterf
|
||||
|
||||
float npy_spacingf(float x);
|
||||
|
||||
/*
|
||||
* long double C99 double math funcs that need fixups or are blocklist-able
|
||||
*/
|
||||
NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
|
||||
NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
|
||||
NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
|
||||
NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
|
||||
|
||||
/* Mandatory C99 double math funcs, no blocklisting or fixups */
|
||||
/* defined for legacy reasons, should be deprecated at some point */
|
||||
#define npy_sinhl sinhl
|
||||
#define npy_coshl coshl
|
||||
#define npy_tanhl tanhl
|
||||
#define npy_fabsl fabsl
|
||||
#define npy_floorl floorl
|
||||
#define npy_ceill ceill
|
||||
#define npy_rintl rintl
|
||||
#define npy_truncl truncl
|
||||
#define npy_cbrtl cbrtl
|
||||
#define npy_log10l log10l
|
||||
#define npy_logl logl
|
||||
#define npy_expm1l expm1l
|
||||
#define npy_asinl asinl
|
||||
#define npy_acosl acosl
|
||||
#define npy_atanl atanl
|
||||
#define npy_asinhl asinhl
|
||||
#define npy_acoshl acoshl
|
||||
#define npy_atanhl atanhl
|
||||
#define npy_log1pl log1pl
|
||||
#define npy_exp2l exp2l
|
||||
#define npy_fmodl fmodl
|
||||
#define npy_frexpl frexpl
|
||||
#define npy_ldexpl ldexpl
|
||||
#define npy_copysignl copysignl
|
||||
#define npy_nextafterl nextafterl
|
||||
|
||||
npy_longdouble npy_spacingl(npy_longdouble x);
|
||||
|
||||
/*
|
||||
* Non standard functions
|
||||
*/
|
||||
NPY_INPLACE double npy_deg2rad(double x);
|
||||
NPY_INPLACE double npy_rad2deg(double x);
|
||||
NPY_INPLACE double npy_logaddexp(double x, double y);
|
||||
NPY_INPLACE double npy_logaddexp2(double x, double y);
|
||||
NPY_INPLACE double npy_divmod(double x, double y, double *modulus);
|
||||
NPY_INPLACE double npy_heaviside(double x, double h0);
|
||||
|
||||
NPY_INPLACE float npy_deg2radf(float x);
|
||||
NPY_INPLACE float npy_rad2degf(float x);
|
||||
NPY_INPLACE float npy_logaddexpf(float x, float y);
|
||||
NPY_INPLACE float npy_logaddexp2f(float x, float y);
|
||||
NPY_INPLACE float npy_divmodf(float x, float y, float *modulus);
|
||||
NPY_INPLACE float npy_heavisidef(float x, float h0);
|
||||
|
||||
NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);
|
||||
NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
|
||||
NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
|
||||
NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,
|
||||
npy_longdouble *modulus);
|
||||
NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
|
||||
|
||||
#define npy_degrees npy_rad2deg
|
||||
#define npy_degreesf npy_rad2degf
|
||||
#define npy_degreesl npy_rad2degl
|
||||
|
||||
#define npy_radians npy_deg2rad
|
||||
#define npy_radiansf npy_deg2radf
|
||||
#define npy_radiansl npy_deg2radl
|
||||
|
||||
/*
|
||||
* Complex declarations
|
||||
*/
|
||||
|
||||
/*
 * Return the real part of a complex double.
 *
 * Under C++ the npy_cdouble type exposes a _Val[2] array (presumably the
 * MSVC-style complex struct layout — real part at index 0); in C we defer
 * to the C99 creal() from <complex.h>.
 */
static inline double npy_creal(const npy_cdouble z)
{
#if defined(__cplusplus)
    return z._Val[0];
#else
    return creal(z);
#endif
}
|
||||
|
||||
/*
 * Overwrite the real component of *z with r.
 *
 * Relies on npy_cdouble being layout-compatible with double[2] with the
 * real part first (true for C99 _Complex; presumably also for the C++
 * fallback struct — the layout itself is defined elsewhere).
 */
static inline void npy_csetreal(npy_cdouble *z, const double r)
{
    double *components = (double *) z;
    components[0] = r;
}
|
||||
|
||||
/*
 * Return the imaginary part of a complex double.
 *
 * Under C++ the npy_cdouble type exposes a _Val[2] array (imaginary part
 * at index 1); in C we defer to the C99 cimag() from <complex.h>.
 */
static inline double npy_cimag(const npy_cdouble z)
{
#if defined(__cplusplus)
    return z._Val[1];
#else
    return cimag(z);
#endif
}
|
||||
|
||||
/*
 * Overwrite the imaginary component of *z with i.
 *
 * Relies on npy_cdouble being layout-compatible with double[2] with the
 * imaginary part second (true for C99 _Complex).
 */
static inline void npy_csetimag(npy_cdouble *z, const double i)
{
    double *components = (double *) z;
    components[1] = i;
}
|
||||
|
||||
/*
 * Return the real part of a complex float.
 *
 * Under C++ the npy_cfloat type exposes a _Val[2] array (real part at
 * index 0); in C we defer to the C99 crealf() from <complex.h>.
 */
static inline float npy_crealf(const npy_cfloat z)
{
#if defined(__cplusplus)
    return z._Val[0];
#else
    return crealf(z);
#endif
}
|
||||
|
||||
/*
 * Overwrite the real component of *z with r.
 *
 * Relies on npy_cfloat being layout-compatible with float[2] with the
 * real part first (true for C99 _Complex).
 */
static inline void npy_csetrealf(npy_cfloat *z, const float r)
{
    float *components = (float *) z;
    components[0] = r;
}
|
||||
|
||||
/*
 * Return the imaginary part of a complex float.
 *
 * Under C++ the npy_cfloat type exposes a _Val[2] array (imaginary part
 * at index 1); in C we defer to the C99 cimagf() from <complex.h>.
 */
static inline float npy_cimagf(const npy_cfloat z)
{
#if defined(__cplusplus)
    return z._Val[1];
#else
    return cimagf(z);
#endif
}
|
||||
|
||||
/*
 * Overwrite the imaginary component of *z with i.
 *
 * Relies on npy_cfloat being layout-compatible with float[2] with the
 * imaginary part second (true for C99 _Complex).
 */
static inline void npy_csetimagf(npy_cfloat *z, const float i)
{
    float *components = (float *) z;
    components[1] = i;
}
|
||||
|
||||
/*
 * Return the real part of a complex long double.
 *
 * Under C++ the npy_clongdouble type exposes a _Val[2] array (real part
 * at index 0, explicitly cast to npy_longdouble); in C we defer to the
 * C99 creall() from <complex.h>.
 */
static inline npy_longdouble npy_creall(const npy_clongdouble z)
{
#if defined(__cplusplus)
    return (npy_longdouble)z._Val[0];
#else
    return creall(z);
#endif
}
|
||||
|
||||
/*
 * Overwrite the real component of *z with r.
 *
 * Relies on npy_clongdouble being layout-compatible with longdouble_t[2]
 * with the real part first (true for C99 _Complex).
 */
static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r)
{
    longdouble_t *components = (longdouble_t *) z;
    components[0] = r;
}
|
||||
|
||||
/*
 * Return the imaginary part of a complex long double.
 *
 * Under C++ the npy_clongdouble type exposes a _Val[2] array (imaginary
 * part at index 1, explicitly cast to npy_longdouble); in C we defer to
 * the C99 cimagl() from <complex.h>.
 */
static inline npy_longdouble npy_cimagl(const npy_clongdouble z)
{
#if defined(__cplusplus)
    return (npy_longdouble)z._Val[1];
#else
    return cimagl(z);
#endif
}
|
||||
|
||||
/*
 * Overwrite the imaginary component of *z with i.
 *
 * Relies on npy_clongdouble being layout-compatible with longdouble_t[2]
 * with the imaginary part second (true for C99 _Complex).
 */
static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i)
{
    longdouble_t *components = (longdouble_t *) z;
    components[1] = i;
}
|
||||
|
||||
#define NPY_CSETREAL(z, r) npy_csetreal(z, r)
|
||||
#define NPY_CSETIMAG(z, i) npy_csetimag(z, i)
|
||||
#define NPY_CSETREALF(z, r) npy_csetrealf(z, r)
|
||||
#define NPY_CSETIMAGF(z, i) npy_csetimagf(z, i)
|
||||
#define NPY_CSETREALL(z, r) npy_csetreall(z, r)
|
||||
#define NPY_CSETIMAGL(z, i) npy_csetimagl(z, i)
|
||||
|
||||
/*
 * Assemble an npy_cdouble from its real part x and imaginary part y.
 * The two component stores are independent, so their order is arbitrary.
 */
static inline npy_cdouble npy_cpack(double x, double y)
{
    npy_cdouble packed;
    npy_csetimag(&packed, y);
    npy_csetreal(&packed, x);
    return packed;
}
|
||||
|
||||
/*
 * Assemble an npy_cfloat from its real part x and imaginary part y.
 * The two component stores are independent, so their order is arbitrary.
 */
static inline npy_cfloat npy_cpackf(float x, float y)
{
    npy_cfloat packed;
    npy_csetimagf(&packed, y);
    npy_csetrealf(&packed, x);
    return packed;
}
|
||||
|
||||
/*
 * Assemble an npy_clongdouble from its real part x and imaginary part y.
 * The two component stores are independent, so their order is arbitrary.
 */
static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
{
    npy_clongdouble packed;
    npy_csetimagl(&packed, y);
    npy_csetreall(&packed, x);
    return packed;
}
|
||||
|
||||
/*
|
||||
* Double precision complex functions
|
||||
*/
|
||||
double npy_cabs(npy_cdouble z);
|
||||
double npy_carg(npy_cdouble z);
|
||||
|
||||
npy_cdouble npy_cexp(npy_cdouble z);
|
||||
npy_cdouble npy_clog(npy_cdouble z);
|
||||
npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
|
||||
|
||||
npy_cdouble npy_csqrt(npy_cdouble z);
|
||||
|
||||
npy_cdouble npy_ccos(npy_cdouble z);
|
||||
npy_cdouble npy_csin(npy_cdouble z);
|
||||
npy_cdouble npy_ctan(npy_cdouble z);
|
||||
|
||||
npy_cdouble npy_ccosh(npy_cdouble z);
|
||||
npy_cdouble npy_csinh(npy_cdouble z);
|
||||
npy_cdouble npy_ctanh(npy_cdouble z);
|
||||
|
||||
npy_cdouble npy_cacos(npy_cdouble z);
|
||||
npy_cdouble npy_casin(npy_cdouble z);
|
||||
npy_cdouble npy_catan(npy_cdouble z);
|
||||
|
||||
npy_cdouble npy_cacosh(npy_cdouble z);
|
||||
npy_cdouble npy_casinh(npy_cdouble z);
|
||||
npy_cdouble npy_catanh(npy_cdouble z);
|
||||
|
||||
/*
|
||||
* Single precision complex functions
|
||||
*/
|
||||
float npy_cabsf(npy_cfloat z);
|
||||
float npy_cargf(npy_cfloat z);
|
||||
|
||||
npy_cfloat npy_cexpf(npy_cfloat z);
|
||||
npy_cfloat npy_clogf(npy_cfloat z);
|
||||
npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
|
||||
|
||||
npy_cfloat npy_csqrtf(npy_cfloat z);
|
||||
|
||||
npy_cfloat npy_ccosf(npy_cfloat z);
|
||||
npy_cfloat npy_csinf(npy_cfloat z);
|
||||
npy_cfloat npy_ctanf(npy_cfloat z);
|
||||
|
||||
npy_cfloat npy_ccoshf(npy_cfloat z);
|
||||
npy_cfloat npy_csinhf(npy_cfloat z);
|
||||
npy_cfloat npy_ctanhf(npy_cfloat z);
|
||||
|
||||
npy_cfloat npy_cacosf(npy_cfloat z);
|
||||
npy_cfloat npy_casinf(npy_cfloat z);
|
||||
npy_cfloat npy_catanf(npy_cfloat z);
|
||||
|
||||
npy_cfloat npy_cacoshf(npy_cfloat z);
|
||||
npy_cfloat npy_casinhf(npy_cfloat z);
|
||||
npy_cfloat npy_catanhf(npy_cfloat z);
|
||||
|
||||
|
||||
/*
|
||||
* Extended precision complex functions
|
||||
*/
|
||||
npy_longdouble npy_cabsl(npy_clongdouble z);
|
||||
npy_longdouble npy_cargl(npy_clongdouble z);
|
||||
|
||||
npy_clongdouble npy_cexpl(npy_clongdouble z);
|
||||
npy_clongdouble npy_clogl(npy_clongdouble z);
|
||||
npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);
|
||||
|
||||
npy_clongdouble npy_csqrtl(npy_clongdouble z);
|
||||
|
||||
npy_clongdouble npy_ccosl(npy_clongdouble z);
|
||||
npy_clongdouble npy_csinl(npy_clongdouble z);
|
||||
npy_clongdouble npy_ctanl(npy_clongdouble z);
|
||||
|
||||
npy_clongdouble npy_ccoshl(npy_clongdouble z);
|
||||
npy_clongdouble npy_csinhl(npy_clongdouble z);
|
||||
npy_clongdouble npy_ctanhl(npy_clongdouble z);
|
||||
|
||||
npy_clongdouble npy_cacosl(npy_clongdouble z);
|
||||
npy_clongdouble npy_casinl(npy_clongdouble z);
|
||||
npy_clongdouble npy_catanl(npy_clongdouble z);
|
||||
|
||||
npy_clongdouble npy_cacoshl(npy_clongdouble z);
|
||||
npy_clongdouble npy_casinhl(npy_clongdouble z);
|
||||
npy_clongdouble npy_catanhl(npy_clongdouble z);
|
||||
|
||||
|
||||
/*
|
||||
* Functions that set the floating point error
|
||||
* status word.
|
||||
*/
|
||||
|
||||
/*
|
||||
* platform-dependent code translates floating point
|
||||
* status to an integer sum of these values
|
||||
*/
|
||||
#define NPY_FPE_DIVIDEBYZERO 1
|
||||
#define NPY_FPE_OVERFLOW 2
|
||||
#define NPY_FPE_UNDERFLOW 4
|
||||
#define NPY_FPE_INVALID 8
|
||||
|
||||
int npy_clear_floatstatus_barrier(char*);
|
||||
int npy_get_floatstatus_barrier(char*);
|
||||
/*
|
||||
* use caution with these - clang and gcc8.1 are known to reorder calls
|
||||
* to this form of the function which can defeat the check. The _barrier
|
||||
* form of the call is preferable, where the argument is
|
||||
* (char*)&local_variable
|
||||
*/
|
||||
int npy_clear_floatstatus(void);
|
||||
int npy_get_floatstatus(void);
|
||||
|
||||
void npy_set_floatstatus_divbyzero(void);
|
||||
void npy_set_floatstatus_overflow(void);
|
||||
void npy_set_floatstatus_underflow(void);
|
||||
void npy_set_floatstatus_invalid(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#if NPY_INLINE_MATH
|
||||
#include "npy_math_internal.h"
|
||||
#endif
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */
|
||||
@ -0,0 +1,20 @@
|
||||
/*
|
||||
* This include file is provided for inclusion in Cython *.pyd files where
|
||||
* one would like to define the NPY_NO_DEPRECATED_API macro. It can be
|
||||
* included by
|
||||
*
|
||||
* cdef extern from "npy_no_deprecated_api.h": pass
|
||||
*
|
||||
*/
|
||||
#ifndef NPY_NO_DEPRECATED_API
|
||||
|
||||
/* put this check here since there may be multiple includes in C extensions. */
|
||||
#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \
|
||||
defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \
|
||||
defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_)
|
||||
#error "npy_no_deprecated_api.h" must be first among numpy includes.
|
||||
#else
|
||||
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
|
||||
#endif
|
||||
|
||||
#endif /* NPY_NO_DEPRECATED_API */
|
||||
@ -0,0 +1,42 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
|
||||
|
||||
#if defined(linux) || defined(__linux) || defined(__linux__)
|
||||
#define NPY_OS_LINUX
|
||||
#elif defined(__FreeBSD__) || defined(__NetBSD__) || \
|
||||
defined(__OpenBSD__) || defined(__DragonFly__)
|
||||
#define NPY_OS_BSD
|
||||
#ifdef __FreeBSD__
|
||||
#define NPY_OS_FREEBSD
|
||||
#elif defined(__NetBSD__)
|
||||
#define NPY_OS_NETBSD
|
||||
#elif defined(__OpenBSD__)
|
||||
#define NPY_OS_OPENBSD
|
||||
#elif defined(__DragonFly__)
|
||||
#define NPY_OS_DRAGONFLY
|
||||
#endif
|
||||
#elif defined(sun) || defined(__sun)
|
||||
#define NPY_OS_SOLARIS
|
||||
#elif defined(__CYGWIN__)
|
||||
#define NPY_OS_CYGWIN
|
||||
/* We are on Windows.*/
|
||||
#elif defined(_WIN32)
|
||||
/* We are using MinGW (64-bit or 32-bit)*/
|
||||
#if defined(__MINGW32__) || defined(__MINGW64__)
|
||||
#define NPY_OS_MINGW
|
||||
/* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/
|
||||
#elif defined(_WIN64)
|
||||
#define NPY_OS_WIN64
|
||||
/* Otherwise assume we are targeting 32-bit Windows*/
|
||||
#else
|
||||
#define NPY_OS_WIN32
|
||||
#endif
|
||||
#elif defined(__APPLE__)
|
||||
#define NPY_OS_DARWIN
|
||||
#elif defined(__HAIKU__)
|
||||
#define NPY_OS_HAIKU
|
||||
#else
|
||||
#define NPY_OS_UNKNOWN
|
||||
#endif
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */
|
||||
@ -0,0 +1,182 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
|
||||
|
||||
#include "_numpyconfig.h"
|
||||
|
||||
/*
|
||||
* On Mac OS X, because there is only one configuration stage for all the archs
|
||||
* in universal builds, any macro which depends on the arch needs to be
|
||||
* hardcoded.
|
||||
*
|
||||
* Note that distutils/pip will attempt a universal2 build when Python itself
|
||||
* is built as universal2, hence this hardcoding is needed even if we do not
|
||||
* support universal2 wheels anymore (see gh-22796).
|
||||
* This code block can be removed after we have dropped the setup.py based
|
||||
* build completely.
|
||||
*/
|
||||
#ifdef __APPLE__
|
||||
#undef NPY_SIZEOF_LONG
|
||||
|
||||
#ifdef __LP64__
|
||||
#define NPY_SIZEOF_LONG 8
|
||||
#else
|
||||
#define NPY_SIZEOF_LONG 4
|
||||
#endif
|
||||
|
||||
#undef NPY_SIZEOF_LONGDOUBLE
|
||||
#undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
|
||||
#ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE
|
||||
#undef HAVE_LDOUBLE_IEEE_DOUBLE_LE
|
||||
#endif
|
||||
#ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
|
||||
#undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
|
||||
#endif
|
||||
|
||||
#if defined(__arm64__)
|
||||
#define NPY_SIZEOF_LONGDOUBLE 8
|
||||
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
|
||||
#define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1
|
||||
#elif defined(__x86_64)
|
||||
#define NPY_SIZEOF_LONGDOUBLE 16
|
||||
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
|
||||
#define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1
|
||||
#elif defined (__i386)
|
||||
#define NPY_SIZEOF_LONGDOUBLE 12
|
||||
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
|
||||
#elif defined(__ppc__) || defined (__ppc64__)
|
||||
#define NPY_SIZEOF_LONGDOUBLE 16
|
||||
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
|
||||
#else
|
||||
#error "unknown architecture"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro,
|
||||
* we include API version numbers for specific versions of NumPy.
|
||||
* To exclude all API that was deprecated as of 1.7, add the following before
|
||||
* #including any NumPy headers:
|
||||
* #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
|
||||
* The same is true for NPY_TARGET_VERSION, although NumPy will default to
|
||||
* a backwards compatible build anyway.
|
||||
*/
|
||||
#define NPY_1_7_API_VERSION 0x00000007
|
||||
#define NPY_1_8_API_VERSION 0x00000008
|
||||
#define NPY_1_9_API_VERSION 0x00000009
|
||||
#define NPY_1_10_API_VERSION 0x0000000a
|
||||
#define NPY_1_11_API_VERSION 0x0000000a
|
||||
#define NPY_1_12_API_VERSION 0x0000000a
|
||||
#define NPY_1_13_API_VERSION 0x0000000b
|
||||
#define NPY_1_14_API_VERSION 0x0000000c
|
||||
#define NPY_1_15_API_VERSION 0x0000000c
|
||||
#define NPY_1_16_API_VERSION 0x0000000d
|
||||
#define NPY_1_17_API_VERSION 0x0000000d
|
||||
#define NPY_1_18_API_VERSION 0x0000000d
|
||||
#define NPY_1_19_API_VERSION 0x0000000d
|
||||
#define NPY_1_20_API_VERSION 0x0000000e
|
||||
#define NPY_1_21_API_VERSION 0x0000000e
|
||||
#define NPY_1_22_API_VERSION 0x0000000f
|
||||
#define NPY_1_23_API_VERSION 0x00000010
|
||||
#define NPY_1_24_API_VERSION 0x00000010
|
||||
#define NPY_1_25_API_VERSION 0x00000011
|
||||
#define NPY_2_0_API_VERSION 0x00000012
|
||||
#define NPY_2_1_API_VERSION 0x00000013
|
||||
#define NPY_2_2_API_VERSION 0x00000013
|
||||
#define NPY_2_3_API_VERSION 0x00000014
|
||||
|
||||
|
||||
/*
|
||||
* Binary compatibility version number. This number is increased
|
||||
* whenever the C-API is changed such that binary compatibility is
|
||||
* broken, i.e. whenever a recompile of extension modules is needed.
|
||||
*/
|
||||
#define NPY_VERSION NPY_ABI_VERSION
|
||||
|
||||
/*
|
||||
* Minor API version we are compiling to be compatible with. The version
|
||||
* Number is always increased when the API changes via: `NPY_API_VERSION`
|
||||
* (and should maybe just track the NumPy version).
|
||||
*
|
||||
* If we have an internal build, we always target the current version of
|
||||
* course.
|
||||
*
|
||||
* For downstream users, we default to an older version to provide them with
|
||||
* maximum compatibility by default. Downstream can choose to extend that
|
||||
* default, or narrow it down if they wish to use newer API. If you adjust
|
||||
* this, consider the Python version support (example for 1.25.x):
|
||||
*
|
||||
* NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12)
|
||||
* NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9
|
||||
* NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8
|
||||
* NumPy 1.15.x supports Python: ... 3.6 3.7
|
||||
*
|
||||
* Users of the stable ABI may wish to target the last Python that is not
|
||||
* end of life. This would be 3.8 at NumPy 1.25 release time.
|
||||
* 1.17 as default was the choice of oldest-support-numpy at the time and
|
||||
* has in practice no limit (compared to 1.19). Even earlier becomes legacy.
|
||||
*/
|
||||
#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
|
||||
/* NumPy internal build, always use current version. */
|
||||
#define NPY_FEATURE_VERSION NPY_API_VERSION
|
||||
#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION
|
||||
/* user provided a target version, use it */
|
||||
#define NPY_FEATURE_VERSION NPY_TARGET_VERSION
|
||||
#else
|
||||
/* Use the default (increase when dropping Python 3.11 support) */
|
||||
#define NPY_FEATURE_VERSION NPY_1_23_API_VERSION
|
||||
#endif
|
||||
|
||||
/* Sanity check the (requested) feature version */
|
||||
#if NPY_FEATURE_VERSION > NPY_API_VERSION
|
||||
#error "NPY_TARGET_VERSION higher than NumPy headers!"
|
||||
#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION
|
||||
/* No support for irrelevant old targets, no need for error, but warn. */
|
||||
#ifndef _MSC_VER
|
||||
#warning "Requested NumPy target lower than supported NumPy 1.15."
|
||||
#else
|
||||
#define _WARN___STR2__(x) #x
|
||||
#define _WARN___STR1__(x) _WARN___STR2__(x)
|
||||
#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
|
||||
#pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.")
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We define a human readable translation to the Python version of NumPy
|
||||
* for error messages (and also to allow grepping the binaries for conda).
|
||||
*/
|
||||
#if NPY_FEATURE_VERSION == NPY_1_7_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "1.7"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_8_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "1.8"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_9_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "1.9"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_10_API_VERSION /* also 1.11, 1.12 */
|
||||
#define NPY_FEATURE_VERSION_STRING "1.10"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_13_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "1.13"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_14_API_VERSION /* also 1.15 */
|
||||
#define NPY_FEATURE_VERSION_STRING "1.14"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_16_API_VERSION /* also 1.17, 1.18, 1.19 */
|
||||
#define NPY_FEATURE_VERSION_STRING "1.16"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_20_API_VERSION /* also 1.21 */
|
||||
#define NPY_FEATURE_VERSION_STRING "1.20"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_22_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "1.22"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_23_API_VERSION /* also 1.24 */
|
||||
#define NPY_FEATURE_VERSION_STRING "1.23"
|
||||
#elif NPY_FEATURE_VERSION == NPY_1_25_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "1.25"
|
||||
#elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "2.0"
|
||||
#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "2.1"
|
||||
#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "2.3"
|
||||
#else
|
||||
#error "Missing version string define for new NumPy version."
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
|
||||
@ -0,0 +1,21 @@
|
||||
zlib License
|
||||
------------
|
||||
|
||||
Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
|
||||
Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
@ -0,0 +1,20 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
|
||||
|
||||
#pragma once
|
||||
#include <stddef.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
|
||||
/* Must match the declaration in numpy/random/<any>.pxd */
|
||||
|
||||
/*
 * Generic bit-generator interface: a concrete generator stores its
 * internal state behind `state` and supplies the four function pointers
 * that draw values from it. Per the note above this struct, the layout
 * must match the declaration in numpy/random/<any>.pxd.
 */
typedef struct bitgen {
  void *state;                        /* opaque generator-specific state */
  uint64_t (*next_uint64)(void *st);  /* draw the next 64 random bits */
  uint32_t (*next_uint32)(void *st);  /* draw the next 32 random bits */
  double (*next_double)(void *st);    /* draw a random double; presumably uniform in [0, 1) — defined by the generator */
  uint64_t (*next_raw)(void *st);     /* raw generator output, width defined by the generator */
} bitgen_t;
|
||||
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */
|
||||
@ -0,0 +1,209 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <Python.h>
|
||||
#include "numpy/npy_common.h"
|
||||
#include <stddef.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "numpy/npy_math.h"
|
||||
#include "numpy/random/bitgen.h"
|
||||
|
||||
/*
|
||||
* RAND_INT_TYPE is used to share integer generators with RandomState which
|
||||
* used long in place of int64_t. If changing a distribution that uses
|
||||
* RAND_INT_TYPE, then the original unmodified copy must be retained for
|
||||
* use in RandomState by copying to the legacy distributions source file.
|
||||
*/
|
||||
#ifdef NP_RANDOM_LEGACY
|
||||
#define RAND_INT_TYPE long
|
||||
#define RAND_INT_MAX LONG_MAX
|
||||
#else
|
||||
#define RAND_INT_TYPE int64_t
|
||||
#define RAND_INT_MAX INT64_MAX
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#define DECLDIR __declspec(dllexport)
|
||||
#else
|
||||
#define DECLDIR extern
|
||||
#endif
|
||||
|
||||
#ifndef MIN
|
||||
#define MIN(x, y) (((x) < (y)) ? x : y)
|
||||
#define MAX(x, y) (((x) > (y)) ? x : y)
|
||||
#endif
|
||||
|
||||
#ifndef M_PI
|
||||
#define M_PI 3.14159265358979323846264338328
|
||||
#endif
|
||||
|
||||
/*
 * Cached working state for the binomial samplers.
 *
 * When has_binomial is nonzero, the remaining fields hold values
 * precomputed for one (p, n) parameter pair (psave/nsave), allowing
 * repeated draws with identical parameters to skip the setup work.
 * The exact meaning of the working variables (r, q, fm, m, p1..p4,
 * xm/xl/xr, c, laml/lamr) is defined by the sampler implementation,
 * which is not visible in this header — presumably the BTPE algorithm's
 * region constants; confirm against distributions.c.
 */
typedef struct s_binomial_t {
  int has_binomial; /* !=0: following parameters initialized for binomial */
  double psave;        /* p of the cached parameter pair — see note above */
  RAND_INT_TYPE nsave; /* n of the cached parameter pair — see note above */
  double r;
  double q;
  double fm;
  RAND_INT_TYPE m;
  double p1;
  double xm;
  double xl;
  double xr;
  double c;
  double laml;
  double lamr;
  double p2;
  double p3;
  double p4;
} binomial_t;
|
||||
|
||||
DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
|
||||
DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
|
||||
DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
|
||||
DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
|
||||
|
||||
DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
|
||||
DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
|
||||
DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
|
||||
DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
|
||||
|
||||
DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
|
||||
DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
|
||||
DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
|
||||
DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
|
||||
DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
|
||||
DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
|
||||
|
||||
DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
|
||||
DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
|
||||
DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
|
||||
DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
|
||||
DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
|
||||
DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
|
||||
|
||||
DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
|
||||
|
||||
DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
|
||||
DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
|
||||
|
||||
DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
|
||||
DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
|
||||
DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
|
||||
DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
|
||||
DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
|
||||
DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
|
||||
DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
|
||||
DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
|
||||
DECLDIR double random_power(bitgen_t *bitgen_state, double a);
|
||||
DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
|
||||
DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
|
||||
DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
|
||||
DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
|
||||
DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
|
||||
DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
|
||||
DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
|
||||
double nonc);
|
||||
DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
|
||||
double dfden, double nonc);
|
||||
DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
|
||||
DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
|
||||
DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
|
||||
double right);
|
||||
|
||||
DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
|
||||
DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
|
||||
double p);
|
||||
|
||||
DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
|
||||
int64_t n, binomial_t *binomial);
|
||||
|
||||
DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p);
|
||||
DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p);
|
||||
DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
|
||||
DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
|
||||
DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
|
||||
int64_t good, int64_t bad, int64_t sample);
|
||||
DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
|
||||
|
||||
/* Generate random uint64 numbers in closed interval [off, off + rng]. */
|
||||
DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
|
||||
uint64_t rng, uint64_t mask,
|
||||
bool use_masked);
|
||||
|
||||
/* Generate random uint32 numbers in closed interval [off, off + rng]. */
|
||||
DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
|
||||
uint32_t off, uint32_t rng,
|
||||
uint32_t mask, bool use_masked,
|
||||
int *bcnt, uint32_t *buf);
|
||||
DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
|
||||
uint16_t off, uint16_t rng,
|
||||
uint16_t mask, bool use_masked,
|
||||
int *bcnt, uint32_t *buf);
|
||||
DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
|
||||
uint8_t rng, uint8_t mask,
|
||||
bool use_masked, int *bcnt,
|
||||
uint32_t *buf);
|
||||
DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
|
||||
npy_bool rng, npy_bool mask,
|
||||
bool use_masked, int *bcnt,
|
||||
uint32_t *buf);
|
||||
|
||||
DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
|
||||
uint64_t rng, npy_intp cnt,
|
||||
bool use_masked, uint64_t *out);
|
||||
DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
|
||||
uint32_t rng, npy_intp cnt,
|
||||
bool use_masked, uint32_t *out);
|
||||
DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
|
||||
uint16_t rng, npy_intp cnt,
|
||||
bool use_masked, uint16_t *out);
|
||||
DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
|
||||
uint8_t rng, npy_intp cnt,
|
||||
bool use_masked, uint8_t *out);
|
||||
DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
|
||||
npy_bool rng, npy_intp cnt,
|
||||
bool use_masked, npy_bool *out);
|
||||
|
||||
DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
|
||||
double *pix, npy_intp d, binomial_t *binomial);
|
||||
|
||||
/* multivariate hypergeometric, "count" method */
|
||||
DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
|
||||
int64_t total,
|
||||
size_t num_colors, int64_t *colors,
|
||||
int64_t nsample,
|
||||
size_t num_variates, int64_t *variates);
|
||||
|
||||
/* multivariate hypergeometric, "marginals" method */
|
||||
DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
|
||||
int64_t total,
|
||||
size_t num_colors, int64_t *colors,
|
||||
int64_t nsample,
|
||||
size_t num_variates, int64_t *variates);
|
||||
|
||||
/* Common to legacy-distributions.c and distributions.c but not exported */
|
||||
|
||||
RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
|
||||
RAND_INT_TYPE n,
|
||||
double p,
|
||||
binomial_t *binomial);
|
||||
RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
|
||||
RAND_INT_TYPE n,
|
||||
double p,
|
||||
binomial_t *binomial);
|
||||
double random_loggam(double x);
|
||||
static inline double next_double(bitgen_t *bitgen_state) {
|
||||
return bitgen_state->next_double(bitgen_state->state);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */
|
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,343 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
|
||||
|
||||
#include <numpy/npy_math.h>
|
||||
#include <numpy/npy_common.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The legacy generic inner loop for a standard element-wise or
|
||||
* generalized ufunc.
|
||||
*/
|
||||
typedef void (*PyUFuncGenericFunction)
|
||||
(char **args,
|
||||
npy_intp const *dimensions,
|
||||
npy_intp const *strides,
|
||||
void *innerloopdata);
|
||||
|
||||
/*
|
||||
* The most generic one-dimensional inner loop for
|
||||
* a masked standard element-wise ufunc. "Masked" here means that it skips
|
||||
* doing calculations on any items for which the maskptr array has a true
|
||||
* value.
|
||||
*/
|
||||
typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
|
||||
char **dataptrs, npy_intp *strides,
|
||||
char *maskptr, npy_intp mask_stride,
|
||||
npy_intp count,
|
||||
NpyAuxData *innerloopdata);
|
||||
|
||||
/* Forward declaration for the type resolver and loop selector typedefs */
|
||||
struct _tagPyUFuncObject;
|
||||
|
||||
/*
|
||||
* Given the operands for calling a ufunc, should determine the
|
||||
* calculation input and output data types and return an inner loop function.
|
||||
* This function should validate that the casting rule is being followed,
|
||||
* and fail if it is not.
|
||||
*
|
||||
* For backwards compatibility, the regular type resolution function does not
|
||||
* support auxiliary data with object semantics. The type resolution call
|
||||
* which returns a masked generic function returns a standard NpyAuxData
|
||||
* object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
|
||||
* work.
|
||||
*
|
||||
* ufunc: The ufunc object.
|
||||
* casting: The 'casting' parameter provided to the ufunc.
|
||||
* operands: An array of length (ufunc->nin + ufunc->nout),
|
||||
* with the output parameters possibly NULL.
|
||||
* type_tup: Either NULL, or the type_tup passed to the ufunc.
|
||||
* out_dtypes: An array which should be populated with new
|
||||
* references to (ufunc->nin + ufunc->nout) new
|
||||
* dtypes, one for each input and output. These
|
||||
* dtypes should all be in native-endian format.
|
||||
*
|
||||
* Should return 0 on success, -1 on failure (with exception set),
|
||||
* or -2 if Py_NotImplemented should be returned.
|
||||
*/
|
||||
typedef int (PyUFunc_TypeResolutionFunc)(
|
||||
struct _tagPyUFuncObject *ufunc,
|
||||
NPY_CASTING casting,
|
||||
PyArrayObject **operands,
|
||||
PyObject *type_tup,
|
||||
PyArray_Descr **out_dtypes);
|
||||
|
||||
/*
|
||||
* This is the signature for the functions that may be assigned to the
|
||||
* `process_core_dims_func` field of the PyUFuncObject structure.
|
||||
* Implementation of this function is optional. This function is only used
|
||||
* by generalized ufuncs (i.e. those with the field `core_enabled` set to 1).
|
||||
* The function is called by the ufunc during the processing of the arguments
|
||||
* of a call of the ufunc. The function can check the core dimensions of the
|
||||
* input and output arrays and return -1 with an exception set if any
|
||||
* requirements are not satisfied. If the caller of the ufunc didn't provide
|
||||
* output arrays, the core dimensions associated with the output arrays (i.e.
|
||||
* those that are not also used in input arrays) will have the value -1 in
|
||||
* `core_dim_sizes`. This function can replace any output core dimensions
|
||||
* that are -1 with a value that is appropriate for the ufunc.
|
||||
*
|
||||
* Parameter Description
|
||||
* --------------- ------------------------------------------------------
|
||||
* ufunc The ufunc object
|
||||
* core_dim_sizes An array with length `ufunc->core_num_dim_ix`.
|
||||
* The core dimensions of the arrays passed to the ufunc
|
||||
* will have been set. If the caller of the ufunc didn't
|
||||
* provide the output array(s), the output-only core
|
||||
* dimensions will have the value -1.
|
||||
*
|
||||
* The function must not change any element in `core_dim_sizes` that is
|
||||
* not -1 on input. Doing so will result in incorrect output from the
|
||||
* ufunc, and could result in a crash of the Python interpreter.
|
||||
*
|
||||
* The function must return 0 on success, -1 on failure (with an exception
|
||||
* set).
|
||||
*/
|
||||
typedef int (PyUFunc_ProcessCoreDimsFunc)(
|
||||
struct _tagPyUFuncObject *ufunc,
|
||||
npy_intp *core_dim_sizes);
|
||||
|
||||
typedef struct _tagPyUFuncObject {
|
||||
PyObject_HEAD
|
||||
/*
|
||||
* nin: Number of inputs
|
||||
* nout: Number of outputs
|
||||
* nargs: Always nin + nout (Why is it stored?)
|
||||
*/
|
||||
int nin, nout, nargs;
|
||||
|
||||
/*
|
||||
* Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
|
||||
* PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
|
||||
* PyUFunc_IdentityValue.
|
||||
*/
|
||||
int identity;
|
||||
|
||||
/* Array of one-dimensional core loops */
|
||||
PyUFuncGenericFunction *functions;
|
||||
/* Array of funcdata that gets passed into the functions */
|
||||
void *const *data;
|
||||
/* The number of elements in 'functions' and 'data' */
|
||||
int ntypes;
|
||||
|
||||
/* Used to be unused field 'check_return' */
|
||||
int reserved1;
|
||||
|
||||
/* The name of the ufunc */
|
||||
const char *name;
|
||||
|
||||
/* Array of type numbers, of size ('nargs' * 'ntypes') */
|
||||
const char *types;
|
||||
|
||||
/* Documentation string */
|
||||
const char *doc;
|
||||
|
||||
void *ptr;
|
||||
PyObject *obj;
|
||||
PyObject *userloops;
|
||||
|
||||
/* generalized ufunc parameters */
|
||||
|
||||
/* 0 for scalar ufunc; 1 for generalized ufunc */
|
||||
int core_enabled;
|
||||
/* number of distinct dimension names in signature */
|
||||
int core_num_dim_ix;
|
||||
|
||||
/*
|
||||
* dimension indices of input/output argument k are stored in
|
||||
* core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
|
||||
*/
|
||||
|
||||
/* numbers of core dimensions of each argument */
|
||||
int *core_num_dims;
|
||||
/*
|
||||
* dimension indices in a flatted form; indices
|
||||
* are in the range of [0,core_num_dim_ix)
|
||||
*/
|
||||
int *core_dim_ixs;
|
||||
/*
|
||||
* positions of 1st core dimensions of each
|
||||
* argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
|
||||
*/
|
||||
int *core_offsets;
|
||||
/* signature string for printing purpose */
|
||||
char *core_signature;
|
||||
|
||||
/*
|
||||
* A function which resolves the types and fills an array
|
||||
* with the dtypes for the inputs and outputs.
|
||||
*/
|
||||
PyUFunc_TypeResolutionFunc *type_resolver;
|
||||
|
||||
/* A dictionary to monkeypatch ufuncs */
|
||||
PyObject *dict;
|
||||
|
||||
/*
|
||||
* This was blocked off to be the "new" inner loop selector in 1.7,
|
||||
* but this was never implemented. (This is also why the above
|
||||
* selector is called the "legacy" selector.)
|
||||
*/
|
||||
#ifndef Py_LIMITED_API
|
||||
vectorcallfunc vectorcall;
|
||||
#else
|
||||
void *vectorcall;
|
||||
#endif
|
||||
|
||||
/* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
|
||||
void *reserved3;
|
||||
|
||||
/*
|
||||
* List of flags for each operand when ufunc is called by nditer object.
|
||||
* These flags will be used in addition to the default flags for each
|
||||
* operand set by nditer object.
|
||||
*/
|
||||
npy_uint32 *op_flags;
|
||||
|
||||
/*
|
||||
* List of global flags used when ufunc is called by nditer object.
|
||||
* These flags will be used in addition to the default global flags
|
||||
* set by nditer object.
|
||||
*/
|
||||
npy_uint32 iter_flags;
|
||||
|
||||
/* New in NPY_API_VERSION 0x0000000D and above */
|
||||
#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
|
||||
/*
|
||||
* for each core_num_dim_ix distinct dimension names,
|
||||
* the possible "frozen" size (-1 if not frozen).
|
||||
*/
|
||||
npy_intp *core_dim_sizes;
|
||||
|
||||
/*
|
||||
* for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
|
||||
*/
|
||||
npy_uint32 *core_dim_flags;
|
||||
|
||||
/* Identity for reduction, when identity == PyUFunc_IdentityValue */
|
||||
PyObject *identity_value;
|
||||
#endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */
|
||||
|
||||
/* New in NPY_API_VERSION 0x0000000F and above */
|
||||
#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
|
||||
/* New private fields related to dispatching */
|
||||
void *_dispatch_cache;
|
||||
/* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
|
||||
PyObject *_loops;
|
||||
#endif
|
||||
#if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION
|
||||
/*
|
||||
* Optional function to process core dimensions of a gufunc.
|
||||
*/
|
||||
PyUFunc_ProcessCoreDimsFunc *process_core_dims_func;
|
||||
#endif
|
||||
} PyUFuncObject;
|
||||
|
||||
#include "arrayobject.h"
|
||||
/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
|
||||
/* the core dimension's size will be determined by the operands. */
|
||||
#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
|
||||
/* the core dimension may be absent */
|
||||
#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
|
||||
/* flags inferred during execution */
|
||||
#define UFUNC_CORE_DIM_MISSING 0x00040000
|
||||
|
||||
|
||||
#define UFUNC_OBJ_ISOBJECT 1
|
||||
#define UFUNC_OBJ_NEEDS_API 2
|
||||
|
||||
|
||||
#if NPY_ALLOW_THREADS
|
||||
#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
|
||||
#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
|
||||
#else
|
||||
#define NPY_LOOP_BEGIN_THREADS
|
||||
#define NPY_LOOP_END_THREADS
|
||||
#endif
|
||||
|
||||
/*
|
||||
* UFunc has unit of 0, and the order of operations can be reordered
|
||||
* This case allows reduction with multiple axes at once.
|
||||
*/
|
||||
#define PyUFunc_Zero 0
|
||||
/*
|
||||
* UFunc has unit of 1, and the order of operations can be reordered
|
||||
* This case allows reduction with multiple axes at once.
|
||||
*/
|
||||
#define PyUFunc_One 1
|
||||
/*
|
||||
* UFunc has unit of -1, and the order of operations can be reordered
|
||||
* This case allows reduction with multiple axes at once. Intended for
|
||||
* bitwise_and reduction.
|
||||
*/
|
||||
#define PyUFunc_MinusOne 2
|
||||
/*
|
||||
* UFunc has no unit, and the order of operations cannot be reordered.
|
||||
* This case does not allow reduction with multiple axes at once.
|
||||
*/
|
||||
#define PyUFunc_None -1
|
||||
/*
|
||||
* UFunc has no unit, and the order of operations can be reordered
|
||||
* This case allows reduction with multiple axes at once.
|
||||
*/
|
||||
#define PyUFunc_ReorderableNone -2
|
||||
/*
|
||||
* UFunc unit is an identity_value, and the order of operations can be reordered
|
||||
* This case allows reduction with multiple axes at once.
|
||||
*/
|
||||
#define PyUFunc_IdentityValue -3
|
||||
|
||||
|
||||
#define UFUNC_REDUCE 0
|
||||
#define UFUNC_ACCUMULATE 1
|
||||
#define UFUNC_REDUCEAT 2
|
||||
#define UFUNC_OUTER 3
|
||||
|
||||
|
||||
typedef struct {
|
||||
int nin;
|
||||
int nout;
|
||||
PyObject *callable;
|
||||
} PyUFunc_PyFuncData;
|
||||
|
||||
/* A linked-list of function information for
|
||||
user-defined 1-d loops.
|
||||
*/
|
||||
typedef struct _loop1d_info {
|
||||
PyUFuncGenericFunction func;
|
||||
void *data;
|
||||
int *arg_types;
|
||||
struct _loop1d_info *next;
|
||||
int nargs;
|
||||
PyArray_Descr **arg_dtypes;
|
||||
} PyUFunc_Loop1d;
|
||||
|
||||
|
||||
#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
|
||||
|
||||
/* THESE MACROS ARE DEPRECATED.
|
||||
* Use npy_set_floatstatus_* in the npymath library.
|
||||
*/
|
||||
#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
|
||||
#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
|
||||
#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
|
||||
#define UFUNC_FPE_INVALID NPY_FPE_INVALID
|
||||
|
||||
/* Make sure it gets defined if it isn't already */
|
||||
#ifndef UFUNC_NOFPE
|
||||
/* Clear the floating point exception default of Borland C++ */
|
||||
#if defined(__BORLANDC__)
|
||||
#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
|
||||
#else
|
||||
#define UFUNC_NOFPE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include "__ufunc_api.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
|
||||
@ -0,0 +1,37 @@
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
|
||||
#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
|
||||
|
||||
#ifndef __COMP_NPY_UNUSED
|
||||
#if defined(__GNUC__)
|
||||
#define __COMP_NPY_UNUSED __attribute__ ((__unused__))
|
||||
#elif defined(__ICC)
|
||||
#define __COMP_NPY_UNUSED __attribute__ ((__unused__))
|
||||
#elif defined(__clang__)
|
||||
#define __COMP_NPY_UNUSED __attribute__ ((unused))
|
||||
#else
|
||||
#define __COMP_NPY_UNUSED
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__) || defined(__ICC) || defined(__clang__)
|
||||
#define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
|
||||
#elif defined(_MSC_VER)
|
||||
#define NPY_DECL_ALIGNED(x) __declspec(align(x))
|
||||
#else
|
||||
#define NPY_DECL_ALIGNED(x)
|
||||
#endif
|
||||
|
||||
/* Use this to tag a variable as not used. It will remove unused variable
|
||||
* warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable
|
||||
* to avoid accidental use */
|
||||
#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED
|
||||
#define NPY_EXPAND(x) x
|
||||
|
||||
#define NPY_STRINGIFY(x) #x
|
||||
#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
|
||||
|
||||
#define NPY_CAT__(a, b) a ## b
|
||||
#define NPY_CAT_(a, b) NPY_CAT__(a, b)
|
||||
#define NPY_CAT(a, b) NPY_CAT_(a, b)
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */
|
||||
BIN
lib/python3.11/site-packages/numpy/_core/lib/libnpymath.a
Normal file
BIN
lib/python3.11/site-packages/numpy/_core/lib/libnpymath.a
Normal file
Binary file not shown.
@ -0,0 +1,12 @@
|
||||
[meta]
|
||||
Name = mlib
|
||||
Description = Math library used with this version of numpy
|
||||
Version = 1.0
|
||||
|
||||
[default]
|
||||
Libs=-lm
|
||||
Cflags=
|
||||
|
||||
[msvc]
|
||||
Libs=m.lib
|
||||
Cflags=
|
||||
@ -0,0 +1,20 @@
|
||||
[meta]
|
||||
Name=npymath
|
||||
Description=Portable, core math library implementing C99 standard
|
||||
Version=0.1
|
||||
|
||||
[variables]
|
||||
pkgname=numpy._core
|
||||
prefix=${pkgdir}
|
||||
libdir=${prefix}/lib
|
||||
includedir=${prefix}/include
|
||||
|
||||
[default]
|
||||
Libs=-L${libdir} -lnpymath
|
||||
Cflags=-I${includedir}
|
||||
Requires=mlib
|
||||
|
||||
[msvc]
|
||||
Libs=/LIBPATH:${libdir} npymath.lib
|
||||
Cflags=/INCLUDE:${includedir}
|
||||
Requires=mlib
|
||||
@ -0,0 +1,7 @@
|
||||
prefix=${pcfiledir}/../..
|
||||
includedir=${prefix}/include
|
||||
|
||||
Name: numpy
|
||||
Description: NumPy is the fundamental package for scientific computing with Python.
|
||||
Version: 2.3.2
|
||||
Cflags: -I${includedir}
|
||||
363
lib/python3.11/site-packages/numpy/_core/memmap.py
Normal file
363
lib/python3.11/site-packages/numpy/_core/memmap.py
Normal file
@ -0,0 +1,363 @@
|
||||
import operator
|
||||
from contextlib import nullcontext
|
||||
|
||||
import numpy as np
|
||||
from numpy._utils import set_module
|
||||
|
||||
from .numeric import dtype, ndarray, uint8
|
||||
|
||||
__all__ = ['memmap']
|
||||
|
||||
dtypedescr = dtype
|
||||
valid_filemodes = ["r", "c", "r+", "w+"]
|
||||
writeable_filemodes = ["r+", "w+"]
|
||||
|
||||
mode_equivalents = {
|
||||
"readonly": "r",
|
||||
"copyonwrite": "c",
|
||||
"readwrite": "r+",
|
||||
"write": "w+"
|
||||
}
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
class memmap(ndarray):
|
||||
"""Create a memory-map to an array stored in a *binary* file on disk.
|
||||
|
||||
Memory-mapped files are used for accessing small segments of large files
|
||||
on disk, without reading the entire file into memory. NumPy's
|
||||
memmap's are array-like objects. This differs from Python's ``mmap``
|
||||
module, which uses file-like objects.
|
||||
|
||||
This subclass of ndarray has some unpleasant interactions with
|
||||
some operations, because it doesn't quite fit properly as a subclass.
|
||||
An alternative to using this subclass is to create the ``mmap``
|
||||
object yourself, then create an ndarray with ndarray.__new__ directly,
|
||||
passing the object created in its 'buffer=' parameter.
|
||||
|
||||
This class may at some point be turned into a factory function
|
||||
which returns a view into an mmap buffer.
|
||||
|
||||
Flush the memmap instance to write the changes to the file. Currently there
|
||||
is no API to close the underlying ``mmap``. It is tricky to ensure the
|
||||
resource is actually closed, since it may be shared between different
|
||||
memmap instances.
|
||||
|
||||
|
||||
Parameters
|
||||
----------
|
||||
filename : str, file-like object, or pathlib.Path instance
|
||||
The file name or file object to be used as the array data buffer.
|
||||
dtype : data-type, optional
|
||||
The data-type used to interpret the file contents.
|
||||
Default is `uint8`.
|
||||
mode : {'r+', 'r', 'w+', 'c'}, optional
|
||||
The file is opened in this mode:
|
||||
|
||||
+------+-------------------------------------------------------------+
|
||||
| 'r' | Open existing file for reading only. |
|
||||
+------+-------------------------------------------------------------+
|
||||
| 'r+' | Open existing file for reading and writing. |
|
||||
+------+-------------------------------------------------------------+
|
||||
| 'w+' | Create or overwrite existing file for reading and writing. |
|
||||
| | If ``mode == 'w+'`` then `shape` must also be specified. |
|
||||
+------+-------------------------------------------------------------+
|
||||
| 'c' | Copy-on-write: assignments affect data in memory, but |
|
||||
| | changes are not saved to disk. The file on disk is |
|
||||
| | read-only. |
|
||||
+------+-------------------------------------------------------------+
|
||||
|
||||
Default is 'r+'.
|
||||
offset : int, optional
|
||||
In the file, array data starts at this offset. Since `offset` is
|
||||
measured in bytes, it should normally be a multiple of the byte-size
|
||||
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
|
||||
file are valid; The file will be extended to accommodate the
|
||||
additional data. By default, ``memmap`` will start at the beginning of
|
||||
the file, even if ``filename`` is a file pointer ``fp`` and
|
||||
``fp.tell() != 0``.
|
||||
shape : int or sequence of ints, optional
|
||||
The desired shape of the array. If ``mode == 'r'`` and the number
|
||||
of remaining bytes after `offset` is not a multiple of the byte-size
|
||||
of `dtype`, you must specify `shape`. By default, the returned array
|
||||
will be 1-D with the number of elements determined by file size
|
||||
and data-type.
|
||||
|
||||
.. versionchanged:: 2.0
|
||||
The shape parameter can now be any integer sequence type, previously
|
||||
types were limited to tuple and int.
|
||||
|
||||
order : {'C', 'F'}, optional
|
||||
Specify the order of the ndarray memory layout:
|
||||
:term:`row-major`, C-style or :term:`column-major`,
|
||||
Fortran-style. This only has an effect if the shape is
|
||||
greater than 1-D. The default order is 'C'.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
filename : str or pathlib.Path instance
|
||||
Path to the mapped file.
|
||||
offset : int
|
||||
Offset position in the file.
|
||||
mode : str
|
||||
File mode.
|
||||
|
||||
Methods
|
||||
-------
|
||||
flush
|
||||
Flush any changes in memory to file on disk.
|
||||
When you delete a memmap object, flush is called first to write
|
||||
changes to disk.
|
||||
|
||||
|
||||
See also
|
||||
--------
|
||||
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The memmap object can be used anywhere an ndarray is accepted.
|
||||
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
|
||||
``True``.
|
||||
|
||||
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
|
||||
|
||||
When a memmap causes a file to be created or extended beyond its
|
||||
current size in the filesystem, the contents of the new part are
|
||||
unspecified. On systems with POSIX filesystem semantics, the extended
|
||||
part will be filled with zero bytes.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> data = np.arange(12, dtype='float32')
|
||||
>>> data.resize((3,4))
|
||||
|
||||
This example uses a temporary file so that doctest doesn't write
|
||||
files to your directory. You would use a 'normal' filename.
|
||||
|
||||
>>> from tempfile import mkdtemp
|
||||
>>> import os.path as path
|
||||
>>> filename = path.join(mkdtemp(), 'newfile.dat')
|
||||
|
||||
Create a memmap with dtype and shape that matches our data:
|
||||
|
||||
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
|
||||
>>> fp
|
||||
memmap([[0., 0., 0., 0.],
|
||||
[0., 0., 0., 0.],
|
||||
[0., 0., 0., 0.]], dtype=float32)
|
||||
|
||||
Write data to memmap array:
|
||||
|
||||
>>> fp[:] = data[:]
|
||||
>>> fp
|
||||
memmap([[ 0., 1., 2., 3.],
|
||||
[ 4., 5., 6., 7.],
|
||||
[ 8., 9., 10., 11.]], dtype=float32)
|
||||
|
||||
>>> fp.filename == path.abspath(filename)
|
||||
True
|
||||
|
||||
Flushes memory changes to disk in order to read them back
|
||||
|
||||
>>> fp.flush()
|
||||
|
||||
Load the memmap and verify data was stored:
|
||||
|
||||
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
|
||||
>>> newfp
|
||||
memmap([[ 0., 1., 2., 3.],
|
||||
[ 4., 5., 6., 7.],
|
||||
[ 8., 9., 10., 11.]], dtype=float32)
|
||||
|
||||
Read-only memmap:
|
||||
|
||||
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
|
||||
>>> fpr.flags.writeable
|
||||
False
|
||||
|
||||
Copy-on-write memmap:
|
||||
|
||||
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
|
||||
>>> fpc.flags.writeable
|
||||
True
|
||||
|
||||
It's possible to assign to copy-on-write array, but values are only
|
||||
written into the memory copy of the array, and not written to disk:
|
||||
|
||||
>>> fpc
|
||||
memmap([[ 0., 1., 2., 3.],
|
||||
[ 4., 5., 6., 7.],
|
||||
[ 8., 9., 10., 11.]], dtype=float32)
|
||||
>>> fpc[0,:] = 0
|
||||
>>> fpc
|
||||
memmap([[ 0., 0., 0., 0.],
|
||||
[ 4., 5., 6., 7.],
|
||||
[ 8., 9., 10., 11.]], dtype=float32)
|
||||
|
||||
File on disk is unchanged:
|
||||
|
||||
>>> fpr
|
||||
memmap([[ 0., 1., 2., 3.],
|
||||
[ 4., 5., 6., 7.],
|
||||
[ 8., 9., 10., 11.]], dtype=float32)
|
||||
|
||||
Offset into a memmap:
|
||||
|
||||
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
|
||||
>>> fpo
|
||||
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
|
||||
|
||||
"""
|
||||
|
||||
__array_priority__ = -100.0
|
||||
|
||||
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
|
||||
shape=None, order='C'):
|
||||
# Import here to minimize 'import numpy' overhead
|
||||
import mmap
|
||||
import os.path
|
||||
try:
|
||||
mode = mode_equivalents[mode]
|
||||
except KeyError as e:
|
||||
if mode not in valid_filemodes:
|
||||
all_modes = valid_filemodes + list(mode_equivalents.keys())
|
||||
raise ValueError(
|
||||
f"mode must be one of {all_modes!r} (got {mode!r})"
|
||||
) from None
|
||||
|
||||
if mode == 'w+' and shape is None:
|
||||
raise ValueError("shape must be given if mode == 'w+'")
|
||||
|
||||
if hasattr(filename, 'read'):
|
||||
f_ctx = nullcontext(filename)
|
||||
else:
|
||||
f_ctx = open(
|
||||
os.fspath(filename),
|
||||
('r' if mode == 'c' else mode) + 'b'
|
||||
)
|
||||
|
||||
with f_ctx as fid:
|
||||
fid.seek(0, 2)
|
||||
flen = fid.tell()
|
||||
descr = dtypedescr(dtype)
|
||||
_dbytes = descr.itemsize
|
||||
|
||||
if shape is None:
|
||||
bytes = flen - offset
|
||||
if bytes % _dbytes:
|
||||
raise ValueError("Size of available data is not a "
|
||||
"multiple of the data-type size.")
|
||||
size = bytes // _dbytes
|
||||
shape = (size,)
|
||||
else:
|
||||
if not isinstance(shape, (tuple, list)):
|
||||
try:
|
||||
shape = [operator.index(shape)]
|
||||
except TypeError:
|
||||
pass
|
||||
shape = tuple(shape)
|
||||
size = np.intp(1) # avoid overflows
|
||||
for k in shape:
|
||||
size *= k
|
||||
|
||||
bytes = int(offset + size * _dbytes)
|
||||
|
||||
if mode in ('w+', 'r+'):
|
||||
# gh-27723
|
||||
# if bytes == 0, we write out 1 byte to allow empty memmap.
|
||||
bytes = max(bytes, 1)
|
||||
if flen < bytes:
|
||||
fid.seek(bytes - 1, 0)
|
||||
fid.write(b'\0')
|
||||
fid.flush()
|
||||
|
||||
if mode == 'c':
|
||||
acc = mmap.ACCESS_COPY
|
||||
elif mode == 'r':
|
||||
acc = mmap.ACCESS_READ
|
||||
else:
|
||||
acc = mmap.ACCESS_WRITE
|
||||
|
||||
start = offset - offset % mmap.ALLOCATIONGRANULARITY
|
||||
bytes -= start
|
||||
# bytes == 0 is problematic as in mmap length=0 maps the full file.
|
||||
# See PR gh-27723 for a more detailed explanation.
|
||||
if bytes == 0 and start > 0:
|
||||
bytes += mmap.ALLOCATIONGRANULARITY
|
||||
start -= mmap.ALLOCATIONGRANULARITY
|
||||
array_offset = offset - start
|
||||
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
|
||||
|
||||
self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
|
||||
offset=array_offset, order=order)
|
||||
self._mmap = mm
|
||||
self.offset = offset
|
||||
self.mode = mode
|
||||
|
||||
if isinstance(filename, os.PathLike):
|
||||
# special case - if we were constructed with a pathlib.path,
|
||||
# then filename is a path object, not a string
|
||||
self.filename = filename.resolve()
|
||||
elif hasattr(fid, "name") and isinstance(fid.name, str):
|
||||
# py3 returns int for TemporaryFile().name
|
||||
self.filename = os.path.abspath(fid.name)
|
||||
# same as memmap copies (e.g. memmap + 1)
|
||||
else:
|
||||
self.filename = None
|
||||
|
||||
return self
|
||||
|
||||
def __array_finalize__(self, obj):
|
||||
if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
|
||||
self._mmap = obj._mmap
|
||||
self.filename = obj.filename
|
||||
self.offset = obj.offset
|
||||
self.mode = obj.mode
|
||||
else:
|
||||
self._mmap = None
|
||||
self.filename = None
|
||||
self.offset = None
|
||||
self.mode = None
|
||||
|
||||
def flush(self):
|
||||
"""
|
||||
Write any changes in the array to the file on disk.
|
||||
|
||||
For further information, see `memmap`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
None
|
||||
|
||||
See Also
|
||||
--------
|
||||
memmap
|
||||
|
||||
"""
|
||||
if self.base is not None and hasattr(self.base, 'flush'):
|
||||
self.base.flush()
|
||||
|
||||
def __array_wrap__(self, arr, context=None, return_scalar=False):
|
||||
arr = super().__array_wrap__(arr, context)
|
||||
|
||||
# Return a memmap if a memmap was given as the output of the
|
||||
# ufunc. Leave the arr class unchanged if self is not a memmap
|
||||
# to keep original memmap subclasses behavior
|
||||
if self is arr or type(self) is not memmap:
|
||||
return arr
|
||||
|
||||
# Return scalar instead of 0d memmap, e.g. for np.sum with
|
||||
# axis=None (note that subclasses will not reach here)
|
||||
if return_scalar:
|
||||
return arr[()]
|
||||
|
||||
# Return ndarray otherwise
|
||||
return arr.view(np.ndarray)
|
||||
|
||||
def __getitem__(self, index):
|
||||
res = super().__getitem__(index)
|
||||
if type(res) is memmap and res._mmap is None:
|
||||
return res.view(type=ndarray)
|
||||
return res
|
||||
3
lib/python3.11/site-packages/numpy/_core/memmap.pyi
Normal file
3
lib/python3.11/site-packages/numpy/_core/memmap.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
# Typing stub for ``numpy._core.memmap``: the public class is defined on the
# top-level numpy namespace; re-export it as this module's sole public name.
from numpy import memmap

__all__ = ["memmap"]
|
||||
1762
lib/python3.11/site-packages/numpy/_core/multiarray.py
Normal file
1762
lib/python3.11/site-packages/numpy/_core/multiarray.py
Normal file
File diff suppressed because it is too large
Load Diff
1285
lib/python3.11/site-packages/numpy/_core/multiarray.pyi
Normal file
1285
lib/python3.11/site-packages/numpy/_core/multiarray.pyi
Normal file
File diff suppressed because it is too large
Load Diff
2760
lib/python3.11/site-packages/numpy/_core/numeric.py
Normal file
2760
lib/python3.11/site-packages/numpy/_core/numeric.py
Normal file
File diff suppressed because it is too large
Load Diff
882
lib/python3.11/site-packages/numpy/_core/numeric.pyi
Normal file
882
lib/python3.11/site-packages/numpy/_core/numeric.pyi
Normal file
@ -0,0 +1,882 @@
|
||||
from collections.abc import Callable, Sequence
|
||||
from typing import (
|
||||
Any,
|
||||
Final,
|
||||
Never,
|
||||
NoReturn,
|
||||
SupportsAbs,
|
||||
SupportsIndex,
|
||||
TypeAlias,
|
||||
TypeGuard,
|
||||
TypeVar,
|
||||
Unpack,
|
||||
overload,
|
||||
)
|
||||
from typing import Literal as L
|
||||
|
||||
import numpy as np
|
||||
from numpy import (
|
||||
False_,
|
||||
True_,
|
||||
_OrderCF,
|
||||
_OrderKACF,
|
||||
# re-exports
|
||||
bitwise_not,
|
||||
broadcast,
|
||||
complexfloating,
|
||||
dtype,
|
||||
flatiter,
|
||||
float64,
|
||||
floating,
|
||||
from_dlpack,
|
||||
# other
|
||||
generic,
|
||||
inf,
|
||||
int_,
|
||||
intp,
|
||||
little_endian,
|
||||
matmul,
|
||||
nan,
|
||||
ndarray,
|
||||
nditer,
|
||||
newaxis,
|
||||
object_,
|
||||
signedinteger,
|
||||
timedelta64,
|
||||
ufunc,
|
||||
unsignedinteger,
|
||||
vecdot,
|
||||
)
|
||||
from numpy._typing import (
|
||||
ArrayLike,
|
||||
DTypeLike,
|
||||
NDArray,
|
||||
_ArrayLike,
|
||||
_ArrayLikeBool_co,
|
||||
_ArrayLikeComplex_co,
|
||||
_ArrayLikeFloat_co,
|
||||
_ArrayLikeInt_co,
|
||||
_ArrayLikeObject_co,
|
||||
_ArrayLikeTD64_co,
|
||||
_ArrayLikeUInt_co,
|
||||
_DTypeLike,
|
||||
_NestedSequence,
|
||||
_ScalarLike_co,
|
||||
_Shape,
|
||||
_ShapeLike,
|
||||
_SupportsArrayFunc,
|
||||
_SupportsDType,
|
||||
)
|
||||
|
||||
from .fromnumeric import all as all
|
||||
from .fromnumeric import any as any
|
||||
from .fromnumeric import argpartition as argpartition
|
||||
from .fromnumeric import matrix_transpose as matrix_transpose
|
||||
from .fromnumeric import mean as mean
|
||||
from .multiarray import (
|
||||
# other
|
||||
_Array,
|
||||
_ConstructorEmpty,
|
||||
_KwargsEmpty,
|
||||
# re-exports
|
||||
arange,
|
||||
array,
|
||||
asanyarray,
|
||||
asarray,
|
||||
ascontiguousarray,
|
||||
asfortranarray,
|
||||
can_cast,
|
||||
concatenate,
|
||||
copyto,
|
||||
dot,
|
||||
empty,
|
||||
empty_like,
|
||||
frombuffer,
|
||||
fromfile,
|
||||
fromiter,
|
||||
fromstring,
|
||||
inner,
|
||||
lexsort,
|
||||
may_share_memory,
|
||||
min_scalar_type,
|
||||
nested_iters,
|
||||
promote_types,
|
||||
putmask,
|
||||
result_type,
|
||||
shares_memory,
|
||||
vdot,
|
||||
where,
|
||||
zeros,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"newaxis",
|
||||
"ndarray",
|
||||
"flatiter",
|
||||
"nditer",
|
||||
"nested_iters",
|
||||
"ufunc",
|
||||
"arange",
|
||||
"array",
|
||||
"asarray",
|
||||
"asanyarray",
|
||||
"ascontiguousarray",
|
||||
"asfortranarray",
|
||||
"zeros",
|
||||
"count_nonzero",
|
||||
"empty",
|
||||
"broadcast",
|
||||
"dtype",
|
||||
"fromstring",
|
||||
"fromfile",
|
||||
"frombuffer",
|
||||
"from_dlpack",
|
||||
"where",
|
||||
"argwhere",
|
||||
"copyto",
|
||||
"concatenate",
|
||||
"lexsort",
|
||||
"astype",
|
||||
"can_cast",
|
||||
"promote_types",
|
||||
"min_scalar_type",
|
||||
"result_type",
|
||||
"isfortran",
|
||||
"empty_like",
|
||||
"zeros_like",
|
||||
"ones_like",
|
||||
"correlate",
|
||||
"convolve",
|
||||
"inner",
|
||||
"dot",
|
||||
"outer",
|
||||
"vdot",
|
||||
"roll",
|
||||
"rollaxis",
|
||||
"moveaxis",
|
||||
"cross",
|
||||
"tensordot",
|
||||
"little_endian",
|
||||
"fromiter",
|
||||
"array_equal",
|
||||
"array_equiv",
|
||||
"indices",
|
||||
"fromfunction",
|
||||
"isclose",
|
||||
"isscalar",
|
||||
"binary_repr",
|
||||
"base_repr",
|
||||
"ones",
|
||||
"identity",
|
||||
"allclose",
|
||||
"putmask",
|
||||
"flatnonzero",
|
||||
"inf",
|
||||
"nan",
|
||||
"False_",
|
||||
"True_",
|
||||
"bitwise_not",
|
||||
"full",
|
||||
"full_like",
|
||||
"matmul",
|
||||
"vecdot",
|
||||
"shares_memory",
|
||||
"may_share_memory",
|
||||
]
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_ScalarT = TypeVar("_ScalarT", bound=generic)
|
||||
_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
|
||||
_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any])
|
||||
_ShapeT = TypeVar("_ShapeT", bound=_Shape)
|
||||
_AnyShapeT = TypeVar(
|
||||
"_AnyShapeT",
|
||||
tuple[()],
|
||||
tuple[int],
|
||||
tuple[int, int],
|
||||
tuple[int, int, int],
|
||||
tuple[int, int, int, int],
|
||||
tuple[int, ...],
|
||||
)
|
||||
|
||||
_CorrelateMode: TypeAlias = L["valid", "same", "full"]
|
||||
|
||||
# ``zeros_like`` overloads, ordered most- to least-specific so type checkers
# pick the tightest match:
#   1. ndarray subclass in (subok left True, no dtype/shape override)
#      -> same subclass out;
#   2. element scalar type statically known -> NDArray of that scalar;
#   3. explicit scalar dtype given -> NDArray of that scalar;
#   4. dynamic catch-all -> NDArray[Any].
@overload
def zeros_like(
    a: _ArrayT,
    dtype: None = ...,
    order: _OrderKACF = ...,
    subok: L[True] = ...,
    shape: None = ...,
    *,
    device: L["cpu"] | None = ...,
) -> _ArrayT: ...
@overload
def zeros_like(
    a: _ArrayLike[_ScalarT],
    dtype: None = ...,
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: _ShapeLike | None = ...,
    *,
    device: L["cpu"] | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def zeros_like(
    a: Any,
    dtype: _DTypeLike[_ScalarT],
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: _ShapeLike | None = ...,
    *,
    device: L["cpu"] | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def zeros_like(
    a: Any,
    dtype: DTypeLike | None = ...,
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: _ShapeLike | None = ...,
    *,
    device: L["cpu"] | None = ...,
) -> NDArray[Any]: ...
|
||||
|
||||
ones: Final[_ConstructorEmpty]
|
||||
|
||||
@overload
|
||||
def ones_like(
|
||||
a: _ArrayT,
|
||||
dtype: None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
subok: L[True] = ...,
|
||||
shape: None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> _ArrayT: ...
|
||||
@overload
|
||||
def ones_like(
|
||||
a: _ArrayLike[_ScalarT],
|
||||
dtype: None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
subok: bool = ...,
|
||||
shape: _ShapeLike | None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def ones_like(
|
||||
a: Any,
|
||||
dtype: _DTypeLike[_ScalarT],
|
||||
order: _OrderKACF = ...,
|
||||
subok: bool = ...,
|
||||
shape: _ShapeLike | None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def ones_like(
|
||||
a: Any,
|
||||
dtype: DTypeLike | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
subok: bool = ...,
|
||||
shape: _ShapeLike | None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview
|
||||
# 1-D shape
|
||||
@overload
|
||||
def full(
|
||||
shape: SupportsIndex,
|
||||
fill_value: _ScalarT,
|
||||
dtype: None = ...,
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> _Array[tuple[int], _ScalarT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: SupportsIndex,
|
||||
fill_value: Any,
|
||||
dtype: _DTypeT | _SupportsDType[_DTypeT],
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> np.ndarray[tuple[int], _DTypeT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: SupportsIndex,
|
||||
fill_value: Any,
|
||||
dtype: type[_ScalarT],
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> _Array[tuple[int], _ScalarT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: SupportsIndex,
|
||||
fill_value: Any,
|
||||
dtype: DTypeLike | None = ...,
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> _Array[tuple[int], Any]: ...
|
||||
# known shape
|
||||
@overload
|
||||
def full(
|
||||
shape: _AnyShapeT,
|
||||
fill_value: _ScalarT,
|
||||
dtype: None = ...,
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> _Array[_AnyShapeT, _ScalarT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: _AnyShapeT,
|
||||
fill_value: Any,
|
||||
dtype: _DTypeT | _SupportsDType[_DTypeT],
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> np.ndarray[_AnyShapeT, _DTypeT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: _AnyShapeT,
|
||||
fill_value: Any,
|
||||
dtype: type[_ScalarT],
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> _Array[_AnyShapeT, _ScalarT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: _AnyShapeT,
|
||||
fill_value: Any,
|
||||
dtype: DTypeLike | None = ...,
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> _Array[_AnyShapeT, Any]: ...
|
||||
# unknown shape
|
||||
@overload
|
||||
def full(
|
||||
shape: _ShapeLike,
|
||||
fill_value: _ScalarT,
|
||||
dtype: None = ...,
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: _ShapeLike,
|
||||
fill_value: Any,
|
||||
dtype: _DTypeT | _SupportsDType[_DTypeT],
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> np.ndarray[Any, _DTypeT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: _ShapeLike,
|
||||
fill_value: Any,
|
||||
dtype: type[_ScalarT],
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def full(
|
||||
shape: _ShapeLike,
|
||||
fill_value: Any,
|
||||
dtype: DTypeLike | None = ...,
|
||||
order: _OrderCF = ...,
|
||||
**kwargs: Unpack[_KwargsEmpty],
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
@overload
|
||||
def full_like(
|
||||
a: _ArrayT,
|
||||
fill_value: Any,
|
||||
dtype: None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
subok: L[True] = ...,
|
||||
shape: None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> _ArrayT: ...
|
||||
@overload
|
||||
def full_like(
|
||||
a: _ArrayLike[_ScalarT],
|
||||
fill_value: Any,
|
||||
dtype: None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
subok: bool = ...,
|
||||
shape: _ShapeLike | None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def full_like(
|
||||
a: Any,
|
||||
fill_value: Any,
|
||||
dtype: _DTypeLike[_ScalarT],
|
||||
order: _OrderKACF = ...,
|
||||
subok: bool = ...,
|
||||
shape: _ShapeLike | None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def full_like(
|
||||
a: Any,
|
||||
fill_value: Any,
|
||||
dtype: DTypeLike | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
subok: bool = ...,
|
||||
shape: _ShapeLike | None = ...,
|
||||
*,
|
||||
device: L["cpu"] | None = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
#
|
||||
# ``count_nonzero`` overloads: the default call (axis=None, keepdims=False)
# collapses to a single ``np.intp``; with ``keepdims=True`` a scalar input
# still gives ``np.intp`` while array-like input gives an intp array; the
# final overload is the dynamic catch-all.
@overload
def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> np.intp: ...
@overload
def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ...
@overload
def count_nonzero(
    a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True]
) -> NDArray[np.intp]: ...
@overload
def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ...
|
||||
|
||||
#
|
||||
# ``isfortran`` reports Fortran-contiguity as a plain Python bool.
def isfortran(a: NDArray[Any] | generic) -> bool: ...

# Indices of nonzero elements, one row per element (always a 2-D intp array).
def argwhere(a: ArrayLike) -> NDArray[intp]: ...

# Indices of nonzero elements in the flattened input (always 1-D intp).
def flatnonzero(a: ArrayLike) -> NDArray[intp]: ...
|
||||
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLike[Never],
|
||||
v: _ArrayLike[Never],
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLikeBool_co,
|
||||
v: _ArrayLikeBool_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[np.bool]: ...
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLikeUInt_co,
|
||||
v: _ArrayLikeUInt_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[unsignedinteger]: ...
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLikeInt_co,
|
||||
v: _ArrayLikeInt_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[signedinteger]: ...
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLikeFloat_co,
|
||||
v: _ArrayLikeFloat_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[floating]: ...
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLikeComplex_co,
|
||||
v: _ArrayLikeComplex_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[complexfloating]: ...
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLikeTD64_co,
|
||||
v: _ArrayLikeTD64_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[timedelta64]: ...
|
||||
@overload
|
||||
def correlate(
|
||||
a: _ArrayLikeObject_co,
|
||||
v: _ArrayLikeObject_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[object_]: ...
|
||||
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLike[Never],
|
||||
v: _ArrayLike[Never],
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLikeBool_co,
|
||||
v: _ArrayLikeBool_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[np.bool]: ...
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLikeUInt_co,
|
||||
v: _ArrayLikeUInt_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[unsignedinteger]: ...
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLikeInt_co,
|
||||
v: _ArrayLikeInt_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[signedinteger]: ...
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLikeFloat_co,
|
||||
v: _ArrayLikeFloat_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[floating]: ...
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLikeComplex_co,
|
||||
v: _ArrayLikeComplex_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[complexfloating]: ...
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLikeTD64_co,
|
||||
v: _ArrayLikeTD64_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[timedelta64]: ...
|
||||
@overload
|
||||
def convolve(
|
||||
a: _ArrayLikeObject_co,
|
||||
v: _ArrayLikeObject_co,
|
||||
mode: _CorrelateMode = ...,
|
||||
) -> NDArray[object_]: ...
|
||||
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLike[Never],
|
||||
b: _ArrayLike[Never],
|
||||
out: None = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeBool_co,
|
||||
b: _ArrayLikeBool_co,
|
||||
out: None = ...,
|
||||
) -> NDArray[np.bool]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeUInt_co,
|
||||
b: _ArrayLikeUInt_co,
|
||||
out: None = ...,
|
||||
) -> NDArray[unsignedinteger]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeInt_co,
|
||||
b: _ArrayLikeInt_co,
|
||||
out: None = ...,
|
||||
) -> NDArray[signedinteger]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeFloat_co,
|
||||
b: _ArrayLikeFloat_co,
|
||||
out: None = ...,
|
||||
) -> NDArray[floating]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeComplex_co,
|
||||
b: _ArrayLikeComplex_co,
|
||||
out: None = ...,
|
||||
) -> NDArray[complexfloating]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeTD64_co,
|
||||
b: _ArrayLikeTD64_co,
|
||||
out: None = ...,
|
||||
) -> NDArray[timedelta64]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeObject_co,
|
||||
b: _ArrayLikeObject_co,
|
||||
out: None = ...,
|
||||
) -> NDArray[object_]: ...
|
||||
@overload
|
||||
def outer(
|
||||
a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
||||
b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
||||
out: _ArrayT,
|
||||
) -> _ArrayT: ...
|
||||
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLike[Never],
|
||||
b: _ArrayLike[Never],
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLikeBool_co,
|
||||
b: _ArrayLikeBool_co,
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[np.bool]: ...
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLikeUInt_co,
|
||||
b: _ArrayLikeUInt_co,
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[unsignedinteger]: ...
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLikeInt_co,
|
||||
b: _ArrayLikeInt_co,
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[signedinteger]: ...
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLikeFloat_co,
|
||||
b: _ArrayLikeFloat_co,
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[floating]: ...
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLikeComplex_co,
|
||||
b: _ArrayLikeComplex_co,
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[complexfloating]: ...
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLikeTD64_co,
|
||||
b: _ArrayLikeTD64_co,
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[timedelta64]: ...
|
||||
@overload
|
||||
def tensordot(
|
||||
a: _ArrayLikeObject_co,
|
||||
b: _ArrayLikeObject_co,
|
||||
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
||||
) -> NDArray[object_]: ...
|
||||
|
||||
# ``roll`` preserves the element type when it is statically known;
# otherwise the result element type degrades to Any.
@overload
def roll(
    a: _ArrayLike[_ScalarT],
    shift: _ShapeLike,
    axis: _ShapeLike | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def roll(
    a: ArrayLike,
    shift: _ShapeLike,
    axis: _ShapeLike | None = ...,
) -> NDArray[Any]: ...
|
||||
|
||||
# ``rollaxis`` returns a view with the same element type as its input.
def rollaxis(
    a: NDArray[_ScalarT],
    axis: int,
    start: int = ...,
) -> NDArray[_ScalarT]: ...
|
||||
|
||||
# ``moveaxis`` likewise preserves the element type; source/destination may
# each be a single axis or a sequence of axes.
def moveaxis(
    a: NDArray[_ScalarT],
    source: _ShapeLike,
    destination: _ShapeLike,
) -> NDArray[_ScalarT]: ...
|
||||
|
||||
@overload
|
||||
def cross(
|
||||
a: _ArrayLike[Never],
|
||||
b: _ArrayLike[Never],
|
||||
axisa: int = ...,
|
||||
axisb: int = ...,
|
||||
axisc: int = ...,
|
||||
axis: int | None = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
@overload
|
||||
def cross(
|
||||
a: _ArrayLikeBool_co,
|
||||
b: _ArrayLikeBool_co,
|
||||
axisa: int = ...,
|
||||
axisb: int = ...,
|
||||
axisc: int = ...,
|
||||
axis: int | None = ...,
|
||||
) -> NoReturn: ...
|
||||
@overload
|
||||
def cross(
|
||||
a: _ArrayLikeUInt_co,
|
||||
b: _ArrayLikeUInt_co,
|
||||
axisa: int = ...,
|
||||
axisb: int = ...,
|
||||
axisc: int = ...,
|
||||
axis: int | None = ...,
|
||||
) -> NDArray[unsignedinteger]: ...
|
||||
@overload
|
||||
def cross(
|
||||
a: _ArrayLikeInt_co,
|
||||
b: _ArrayLikeInt_co,
|
||||
axisa: int = ...,
|
||||
axisb: int = ...,
|
||||
axisc: int = ...,
|
||||
axis: int | None = ...,
|
||||
) -> NDArray[signedinteger]: ...
|
||||
@overload
|
||||
def cross(
|
||||
a: _ArrayLikeFloat_co,
|
||||
b: _ArrayLikeFloat_co,
|
||||
axisa: int = ...,
|
||||
axisb: int = ...,
|
||||
axisc: int = ...,
|
||||
axis: int | None = ...,
|
||||
) -> NDArray[floating]: ...
|
||||
@overload
|
||||
def cross(
|
||||
a: _ArrayLikeComplex_co,
|
||||
b: _ArrayLikeComplex_co,
|
||||
axisa: int = ...,
|
||||
axisb: int = ...,
|
||||
axisc: int = ...,
|
||||
axis: int | None = ...,
|
||||
) -> NDArray[complexfloating]: ...
|
||||
@overload
|
||||
def cross(
|
||||
a: _ArrayLikeObject_co,
|
||||
b: _ArrayLikeObject_co,
|
||||
axisa: int = ...,
|
||||
axisb: int = ...,
|
||||
axisc: int = ...,
|
||||
axis: int | None = ...,
|
||||
) -> NDArray[object_]: ...
|
||||
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: type[int] = ...,
|
||||
sparse: L[False] = ...,
|
||||
) -> NDArray[int_]: ...
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: type[int],
|
||||
sparse: L[True],
|
||||
) -> tuple[NDArray[int_], ...]: ...
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: type[int] = ...,
|
||||
*,
|
||||
sparse: L[True],
|
||||
) -> tuple[NDArray[int_], ...]: ...
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: _DTypeLike[_ScalarT],
|
||||
sparse: L[False] = ...,
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: _DTypeLike[_ScalarT],
|
||||
sparse: L[True],
|
||||
) -> tuple[NDArray[_ScalarT], ...]: ...
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: DTypeLike = ...,
|
||||
sparse: L[False] = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: DTypeLike,
|
||||
sparse: L[True],
|
||||
) -> tuple[NDArray[Any], ...]: ...
|
||||
@overload
|
||||
def indices(
|
||||
dimensions: Sequence[int],
|
||||
dtype: DTypeLike = ...,
|
||||
*,
|
||||
sparse: L[True],
|
||||
) -> tuple[NDArray[Any], ...]: ...
|
||||
|
||||
# ``fromfunction`` returns whatever ``function`` returns (usually an
# ndarray, but any _T is allowed — matching the runtime behavior, which
# simply forwards the index grids to ``function``).
def fromfunction(
    function: Callable[..., _T],
    shape: Sequence[int],
    *,
    dtype: DTypeLike = ...,
    like: _SupportsArrayFunc | None = ...,
    **kwargs: Any,
) -> _T: ...
|
||||
|
||||
# ``isscalar`` narrows its argument to the scalar-ish union on a True result.
def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ...

# Binary string representation of an integer (two's complement when a
# ``width`` is given).
def binary_repr(num: SupportsIndex, width: int | None = ...) -> str: ...

# String representation of a number in an arbitrary base.
def base_repr(
    number: SupportsAbs[float],
    base: float = ...,
    padding: SupportsIndex | None = ...,
) -> str: ...
|
||||
|
||||
# ``identity`` overloads: float64 when no dtype is given, the requested
# scalar type when a concrete dtype is supplied, Any for dynamic dtype-like
# input.
@overload
def identity(
    n: int,
    dtype: None = ...,
    *,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[float64]: ...
@overload
def identity(
    n: int,
    dtype: _DTypeLike[_ScalarT],
    *,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def identity(
    n: int,
    dtype: DTypeLike | None = ...,
    *,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[Any]: ...
|
||||
|
||||
# ``allclose`` always reduces to a single Python bool, regardless of the
# input shapes.
def allclose(
    a: ArrayLike,
    b: ArrayLike,
    rtol: ArrayLike = ...,
    atol: ArrayLike = ...,
    equal_nan: bool = ...,
) -> bool: ...
|
||||
|
||||
# ``isclose``: two scalar inputs give a single np.bool; anything array-like
# gives an elementwise boolean array.
@overload
def isclose(
    a: _ScalarLike_co,
    b: _ScalarLike_co,
    rtol: ArrayLike = ...,
    atol: ArrayLike = ...,
    equal_nan: bool = ...,
) -> np.bool: ...
@overload
def isclose(
    a: ArrayLike,
    b: ArrayLike,
    rtol: ArrayLike = ...,
    atol: ArrayLike = ...,
    equal_nan: bool = ...,
) -> NDArray[np.bool]: ...
|
||||
|
||||
# Whole-array equality reductions; both return plain Python bools.
def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ...

# Like array_equal but allows broadcasting between the two inputs.
def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
|
||||
|
||||
# ``astype`` preserves the array's shape type parameter; the element type
# becomes the requested scalar type when statically known, else an
# unparametrized dtype.
@overload
def astype(
    x: ndarray[_ShapeT, dtype],
    dtype: _DTypeLike[_ScalarT],
    /,
    *,
    copy: bool = ...,
    device: L["cpu"] | None = ...,
) -> ndarray[_ShapeT, dtype[_ScalarT]]: ...
@overload
def astype(
    x: ndarray[_ShapeT, dtype],
    dtype: DTypeLike,
    /,
    *,
    copy: bool = ...,
    device: L["cpu"] | None = ...,
) -> ndarray[_ShapeT, dtype]: ...
|
||||
633
lib/python3.11/site-packages/numpy/_core/numerictypes.py
Normal file
633
lib/python3.11/site-packages/numpy/_core/numerictypes.py
Normal file
@ -0,0 +1,633 @@
|
||||
"""
|
||||
numerictypes: Define the numeric type objects
|
||||
|
||||
This module is designed so "from numerictypes import \\*" is safe.
|
||||
Exported symbols include:
|
||||
|
||||
Dictionary with all registered number types (including aliases):
|
||||
sctypeDict
|
||||
|
||||
Type objects (not all will be available, depends on platform):
|
||||
see variable sctypes for which ones you have
|
||||
|
||||
Bit-width names
|
||||
|
||||
int8 int16 int32 int64
|
||||
uint8 uint16 uint32 uint64
|
||||
float16 float32 float64 float96 float128
|
||||
complex64 complex128 complex192 complex256
|
||||
datetime64 timedelta64
|
||||
|
||||
c-based names
|
||||
|
||||
bool
|
||||
|
||||
object_
|
||||
|
||||
void, str_
|
||||
|
||||
byte, ubyte,
|
||||
short, ushort
|
||||
intc, uintc,
|
||||
intp, uintp,
|
||||
int_, uint,
|
||||
longlong, ulonglong,
|
||||
|
||||
single, csingle,
|
||||
double, cdouble,
|
||||
longdouble, clongdouble,
|
||||
|
||||
As part of the type-hierarchy: xx -- is bit-width
|
||||
|
||||
generic
|
||||
+-> bool (kind=b)
|
||||
+-> number
|
||||
| +-> integer
|
||||
| | +-> signedinteger (intxx) (kind=i)
|
||||
| | | byte
|
||||
| | | short
|
||||
| | | intc
|
||||
| | | intp
|
||||
| | | int_
|
||||
| | | longlong
|
||||
| | \\-> unsignedinteger (uintxx) (kind=u)
|
||||
| | ubyte
|
||||
| | ushort
|
||||
| | uintc
|
||||
| | uintp
|
||||
| | uint
|
||||
| | ulonglong
|
||||
| +-> inexact
|
||||
| +-> floating (floatxx) (kind=f)
|
||||
| | half
|
||||
| | single
|
||||
| | double
|
||||
| | longdouble
|
||||
| \\-> complexfloating (complexxx) (kind=c)
|
||||
| csingle
|
||||
| cdouble
|
||||
| clongdouble
|
||||
+-> flexible
|
||||
| +-> character
|
||||
| | bytes_ (kind=S)
|
||||
| | str_ (kind=U)
|
||||
| |
|
||||
| \\-> void (kind=V)
|
||||
\\-> object_ (not used much) (kind=O)
|
||||
|
||||
"""
|
||||
import numbers
|
||||
import warnings
|
||||
|
||||
from numpy._utils import set_module
|
||||
|
||||
from . import multiarray as ma
|
||||
from .multiarray import (
|
||||
busday_count,
|
||||
busday_offset,
|
||||
busdaycalendar,
|
||||
datetime_as_string,
|
||||
datetime_data,
|
||||
dtype,
|
||||
is_busday,
|
||||
ndarray,
|
||||
)
|
||||
|
||||
# we add more at the bottom
|
||||
__all__ = [
|
||||
'ScalarType', 'typecodes', 'issubdtype', 'datetime_data',
|
||||
'datetime_as_string', 'busday_offset', 'busday_count',
|
||||
'is_busday', 'busdaycalendar', 'isdtype'
|
||||
]
|
||||
|
||||
# we don't need all these imports, but we need to keep them for compatibility
|
||||
# for users using np._core.numerictypes.UPPER_TABLE
|
||||
# we don't export these for import *, but we do want them accessible
|
||||
# as numerictypes.bool, etc.
|
||||
from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029
|
||||
|
||||
from ._dtype import _kind_name
|
||||
from ._string_helpers import ( # noqa: F401
|
||||
LOWER_TABLE,
|
||||
UPPER_TABLE,
|
||||
english_capitalize,
|
||||
english_lower,
|
||||
english_upper,
|
||||
)
|
||||
from ._type_aliases import allTypes, sctypeDict, sctypes
|
||||
|
||||
# We use this later
|
||||
generic = allTypes['generic']
|
||||
|
||||
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
|
||||
'int32', 'uint32', 'int64', 'uint64',
|
||||
'float16', 'float32', 'float64', 'float96', 'float128',
|
||||
'complex64', 'complex128', 'complex192', 'complex256',
|
||||
'object']
|
||||
|
||||
@set_module('numpy')
|
||||
def maximum_sctype(t):
|
||||
"""
|
||||
Return the scalar type of highest precision of the same kind as the input.
|
||||
|
||||
.. deprecated:: 2.0
|
||||
Use an explicit dtype like int64 or float64 instead.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
t : dtype or dtype specifier
|
||||
The input data type. This can be a `dtype` object or an object that
|
||||
is convertible to a `dtype`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : dtype
|
||||
The highest precision data type of the same kind (`dtype.kind`) as `t`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
obj2sctype, mintypecode, sctype2char
|
||||
dtype
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy._core.numerictypes import maximum_sctype
|
||||
>>> maximum_sctype(int)
|
||||
<class 'numpy.int64'>
|
||||
>>> maximum_sctype(np.uint8)
|
||||
<class 'numpy.uint64'>
|
||||
>>> maximum_sctype(complex)
|
||||
<class 'numpy.complex256'> # may vary
|
||||
|
||||
>>> maximum_sctype(str)
|
||||
<class 'numpy.str_'>
|
||||
|
||||
>>> maximum_sctype('i2')
|
||||
<class 'numpy.int64'>
|
||||
>>> maximum_sctype('f4')
|
||||
<class 'numpy.float128'> # may vary
|
||||
|
||||
"""
|
||||
|
||||
# Deprecated in NumPy 2.0, 2023-07-11
|
||||
warnings.warn(
|
||||
"`maximum_sctype` is deprecated. Use an explicit dtype like int64 "
|
||||
"or float64 instead. (deprecated in NumPy 2.0)",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
|
||||
g = obj2sctype(t)
|
||||
if g is None:
|
||||
return t
|
||||
t = g
|
||||
base = _kind_name(dtype(t))
|
||||
if base in sctypes:
|
||||
return sctypes[base][-1]
|
||||
else:
|
||||
return t
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def issctype(rep):
|
||||
"""
|
||||
Determines whether the given object represents a scalar data-type.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
rep : any
|
||||
If `rep` is an instance of a scalar dtype, True is returned. If not,
|
||||
False is returned.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : bool
|
||||
Boolean result of check whether `rep` is a scalar dtype.
|
||||
|
||||
See Also
|
||||
--------
|
||||
issubsctype, issubdtype, obj2sctype, sctype2char
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy._core.numerictypes import issctype
|
||||
>>> issctype(np.int32)
|
||||
True
|
||||
>>> issctype(list)
|
||||
False
|
||||
>>> issctype(1.1)
|
||||
False
|
||||
|
||||
Strings are also a scalar type:
|
||||
|
||||
>>> issctype(np.dtype('str'))
|
||||
True
|
||||
|
||||
"""
|
||||
if not isinstance(rep, (type, dtype)):
|
||||
return False
|
||||
try:
|
||||
res = obj2sctype(rep)
|
||||
if res and res != object_:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def obj2sctype(rep, default=None):
    """
    Return the scalar dtype or NumPy equivalent of Python type of an object.

    Parameters
    ----------
    rep : any
        The object of which the type is returned.
    default : any, optional
        Returned for objects whose types can not be determined;
        None when not given.

    Returns
    -------
    dtype : dtype or Python type
        The data type of `rep`.

    See Also
    --------
    sctype2char, issctype, issubsctype, issubdtype
    """
    # Scalar-type classes (including abstract ones like np.floating) are
    # returned as-is rather than being upcast through the dtype constructor.
    if isinstance(rep, type) and issubclass(rep, generic):
        return rep
    # Arrays report the scalar type of their dtype.
    if isinstance(rep, ndarray):
        return rep.dtype.type
    # Anything else: let the dtype constructor decide.
    try:
        converted = dtype(rep)
    except Exception:
        return default
    return converted.type
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def issubclass_(arg1, arg2):
    """
    Determine if a class is a subclass of a second class.

    `issubclass_` behaves like the Python built-in ``issubclass`` except
    that it returns False instead of raising TypeError when one of the
    arguments is not a class.

    Parameters
    ----------
    arg1 : class
        Input class. True is returned if `arg1` is a subclass of `arg2`.
    arg2 : class or tuple of classes.
        Input class. If a tuple of classes, True is returned if `arg1` is a
        subclass of any of the tuple elements.

    Returns
    -------
    out : bool
        Whether `arg1` is a subclass of `arg2` or not.

    See Also
    --------
    issubsctype, issubdtype, issctype
    """
    try:
        result = issubclass(arg1, arg2)
    except TypeError:
        # Non-class input: report False rather than propagating.
        result = False
    return result
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def issubsctype(arg1, arg2):
|
||||
"""
|
||||
Determine if the first argument is a subclass of the second argument.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
arg1, arg2 : dtype or dtype specifier
|
||||
Data-types.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : bool
|
||||
The result.
|
||||
|
||||
See Also
|
||||
--------
|
||||
issctype, issubdtype, obj2sctype
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy._core import issubsctype
|
||||
>>> issubsctype('S8', str)
|
||||
False
|
||||
>>> issubsctype(np.array([1]), int)
|
||||
True
|
||||
>>> issubsctype(np.array([1]), float)
|
||||
False
|
||||
|
||||
"""
|
||||
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
|
||||
|
||||
|
||||
class _PreprocessDTypeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _preprocess_dtype(dtype):
|
||||
"""
|
||||
Preprocess dtype argument by:
|
||||
1. fetching type from a data type
|
||||
2. verifying that types are built-in NumPy dtypes
|
||||
"""
|
||||
if isinstance(dtype, ma.dtype):
|
||||
dtype = dtype.type
|
||||
if isinstance(dtype, ndarray) or dtype not in allTypes.values():
|
||||
raise _PreprocessDTypeError
|
||||
return dtype
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def isdtype(dtype, kind):
    """
    Determine if a provided dtype is of a specified data type ``kind``.

    This function only supports built-in NumPy's data types.
    Third-party dtypes are not yet supported.

    Parameters
    ----------
    dtype : dtype
        The input dtype.
    kind : dtype or str or tuple of dtypes/strs.
        dtype or dtype kind. Allowed dtype kinds are:
        * ``'bool'`` : boolean kind
        * ``'signed integer'`` : signed integer data types
        * ``'unsigned integer'`` : unsigned integer data types
        * ``'integral'`` : integer data types
        * ``'real floating'`` : real-valued floating-point data types
        * ``'complex floating'`` : complex floating-point data types
        * ``'numeric'`` : numeric data types

    Returns
    -------
    out : bool

    See Also
    --------
    issubdtype

    Examples
    --------
    >>> import numpy as np
    >>> np.isdtype(np.float32, np.float64)
    False
    >>> np.isdtype(np.float32, "real floating")
    True
    >>> np.isdtype(np.complex128, ("real floating", "complex floating"))
    True

    """
    # Reduce `dtype` to a built-in scalar type; anything else is a TypeError.
    try:
        dtype = _preprocess_dtype(dtype)
    except _PreprocessDTypeError:
        raise TypeError(
            "dtype argument must be a NumPy dtype, "
            f"but it is a {type(dtype)}."
        ) from None

    # A single kind is handled as a one-element tuple of kinds.
    input_kinds = kind if isinstance(kind, tuple) else (kind,)

    # Expand each named kind (or explicit scalar type) into the set of
    # concrete scalar types it covers; membership then decides the result.
    processed_kinds = set()

    for kind in input_kinds:
        if kind == "bool":
            processed_kinds.add(allTypes["bool"])
        elif kind == "signed integer":
            processed_kinds.update(sctypes["int"])
        elif kind == "unsigned integer":
            processed_kinds.update(sctypes["uint"])
        elif kind == "integral":
            processed_kinds.update(sctypes["int"] + sctypes["uint"])
        elif kind == "real floating":
            processed_kinds.update(sctypes["float"])
        elif kind == "complex floating":
            processed_kinds.update(sctypes["complex"])
        elif kind == "numeric":
            processed_kinds.update(
                sctypes["int"] + sctypes["uint"] +
                sctypes["float"] + sctypes["complex"]
            )
        elif isinstance(kind, str):
            # A string that is not one of the documented kind names.
            raise ValueError(
                "kind argument is a string, but"
                f" {kind!r} is not a known kind name."
            )
        else:
            # Non-string kinds must themselves be built-in scalar types.
            try:
                kind = _preprocess_dtype(kind)
            except _PreprocessDTypeError:
                raise TypeError(
                    "kind argument must be comprised of "
                    "NumPy dtypes or strings only, "
                    f"but is a {type(kind)}."
                ) from None
            processed_kinds.add(kind)

    return dtype in processed_kinds
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def issubdtype(arg1, arg2):
|
||||
r"""
|
||||
Returns True if first argument is a typecode lower/equal in type hierarchy.
|
||||
|
||||
This is like the builtin :func:`issubclass`, but for `dtype`\ s.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
arg1, arg2 : dtype_like
|
||||
`dtype` or object coercible to one
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : bool
|
||||
|
||||
See Also
|
||||
--------
|
||||
:ref:`arrays.scalars` : Overview of the numpy type hierarchy.
|
||||
|
||||
Examples
|
||||
--------
|
||||
`issubdtype` can be used to check the type of arrays:
|
||||
|
||||
>>> ints = np.array([1, 2, 3], dtype=np.int32)
|
||||
>>> np.issubdtype(ints.dtype, np.integer)
|
||||
True
|
||||
>>> np.issubdtype(ints.dtype, np.floating)
|
||||
False
|
||||
|
||||
>>> floats = np.array([1, 2, 3], dtype=np.float32)
|
||||
>>> np.issubdtype(floats.dtype, np.integer)
|
||||
False
|
||||
>>> np.issubdtype(floats.dtype, np.floating)
|
||||
True
|
||||
|
||||
Similar types of different sizes are not subdtypes of each other:
|
||||
|
||||
>>> np.issubdtype(np.float64, np.float32)
|
||||
False
|
||||
>>> np.issubdtype(np.float32, np.float64)
|
||||
False
|
||||
|
||||
but both are subtypes of `floating`:
|
||||
|
||||
>>> np.issubdtype(np.float64, np.floating)
|
||||
True
|
||||
>>> np.issubdtype(np.float32, np.floating)
|
||||
True
|
||||
|
||||
For convenience, dtype-like objects are allowed too:
|
||||
|
||||
>>> np.issubdtype('S1', np.bytes_)
|
||||
True
|
||||
>>> np.issubdtype('i4', np.signedinteger)
|
||||
True
|
||||
|
||||
"""
|
||||
if not issubclass_(arg1, generic):
|
||||
arg1 = dtype(arg1).type
|
||||
if not issubclass_(arg2, generic):
|
||||
arg2 = dtype(arg2).type
|
||||
|
||||
return issubclass(arg1, arg2)
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def sctype2char(sctype):
|
||||
"""
|
||||
Return the string representation of a scalar dtype.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
sctype : scalar dtype or object
|
||||
If a scalar dtype, the corresponding string character is
|
||||
returned. If an object, `sctype2char` tries to infer its scalar type
|
||||
and then return the corresponding string character.
|
||||
|
||||
Returns
|
||||
-------
|
||||
typechar : str
|
||||
The string character corresponding to the scalar type.
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError
|
||||
If `sctype` is an object for which the type can not be inferred.
|
||||
|
||||
See Also
|
||||
--------
|
||||
obj2sctype, issctype, issubsctype, mintypecode
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy._core.numerictypes import sctype2char
|
||||
>>> for sctype in [np.int32, np.double, np.cdouble, np.bytes_, np.ndarray]:
|
||||
... print(sctype2char(sctype))
|
||||
l # may vary
|
||||
d
|
||||
D
|
||||
S
|
||||
O
|
||||
|
||||
>>> x = np.array([1., 2-1.j])
|
||||
>>> sctype2char(x)
|
||||
'D'
|
||||
>>> sctype2char(list)
|
||||
'O'
|
||||
|
||||
"""
|
||||
sctype = obj2sctype(sctype)
|
||||
if sctype is None:
|
||||
raise ValueError("unrecognized type")
|
||||
if sctype not in sctypeDict.values():
|
||||
# for compatibility
|
||||
raise KeyError(sctype)
|
||||
return dtype(sctype).char
|
||||
|
||||
|
||||
def _scalar_type_key(typ):
|
||||
"""A ``key`` function for `sorted`."""
|
||||
dt = dtype(typ)
|
||||
return (dt.kind.lower(), dt.itemsize)
|
||||
|
||||
|
||||
# Tuple of acceptable scalar types: the Python builtins first, then every
# distinct NumPy scalar type ordered by kind and item size.
ScalarType = [int, float, complex, bool, bytes, str, memoryview]
ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key)
ScalarType = tuple(ScalarType)


# Now add the types we've determined to this module
for key in allTypes:
    globals()[key] = allTypes[key]
    __all__.append(key)

# avoid leaking the loop variable as a module attribute
del key

# dtype character codes grouped by category; used e.g. by np.mintypecode.
typecodes = {'Character': 'c',
             'Integer': 'bhilqnp',
             'UnsignedInteger': 'BHILQNP',
             'Float': 'efdg',
             'Complex': 'FDG',
             'AllInteger': 'bBhHiIlLqQnNpP',
             'AllFloat': 'efdgFDG',
             'Datetime': 'Mm',
             'All': '?bhilqnpBHILQNPefdgFDGSUVOMm'}

# backwards compatibility --- deprecated name
# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py)
typeDict = sctypeDict
|
||||
|
||||
def _register_types():
    """Register the NumPy abstract scalar types as virtual subclasses of
    the corresponding `numbers` ABCs."""
    pairs = (
        (numbers.Integral, integer),
        (numbers.Complex, inexact),
        (numbers.Real, floating),
        (numbers.Number, number),
    )
    for abc_cls, np_cls in pairs:
        abc_cls.register(np_cls)


_register_types()
|
||||
192
lib/python3.11/site-packages/numpy/_core/numerictypes.pyi
Normal file
192
lib/python3.11/site-packages/numpy/_core/numerictypes.pyi
Normal file
@ -0,0 +1,192 @@
|
||||
import builtins
|
||||
from typing import Any, TypedDict, type_check_only
|
||||
from typing import Literal as L
|
||||
|
||||
import numpy as np
|
||||
from numpy import (
|
||||
bool,
|
||||
bool_,
|
||||
byte,
|
||||
bytes_,
|
||||
cdouble,
|
||||
character,
|
||||
clongdouble,
|
||||
complex64,
|
||||
complex128,
|
||||
complexfloating,
|
||||
csingle,
|
||||
datetime64,
|
||||
double,
|
||||
dtype,
|
||||
flexible,
|
||||
float16,
|
||||
float32,
|
||||
float64,
|
||||
floating,
|
||||
generic,
|
||||
half,
|
||||
inexact,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
int_,
|
||||
intc,
|
||||
integer,
|
||||
intp,
|
||||
long,
|
||||
longdouble,
|
||||
longlong,
|
||||
number,
|
||||
object_,
|
||||
short,
|
||||
signedinteger,
|
||||
single,
|
||||
str_,
|
||||
timedelta64,
|
||||
ubyte,
|
||||
uint,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
uintc,
|
||||
uintp,
|
||||
ulong,
|
||||
ulonglong,
|
||||
unsignedinteger,
|
||||
ushort,
|
||||
void,
|
||||
)
|
||||
from numpy._typing import DTypeLike
|
||||
from numpy._typing._extended_precision import complex192, complex256, float96, float128
|
||||
|
||||
from ._type_aliases import sctypeDict # noqa: F401
|
||||
from .multiarray import (
|
||||
busday_count,
|
||||
busday_offset,
|
||||
busdaycalendar,
|
||||
datetime_as_string,
|
||||
datetime_data,
|
||||
is_busday,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"ScalarType",
|
||||
"typecodes",
|
||||
"issubdtype",
|
||||
"datetime_data",
|
||||
"datetime_as_string",
|
||||
"busday_offset",
|
||||
"busday_count",
|
||||
"is_busday",
|
||||
"busdaycalendar",
|
||||
"isdtype",
|
||||
"generic",
|
||||
"unsignedinteger",
|
||||
"character",
|
||||
"inexact",
|
||||
"number",
|
||||
"integer",
|
||||
"flexible",
|
||||
"complexfloating",
|
||||
"signedinteger",
|
||||
"floating",
|
||||
"bool",
|
||||
"float16",
|
||||
"float32",
|
||||
"float64",
|
||||
"longdouble",
|
||||
"complex64",
|
||||
"complex128",
|
||||
"clongdouble",
|
||||
"bytes_",
|
||||
"str_",
|
||||
"void",
|
||||
"object_",
|
||||
"datetime64",
|
||||
"timedelta64",
|
||||
"int8",
|
||||
"byte",
|
||||
"uint8",
|
||||
"ubyte",
|
||||
"int16",
|
||||
"short",
|
||||
"uint16",
|
||||
"ushort",
|
||||
"int32",
|
||||
"intc",
|
||||
"uint32",
|
||||
"uintc",
|
||||
"int64",
|
||||
"long",
|
||||
"uint64",
|
||||
"ulong",
|
||||
"longlong",
|
||||
"ulonglong",
|
||||
"intp",
|
||||
"uintp",
|
||||
"double",
|
||||
"cdouble",
|
||||
"single",
|
||||
"csingle",
|
||||
"half",
|
||||
"bool_",
|
||||
"int_",
|
||||
"uint",
|
||||
"float96",
|
||||
"float128",
|
||||
"complex192",
|
||||
"complex256",
|
||||
]
|
||||
|
||||
@type_check_only
|
||||
class _TypeCodes(TypedDict):
    """Static shape of the module-level ``typecodes`` mapping: each key
    names a category of dtypes and maps to the string of their dtype
    character codes."""
    Character: L['c']
    Integer: L['bhilqnp']
    UnsignedInteger: L['BHILQNP']
    Float: L['efdg']
    Complex: L['FDG']
    AllInteger: L['bBhHiIlLqQnNpP']
    AllFloat: L['efdgFDG']
    Datetime: L['Mm']
    All: L['?bhilqnpBHILQNPefdgFDGSUVOMm']
|
||||
|
||||
def isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ...
|
||||
|
||||
def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ...
|
||||
|
||||
typecodes: _TypeCodes
|
||||
ScalarType: tuple[
|
||||
type[int],
|
||||
type[float],
|
||||
type[complex],
|
||||
type[builtins.bool],
|
||||
type[bytes],
|
||||
type[str],
|
||||
type[memoryview],
|
||||
type[np.bool],
|
||||
type[csingle],
|
||||
type[cdouble],
|
||||
type[clongdouble],
|
||||
type[half],
|
||||
type[single],
|
||||
type[double],
|
||||
type[longdouble],
|
||||
type[byte],
|
||||
type[short],
|
||||
type[intc],
|
||||
type[long],
|
||||
type[longlong],
|
||||
type[timedelta64],
|
||||
type[datetime64],
|
||||
type[object_],
|
||||
type[bytes_],
|
||||
type[str_],
|
||||
type[ubyte],
|
||||
type[ushort],
|
||||
type[uintc],
|
||||
type[ulong],
|
||||
type[ulonglong],
|
||||
type[void],
|
||||
]
|
||||
183
lib/python3.11/site-packages/numpy/_core/overrides.py
Normal file
183
lib/python3.11/site-packages/numpy/_core/overrides.py
Normal file
@ -0,0 +1,183 @@
|
||||
"""Implementation of __array_function__ overrides from NEP-18."""
|
||||
import collections
|
||||
import functools
|
||||
|
||||
from numpy._core._multiarray_umath import (
|
||||
_ArrayFunctionDispatcher,
|
||||
_get_implementing_args,
|
||||
add_docstring,
|
||||
)
|
||||
from numpy._utils import set_module # noqa: F401
|
||||
from numpy._utils._inspect import getargspec
|
||||
|
||||
ARRAY_FUNCTIONS = set()
|
||||
|
||||
array_function_like_doc = (
|
||||
"""like : array_like, optional
|
||||
Reference object to allow the creation of arrays which are not
|
||||
NumPy arrays. If an array-like passed in as ``like`` supports
|
||||
the ``__array_function__`` protocol, the result will be defined
|
||||
by it. In this case, it ensures the creation of an array object
|
||||
compatible with that passed in via this argument."""
|
||||
)
|
||||
|
||||
def get_array_function_like_doc(public_api, docstring_template=""):
|
||||
ARRAY_FUNCTIONS.add(public_api)
|
||||
docstring = public_api.__doc__ or docstring_template
|
||||
return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc)
|
||||
|
||||
def finalize_array_function_like(public_api):
|
||||
public_api.__doc__ = get_array_function_like_doc(public_api)
|
||||
return public_api
|
||||
|
||||
|
||||
add_docstring(
|
||||
_ArrayFunctionDispatcher,
|
||||
"""
|
||||
Class to wrap functions with checks for __array_function__ overrides.
|
||||
|
||||
All arguments are required, and can only be passed by position.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
dispatcher : function or None
|
||||
The dispatcher function that returns a single sequence-like object
|
||||
of all arguments relevant. It must have the same signature (except
|
||||
the default values) as the actual implementation.
|
||||
If ``None``, this is a ``like=`` dispatcher and the
|
||||
``_ArrayFunctionDispatcher`` must be called with ``like`` as the
|
||||
first (additional and positional) argument.
|
||||
implementation : function
|
||||
Function that implements the operation on NumPy arrays without
|
||||
overrides. Arguments passed calling the ``_ArrayFunctionDispatcher``
|
||||
will be forwarded to this (and the ``dispatcher``) as if using
|
||||
``*args, **kwargs``.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
_implementation : function
|
||||
The original implementation passed in.
|
||||
""")
|
||||
|
||||
|
||||
# exposed for testing purposes; used internally by _ArrayFunctionDispatcher
|
||||
add_docstring(
|
||||
_get_implementing_args,
|
||||
"""
|
||||
Collect arguments on which to call __array_function__.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
relevant_args : iterable of array-like
|
||||
Iterable of possibly array-like arguments to check for
|
||||
__array_function__ methods.
|
||||
|
||||
Returns
|
||||
-------
|
||||
Sequence of arguments with __array_function__ methods, in the order in
|
||||
which they should be called.
|
||||
""")
|
||||
|
||||
|
||||
# Stand-in for the result type of `numpy._utils._inspect.getargspec`
# (mirrors the long-removed `inspect.getargspec` return value).
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
|
||||
|
||||
|
||||
def verify_matching_signatures(implementation, dispatcher):
    """Verify that a dispatcher function has the right signature.

    Raises RuntimeError when the dispatcher's argument names/kinds differ
    from the implementation's, or when the dispatcher uses any default
    value other than None.
    """
    implementation_spec = ArgSpec(*getargspec(implementation))
    dispatcher_spec = ArgSpec(*getargspec(dispatcher))

    # Positional args, *args and **kwargs names must match exactly; the
    # two functions must also agree on whether they have defaults, and on
    # how many defaults there are (values themselves are checked below).
    if (implementation_spec.args != dispatcher_spec.args or
            implementation_spec.varargs != dispatcher_spec.varargs or
            implementation_spec.keywords != dispatcher_spec.keywords or
            (bool(implementation_spec.defaults) !=
             bool(dispatcher_spec.defaults)) or
            (implementation_spec.defaults is not None and
             len(implementation_spec.defaults) !=
             len(dispatcher_spec.defaults))):
        raise RuntimeError('implementation and dispatcher for %s have '
                           'different function signatures' % implementation)

    if implementation_spec.defaults is not None:
        # Dispatchers never see real default values, so they must declare
        # every default as None.
        if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
            raise RuntimeError('dispatcher functions can only use None for '
                               'default argument values')
|
||||
|
||||
|
||||
def array_function_dispatch(dispatcher=None, module=None, verify=True,
                            docs_from_dispatcher=False):
    """Decorator for adding dispatch with the __array_function__ protocol.

    See NEP-18 for example usage.

    Parameters
    ----------
    dispatcher : callable or None
        Function that when called like ``dispatcher(*args, **kwargs)`` with
        arguments from the NumPy function call returns an iterable of
        array-like arguments to check for ``__array_function__``.

        If `None`, the first argument is used as the single `like=` argument
        and not passed on.  A function implementing `like=` must call its
        dispatcher with `like` as the first non-keyword argument.
    module : str, optional
        __module__ attribute to set on new function, e.g., ``module='numpy'``.
        By default, module is copied from the decorated function.
    verify : bool, optional
        If True, verify that the signatures of the dispatcher and decorated
        function match exactly: all required and optional arguments
        should appear in order with the same names, but the default values for
        all optional arguments should be ``None``. Only disable verification
        if the dispatcher's signature needs to deviate for some particular
        reason, e.g., because the function has a signature like
        ``func(*args, **kwargs)``.
    docs_from_dispatcher : bool, optional
        If True, copy docs from the dispatcher function onto the dispatched
        function, rather than from the implementation. This is useful for
        functions defined in C, which otherwise don't have docstrings.

    Returns
    -------
    Function suitable for decorating the implementation of a NumPy function.

    """
    def decorator(implementation):
        if verify:
            if dispatcher is not None:
                verify_matching_signatures(implementation, dispatcher)
            else:
                # `like=` dispatchers have no separate dispatcher function;
                # instead require that the implementation takes `like` as
                # its last, keyword-only argument.
                # Using __code__ directly similar to verify_matching_signature
                co = implementation.__code__
                last_arg = co.co_argcount + co.co_kwonlyargcount - 1
                last_arg = co.co_varnames[last_arg]
                if last_arg != "like" or co.co_kwonlyargcount == 0:
                    raise RuntimeError(
                        "__array_function__ expects `like=` to be the last "
                        "argument and a keyword-only argument. "
                        f"{implementation} does not seem to comply.")

        if docs_from_dispatcher:
            add_docstring(implementation, dispatcher.__doc__)

        # The C-level wrapper performs the actual __array_function__
        # protocol checks before delegating to `implementation`.
        public_api = _ArrayFunctionDispatcher(dispatcher, implementation)
        public_api = functools.wraps(implementation)(public_api)

        if module is not None:
            public_api.__module__ = module

        ARRAY_FUNCTIONS.add(public_api)

        return public_api

    return decorator
|
||||
|
||||
|
||||
def array_function_from_dispatcher(
|
||||
implementation, module=None, verify=True, docs_from_dispatcher=True):
|
||||
"""Like array_function_dispatcher, but with function arguments flipped."""
|
||||
|
||||
def decorator(dispatcher):
|
||||
return array_function_dispatch(
|
||||
dispatcher, module, verify=verify,
|
||||
docs_from_dispatcher=docs_from_dispatcher)(implementation)
|
||||
return decorator
|
||||
48
lib/python3.11/site-packages/numpy/_core/overrides.pyi
Normal file
48
lib/python3.11/site-packages/numpy/_core/overrides.pyi
Normal file
@ -0,0 +1,48 @@
|
||||
from collections.abc import Callable, Iterable
|
||||
from typing import Any, Final, NamedTuple, ParamSpec, TypeVar
|
||||
|
||||
from numpy._typing import _SupportsArrayFunc
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_Tss = ParamSpec("_Tss")
|
||||
_FuncT = TypeVar("_FuncT", bound=Callable[..., object])
|
||||
|
||||
###
|
||||
|
||||
ARRAY_FUNCTIONS: set[Callable[..., Any]] = ...
|
||||
array_function_like_doc: Final[str] = ...
|
||||
|
||||
class ArgSpec(NamedTuple):
|
||||
args: list[str]
|
||||
varargs: str | None
|
||||
keywords: str | None
|
||||
defaults: tuple[Any, ...]
|
||||
|
||||
def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ...
|
||||
def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ...
|
||||
|
||||
#
|
||||
def verify_matching_signatures(
|
||||
implementation: Callable[_Tss, object],
|
||||
dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]],
|
||||
) -> None: ...
|
||||
|
||||
# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with
|
||||
# the original wrapped callable stored in the `._implementation` attribute. It checks
|
||||
# for any `__array_function__` of the values of specific arguments that the dispatcher
|
||||
# specifies. Since the dispatcher only returns an iterable of passed array-like args,
|
||||
# this overridable behaviour is impossible to annotate.
|
||||
def array_function_dispatch(
|
||||
dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None,
|
||||
module: str | None = None,
|
||||
verify: bool = True,
|
||||
docs_from_dispatcher: bool = False,
|
||||
) -> Callable[[_FuncT], _FuncT]: ...
|
||||
|
||||
#
|
||||
def array_function_from_dispatcher(
|
||||
implementation: Callable[_Tss, _T],
|
||||
module: str | None = None,
|
||||
verify: bool = True,
|
||||
docs_from_dispatcher: bool = True,
|
||||
) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ...
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user