lib/python3.11/site-packages/numpy/lib/__init__.py (new file)
@@ -0,0 +1,97 @@
"""
|
||||
``numpy.lib`` is mostly a space for implementing functions that don't
|
||||
belong in core or in another NumPy submodule with a clear purpose
|
||||
(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
|
||||
|
||||
``numpy.lib``'s private submodules contain basic functions that are used by
|
||||
other public modules and are useful to have in the main name-space.
|
||||
|
||||
"""
|
||||
|
||||
# Public submodules
|
||||
# Note: recfunctions is public, but not imported
|
||||
from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain
|
||||
from numpy._core.function_base import add_newdoc
|
||||
|
||||
# Private submodules
|
||||
# load module names. See https://github.com/networkx/networkx/issues/5838
|
||||
from . import (
|
||||
_arraypad_impl,
|
||||
_arraysetops_impl,
|
||||
_arrayterator_impl,
|
||||
_function_base_impl,
|
||||
_histograms_impl,
|
||||
_index_tricks_impl,
|
||||
_nanfunctions_impl,
|
||||
_npyio_impl,
|
||||
_polynomial_impl,
|
||||
_shape_base_impl,
|
||||
_stride_tricks_impl,
|
||||
_twodim_base_impl,
|
||||
_type_check_impl,
|
||||
_ufunclike_impl,
|
||||
_utils_impl,
|
||||
_version,
|
||||
array_utils,
|
||||
format,
|
||||
introspect,
|
||||
mixins,
|
||||
npyio,
|
||||
scimath,
|
||||
stride_tricks,
|
||||
)
|
||||
|
||||
# numpy.lib namespace members
|
||||
from ._arrayterator_impl import Arrayterator
|
||||
from ._version import NumpyVersion
|
||||
|
||||
__all__ = [
|
||||
"Arrayterator", "add_docstring", "add_newdoc", "array_utils",
|
||||
"format", "introspect", "mixins", "NumpyVersion", "npyio", "scimath",
|
||||
"stride_tricks", "tracemalloc_domain",
|
||||
]
|
||||
|
||||
add_newdoc.__module__ = "numpy.lib"
|
||||
|
||||
from numpy._pytesttester import PytestTester
|
||||
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
|
||||
def __getattr__(attr):
|
||||
# Warn for deprecated/removed aliases
|
||||
import math
|
||||
import warnings
|
||||
|
||||
if attr == "math":
|
||||
warnings.warn(
|
||||
"`np.lib.math` is a deprecated alias for the standard library "
|
||||
"`math` module (Deprecated Numpy 1.25). Replace usages of "
|
||||
"`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
|
||||
return math
|
||||
elif attr == "emath":
|
||||
raise AttributeError(
|
||||
"numpy.lib.emath was an alias for emath module that was removed "
|
||||
"in NumPy 2.0. Replace usages of numpy.lib.emath with "
|
||||
"numpy.emath.",
|
||||
name=None
|
||||
)
|
||||
elif attr in (
|
||||
"histograms", "type_check", "nanfunctions", "function_base",
|
||||
"arraypad", "arraysetops", "ufunclike", "utils", "twodim_base",
|
||||
"shape_base", "polynomial", "index_tricks",
|
||||
):
|
||||
raise AttributeError(
|
||||
f"numpy.lib.{attr} is now private. If you are using a public "
|
||||
"function, it should be available in the main numpy namespace, "
|
||||
"otherwise check the NumPy 2.0 migration guide.",
|
||||
name=None
|
||||
)
|
||||
elif attr == "arrayterator":
|
||||
raise AttributeError(
|
||||
"numpy.lib.arrayterator submodule is now private. To access "
|
||||
"Arrayterator class use numpy.lib.Arrayterator.",
|
||||
name=None
|
||||
)
|
||||
else:
|
||||
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
|
||||
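
# Illustrative sketch (editorial addition, not part of the module above):
# the module-level __getattr__ implements PEP 562 lazy attribute access, so
# the deprecated and removed aliases are only reported when touched.
#
#   >>> import numpy as np
#   >>> np.lib.math.sqrt(4.0)   # emits DeprecationWarning, returns stdlib math
#   2.0
#   >>> np.lib.arraypad         # privatized submodule
#   Traceback (most recent call last):
#       ...
#   AttributeError: numpy.lib.arraypad is now private. ...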

lib/python3.11/site-packages/numpy/lib/__init__.pyi (new file)
@@ -0,0 +1,44 @@
from numpy._core.function_base import add_newdoc
from numpy._core.multiarray import add_docstring, tracemalloc_domain

# all submodules of `lib` are accessible at runtime through `__getattr__`,
# so we implicitly re-export them here
from . import _array_utils_impl as _array_utils_impl
from . import _arraypad_impl as _arraypad_impl
from . import _arraysetops_impl as _arraysetops_impl
from . import _arrayterator_impl as _arrayterator_impl
from . import _datasource as _datasource
from . import _format_impl as _format_impl
from . import _function_base_impl as _function_base_impl
from . import _histograms_impl as _histograms_impl
from . import _index_tricks_impl as _index_tricks_impl
from . import _iotools as _iotools
from . import _nanfunctions_impl as _nanfunctions_impl
from . import _npyio_impl as _npyio_impl
from . import _polynomial_impl as _polynomial_impl
from . import _scimath_impl as _scimath_impl
from . import _shape_base_impl as _shape_base_impl
from . import _stride_tricks_impl as _stride_tricks_impl
from . import _twodim_base_impl as _twodim_base_impl
from . import _type_check_impl as _type_check_impl
from . import _ufunclike_impl as _ufunclike_impl
from . import _utils_impl as _utils_impl
from . import _version as _version
from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks
from ._arrayterator_impl import Arrayterator
from ._version import NumpyVersion

__all__ = [
    "Arrayterator",
    "add_docstring",
    "add_newdoc",
    "array_utils",
    "format",
    "introspect",
    "mixins",
    "NumpyVersion",
    "npyio",
    "scimath",
    "stride_tricks",
    "tracemalloc_domain",
]

lib/python3.11/site-packages/numpy/lib/_array_utils_impl.py (new file)
@@ -0,0 +1,62 @@
"""
|
||||
Miscellaneous utils.
|
||||
"""
|
||||
from numpy._core import asarray
|
||||
from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple
|
||||
from numpy._utils import set_module
|
||||
|
||||
__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"]
|
||||
|
||||
|
||||
@set_module("numpy.lib.array_utils")
|
||||
def byte_bounds(a):
|
||||
"""
|
||||
Returns pointers to the end-points of an array.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
a : ndarray
|
||||
Input array. It must conform to the Python-side of the array
|
||||
interface.
|
||||
|
||||
Returns
|
||||
-------
|
||||
(low, high) : tuple of 2 integers
|
||||
The first integer is the first byte of the array, the second
|
||||
integer is just past the last byte of the array. If `a` is not
|
||||
contiguous it will not use every byte between the (`low`, `high`)
|
||||
values.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> I = np.eye(2, dtype='f'); I.dtype
|
||||
dtype('float32')
|
||||
>>> low, high = np.lib.array_utils.byte_bounds(I)
|
||||
>>> high - low == I.size*I.itemsize
|
||||
True
|
||||
>>> I = np.eye(2); I.dtype
|
||||
dtype('float64')
|
||||
>>> low, high = np.lib.array_utils.byte_bounds(I)
|
||||
>>> high - low == I.size*I.itemsize
|
||||
True
|
||||
|
||||
"""
|
||||
ai = a.__array_interface__
|
||||
a_data = ai['data'][0]
|
||||
astrides = ai['strides']
|
||||
ashape = ai['shape']
|
||||
bytes_a = asarray(a).dtype.itemsize
|
||||
|
||||
a_low = a_high = a_data
|
||||
if astrides is None:
|
||||
# contiguous case
|
||||
a_high += a.size * bytes_a
|
||||
else:
|
||||
for shape, stride in zip(ashape, astrides):
|
||||
if stride < 0:
|
||||
a_low += (shape - 1) * stride
|
||||
else:
|
||||
a_high += (shape - 1) * stride
|
||||
a_high += bytes_a
|
||||
return a_low, a_high
|
||||
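
# Illustrative sketch (editorial addition): because byte_bounds walks the
# strides, a reversed view brackets the same memory block as its base array.
#
#   >>> import numpy as np
#   >>> from numpy.lib.array_utils import byte_bounds
#   >>> a = np.arange(10, dtype=np.int64)
#   >>> byte_bounds(a[::-1]) == byte_bounds(a)   # negative stride, same buffer
#   True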

lib/python3.11/site-packages/numpy/lib/_array_utils_impl.pyi (new file)
@@ -0,0 +1,26 @@
from collections.abc import Iterable
from typing import Any

from numpy import generic
from numpy.typing import NDArray

__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"]

# NOTE: In practice `byte_bounds` can (potentially) take any object
# implementing the `__array_interface__` protocol. The caveat is
# that certain keys, marked as optional in the spec, must be present for
# `byte_bounds`. This concerns `"strides"` and `"data"`.
def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ...

def normalize_axis_tuple(
    axis: int | Iterable[int],
    ndim: int = ...,
    argname: str | None = ...,
    allow_duplicate: bool | None = ...,
) -> tuple[int, int]: ...

def normalize_axis_index(
    axis: int = ...,
    ndim: int = ...,
    msg_prefix: str | None = ...,
) -> int: ...

lib/python3.11/site-packages/numpy/lib/_arraypad_impl.py (new file)
@@ -0,0 +1,890 @@
"""
|
||||
The arraypad module contains a group of functions to pad values onto the edges
|
||||
of an n-dimensional array.
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
from numpy._core.overrides import array_function_dispatch
|
||||
from numpy.lib._index_tricks_impl import ndindex
|
||||
|
||||
__all__ = ['pad']
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Private utility functions.
|
||||
|
||||
|
||||
def _round_if_needed(arr, dtype):
|
||||
"""
|
||||
Rounds arr inplace if destination dtype is integer.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
arr : ndarray
|
||||
Input array.
|
||||
dtype : dtype
|
||||
The dtype of the destination array.
|
||||
"""
|
||||
if np.issubdtype(dtype, np.integer):
|
||||
arr.round(out=arr)
|
||||
|
||||
|
||||
def _slice_at_axis(sl, axis):
|
||||
"""
|
||||
Construct tuple of slices to slice an array in the given dimension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
sl : slice
|
||||
The slice for the given dimension.
|
||||
axis : int
|
||||
The axis to which `sl` is applied. All other dimensions are left
|
||||
"unsliced".
|
||||
|
||||
Returns
|
||||
-------
|
||||
sl : tuple of slices
|
||||
A tuple with slices matching `shape` in length.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> np._slice_at_axis(slice(None, 3, -1), 1)
|
||||
(slice(None, None, None), slice(None, 3, -1), (...,))
|
||||
"""
|
||||
return (slice(None),) * axis + (sl,) + (...,)
|
||||
|
||||
|
||||
def _view_roi(array, original_area_slice, axis):
|
||||
"""
|
||||
Get a view of the current region of interest during iterative padding.
|
||||
|
||||
When padding multiple dimensions iteratively corner values are
|
||||
unnecessarily overwritten multiple times. This function reduces the
|
||||
working area for the first dimensions so that corners are excluded.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
array : ndarray
|
||||
The array with the region of interest.
|
||||
original_area_slice : tuple of slices
|
||||
Denotes the area with original values of the unpadded array.
|
||||
axis : int
|
||||
The currently padded dimension assuming that `axis` is padded before
|
||||
`axis` + 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
roi : ndarray
|
||||
The region of interest of the original `array`.
|
||||
"""
|
||||
axis += 1
|
||||
sl = (slice(None),) * axis + original_area_slice[axis:]
|
||||
return array[sl]
|
||||
|
||||
|
||||
def _pad_simple(array, pad_width, fill_value=None):
|
||||
"""
|
||||
Pad array on all sides with either a single value or undefined values.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
array : ndarray
|
||||
Array to grow.
|
||||
pad_width : sequence of tuple[int, int]
|
||||
Pad width on both sides for each dimension in `arr`.
|
||||
fill_value : scalar, optional
|
||||
If provided the padded area is filled with this value, otherwise
|
||||
the pad area left undefined.
|
||||
|
||||
Returns
|
||||
-------
|
||||
padded : ndarray
|
||||
The padded array with the same dtype as`array`. Its order will default
|
||||
to C-style if `array` is not F-contiguous.
|
||||
original_area_slice : tuple
|
||||
A tuple of slices pointing to the area of the original array.
|
||||
"""
|
||||
# Allocate grown array
|
||||
new_shape = tuple(
|
||||
left + size + right
|
||||
for size, (left, right) in zip(array.shape, pad_width)
|
||||
)
|
||||
order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order
|
||||
padded = np.empty(new_shape, dtype=array.dtype, order=order)
|
||||
|
||||
if fill_value is not None:
|
||||
padded.fill(fill_value)
|
||||
|
||||
# Copy old array into correct space
|
||||
original_area_slice = tuple(
|
||||
slice(left, left + size)
|
||||
for size, (left, right) in zip(array.shape, pad_width)
|
||||
)
|
||||
padded[original_area_slice] = array
|
||||
|
||||
return padded, original_area_slice
|
||||
|
||||
|
||||
def _set_pad_area(padded, axis, width_pair, value_pair):
|
||||
"""
|
||||
Set empty-padded area in given dimension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
padded : ndarray
|
||||
Array with the pad area which is modified inplace.
|
||||
axis : int
|
||||
Dimension with the pad area to set.
|
||||
width_pair : (int, int)
|
||||
Pair of widths that mark the pad area on both sides in the given
|
||||
dimension.
|
||||
value_pair : tuple of scalars or ndarrays
|
||||
Values inserted into the pad area on each side. It must match or be
|
||||
broadcastable to the shape of `arr`.
|
||||
"""
|
||||
left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
|
||||
padded[left_slice] = value_pair[0]
|
||||
|
||||
right_slice = _slice_at_axis(
|
||||
slice(padded.shape[axis] - width_pair[1], None), axis)
|
||||
padded[right_slice] = value_pair[1]
|
||||
|
||||
|
||||
def _get_edges(padded, axis, width_pair):
|
||||
"""
|
||||
Retrieve edge values from empty-padded array in given dimension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
padded : ndarray
|
||||
Empty-padded array.
|
||||
axis : int
|
||||
Dimension in which the edges are considered.
|
||||
width_pair : (int, int)
|
||||
Pair of widths that mark the pad area on both sides in the given
|
||||
dimension.
|
||||
|
||||
Returns
|
||||
-------
|
||||
left_edge, right_edge : ndarray
|
||||
Edge values of the valid area in `padded` in the given dimension. Its
|
||||
shape will always match `padded` except for the dimension given by
|
||||
`axis` which will have a length of 1.
|
||||
"""
|
||||
left_index = width_pair[0]
|
||||
left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
|
||||
left_edge = padded[left_slice]
|
||||
|
||||
right_index = padded.shape[axis] - width_pair[1]
|
||||
right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
|
||||
right_edge = padded[right_slice]
|
||||
|
||||
return left_edge, right_edge
|
||||
|
||||
|
||||
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
|
||||
"""
|
||||
Construct linear ramps for empty-padded array in given dimension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
padded : ndarray
|
||||
Empty-padded array.
|
||||
axis : int
|
||||
Dimension in which the ramps are constructed.
|
||||
width_pair : (int, int)
|
||||
Pair of widths that mark the pad area on both sides in the given
|
||||
dimension.
|
||||
end_value_pair : (scalar, scalar)
|
||||
End values for the linear ramps which form the edge of the fully padded
|
||||
array. These values are included in the linear ramps.
|
||||
|
||||
Returns
|
||||
-------
|
||||
left_ramp, right_ramp : ndarray
|
||||
Linear ramps to set on both sides of `padded`.
|
||||
"""
|
||||
edge_pair = _get_edges(padded, axis, width_pair)
|
||||
|
||||
left_ramp, right_ramp = (
|
||||
np.linspace(
|
||||
start=end_value,
|
||||
stop=edge.squeeze(axis), # Dimension is replaced by linspace
|
||||
num=width,
|
||||
endpoint=False,
|
||||
dtype=padded.dtype,
|
||||
axis=axis
|
||||
)
|
||||
for end_value, edge, width in zip(
|
||||
end_value_pair, edge_pair, width_pair
|
||||
)
|
||||
)
|
||||
|
||||
# Reverse linear space in appropriate dimension
|
||||
right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
|
||||
|
||||
return left_ramp, right_ramp
|
||||
|
||||
|
||||
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
|
||||
"""
|
||||
Calculate statistic for the empty-padded array in given dimension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
padded : ndarray
|
||||
Empty-padded array.
|
||||
axis : int
|
||||
Dimension in which the statistic is calculated.
|
||||
width_pair : (int, int)
|
||||
Pair of widths that mark the pad area on both sides in the given
|
||||
dimension.
|
||||
length_pair : 2-element sequence of None or int
|
||||
Gives the number of values in valid area from each side that is
|
||||
taken into account when calculating the statistic. If None the entire
|
||||
valid area in `padded` is considered.
|
||||
stat_func : function
|
||||
Function to compute statistic. The expected signature is
|
||||
``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
left_stat, right_stat : ndarray
|
||||
Calculated statistic for both sides of `padded`.
|
||||
"""
|
||||
# Calculate indices of the edges of the area with original values
|
||||
left_index = width_pair[0]
|
||||
right_index = padded.shape[axis] - width_pair[1]
|
||||
# as well as its length
|
||||
max_length = right_index - left_index
|
||||
|
||||
# Limit stat_lengths to max_length
|
||||
left_length, right_length = length_pair
|
||||
if left_length is None or max_length < left_length:
|
||||
left_length = max_length
|
||||
if right_length is None or max_length < right_length:
|
||||
right_length = max_length
|
||||
|
||||
if (left_length == 0 or right_length == 0) \
|
||||
and stat_func in {np.amax, np.amin}:
|
||||
# amax and amin can't operate on an empty array,
|
||||
# raise a more descriptive warning here instead of the default one
|
||||
raise ValueError("stat_length of 0 yields no value for padding")
|
||||
|
||||
# Calculate statistic for the left side
|
||||
left_slice = _slice_at_axis(
|
||||
slice(left_index, left_index + left_length), axis)
|
||||
left_chunk = padded[left_slice]
|
||||
left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
|
||||
_round_if_needed(left_stat, padded.dtype)
|
||||
|
||||
if left_length == right_length == max_length:
|
||||
# return early as right_stat must be identical to left_stat
|
||||
return left_stat, left_stat
|
||||
|
||||
# Calculate statistic for the right side
|
||||
right_slice = _slice_at_axis(
|
||||
slice(right_index - right_length, right_index), axis)
|
||||
right_chunk = padded[right_slice]
|
||||
right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
|
||||
_round_if_needed(right_stat, padded.dtype)
|
||||
|
||||
return left_stat, right_stat
|
||||
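
# Illustrative sketch (editorial addition): with a stat mode, only
# `stat_length` values nearest each edge feed the statistic computed above.
#
#   >>> np.pad([1, 2, 3, 4], (1, 1), mode="maximum", stat_length=2)
#   array([2, 1, 2, 3, 4, 4])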


def _set_reflect_both(padded, axis, width_pair, method,
                      original_period, include_edge=False):
    """
    Pad `axis` of `arr` with reflection.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    method : str
        Controls method of reflection; options are 'even' or 'odd'.
    original_period : int
        Original length of data on `axis` of `arr`.
    include_edge : bool
        If true, edge value is included in reflection, otherwise the edge
        value forms the symmetric axis to the reflection.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these are
        both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    old_length = padded.shape[axis] - right_pad - left_pad

    if include_edge:
        # Avoid wrapping with only a subset of the original area
        # by ensuring period can only be a multiple of the original
        # area's length.
        old_length = old_length // original_period * original_period
        # Edge is included, we need to offset the pad amount by 1
        edge_offset = 1
    else:
        # Avoid wrapping with only a subset of the original area
        # by ensuring period can only be a multiple of the original
        # area's length.
        old_length = ((old_length - 1) // (original_period - 1)
                      * (original_period - 1) + 1)
        edge_offset = 0  # Edge is not included, no need to offset pad amount
        old_length -= 1  # but must be omitted from the chunk

    if left_pad > 0:
        # Pad with reflected values on left side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, left_pad)
        # Slice right to left, stop on or next to edge, start relative to stop
        stop = left_pad - edge_offset
        start = stop + chunk_length
        left_slice = _slice_at_axis(slice(start, stop, -1), axis)
        left_chunk = padded[left_slice]

        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
            left_chunk = 2 * padded[edge_slice] - left_chunk

        # Insert chunk into padded area
        start = left_pad - chunk_length
        stop = left_pad
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = left_chunk
        # Adjust pointer to left edge for next iteration
        left_pad -= chunk_length

    if right_pad > 0:
        # Pad with reflected values on right side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, right_pad)
        # Slice right to left, start on or next to edge, stop relative to start
        start = -right_pad + edge_offset - 2
        stop = start - chunk_length
        right_slice = _slice_at_axis(slice(start, stop, -1), axis)
        right_chunk = padded[right_slice]

        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(
                slice(-right_pad - 1, -right_pad), axis)
            right_chunk = 2 * padded[edge_slice] - right_chunk

        # Insert chunk into padded area
        start = padded.shape[axis] - right_pad
        stop = start + chunk_length
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = right_chunk
        # Adjust pointer to right edge for next iteration
        right_pad -= chunk_length

    return left_pad, right_pad
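
# Illustrative sketch (editorial addition): when the requested pad is wider
# than the data, `pad` calls the helper above repeatedly until both widths
# reach 0, reflecting one period (without the edge) per pass.
#
#   >>> np.pad([1, 2, 3], (0, 5), mode="reflect")
#   array([1, 2, 3, 2, 1, 2, 3, 2])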


def _set_wrap_both(padded, axis, width_pair, original_period):
    """
    Pad `axis` of `arr` with wrapped values.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    original_period : int
        Original length of data on `axis` of `arr`.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these are
        both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    period = padded.shape[axis] - right_pad - left_pad
    # Avoid wrapping with only a subset of the original area by ensuring
    # period can only be a multiple of the original area's length.
    period = period // original_period * original_period

    # If the current dimension of `arr` doesn't contain enough valid values
    # (not part of the undefined pad area) we need to pad multiple times.
    # Each time the pad area shrinks on both sides which is communicated with
    # these variables.
    new_left_pad = 0
    new_right_pad = 0

    if left_pad > 0:
        # Pad with wrapped values on left side
        # First slice chunk from left side of the non-pad area.
        # Use min(period, left_pad) to ensure that chunk is not larger than
        # pad area.
        slice_end = left_pad + period
        slice_start = slice_end - min(period, left_pad)
        right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        right_chunk = padded[right_slice]

        if left_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
            new_left_pad = left_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk

    if right_pad > 0:
        # Pad with wrapped values on right side
        # First slice chunk from right side of the non-pad area.
        # Use min(period, right_pad) to ensure that chunk is not larger than
        # pad area.
        slice_start = -right_pad - period
        slice_end = slice_start + min(period, right_pad)
        left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        left_chunk = padded[left_slice]

        if right_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(
                slice(-right_pad, -right_pad + period), axis)
            new_right_pad = right_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(-right_pad, None), axis)
        padded[pad_area] = left_chunk

    return new_left_pad, new_right_pad
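
# Illustrative sketch (editorial addition): as with 'reflect', a 'wrap' pad
# wider than the array is applied one period per pass of the helper above.
#
#   >>> np.pad([1, 2, 3], (7, 0), mode="wrap")
#   array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3])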


def _as_pairs(x, ndim, as_index=False):
    """
    Broadcast `x` to an array with the shape (`ndim`, 2).

    A helper function for `pad` that prepares and validates arguments like
    `pad_width` for iteration in pairs.

    Parameters
    ----------
    x : {None, scalar, array-like}
        The object to broadcast to the shape (`ndim`, 2).
    ndim : int
        Number of pairs the broadcasted `x` will have.
    as_index : bool, optional
        If `x` is not None, try to round each element of `x` to an integer
        (dtype `np.intp`) and ensure every element is positive.

    Returns
    -------
    pairs : nested iterables, shape (`ndim`, 2)
        The broadcasted version of `x`.

    Raises
    ------
    ValueError
        If `as_index` is True and `x` contains negative elements.
        Or if `x` is not broadcastable to the shape (`ndim`, 2).
    """
    if x is None:
        # Pass through None as a special case, otherwise np.round(x) fails
        # with an AttributeError
        return ((None, None),) * ndim

    x = np.array(x)
    if as_index:
        x = np.round(x).astype(np.intp, copy=False)

    if x.ndim < 3:
        # Optimization: Possibly use faster paths for cases where `x` has
        # only 1 or 2 elements. `np.broadcast_to` could handle these as well
        # but is currently slower

        if x.size == 1:
            # x was supplied as a single value
            x = x.ravel()  # Ensure x[0] works for x.ndim == 0, 1, 2
            if as_index and x < 0:
                raise ValueError("index can't contain negative values")
            return ((x[0], x[0]),) * ndim

        if x.size == 2 and x.shape != (2, 1):
            # x was supplied with a single value for each side
            # but except case when each dimension has a single value
            # which should be broadcasted to a pair,
            # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
            x = x.ravel()  # Ensure x[0], x[1] works
            if as_index and (x[0] < 0 or x[1] < 0):
                raise ValueError("index can't contain negative values")
            return ((x[0], x[1]),) * ndim

    if as_index and x.min() < 0:
        raise ValueError("index can't contain negative values")

    # Converting the array with `tolist` seems to improve performance
    # when iterating and indexing the result (see usage in `pad`)
    return np.broadcast_to(x, (ndim, 2)).tolist()
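
# Illustrative sketch (editorial addition) of the broadcasting rules above:
#
#   >>> _as_pairs(None, 2)        # None passes straight through
#   ((None, None), (None, None))
#   >>> _as_pairs([[1], [2]], 2)  # one value per axis, duplicated per side
#   [[1, 1], [2, 2]]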


def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
    return (array,)


###############################################################################
# Public functions


@array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode='constant', **kwargs):
    """
    Pad an array.

    Parameters
    ----------
    array : array_like of rank N
        The array to pad.
    pad_width : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
        for each axis.
        ``(before, after)`` or ``((before, after),)`` yields same before
        and after pad for each axis.
        ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
        for all axes.
    mode : str or function, optional
        One of the following string values or a user supplied function.

        'constant' (default)
            Pads with a constant value.
        'edge'
            Pads with the edge values of array.
        'linear_ramp'
            Pads with the linear ramp between end_value and the
            array edge value.
        'maximum'
            Pads with the maximum value of all or part of the
            vector along each axis.
        'mean'
            Pads with the mean value of all or part of the
            vector along each axis.
        'median'
            Pads with the median value of all or part of the
            vector along each axis.
        'minimum'
            Pads with the minimum value of all or part of the
            vector along each axis.
        'reflect'
            Pads with the reflection of the vector mirrored on
            the first and last values of the vector along each
            axis.
        'symmetric'
            Pads with the reflection of the vector mirrored
            along the edge of the array.
        'wrap'
            Pads with the wrap of the vector along the axis.
            The first values are used to pad the end and the
            end values are used to pad the beginning.
        'empty'
            Pads with undefined values.

        <function>
            Padding function, see Notes.
    stat_length : sequence or int, optional
        Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
        values at edge of each axis used to calculate the statistic value.

        ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
        lengths for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after statistic lengths for each axis.

        ``(stat_length,)`` or ``int`` is a shortcut for
        ``before = after = statistic`` length for all axes.

        Default is ``None``, to use the entire axis.
    constant_values : sequence or scalar, optional
        Used in 'constant'. The values to set the padded values for each
        axis.

        ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
        for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after constants for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    end_values : sequence or scalar, optional
        Used in 'linear_ramp'. The values used for the ending value of the
        linear_ramp and that will form the edge of the padded array.

        ``((before_1, after_1), ... (before_N, after_N))`` unique end values
        for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after end values for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    reflect_type : {'even', 'odd'}, optional
        Used in 'reflect', and 'symmetric'. The 'even' style is the
        default with an unaltered reflection around the edge value. For
        the 'odd' style, the extended part of the array is created by
        subtracting the reflected values from two times the edge value.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `array` with shape increased
        according to `pad_width`.

    Notes
    -----
    For an array with rank greater than 1, some of the padding of later
    axes is calculated from padding of previous axes. This is easiest to
    think about with a rank 2 array where the corners of the padded array
    are calculated by using padded values from the first axis.

    The padding function, if used, should modify a rank 1 array in-place. It
    has the following signature::

        padding_func(vector, iaxis_pad_width, iaxis, kwargs)

    where

    vector : ndarray
        A rank 1 array already padded with zeros. Padded values are
        vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
    iaxis_pad_width : tuple
        A 2-tuple of ints, iaxis_pad_width[0] represents the number of
        values padded at the beginning of vector where
        iaxis_pad_width[1] represents the number of values padded at
        the end of vector.
    iaxis : int
        The axis currently being calculated.
    kwargs : dict
        Any keyword arguments the function requires.

    Examples
    --------
    >>> import numpy as np
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
    array([4, 4, 1, ..., 6, 6, 6])

    >>> np.pad(a, (2, 3), 'edge')
    array([1, 1, 1, ..., 5, 5, 5])

    >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])

    >>> np.pad(a, (2,), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])

    >>> np.pad(a, (2,), 'mean')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> np.pad(a, (2,), 'median')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> a = [[1, 2], [3, 4]]
    >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])

    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])

    >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])

    >>> np.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])

    >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])

    >>> np.pad(a, (2, 3), 'wrap')
    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])

    >>> def pad_with(vector, pad_width, iaxis, kwargs):
    ...     pad_value = kwargs.get('padder', 10)
    ...     vector[:pad_width[0]] = pad_value
    ...     vector[-pad_width[1]:] = pad_value
    >>> a = np.arange(6)
    >>> a = a.reshape((2, 3))
    >>> np.pad(a, 2, pad_with)
    array([[10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10,  0,  1,  2, 10, 10],
           [10, 10,  3,  4,  5, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10]])
    >>> np.pad(a, 2, pad_with, padder=100)
    array([[100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100,   0,   1,   2, 100, 100],
           [100, 100,   3,   4,   5, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100]])
    """
    array = np.asarray(array)
    pad_width = np.asarray(pad_width)

    if not pad_width.dtype.kind == 'i':
        raise TypeError('`pad_width` must be of integral type.')

    # Broadcast to shape (array.ndim, 2)
    pad_width = _as_pairs(pad_width, array.ndim, as_index=True)

    if callable(mode):
        # Old behavior: Use user-supplied function with np.apply_along_axis
        function = mode
        # Create a new zero padded array
        padded, _ = _pad_simple(array, pad_width, fill_value=0)
        # And apply along each axis

        for axis in range(padded.ndim):
            # Iterate using ndindex as in apply_along_axis, but assuming that
            # function operates inplace on the padded array.

            # view with the iteration axis at the end
            view = np.moveaxis(padded, axis, -1)

            # compute indices for the iteration axes, and append a trailing
            # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
            inds = ndindex(view.shape[:-1])
            inds = (ind + (Ellipsis,) for ind in inds)
            for ind in inds:
                function(view[ind], pad_width[axis], axis, kwargs)

        return padded

    # Make sure that no unsupported keywords were passed for the current mode
    allowed_kwargs = {
        'empty': [], 'edge': [], 'wrap': [],
        'constant': ['constant_values'],
        'linear_ramp': ['end_values'],
        'maximum': ['stat_length'],
        'mean': ['stat_length'],
        'median': ['stat_length'],
        'minimum': ['stat_length'],
        'reflect': ['reflect_type'],
        'symmetric': ['reflect_type'],
    }
    try:
        unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
    except KeyError:
        raise ValueError(f"mode '{mode}' is not supported") from None
    if unsupported_kwargs:
        raise ValueError("unsupported keyword arguments for mode "
                         f"'{mode}': {unsupported_kwargs}")

    stat_functions = {"maximum": np.amax, "minimum": np.amin,
                      "mean": np.mean, "median": np.median}

    # Create array with final shape and original values
    # (padded area is undefined)
    padded, original_area_slice = _pad_simple(array, pad_width)
    # And prepare iteration over all dimensions
    # (zipping may be more readable than using enumerate)
    axes = range(padded.ndim)

    if mode == "constant":
        values = kwargs.get("constant_values", 0)
        values = _as_pairs(values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, values):
            roi = _view_roi(padded, original_area_slice, axis)
            _set_pad_area(roi, axis, width_pair, value_pair)

    elif mode == "empty":
        pass  # Do nothing as _pad_simple already returned the correct result

    elif array.size == 0:
        # Only modes "constant" and "empty" can extend empty axes, all other
        # modes depend on `array` not being empty
        # -> ensure every empty axis is only "padded with 0"
        for axis, width_pair in zip(axes, pad_width):
            if array.shape[axis] == 0 and any(width_pair):
                raise ValueError(
                    f"can't extend empty axis {axis} using modes other than "
                    "'constant' or 'empty'"
                )
        # passed, don't need to do anything more as _pad_simple already
        # returned the correct result

    elif mode == "edge":
        for axis, width_pair in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            edge_pair = _get_edges(roi, axis, width_pair)
            _set_pad_area(roi, axis, width_pair, edge_pair)

    elif mode == "linear_ramp":
        end_values = kwargs.get("end_values", 0)
        end_values = _as_pairs(end_values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
            roi = _view_roi(padded, original_area_slice, axis)
            ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
            _set_pad_area(roi, axis, width_pair, ramp_pair)

    elif mode in stat_functions:
        func = stat_functions[mode]
        length = kwargs.get("stat_length")
        length = _as_pairs(length, padded.ndim, as_index=True)
        for axis, width_pair, length_pair in zip(axes, pad_width, length):
            roi = _view_roi(padded, original_area_slice, axis)
            stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
            _set_pad_area(roi, axis, width_pair, stat_pair)

    elif mode in {"reflect", "symmetric"}:
        method = kwargs.get("reflect_type", "even")
        include_edge = mode == "symmetric"
        for axis, (left_index, right_index) in zip(axes, pad_width):
            if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
                # Extending singleton dimension for 'reflect' is legacy
                # behavior; it really should raise an error.
                edge_pair = _get_edges(padded, axis, (left_index, right_index))
                _set_pad_area(
                    padded, axis, (left_index, right_index), edge_pair)
                continue

            roi = _view_roi(padded, original_area_slice, axis)
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with reflected
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_reflect_both(
                    roi, axis, (left_index, right_index),
                    method, array.shape[axis], include_edge
                )

    elif mode == "wrap":
        for axis, (left_index, right_index) in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            original_period = padded.shape[axis] - right_index - left_index
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with wrapped
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_wrap_both(
                    roi, axis, (left_index, right_index), original_period)

    return padded
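
# Illustrative sketch (editorial addition): the allowed_kwargs table above
# rejects keywords that don't belong to the requested mode.
#
#   >>> np.pad([1, 2, 3], 1, mode="constant", stat_length=2)
#   Traceback (most recent call last):
#       ...
#   ValueError: unsupported keyword arguments for mode 'constant': {'stat_length'}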

lib/python3.11/site-packages/numpy/lib/_arraypad_impl.pyi (new file)
@@ -0,0 +1,89 @@
from typing import (
    Any,
    Protocol,
    TypeAlias,
    TypeVar,
    overload,
    type_check_only,
)
from typing import (
    Literal as L,
)

from numpy import generic
from numpy._typing import (
    ArrayLike,
    NDArray,
    _ArrayLike,
    _ArrayLikeInt,
)

__all__ = ["pad"]

_ScalarT = TypeVar("_ScalarT", bound=generic)

@type_check_only
class _ModeFunc(Protocol):
    def __call__(
        self,
        vector: NDArray[Any],
        iaxis_pad_width: tuple[int, int],
        iaxis: int,
        kwargs: dict[str, Any],
        /,
    ) -> None: ...

_ModeKind: TypeAlias = L[
    "constant",
    "edge",
    "linear_ramp",
    "maximum",
    "mean",
    "median",
    "minimum",
    "reflect",
    "symmetric",
    "wrap",
    "empty",
]

# TODO: In practice each keyword argument is exclusive to one or more
# specific modes. Consider adding more overloads to express this in the future.

# Expand `**kwargs` into explicit keyword-only arguments
@overload
def pad(
    array: _ArrayLike[_ScalarT],
    pad_width: _ArrayLikeInt,
    mode: _ModeKind = ...,
    *,
    stat_length: _ArrayLikeInt | None = ...,
    constant_values: ArrayLike = ...,
    end_values: ArrayLike = ...,
    reflect_type: L["odd", "even"] = ...,
) -> NDArray[_ScalarT]: ...
@overload
def pad(
    array: ArrayLike,
    pad_width: _ArrayLikeInt,
    mode: _ModeKind = ...,
    *,
    stat_length: _ArrayLikeInt | None = ...,
    constant_values: ArrayLike = ...,
    end_values: ArrayLike = ...,
    reflect_type: L["odd", "even"] = ...,
) -> NDArray[Any]: ...
@overload
def pad(
    array: _ArrayLike[_ScalarT],
    pad_width: _ArrayLikeInt,
    mode: _ModeFunc,
    **kwargs: Any,
) -> NDArray[_ScalarT]: ...
@overload
def pad(
    array: ArrayLike,
    pad_width: _ArrayLikeInt,
    mode: _ModeFunc,
    **kwargs: Any,
) -> NDArray[Any]: ...

lib/python3.11/site-packages/numpy/lib/_arraysetops_impl.py (new file, 1260 lines)
(diff suppressed because it is too large)

lib/python3.11/site-packages/numpy/lib/_arraysetops_impl.pyi (new file)
@@ -0,0 +1,444 @@
from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload
from typing import Literal as L

from typing_extensions import TypeVar, deprecated

import numpy as np
from numpy._typing import (
    ArrayLike,
    NDArray,
    _ArrayLike,
    _ArrayLikeBool_co,
    _ArrayLikeNumber_co,
)

__all__ = [
    "ediff1d",
    "in1d",
    "intersect1d",
    "isin",
    "setdiff1d",
    "setxor1d",
    "union1d",
    "unique",
    "unique_all",
    "unique_counts",
    "unique_inverse",
    "unique_values",
]

_ScalarT = TypeVar("_ScalarT", bound=np.generic)
_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_)

# Explicitly set all allowed values to prevent accidental castings to
# abstract dtypes (their common super-type).
# Only relevant if two or more arguments are parametrized (e.g. `setdiff1d`),
# which could result in, for example, `int64` and `float64` producing a
# `number[_64Bit]` array.
_EitherSCT = TypeVar(
    "_EitherSCT",
    np.bool,
    np.int8, np.int16, np.int32, np.int64, np.intp,
    np.uint8, np.uint16, np.uint32, np.uint64, np.uintp,
    np.float16, np.float32, np.float64, np.longdouble,
    np.complex64, np.complex128, np.clongdouble,
    np.timedelta64, np.datetime64,
    np.bytes_, np.str_, np.void, np.object_,
    np.integer, np.floating, np.complexfloating, np.character,
)  # fmt: skip

_AnyArray: TypeAlias = NDArray[Any]
_IntArray: TypeAlias = NDArray[np.intp]

###

class UniqueAllResult(NamedTuple, Generic[_ScalarT]):
    values: NDArray[_ScalarT]
    indices: _IntArray
    inverse_indices: _IntArray
    counts: _IntArray

class UniqueCountsResult(NamedTuple, Generic[_ScalarT]):
    values: NDArray[_ScalarT]
    counts: _IntArray

class UniqueInverseResult(NamedTuple, Generic[_ScalarT]):
    values: NDArray[_ScalarT]
    inverse_indices: _IntArray
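
# Illustrative sketch (editorial addition): the NamedTuple result types above
# are what the array-API-style helpers return at runtime.
#
#   >>> res = np.unique_all([1, 2, 1])
#   >>> res.values, res.counts
#   (array([1, 2]), array([2, 1]))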
|
||||
#
|
||||
@overload
|
||||
def ediff1d(
|
||||
ary: _ArrayLikeBool_co,
|
||||
to_end: ArrayLike | None = None,
|
||||
to_begin: ArrayLike | None = None,
|
||||
) -> NDArray[np.int8]: ...
|
||||
@overload
|
||||
def ediff1d(
|
||||
ary: _ArrayLike[_NumericT],
|
||||
to_end: ArrayLike | None = None,
|
||||
to_begin: ArrayLike | None = None,
|
||||
) -> NDArray[_NumericT]: ...
|
||||
@overload
|
||||
def ediff1d(
|
||||
ary: _ArrayLike[np.datetime64[Any]],
|
||||
to_end: ArrayLike | None = None,
|
||||
to_begin: ArrayLike | None = None,
|
||||
) -> NDArray[np.timedelta64]: ...
|
||||
@overload
|
||||
def ediff1d(
|
||||
ary: _ArrayLikeNumber_co,
|
||||
to_end: ArrayLike | None = None,
|
||||
to_begin: ArrayLike | None = None,
|
||||
) -> _AnyArray: ...
|
||||
|
||||
#
|
||||
@overload # known scalar-type, FFF
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[False] = False,
|
||||
return_inverse: L[False] = False,
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload # unknown scalar-type, FFF
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[False] = False,
|
||||
return_inverse: L[False] = False,
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> _AnyArray: ...
|
||||
@overload # known scalar-type, TFF
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[True],
|
||||
return_inverse: L[False] = False,
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray]: ...
|
||||
@overload # unknown scalar-type, TFF
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[True],
|
||||
return_inverse: L[False] = False,
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray]: ...
|
||||
@overload # known scalar-type, FTF (positional)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[False],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray]: ...
|
||||
@overload # known scalar-type, FTF (keyword)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[False] = False,
|
||||
*,
|
||||
return_inverse: L[True],
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray]: ...
|
||||
@overload # unknown scalar-type, FTF (positional)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[False],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, FTF (keyword)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[False] = False,
|
||||
*,
|
||||
return_inverse: L[True],
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray]: ...
|
||||
@overload # known scalar-type, FFT (positional)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[False],
|
||||
return_inverse: L[False],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray]: ...
|
||||
@overload # known scalar-type, FFT (keyword)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[False] = False,
|
||||
return_inverse: L[False] = False,
|
||||
*,
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray]: ...
|
||||
@overload # unknown scalar-type, FFT (positional)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[False],
|
||||
return_inverse: L[False],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, FFT (keyword)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[False] = False,
|
||||
return_inverse: L[False] = False,
|
||||
*,
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray]: ...
|
||||
@overload # known scalar-type, TTF
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[True],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, TTF
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[True],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[False] = False,
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
|
||||
@overload # known scalar-type, TFT (positional)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[True],
|
||||
return_inverse: L[False],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
|
||||
@overload # known scalar-type, TFT (keyword)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[True],
|
||||
return_inverse: L[False] = False,
|
||||
*,
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, TFT (positional)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[True],
|
||||
return_inverse: L[False],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, TFT (keyword)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[True],
|
||||
return_inverse: L[False] = False,
|
||||
*,
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
|
||||
@overload # known scalar-type, FTT (positional)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[False],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
|
||||
@overload # known scalar-type, FTT (keyword)
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[False] = False,
|
||||
*,
|
||||
return_inverse: L[True],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, FTT (positional)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[False],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, FTT (keyword)
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[False] = False,
|
||||
*,
|
||||
return_inverse: L[True],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
|
||||
@overload # known scalar-type, TTT
|
||||
def unique(
|
||||
ar: _ArrayLike[_ScalarT],
|
||||
return_index: L[True],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ...
|
||||
@overload # unknown scalar-type, TTT
|
||||
def unique(
|
||||
ar: ArrayLike,
|
||||
return_index: L[True],
|
||||
return_inverse: L[True],
|
||||
return_counts: L[True],
|
||||
axis: SupportsIndex | None = None,
|
||||
*,
|
||||
equal_nan: bool = True,
|
||||
) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ...
|
||||
@overload
|
||||
def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ...
|
||||
@overload
|
||||
def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ...
|
||||
@overload
|
||||
def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def unique_values(x: ArrayLike) -> _AnyArray: ...
|
||||
|
||||
#
@overload # known scalar-type, return_indices=False (default)
def intersect1d(
    ar1: _ArrayLike[_EitherSCT],
    ar2: _ArrayLike[_EitherSCT],
    assume_unique: bool = False,
    return_indices: L[False] = False,
) -> NDArray[_EitherSCT]: ...
@overload # known scalar-type, return_indices=True (positional)
def intersect1d(
    ar1: _ArrayLike[_EitherSCT],
    ar2: _ArrayLike[_EitherSCT],
    assume_unique: bool,
    return_indices: L[True],
) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ...
@overload # known scalar-type, return_indices=True (keyword)
def intersect1d(
    ar1: _ArrayLike[_EitherSCT],
    ar2: _ArrayLike[_EitherSCT],
    assume_unique: bool = False,
    *,
    return_indices: L[True],
) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ...
@overload # unknown scalar-type, return_indices=False (default)
def intersect1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = False,
    return_indices: L[False] = False,
) -> _AnyArray: ...
@overload # unknown scalar-type, return_indices=True (positional)
def intersect1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool,
    return_indices: L[True],
) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
@overload # unknown scalar-type, return_indices=True (keyword)
def intersect1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = False,
    *,
    return_indices: L[True],
) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
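Editor's doctest for the `return_indices=True` overloads, using the example from the NumPy docs: the extra index arrays locate the first occurrence of each common value in each input.

>>> x = np.array([1, 1, 2, 3, 4])
>>> y = np.array([2, 1, 4, 6])
>>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
>>> xy, x_ind, y_ind
(array([1, 2, 4]), array([0, 2, 4]), array([1, 0, 2]))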

#
@overload
def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ...
@overload
def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ...

#
@overload
def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ...
@overload
def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ...

#
@overload
def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ...
@overload
def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ...

#
def isin(
    element: ArrayLike,
    test_elements: ArrayLike,
    assume_unique: bool = False,
    invert: bool = False,
    *,
    kind: L["sort", "table"] | None = None,
) -> NDArray[np.bool]: ...

#
@deprecated("Use 'isin' instead")
def in1d(
    element: ArrayLike,
    test_elements: ArrayLike,
    assume_unique: bool = False,
    invert: bool = False,
    *,
    kind: L["sort", "table"] | None = None,
) -> NDArray[np.bool]: ...
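Editor's doctest for `isin`, which the deprecated `in1d` alias above forwards to; the result is always a boolean array shaped like `element`:

>>> np.isin([0, 2, 5], [1, 2, 3])
array([False,  True, False])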
224
lib/python3.11/site-packages/numpy/lib/_arrayterator_impl.py
Normal file
@@ -0,0 +1,224 @@
"""
|
||||
A buffered iterator for big arrays.
|
||||
|
||||
This module solves the problem of iterating over a big file-based array
|
||||
without having to read it into memory. The `Arrayterator` class wraps
|
||||
an array object, and when iterated it will return sub-arrays with at most
|
||||
a user-specified number of elements.
|
||||
|
||||
"""
|
||||
from functools import reduce
|
||||
from operator import mul
|
||||
|
||||
__all__ = ['Arrayterator']
|
||||
|
||||
|
||||
class Arrayterator:
|
||||
"""
|
||||
Buffered iterator for big arrays.
|
||||
|
||||
`Arrayterator` creates a buffered iterator for reading big arrays in small
|
||||
contiguous blocks. The class is useful for objects stored in the
|
||||
file system. It allows iteration over the object *without* reading
|
||||
everything in memory; instead, small blocks are read and iterated over.
|
||||
|
||||
`Arrayterator` can be used with any object that supports multidimensional
|
||||
slices. This includes NumPy arrays, but also variables from
|
||||
Scientific.IO.NetCDF or pynetcdf for example.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
var : array_like
|
||||
The object to iterate over.
|
||||
buf_size : int, optional
|
||||
The buffer size. If `buf_size` is supplied, the maximum amount of
|
||||
data that will be read into memory is `buf_size` elements.
|
||||
Default is None, which will read as many element as possible
|
||||
into memory.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
var
|
||||
buf_size
|
||||
start
|
||||
stop
|
||||
step
|
||||
shape
|
||||
flat
|
||||
|
||||
See Also
|
||||
--------
|
||||
numpy.ndenumerate : Multidimensional array iterator.
|
||||
numpy.flatiter : Flat array iterator.
|
||||
numpy.memmap : Create a memory-map to an array stored
|
||||
in a binary file on disk.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The algorithm works by first finding a "running dimension", along which
|
||||
the blocks will be extracted. Given an array of dimensions
|
||||
``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
|
||||
first dimension will be used. If, on the other hand,
|
||||
``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
|
||||
Blocks are extracted along this dimension, and when the last block is
|
||||
returned the process continues from the next dimension, until all
|
||||
elements have been read.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
|
||||
>>> a_itor = np.lib.Arrayterator(a, 2)
|
||||
>>> a_itor.shape
|
||||
(3, 4, 5, 6)
|
||||
|
||||
Now we can iterate over ``a_itor``, and it will return arrays of size
|
||||
two. Since `buf_size` was smaller than any dimension, the first
|
||||
dimension will be iterated over first:
|
||||
|
||||
>>> for subarr in a_itor:
|
||||
... if not subarr.all():
|
||||
... print(subarr, subarr.shape) # doctest: +SKIP
|
||||
>>> # [[[[0 1]]]] (1, 1, 1, 2)
|
||||
|
||||
"""
|
||||
|
||||
__module__ = "numpy.lib"
|
||||
|
||||
def __init__(self, var, buf_size=None):
|
||||
self.var = var
|
||||
self.buf_size = buf_size
|
||||
|
||||
self.start = [0 for dim in var.shape]
|
||||
self.stop = list(var.shape)
|
||||
self.step = [1 for dim in var.shape]
|
||||
|
||||
    def __getattr__(self, attr):
        return getattr(self.var, attr)

    def __getitem__(self, index):
        """
        Return a new arrayterator.

        """
        # Fix index, handling ellipsis and incomplete slices.
        if not isinstance(index, tuple):
            index = (index,)
        fixed = []
        length, dims = len(index), self.ndim
        for slice_ in index:
            if slice_ is Ellipsis:
                fixed.extend([slice(None)] * (dims - length + 1))
                length = len(fixed)
            elif isinstance(slice_, int):
                fixed.append(slice(slice_, slice_ + 1, 1))
            else:
                fixed.append(slice_)
        index = tuple(fixed)
        if len(index) < dims:
            index += (slice(None),) * (dims - len(index))

        # Return a new arrayterator object.
        out = self.__class__(self.var, self.buf_size)
        for i, (start, stop, step, slice_) in enumerate(
                zip(self.start, self.stop, self.step, index)):
            out.start[i] = start + (slice_.start or 0)
            out.step[i] = step * (slice_.step or 1)
            out.stop[i] = start + (slice_.stop or stop - start)
            out.stop[i] = min(stop, out.stop[i])
        return out
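Editor's illustration of ``__getitem__`` above: indexing never touches the data, it only narrows the start/stop/step bookkeeping, so an integer index keeps its axis with length one.

>>> import numpy as np
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
>>> np.lib.Arrayterator(a, 2)[0].shape
(1, 4, 5, 6)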

    def __array__(self, dtype=None, copy=None):
        """
        Return corresponding data.

        """
        slice_ = tuple(slice(*t) for t in zip(
                self.start, self.stop, self.step))
        return self.var[slice_]

    @property
    def flat(self):
        """
        A 1-D flat iterator for Arrayterator objects.

        This iterator returns elements of the array to be iterated over in
        `~lib.Arrayterator` one by one.
        It is similar to `flatiter`.

        See Also
        --------
        lib.Arrayterator
        flatiter

        Examples
        --------
        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
        >>> a_itor = np.lib.Arrayterator(a, 2)

        >>> for subarr in a_itor.flat:
        ...     if not subarr:
        ...         print(subarr, type(subarr))
        ...
        0 <class 'numpy.int64'>

        """
        for block in self:
            yield from block.flat

    @property
    def shape(self):
        """
        The shape of the array to be iterated over.

        For an example, see `Arrayterator`.

        """
        return tuple(((stop - start - 1) // step + 1) for start, stop, step in
                     zip(self.start, self.stop, self.step))

    def __iter__(self):
        # Skip arrays with degenerate dimensions
        if [dim for dim in self.shape if dim <= 0]:
            return

        start = self.start[:]
        stop = self.stop[:]
        step = self.step[:]
        ndims = self.var.ndim

        while True:
            count = self.buf_size or reduce(mul, self.shape)

            # iterate over each dimension, looking for the
            # running dimension (ie, the dimension along which
            # the blocks will be built from)
            rundim = 0
            for i in range(ndims - 1, -1, -1):
                # if count is zero we ran out of elements to read
                # along higher dimensions, so we read only a single position
                if count == 0:
                    stop[i] = start[i] + 1
                elif count <= self.shape[i]:
                    # limit along this dimension
                    stop[i] = start[i] + count * step[i]
                    rundim = i
                else:
                    # read everything along this dimension
                    stop[i] = self.stop[i]
                stop[i] = min(self.stop[i], stop[i])
                count = count // self.shape[i]

            # yield a block
            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
            yield self.var[slice_]

            # Update start position, taking care of overflow to
            # other dimensions
            start[rundim] = stop[rundim]  # start where we stopped
            for i in range(ndims - 1, 0, -1):
                if start[i] >= self.stop[i]:
                    start[i] = self.start[i]
                    start[i - 1] += self.step[i - 1]
            if start[0] >= self.stop[0]:
                return
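Editor's sketch of the running-dimension arithmetic in ``__iter__`` above, for the same hypothetical ``(3, 4, 5, 6)`` array: with ``buf_size=24`` the scan from the last axis inward reads all of axis 3 (6 elements, ``count`` drops to 4), makes axis 2 the running dimension (4 <= 5), and pins the remaining axes to single positions, so each block holds exactly 24 elements.

>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
>>> next(iter(np.lib.Arrayterator(a, buf_size=24))).shape
(1, 1, 4, 6)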
@@ -0,0 +1,46 @@
# pyright: reportIncompatibleMethodOverride=false

from collections.abc import Generator
from types import EllipsisType
from typing import Any, Final, TypeAlias, overload

from typing_extensions import TypeVar

import numpy as np
from numpy._typing import _AnyShape, _Shape

__all__ = ["Arrayterator"]

_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)
_ScalarT = TypeVar("_ScalarT", bound=np.generic)

_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...]

# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
# but its `__getattr__` method does wrap around the former and thus has
# access to all its methods

class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]):
    var: np.ndarray[_ShapeT_co, _DTypeT_co]  # type: ignore[assignment]
    buf_size: Final[int | None]
    start: Final[list[int]]
    stop: Final[list[int]]
    step: Final[list[int]]

    @property  # type: ignore[misc]
    def shape(self) -> _ShapeT_co: ...
    @property
    def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ...  # type: ignore[override]

    #
    def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ...
    def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ...  # type: ignore[override]
    def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ...

    #
    @overload  # type: ignore[override]
    def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ...
    @overload
    def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ...
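Editor's note on the ``__array__`` overloads above: they are what let ``np.asarray`` materialize exactly the selected range, preserving the dtype parameter.

>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
>>> np.asarray(np.lib.Arrayterator(a)[0]).shape
(1, 4, 5, 6)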
700
lib/python3.11/site-packages/numpy/lib/_datasource.py
Normal file
@@ -0,0 +1,700 @@
"""A file interface for handling local and remote data files.
|
||||
|
||||
The goal of datasource is to abstract some of the file system operations
|
||||
when dealing with data files so the researcher doesn't have to know all the
|
||||
low-level details. Through datasource, a researcher can obtain and use a
|
||||
file with one function call, regardless of location of the file.
|
||||
|
||||
DataSource is meant to augment standard python libraries, not replace them.
|
||||
It should work seamlessly with standard file IO operations and the os
|
||||
module.
|
||||
|
||||
DataSource files can originate locally or remotely:
|
||||
|
||||
- local files : '/home/guido/src/local/data.txt'
|
||||
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
|
||||
|
||||
DataSource files can also be compressed or uncompressed. Currently only
|
||||
gzip, bz2 and xz are supported.
|
||||
|
||||
Example::
|
||||
|
||||
>>> # Create a DataSource, use os.curdir (default) for local storage.
|
||||
>>> from numpy import DataSource
|
||||
>>> ds = DataSource()
|
||||
>>>
|
||||
>>> # Open a remote file.
|
||||
>>> # DataSource downloads the file, stores it locally in:
|
||||
>>> # './www.google.com/index.html'
|
||||
>>> # opens the file and returns a file object.
|
||||
>>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
|
||||
>>>
|
||||
>>> # Use the file as you normally would
|
||||
>>> fp.read() # doctest: +SKIP
|
||||
>>> fp.close() # doctest: +SKIP
|
||||
|
||||
"""
|
||||
import os
|
||||
|
||||
from numpy._utils import set_module
|
||||
|
||||
_open = open
|
||||
|
||||
|
||||
def _check_mode(mode, encoding, newline):
|
||||
"""Check mode and that encoding and newline are compatible.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mode : str
|
||||
File open mode.
|
||||
encoding : str
|
||||
File encoding.
|
||||
newline : str
|
||||
Newline for text files.
|
||||
|
||||
"""
|
||||
if "t" in mode:
|
||||
if "b" in mode:
|
||||
raise ValueError(f"Invalid mode: {mode!r}")
|
||||
else:
|
||||
if encoding is not None:
|
||||
raise ValueError("Argument 'encoding' not supported in binary mode")
|
||||
if newline is not None:
|
||||
raise ValueError("Argument 'newline' not supported in binary mode")
|
||||
|
||||
|
||||
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of lzma, bz2 and gzip until needed

# TODO: .zip support, .tar support?
class _FileOpeners:
    """
    Container for different methods to open (un-)compressed files.

    `_FileOpeners` contains a dictionary that holds one method for each
    supported file format. Attribute lookup is implemented in such a way
    that an instance of `_FileOpeners` itself can be indexed with the keys
    of that dictionary. Currently uncompressed files as well as files
    compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.

    Notes
    -----
    `_file_openers`, an instance of `_FileOpeners`, is made available for
    use in the `_datasource` module.

    Examples
    --------
    >>> import gzip
    >>> np.lib._datasource._file_openers.keys()
    [None, '.bz2', '.gz', '.xz', '.lzma']
    >>> np.lib._datasource._file_openers['.gz'] is gzip.open
    True

    """

    def __init__(self):
        self._loaded = False
        self._file_openers = {None: open}

    def _load(self):
        if self._loaded:
            return

        try:
            import bz2
            self._file_openers[".bz2"] = bz2.open
        except ImportError:
            pass

        try:
            import gzip
            self._file_openers[".gz"] = gzip.open
        except ImportError:
            pass

        try:
            import lzma
            self._file_openers[".xz"] = lzma.open
            self._file_openers[".lzma"] = lzma.open
        except (ImportError, AttributeError):
            # There are incompatible backports of lzma that do not have the
            # lzma.open attribute, so catch that as well as ImportError.
            pass

        self._loaded = True

    def keys(self):
        """
        Return the keys of currently supported file openers.

        Parameters
        ----------
        None

        Returns
        -------
        keys : list
            The keys are None for uncompressed files and the file extension
            strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
            methods.

        """
        self._load()
        return list(self._file_openers.keys())

    def __getitem__(self, key):
        self._load()
        return self._file_openers[key]


_file_openers = _FileOpeners()

def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
    """
    Open `path` with `mode` and return the file object.

    If ``path`` is a URL, it will be downloaded, stored in the
    `DataSource` `destpath` directory and opened from there.

    Parameters
    ----------
    path : str or pathlib.Path
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
        append. Available modes depend on the type of object specified by
        path. Default is 'r'.
    destpath : str, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.
    encoding : {None, str}, optional
        Open text file with given encoding. The default encoding will be
        what `open` uses.
    newline : {None, str}, optional
        Newline to use when reading text file.

    Returns
    -------
    out : file object
        The opened file.

    Notes
    -----
    This is a convenience function that instantiates a `DataSource` and
    returns the file object from ``DataSource.open(path)``.

    """

    ds = DataSource(destpath)
    return ds.open(path, mode, encoding=encoding, newline=newline)


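Editor's aside: because `_load` defers the compression imports, the registry is only populated on first use. Assuming the standard-library bz2, gzip and lzma modules are importable, the first lookup sees all of them:

>>> from numpy.lib._datasource import _file_openers
>>> sorted(k for k in _file_openers.keys() if k is not None)
['.bz2', '.gz', '.lzma', '.xz']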
@set_module('numpy.lib.npyio')
class DataSource:
    """
    DataSource(destpath='.')

    A generic data source file (file, http, ftp, ...).

    DataSources can be local files or remote files/URLs. The files may
    also be compressed or uncompressed. DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.

    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::

        >>> repos = np.lib.npyio.DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True

    Temporary directories are deleted when the DataSource is deleted.

    Examples
    --------
    ::

        >>> ds = np.lib.npyio.DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/'
        >>> gfile = ds.open('http://www.google.com/')
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/index.html'

        >>> ds = np.lib.npyio.DataSource(None)  # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/.../home/guido/foobar.txt'

    """

    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove temp directories
        if hasattr(self, '_istmpdest') and self._istmpdest:
            import shutil

            shutil.rmtree(self._destpath)

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file extension.

        """
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""

        # Currently only used to test the bz2 files.
        _writemodes = ("w", "+")
        return any(c in _writemodes for c in mode)

    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.

        Returns
        -------
        base, zip_ext : {tuple}

        """

        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a tuple containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename + zipext)
        return names

    def _isurl(self, path):
        """Test if path is a net location. Tests the scheme and netloc."""

        # We do this here to reduce the 'import numpy' initial import time.
        from urllib.parse import urlparse

        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it and
        #       test that also?  Similar to the way we append .gz and test
        #       for compressed versions of files.

        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache.

        """
        # We import these here because importing them is slow and
        # a significant fraction of numpy's total import time.
        import shutil
        from urllib.request import urlopen

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            with urlopen(path) as openedurl:
                with _open(upath, 'wb') as f:
                    shutil.copyfileobj(openedurl, f)
        else:
            shutil.copyfile(path, upath)
        return upath

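Editor's sketch of how the search helpers above combine: a bare name is probed against every registered compressed variant (the exact list assumes the stdlib compressors all imported, and `DataSource` with a hypothetical destination directory):

>>> ds = np.lib.npyio.DataSource('/tmp/cache')
>>> ds._possible_names('data.txt')
['data.txt', 'data.txt.bz2', 'data.txt.gz', 'data.txt.xz', 'data.txt.lzma']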
    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.

        If path is a URL, _findfile will cache a local copy and return the
        path to the cached file. If path is a local file, _findfile will
        return a path to that local file.

        The search will include possible compressed versions of the file
        and return the first occurrence found.

        """

        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.

        If `path` is a URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        Notes
        -----
        The functionality is based on `os.path.abspath`.

        """
        # We do this here to reduce the 'import numpy' initial import time.
        from urllib.parse import urlparse

        # TODO: This should be more robust. Handles case where path includes
        # the destpath, but not other sub-paths. Failing case:
        #     path = /home/guido/datafile.txt
        #     destpath = /home/alex/
        #     upath = self.abspath(path)
        #     upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which
        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).removeprefix('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    def exists(self, path):
        """
        Test if path exists.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is a URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL. `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """

        # First test for local path
        if os.path.exists(path):
            return True

        # We import this here because importing urllib is slow and
        # a significant fraction of numpy's total import time.
        from urllib.error import URLError
        from urllib.request import urlopen

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                netfile.close()
                del netfile
                return True
            except URLError:
                return False
        return False

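Editor's illustration of the sanitising join above (hypothetical destination directory, POSIX paths): the URL's host and path components are stripped of leading separators and ``..`` before being joined under ``_destpath``.

>>> ds = np.lib.npyio.DataSource('/tmp/cache')
>>> ds.abspath('http://www.google.com/index.html')
'/tmp/cache/www.google.com/index.html'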
    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object.

        If `path` is a URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.

        Parameters
        ----------
        path : str or pathlib.Path
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.

        Returns
        -------
        out : file object
            File object.

        """

        # TODO: There is no support for opening a file for writing which
        # doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        # used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # bz2 does not support '+' modes; drop it. (The original
                # compared against 'bz2' without the dot and discarded the
                # result of str.replace, which made this branch a no-op.)
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode,
                                      encoding=encoding, newline=newline)
        else:
            raise FileNotFoundError(f"{path} not found.")


class Repository(DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource's share a base
    URL/directory.

    `Repository` extends `DataSource` by prepending a base URL (or
    directory) to all the files it handles. Use `Repository` when you will
    be working with multiple files from one base URL. Initialize
    `Repository` with the base URL, then refer to each file by its filename
    only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return complete path for path.  Prepends baseurl if necessary."""
        splitpath = path.split(self._baseurl, 2)
        if len(splitpath) == 1:
            result = os.path.join(self._baseurl, path)
        else:
            result = path    # path contains baseurl already
        return result

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is a URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL. This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists prepending Repository base URL to path.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL. This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is a URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL. `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object prepending Repository base URL.

        If `path` is a URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str or pathlib.Path
            Local file path or URL to open. This may, but does not have to,
            include the `baseurl` with which the `Repository` was
            initialized.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode,
                               encoding=encoding, newline=newline)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str or pathlib.Path
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        else:
            return os.listdir(self._baseurl)
31
lib/python3.11/site-packages/numpy/lib/_datasource.pyi
Normal file
@@ -0,0 +1,31 @@
from pathlib import Path
from typing import IO, Any, TypeAlias

from _typeshed import OpenBinaryMode, OpenTextMode

_Mode: TypeAlias = OpenBinaryMode | OpenTextMode

###

# exported in numpy.lib.npyio
class DataSource:
    def __init__(self, /, destpath: Path | str | None = ...) -> None: ...
    def __del__(self, /) -> None: ...
    def abspath(self, /, path: str) -> str: ...
    def exists(self, /, path: str) -> bool: ...

    # Whether the file-object is opened in string or bytes mode (by default)
    # depends on the file-extension of `path`
    def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ...

class Repository(DataSource):
    def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ...
    def listdir(self, /) -> list[str]: ...

def open(
    path: str,
    mode: _Mode = "r",
    destpath: str | None = ...,
    encoding: str | None = None,
    newline: str | None = None,
) -> IO[Any]: ...
1036
lib/python3.11/site-packages/numpy/lib/_format_impl.py
Normal file
File diff suppressed because it is too large
26
lib/python3.11/site-packages/numpy/lib/_format_impl.pyi
Normal file
@@ -0,0 +1,26 @@
from typing import Final, Literal

from numpy.lib._utils_impl import drop_metadata  # noqa: F401

__all__: list[str] = []

EXPECTED_KEYS: Final[set[str]]
MAGIC_PREFIX: Final[bytes]
MAGIC_LEN: Literal[8]
ARRAY_ALIGN: Literal[64]
BUFFER_SIZE: Literal[262144]  # 2**18
GROWTH_AXIS_MAX_DIGITS: Literal[21]

def magic(major, minor): ...
def read_magic(fp): ...
def dtype_to_descr(dtype): ...
def descr_to_dtype(descr): ...
def header_data_from_array_1_0(array): ...
def write_array_header_1_0(fp, d): ...
def write_array_header_2_0(fp, d): ...
def read_array_header_1_0(fp): ...
def read_array_header_2_0(fp): ...
def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
def isfileobj(f): ...
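Editor's aside on the constants above: ``MAGIC_LEN`` is 8 because the ``.npy`` magic is the 6-byte prefix plus a 2-byte version. A quick check via the public ``numpy.lib.format`` namespace:

>>> from numpy.lib import format as npy_format
>>> npy_format.MAGIC_PREFIX
b'\x93NUMPY'
>>> npy_format.magic(1, 0)
b'\x93NUMPY\x01\x00'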
5844
lib/python3.11/site-packages/numpy/lib/_function_base_impl.py
Normal file
File diff suppressed because it is too large
985
lib/python3.11/site-packages/numpy/lib/_function_base_impl.pyi
Normal file
@@ -0,0 +1,985 @@
# ruff: noqa: ANN401
from collections.abc import Callable, Iterable, Sequence
from typing import (
    Any,
    Concatenate,
    ParamSpec,
    Protocol,
    SupportsIndex,
    SupportsInt,
    TypeAlias,
    TypeVar,
    overload,
    type_check_only,
)
from typing import Literal as L

from _typeshed import Incomplete
from typing_extensions import TypeIs, deprecated

import numpy as np
from numpy import (
    _OrderKACF,
    bool_,
    complex128,
    complexfloating,
    datetime64,
    float64,
    floating,
    generic,
    integer,
    intp,
    object_,
    timedelta64,
    vectorize,
)
from numpy._core.multiarray import bincount
from numpy._globals import _NoValueType
from numpy._typing import (
    ArrayLike,
    DTypeLike,
    NDArray,
    _ArrayLike,
    _ArrayLikeBool_co,
    _ArrayLikeComplex_co,
    _ArrayLikeDT64_co,
    _ArrayLikeFloat_co,
    _ArrayLikeInt_co,
    _ArrayLikeNumber_co,
    _ArrayLikeObject_co,
    _ArrayLikeTD64_co,
    _ComplexLike_co,
    _DTypeLike,
    _FloatLike_co,
    _NestedSequence,
    _NumberLike_co,
    _ScalarLike_co,
    _ShapeLike,
)

__all__ = [
    "select",
    "piecewise",
    "trim_zeros",
    "copy",
    "iterable",
    "percentile",
    "diff",
    "gradient",
    "angle",
    "unwrap",
    "sort_complex",
    "flip",
    "rot90",
    "extract",
    "place",
    "vectorize",
    "asarray_chkfinite",
    "average",
    "bincount",
    "digitize",
    "cov",
    "corrcoef",
    "median",
    "sinc",
    "hamming",
    "hanning",
    "bartlett",
    "blackman",
    "kaiser",
    "trapezoid",
    "trapz",
    "i0",
    "meshgrid",
    "delete",
    "insert",
    "append",
    "interp",
    "quantile",
]

_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
# The `{}ss` suffix refers to the Python 3.12 syntax: `**P`
_Pss = ParamSpec("_Pss")
_ScalarT = TypeVar("_ScalarT", bound=generic)
_ScalarT1 = TypeVar("_ScalarT1", bound=generic)
_ScalarT2 = TypeVar("_ScalarT2", bound=generic)
_ArrayT = TypeVar("_ArrayT", bound=np.ndarray)

_2Tuple: TypeAlias = tuple[_T, _T]
_MeshgridIdx: TypeAlias = L['ij', 'xy']

@type_check_only
class _TrimZerosSequence(Protocol[_T_co]):
    def __len__(self, /) -> int: ...
    @overload
    def __getitem__(self, key: int, /) -> object: ...
    @overload
    def __getitem__(self, key: slice, /) -> _T_co: ...

###

@overload
def rot90(
    m: _ArrayLike[_ScalarT],
    k: int = ...,
    axes: tuple[int, int] = ...,
) -> NDArray[_ScalarT]: ...
@overload
def rot90(
    m: ArrayLike,
    k: int = ...,
    axes: tuple[int, int] = ...,
) -> NDArray[Any]: ...

@overload
def flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ...
@overload
def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
@overload
def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ...
@overload
def flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ...

def iterable(y: object) -> TypeIs[Iterable[Any]]: ...

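Editor's sketch of what the ``TypeIs`` annotation buys a caller (hypothetical helper, not part of the stub): inside the guarded branch a type checker narrows ``y`` to ``Iterable[Any]``.

def describe(y: object) -> str:
    if np.iterable(y):
        # `y` is narrowed to Iterable[Any] here thanks to TypeIs
        return f"iterable with {sum(1 for _ in y)} items"
    return "not iterable"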
@overload
def average(
    a: _ArrayLikeFloat_co,
    axis: None = None,
    weights: _ArrayLikeFloat_co | None = None,
    returned: L[False] = False,
    *,
    keepdims: L[False] | _NoValueType = ...,
) -> floating: ...
@overload
def average(
    a: _ArrayLikeFloat_co,
    axis: None = None,
    weights: _ArrayLikeFloat_co | None = None,
    *,
    returned: L[True],
    keepdims: L[False] | _NoValueType = ...,
) -> _2Tuple[floating]: ...
@overload
def average(
    a: _ArrayLikeComplex_co,
    axis: None = None,
    weights: _ArrayLikeComplex_co | None = None,
    returned: L[False] = False,
    *,
    keepdims: L[False] | _NoValueType = ...,
) -> complexfloating: ...
@overload
def average(
    a: _ArrayLikeComplex_co,
    axis: None = None,
    weights: _ArrayLikeComplex_co | None = None,
    *,
    returned: L[True],
    keepdims: L[False] | _NoValueType = ...,
) -> _2Tuple[complexfloating]: ...
@overload
def average(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: _ShapeLike | None = None,
    weights: object | None = None,
    *,
    returned: L[True],
    keepdims: bool | bool_ | _NoValueType = ...,
) -> _2Tuple[Incomplete]: ...
@overload
def average(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: _ShapeLike | None = None,
    weights: object | None = None,
    returned: bool | bool_ = False,
    *,
    keepdims: bool | bool_ | _NoValueType = ...,
) -> Incomplete: ...

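Editor's doctest for the ``returned=True`` overloads above: the result becomes a two-tuple of the average and the sum of the weights.

>>> avg, wsum = np.average([1.0, 2.0, 3.0], weights=[3, 1, 1], returned=True)
>>> float(avg), float(wsum)
(1.6, 5.0)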
@overload
def asarray_chkfinite(
    a: _ArrayLike[_ScalarT],
    dtype: None = ...,
    order: _OrderKACF = ...,
) -> NDArray[_ScalarT]: ...
@overload
def asarray_chkfinite(
    a: object,
    dtype: None = ...,
    order: _OrderKACF = ...,
) -> NDArray[Any]: ...
@overload
def asarray_chkfinite(
    a: Any,
    dtype: _DTypeLike[_ScalarT],
    order: _OrderKACF = ...,
) -> NDArray[_ScalarT]: ...
@overload
def asarray_chkfinite(
    a: Any,
    dtype: DTypeLike,
    order: _OrderKACF = ...,
) -> NDArray[Any]: ...

@overload
def piecewise(
    x: _ArrayLike[_ScalarT],
    condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co],
    funclist: Sequence[
        Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]]
        | _ScalarT | object
    ],
    /,
    *args: _Pss.args,
    **kw: _Pss.kwargs,
) -> NDArray[_ScalarT]: ...
@overload
def piecewise(
    x: ArrayLike,
    condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co],
    funclist: Sequence[
        Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]]
        | object
    ],
    /,
    *args: _Pss.args,
    **kw: _Pss.kwargs,
) -> NDArray[Any]: ...

def select(
    condlist: Sequence[ArrayLike],
    choicelist: Sequence[ArrayLike],
    default: ArrayLike = ...,
) -> NDArray[Any]: ...

@overload
def copy(
    a: _ArrayT,
    order: _OrderKACF,
    subok: L[True],
) -> _ArrayT: ...
@overload
def copy(
    a: _ArrayT,
    order: _OrderKACF = ...,
    *,
    subok: L[True],
) -> _ArrayT: ...
@overload
def copy(
    a: _ArrayLike[_ScalarT],
    order: _OrderKACF = ...,
    subok: L[False] = ...,
) -> NDArray[_ScalarT]: ...
@overload
def copy(
    a: ArrayLike,
    order: _OrderKACF = ...,
    subok: L[False] = ...,
) -> NDArray[Any]: ...

def gradient(
    f: ArrayLike,
    *varargs: ArrayLike,
    axis: _ShapeLike | None = ...,
    edge_order: L[1, 2] = ...,
) -> Any: ...

@overload
def diff(
    a: _T,
    n: L[0],
    axis: SupportsIndex = ...,
    prepend: ArrayLike = ...,
    append: ArrayLike = ...,
) -> _T: ...
@overload
def diff(
    a: ArrayLike,
    n: int = ...,
    axis: SupportsIndex = ...,
    prepend: ArrayLike = ...,
    append: ArrayLike = ...,
) -> NDArray[Any]: ...

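Editor's doctest for `piecewise`, using the example from the NumPy docs: each condition selects the inputs fed to the corresponding function (or constant).

>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1.,  1.,  1.,  1.])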
@overload # float scalar
def interp(
    x: _FloatLike_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeFloat_co,
    left: _FloatLike_co | None = None,
    right: _FloatLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> float64: ...
@overload # float array
def interp(
    x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeFloat_co,
    left: _FloatLike_co | None = None,
    right: _FloatLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> NDArray[float64]: ...
@overload # float scalar or array
def interp(
    x: _ArrayLikeFloat_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeFloat_co,
    left: _FloatLike_co | None = None,
    right: _FloatLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> NDArray[float64] | float64: ...
@overload # complex scalar
def interp(
    x: _FloatLike_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLike[complexfloating],
    left: _NumberLike_co | None = None,
    right: _NumberLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> complex128: ...
@overload # complex or float scalar
def interp(
    x: _FloatLike_co,
    xp: _ArrayLikeFloat_co,
    fp: Sequence[complex | complexfloating],
    left: _NumberLike_co | None = None,
    right: _NumberLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> complex128 | float64: ...
@overload # complex array
def interp(
    x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLike[complexfloating],
    left: _NumberLike_co | None = None,
    right: _NumberLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> NDArray[complex128]: ...
@overload # complex or float array
def interp(
    x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],
    xp: _ArrayLikeFloat_co,
    fp: Sequence[complex | complexfloating],
    left: _NumberLike_co | None = None,
    right: _NumberLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> NDArray[complex128 | float64]: ...
@overload # complex scalar or array
def interp(
    x: _ArrayLikeFloat_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLike[complexfloating],
    left: _NumberLike_co | None = None,
    right: _NumberLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> NDArray[complex128] | complex128: ...
@overload # complex or float scalar or array
def interp(
    x: _ArrayLikeFloat_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeNumber_co,
    left: _NumberLike_co | None = None,
    right: _NumberLike_co | None = None,
    period: _FloatLike_co | None = None,
) -> NDArray[complex128 | float64] | complex128 | float64: ...

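Editor's doctest matching the first (float scalar) overload: a scalar `x` with float `fp` yields a `float64`.

>>> float(np.interp(2.5, [1, 2, 3], [3, 2, 0]))
1.0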
@overload
def angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ...
@overload
def angle(z: object_, deg: bool = ...) -> Any: ...
@overload
def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ...
@overload
def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...

@overload
def unwrap(
    p: _ArrayLikeFloat_co,
    discont: float | None = ...,
    axis: int = ...,
    *,
    period: float = ...,
) -> NDArray[floating]: ...
@overload
def unwrap(
    p: _ArrayLikeObject_co,
    discont: float | None = ...,
    axis: int = ...,
    *,
    period: float = ...,
) -> NDArray[object_]: ...

def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ...

def trim_zeros(
    filt: _TrimZerosSequence[_T],
    trim: L["f", "b", "fb", "bf"] = ...,
) -> _T: ...

@overload
def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
@overload
def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...

def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...

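Editor's doctest for `trim_zeros`: the default `trim="fb"` strips zeros from both the front and the back, and the `_TrimZerosSequence` protocol is why the input's own type is preserved.

>>> np.trim_zeros(np.array([0, 0, 1, 2, 0]))
array([1, 2])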
@overload
def cov(
    m: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co | None = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: SupportsIndex | SupportsInt | None = ...,
    fweights: ArrayLike | None = ...,
    aweights: ArrayLike | None = ...,
    *,
    dtype: None = ...,
) -> NDArray[floating]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co | None = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: SupportsIndex | SupportsInt | None = ...,
    fweights: ArrayLike | None = ...,
    aweights: ArrayLike | None = ...,
    *,
    dtype: None = ...,
) -> NDArray[complexfloating]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co | None = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: SupportsIndex | SupportsInt | None = ...,
    fweights: ArrayLike | None = ...,
    aweights: ArrayLike | None = ...,
    *,
    dtype: _DTypeLike[_ScalarT],
) -> NDArray[_ScalarT]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co | None = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: SupportsIndex | SupportsInt | None = ...,
    fweights: ArrayLike | None = ...,
    aweights: ArrayLike | None = ...,
    *,
    dtype: DTypeLike,
) -> NDArray[Any]: ...

# NOTE `bias` and `ddof` are deprecated and ignored
@overload
def corrcoef(
    m: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co | None = None,
    rowvar: bool = True,
    bias: _NoValueType = ...,
    ddof: _NoValueType = ...,
    *,
    dtype: None = None,
) -> NDArray[floating]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co | None = None,
    rowvar: bool = True,
    bias: _NoValueType = ...,
    ddof: _NoValueType = ...,
    *,
    dtype: None = None,
) -> NDArray[complexfloating]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co | None = None,
    rowvar: bool = True,
    bias: _NoValueType = ...,
    ddof: _NoValueType = ...,
    *,
    dtype: _DTypeLike[_ScalarT],
) -> NDArray[_ScalarT]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co | None = None,
    rowvar: bool = True,
    bias: _NoValueType = ...,
    ddof: _NoValueType = ...,
    *,
    dtype: DTypeLike | None = None,
) -> NDArray[Any]: ...

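Editor's note: `bias` and `ddof` are typed as `_NoValueType` in `corrcoef` precisely because passing them is deprecated and they are ignored at runtime; only keyword-free calls type-check cleanly.

>>> rng = np.random.default_rng(0)
>>> np.corrcoef(rng.standard_normal((2, 50))).shape
(2, 2)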
def blackman(M: _FloatLike_co) -> NDArray[floating]: ...

def bartlett(M: _FloatLike_co) -> NDArray[floating]: ...

def hanning(M: _FloatLike_co) -> NDArray[floating]: ...

def hamming(M: _FloatLike_co) -> NDArray[floating]: ...

def i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ...

def kaiser(
    M: _FloatLike_co,
    beta: _FloatLike_co,
) -> NDArray[floating]: ...

@overload
def sinc(x: _FloatLike_co) -> floating: ...
@overload
def sinc(x: _ComplexLike_co) -> complexfloating: ...
@overload
def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ...
@overload
def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def median(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> floating: ...
@overload
def median(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> complexfloating: ...
@overload
def median(
    a: _ArrayLikeTD64_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> timedelta64: ...
@overload
def median(
    a: _ArrayLikeObject_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> Any: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: _ShapeLike | None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> Any: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: _ShapeLike | None,
    out: _ArrayT,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> _ArrayT: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: _ShapeLike | None = ...,
    *,
    out: _ArrayT,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> _ArrayT: ...

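Editor's doctest for the scalar `median` overloads: with `axis=None` (the default) the result is a scalar whose type follows the input's dtype family.

>>> float(np.median([[10, 7, 4], [3, 2, 1]]))
3.5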
_MethodKind = L[
|
||||
"inverted_cdf",
|
||||
"averaged_inverted_cdf",
|
||||
"closest_observation",
|
||||
"interpolated_inverted_cdf",
|
||||
"hazen",
|
||||
"weibull",
|
||||
"linear",
|
||||
"median_unbiased",
|
||||
"normal_unbiased",
|
||||
"lower",
|
||||
"higher",
|
||||
"midpoint",
|
||||
"nearest",
|
||||
]
|
||||
|
||||
@overload
def percentile(
    a: _ArrayLikeFloat_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> floating: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> complexfloating: ...
@overload
def percentile(
    a: _ArrayLikeTD64_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> timedelta64: ...
@overload
def percentile(
    a: _ArrayLikeDT64_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> datetime64: ...
@overload
def percentile(
    a: _ArrayLikeObject_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> Any: ...
@overload
def percentile(
    a: _ArrayLikeFloat_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> NDArray[floating]: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> NDArray[complexfloating]: ...
@overload
def percentile(
    a: _ArrayLikeTD64_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> NDArray[timedelta64]: ...
@overload
def percentile(
    a: _ArrayLikeDT64_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> NDArray[datetime64]: ...
@overload
def percentile(
    a: _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> NDArray[object_]: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: _ShapeLike | None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> Any: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: _ShapeLike | None,
    out: _ArrayT,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
    *,
    weights: _ArrayLikeFloat_co | None = ...,
) -> _ArrayT: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: _ShapeLike | None = ...,
    *,
    out: _ArrayT,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
    weights: _ArrayLikeFloat_co | None = ...,
) -> _ArrayT: ...

# NOTE: Not an alias, but they do have identical signatures
# (that we can reuse)
quantile = percentile

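# A minimal doctest-style sketch (not part of the stub): `quantile`
# mirrors `percentile` but takes q in [0, 1] rather than [0, 100].
#
#   >>> import numpy as np
#   >>> float(np.percentile([1.0, 2.0, 3.0, 4.0], 50))
#   2.5
#   >>> float(np.quantile([1.0, 2.0, 3.0, 4.0], 0.5))
#   2.5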
_ScalarT_fm = TypeVar(
    "_ScalarT_fm",
    bound=floating | complexfloating | timedelta64,
)

class _SupportsRMulFloat(Protocol[_T_co]):
    def __rmul__(self, other: float, /) -> _T_co: ...

@overload
def trapezoid(  # type: ignore[overload-overlap]
    y: Sequence[_FloatLike_co],
    x: Sequence[_FloatLike_co] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> float64: ...
@overload
def trapezoid(
    y: Sequence[_ComplexLike_co],
    x: Sequence[_ComplexLike_co] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> complex128: ...
@overload
def trapezoid(
    y: _ArrayLike[bool_ | integer],
    x: _ArrayLike[bool_ | integer] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> float64 | NDArray[float64]: ...
@overload
def trapezoid(  # type: ignore[overload-overlap]
    y: _ArrayLikeObject_co,
    x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> float | NDArray[object_]: ...
@overload
def trapezoid(
    y: _ArrayLike[_ScalarT_fm],
    x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ...
@overload
def trapezoid(
    y: Sequence[_SupportsRMulFloat[_T]],
    x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> _T: ...
@overload
def trapezoid(
    y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> (
    floating | complexfloating | timedelta64
    | NDArray[floating | complexfloating | timedelta64 | object_]
): ...

@deprecated("Use 'trapezoid' instead")
def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ...

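# A minimal doctest-style sketch (not part of the stub): integrating
# y = [0, 1, 2] with unit spacing under the trapezoidal rule.
#
#   >>> import numpy as np
#   >>> float(np.trapezoid([0.0, 1.0, 2.0], dx=1.0))
#   2.0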
@overload
def meshgrid(
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[()]: ...
@overload
def meshgrid(
    x1: _ArrayLike[_ScalarT],
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[_ScalarT]]: ...
@overload
def meshgrid(
    x1: ArrayLike,
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[Any]]: ...
@overload
def meshgrid(
    x1: _ArrayLike[_ScalarT1],
    x2: _ArrayLike[_ScalarT2],
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ...
@overload
def meshgrid(
    x1: ArrayLike,
    x2: _ArrayLike[_ScalarT],
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ...
@overload
def meshgrid(
    x1: _ArrayLike[_ScalarT],
    x2: ArrayLike,
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ...
@overload
def meshgrid(
    x1: ArrayLike,
    x2: ArrayLike,
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[Any], NDArray[Any]]: ...
@overload
def meshgrid(
    x1: ArrayLike,
    x2: ArrayLike,
    x3: ArrayLike,
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
@overload
def meshgrid(
    x1: ArrayLike,
    x2: ArrayLike,
    x3: ArrayLike,
    x4: ArrayLike,
    /,
    *,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ...
@overload
def meshgrid(
    *xi: ArrayLike,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: _MeshgridIdx = ...,
) -> tuple[NDArray[Any], ...]: ...

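# A minimal doctest-style sketch (not part of the stub): with the default
# indexing="xy", the output shape is (len(x2), len(x1)).
#
#   >>> import numpy as np
#   >>> xx, yy = np.meshgrid(np.arange(3), np.arange(2))
#   >>> xx.shape, yy.shape
#   ((2, 3), (2, 3))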
@overload
def delete(
    arr: _ArrayLike[_ScalarT],
    obj: slice | _ArrayLikeInt_co,
    axis: SupportsIndex | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def delete(
    arr: ArrayLike,
    obj: slice | _ArrayLikeInt_co,
    axis: SupportsIndex | None = ...,
) -> NDArray[Any]: ...

@overload
def insert(
    arr: _ArrayLike[_ScalarT],
    obj: slice | _ArrayLikeInt_co,
    values: ArrayLike,
    axis: SupportsIndex | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def insert(
    arr: ArrayLike,
    obj: slice | _ArrayLikeInt_co,
    values: ArrayLike,
    axis: SupportsIndex | None = ...,
) -> NDArray[Any]: ...

def append(
    arr: ArrayLike,
    values: ArrayLike,
    axis: SupportsIndex | None = ...,
) -> NDArray[Any]: ...

@overload
def digitize(
    x: _FloatLike_co,
    bins: _ArrayLikeFloat_co,
    right: bool = ...,
) -> intp: ...
@overload
def digitize(
    x: _ArrayLikeFloat_co,
    bins: _ArrayLikeFloat_co,
    right: bool = ...,
) -> NDArray[intp]: ...
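# A minimal doctest-style sketch (not part of the stub): a scalar x
# yields a scalar bin index, an array yields an array of indices.
#
#   >>> import numpy as np
#   >>> int(np.digitize(2.5, [1.0, 2.0, 3.0]))
#   2
#   >>> np.digitize(np.array([0.5, 2.5]), [1.0, 2.0, 3.0])
#   array([0, 2])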
1085
lib/python3.11/site-packages/numpy/lib/_histograms_impl.py
Normal file
File diff suppressed because it is too large
50
lib/python3.11/site-packages/numpy/lib/_histograms_impl.pyi
Normal file
@ -0,0 +1,50 @@
from collections.abc import Sequence
from typing import (
    Any,
    SupportsIndex,
    TypeAlias,
)
from typing import (
    Literal as L,
)

from numpy._typing import (
    ArrayLike,
    NDArray,
)

__all__ = ["histogram", "histogramdd", "histogram_bin_edges"]

_BinKind: TypeAlias = L[
    "stone",
    "auto",
    "doane",
    "fd",
    "rice",
    "scott",
    "sqrt",
    "sturges",
]

def histogram_bin_edges(
    a: ArrayLike,
    bins: _BinKind | SupportsIndex | ArrayLike = ...,
    range: tuple[float, float] | None = ...,
    weights: ArrayLike | None = ...,
) -> NDArray[Any]: ...

def histogram(
    a: ArrayLike,
    bins: _BinKind | SupportsIndex | ArrayLike = ...,
    range: tuple[float, float] | None = ...,
    density: bool = ...,
    weights: ArrayLike | None = ...,
) -> tuple[NDArray[Any], NDArray[Any]]: ...

def histogramdd(
    sample: ArrayLike,
    bins: SupportsIndex | ArrayLike = ...,
    range: Sequence[tuple[float, float]] = ...,
    density: bool | None = ...,
    weights: ArrayLike | None = ...,
) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ...
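# A minimal doctest-style sketch (not part of the stub): `histogram`
# returns the counts and the bin edges as a pair.
#
#   >>> import numpy as np
#   >>> counts, edges = np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
#   >>> counts
#   array([0, 2, 1])
#   >>> edges
#   array([0, 1, 2, 3])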
1067
lib/python3.11/site-packages/numpy/lib/_index_tricks_impl.py
Normal file
File diff suppressed because it is too large
196
lib/python3.11/site-packages/numpy/lib/_index_tricks_impl.pyi
Normal file
@ -0,0 +1,196 @@
from collections.abc import Sequence
from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload
from typing import Literal as L

from _typeshed import Incomplete
from typing_extensions import TypeVar, deprecated

import numpy as np
from numpy._core.multiarray import ravel_multi_index, unravel_index
from numpy._typing import (
    ArrayLike,
    NDArray,
    _AnyShape,
    _FiniteNestedSequence,
    _NestedSequence,
    _SupportsArray,
    _SupportsDType,
)

__all__ = [  # noqa: RUF022
    "ravel_multi_index",
    "unravel_index",
    "mgrid",
    "ogrid",
    "r_",
    "c_",
    "s_",
    "index_exp",
    "ix_",
    "ndenumerate",
    "ndindex",
    "fill_diagonal",
    "diag_indices",
    "diag_indices_from",
]

###

_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...])
_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
_ScalarT = TypeVar("_ScalarT", bound=np.generic)
_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True)

_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True)
_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True)
_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True)
_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True)

###

class ndenumerate(Generic[_ScalarT_co]):
    @overload
    def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ...
    @overload
    def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ...
    @overload
    def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ...
    @overload
    def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ...
    @overload
    def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ...
    @overload
    def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ...
    @overload
    def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ...
    @overload
    def __new__(cls, arr: object) -> ndenumerate[Any]: ...

    # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11)
    @overload
    def __next__(
        self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64],
        /,
    ) -> tuple[_AnyShape, _ScalarT_co]: ...
    @overload
    def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ...
    @overload
    def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ...

    #
    def __iter__(self) -> Self: ...

class ndindex:
    @overload
    def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ...
    @overload
    def __init__(self, /, *shape: SupportsIndex) -> None: ...

    #
    def __iter__(self) -> Self: ...
    def __next__(self) -> _AnyShape: ...

    #
    @deprecated("Deprecated since 1.20.0.")
    def ndincr(self, /) -> None: ...

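# A minimal doctest-style sketch (not part of the stub): both classes
# iterate over index tuples, `ndenumerate` also yielding the values.
#
#   >>> import numpy as np
#   >>> list(np.ndindex(2, 2))
#   [(0, 0), (0, 1), (1, 0), (1, 1)]
#   >>> [(i, int(v)) for i, v in np.ndenumerate(np.array([[1, 2]]))]
#   [((0, 0), 1), ((0, 1), 2)]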
class nd_grid(Generic[_BoolT_co]):
    sparse: _BoolT_co
    def __init__(self, sparse: _BoolT_co = ...) -> None: ...
    @overload
    def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ...
    @overload
    def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ...

@final
class MGridClass(nd_grid[L[False]]):
    def __init__(self) -> None: ...

@final
class OGridClass(nd_grid[L[True]]):
    def __init__(self) -> None: ...

class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]):
    __slots__ = "axis", "matrix", "ndmin", "trans1d"

    makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype]]]

    axis: _AxisT_co
    matrix: _MatrixT_co
    ndmin: _NDMinT_co
    trans1d: _Trans1DT_co

    #
    def __init__(
        self,
        /,
        axis: _AxisT_co = ...,
        matrix: _MatrixT_co = ...,
        ndmin: _NDMinT_co = ...,
        trans1d: _Trans1DT_co = ...,
    ) -> None: ...

    # TODO(jorenham): annotate this
    def __getitem__(self, key: Incomplete, /) -> Incomplete: ...
    def __len__(self, /) -> L[0]: ...

    #
    @staticmethod
    @overload
    def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ...
    @staticmethod
    @overload
    def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ...

@final
class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]):
    def __init__(self, /) -> None: ...

@final
class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]):
    def __init__(self, /) -> None: ...

class IndexExpression(Generic[_BoolT_co]):
    maketuple: _BoolT_co
    def __init__(self, maketuple: _BoolT_co) -> None: ...
    @overload
    def __getitem__(self, item: _TupleT) -> _TupleT: ...
    @overload
    def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ...
    @overload
    def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ...

@overload
def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ...
@overload
def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ...
@overload
def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ...
@overload
def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ...
@overload
def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ...
@overload
def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ...
@overload
def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ...

#
def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ...

#
def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ...
def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ...

#
mgrid: Final[MGridClass] = ...
ogrid: Final[OGridClass] = ...

r_: Final[RClass] = ...
c_: Final[CClass] = ...

index_exp: Final[IndexExpression[L[True]]] = ...
s_: Final[IndexExpression[L[False]]] = ...
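# A minimal doctest-style sketch (not part of the stub) of the index
# helpers declared above:
#
#   >>> import numpy as np
#   >>> np.mgrid[0:2, 0:2].shape
#   (2, 2, 2)
#   >>> np.r_[1, 2, 3]
#   array([1, 2, 3])
#   >>> np.s_[1:5]
#   slice(1, 5, None)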
900
lib/python3.11/site-packages/numpy/lib/_iotools.py
Normal file
@ -0,0 +1,900 @@
"""A collection of functions designed to help I/O with ASCII files.

"""
__docformat__ = "restructuredtext en"

import itertools

import numpy as np
import numpy._core.numeric as nx
from numpy._utils import asbytes, asunicode


def _decode_line(line, encoding=None):
    """Decode bytes from binary input streams.

    Defaults to decoding from 'latin1'.

    Parameters
    ----------
    line : str or bytes
        Line to be decoded.
    encoding : str
        Encoding used to decode `line`.

    Returns
    -------
    decoded_line : str

    """
    if type(line) is bytes:
        if encoding is None:
            encoding = "latin1"
        line = line.decode(encoding)

    return line


def _is_string_like(obj):
    """
    Check whether obj behaves like a string.
    """
    try:
        obj + ''
    except (TypeError, ValueError):
        return False
    return True


def _is_bytes_like(obj):
    """
    Check whether obj behaves like a bytes object.
    """
    try:
        obj + b''
    except (TypeError, ValueError):
        return False
    return True


def has_nested_fields(ndtype):
    """
    Returns whether one or several fields of a dtype are nested.

    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.

    Raises
    ------
    AttributeError
        If `ndtype` does not have a `names` attribute.

    Examples
    --------
    >>> import numpy as np
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False

    """
    return any(ndtype[name].names is not None for name in ndtype.names or ())


def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.

    Note that the field names are lost.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse
    flatten_base : bool, optional
        If True, transform a field with a shape into several fields. Default is
        False.

    Examples
    --------
    >>> import numpy as np
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
    ...                ('block', int, (2, 3))])
    >>> np.lib._iotools.flatten_dtype(dt)
    [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
    [dtype('S4'),
     dtype('float64'),
     dtype('float64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64')]

    """
    names = ndtype.names
    if names is None:
        if flatten_base:
            return [ndtype.base] * int(np.prod(ndtype.shape))
        return [ndtype.base]
    else:
        types = []
        for field in names:
            info = ndtype.fields[field]
            flat_dt = flatten_dtype(info[0], flatten_base)
            types.extend(flat_dt)
        return types


class LineSplitter:
    """
    Object to split a string at a given delimiter or at given places.

    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comments : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.

    """

    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.

        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence of
            strings.

        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single input
            argument and returns a list of strings that are stripped of
            white-space.

        """
        return lambda input: [_.strip() for _ in method(input)]

    def __init__(self, delimiter=None, comments='#', autostrip=True,
                 encoding=None):
        delimiter = _decode_line(delimiter)
        comments = _decode_line(comments)

        self.comments = comments

        # Delimiter is a character
        if (delimiter is None) or isinstance(delimiter, str):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                self._fixedwidth_splitter, int(delimiter))
        else:
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
        self.encoding = encoding

    def _delimited_splitter(self, line):
        """Chop off comments, strip, and split at delimiter."""
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip(" \r\n")
        if not line:
            return []
        return line.split(self.delimiter)

    def _fixedwidth_splitter(self, line):
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip("\r\n")
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
        return [line[s] for s in slices]

    def _variablewidth_splitter(self, line):
        if self.comments is not None:
            line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]

    def __call__(self, line):
        return self._handyman(_decode_line(line, self.encoding))


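# A minimal usage sketch (not part of the module; `LineSplitter` is a
# private helper in np.lib._iotools): splitting one comma-delimited line
# with the default '#' comment character and autostrip enabled.
#
#   >>> split = LineSplitter(delimiter=',')
#   >>> split("1, 2, 3  # a comment")
#   ['1', '2', '3']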
class NameValidator:
    """
    Object to validate a list of strings to use as field names.

    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list are appended a '_' character.

    Once an instance has been created, it can be called with a list of
    names, and a list of valid names will be created. The `__call__`
    method accepts an optional keyword "default" that sets the default name
    in case of ambiguity. By default this is 'f', so that names will
    default to `f0`, `f1`, etc.

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.

        The default value is True.
    replace_space : '_', optional
        Character(s) used in replacement of white spaces.

    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.

    Examples
    --------
    >>> import numpy as np
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ('file_', 'field2', 'with_space', 'CaSe')

    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
    ...                                           deletechars='q',
    ...                                           case_sensitive=False)
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')

    """

    defaultexcludelist = 'return', 'file', 'print'
    defaultdeletechars = frozenset(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")

    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        # Process the exclusion list ..
        if excludelist is None:
            excludelist = []
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Process the list of characters to delete
        if deletechars is None:
            delete = set(self.defaultdeletechars)
        else:
            delete = set(deletechars)
        delete.add('"')
        self.deletechars = delete
        # Process the case option .....
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or case_sensitive.startswith('u'):
            self.case_converter = lambda x: x.upper()
        elif case_sensitive.startswith('l'):
            self.case_converter = lambda x: x.lower()
        else:
            msg = f'unrecognized case_sensitive value {case_sensitive}.'
            raise ValueError(msg)

        self.replace_space = replace_space

    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings as field names for a structured array.

        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.

        Returns
        -------
        validatednames : list of str
            The list of validated field names.

        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.

        """
        # Initial checks ..............
        if (names is None):
            if (nbfields is None):
                return None
            names = []
        if isinstance(names, str):
            names = [names, ]
        if nbfields is not None:
            nbnames = len(names)
            if (nbnames < nbfields):
                names = list(names) + [''] * (nbfields - nbnames)
            elif (nbnames > nbfields):
                names = names[:nbfields]
        # Set some shortcuts ...........
        deletechars = self.deletechars
        excludelist = self.excludelist
        case_converter = self.case_converter
        replace_space = self.replace_space
        # Initializes some variables ...
        validatednames = []
        seen = {}
        nbempty = 0

        for item in names:
            item = case_converter(item).strip()
            if replace_space:
                item = item.replace(' ', replace_space)
            item = ''.join([c for c in item if c not in deletechars])
            if item == '':
                item = defaultfmt % nbempty
                while item in names:
                    nbempty += 1
                    item = defaultfmt % nbempty
                nbempty += 1
            elif item in excludelist:
                item += '_'
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt + 1
        return tuple(validatednames)

    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)


def str2bool(value):
    """
    Transform a string representing a boolean into a boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case-insensitive).

    Examples
    --------
    >>> import numpy as np
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False

    """
    value = value.upper()
    if value == 'TRUE':
        return True
    elif value == 'FALSE':
        return False
    else:
        raise ValueError("Invalid boolean")


class ConverterError(Exception):
    """
    Exception raised when an error occurs in a converter for string values.

    """
    pass


class ConverterLockError(ConverterError):
    """
    Exception raised when an attempt is made to upgrade a locked converter.

    """
    pass


class ConversionWarning(UserWarning):
    """
    Warning issued when a string converter has a problem.

    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.

    """
    pass


class StringConverter:
    """
    Factory class for functions transforming a string into another object
    (int, float).

    After initialization, an instance can be called to transform a string
    into another object. If the string is recognized as representing a
    missing value, a default value is returned.

    Attributes
    ----------
    func : function
        Function used for the conversion.
    default : any
        Default value to return when the input corresponds to a missing
        value.
    type : type
        Type of the output.
    _status : int
        Integer representing the order of the conversion.
    _mapper : sequence of tuples
        Sequence of tuples (dtype, function, default value) to evaluate in
        order.
    _locked : bool
        Holds `locked` parameter.

    Parameters
    ----------
    dtype_or_func : {None, dtype, function}, optional
        If a `dtype`, specifies the input data type, used to define a basic
        function and a default value for missing data. For example, when
        `dtype` is float, the `func` attribute is set to `float` and the
        default value to `np.nan`. If a function, this function is used to
        convert a string to another object. In this case, it is recommended
        to give an associated default value as input.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given, `StringConverter`
        tries to supply a reasonable default value.
    missing_values : {None, sequence of str}, optional
        ``None`` or sequence of strings indicating a missing value. If ``None``
        then missing values are indicated by empty entries. The default is
        ``None``.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not. Default is False.

    """
    _mapper = [(nx.bool, str2bool, False),
               (nx.int_, int, -1),]

    # On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since nx.int_ is nx.int32.
    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
        _mapper.append((nx.int64, int, -1))

    _mapper.extend([(nx.float64, float, nx.nan),
                    (nx.complex128, complex, nx.nan + 0j),
                    (nx.longdouble, nx.longdouble, nx.nan),
                    # If a non-default dtype is passed, fall back to generic
                    # ones (should only be used for the converter)
                    (nx.integer, int, -1),
                    (nx.floating, float, nx.nan),
                    (nx.complexfloating, complex, nx.nan + 0j),
                    # Last, try with the string types (must be last, because
                    # `_mapper[-1]` is used as default in some cases)
                    (nx.str_, asunicode, '???'),
                    (nx.bytes_, asbytes, '???'),
                    ])

    @classmethod
    def _getdtype(cls, val):
        """Returns the dtype of the input variable."""
        return np.array(val).dtype

    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type

    @classmethod
    def _dtypeortype(cls, dtype):
        """Returns dtype for datetime64 and type of dtype otherwise."""

        # This is a bit annoying. We want to return the "general" type in most
        # cases (ie. "string" rather than "S10"), but we want to return the
        # specific type for datetime64 (ie. "datetime64[us]" rather than
        # "datetime64").
        if dtype.type == np.datetime64:
            return dtype
        return dtype.type

    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
        Upgrade the mapper of a StringConverter by adding a new function and
        its corresponding default.

        The input function (or sequence of functions) and its associated
        default value (if any) is inserted in penultimate position of the
        mapper. The corresponding type is estimated from the dtype of the
        default value.

        Parameters
        ----------
        func : var
            Function, or sequence of functions

        Examples
        --------
        >>> import dateutil.parser
        >>> import datetime
        >>> dateparser = dateutil.parser.parse
        >>> defaultdate = datetime.date(2000, 1, 1)
        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # Func is a single function
        if callable(func):
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            if isinstance(func[0], (tuple, list)):
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            if default is None:
                default = [None] * len(func)
            else:
                default = list(default)
                default.append([None] * (len(func) - len(default)))
            for fct, dft in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))

    @classmethod
    def _find_map_entry(cls, dtype):
        # if a converter for the specific dtype is available use that
        for i, (deftype, func, default_def) in enumerate(cls._mapper):
            if dtype.type == deftype:
                return i, (deftype, func, default_def)

        # otherwise find an inexact match
        for i, (deftype, func, default_def) in enumerate(cls._mapper):
            if np.issubdtype(dtype.type, deftype):
                return i, (deftype, func, default_def)

        raise LookupError

    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            dtype = np.dtype('bool')
        else:
            # Is the input a np.dtype ?
            try:
                self.func = None
                dtype = np.dtype(dtype_or_func)
            except TypeError:
                # dtype_or_func must be a function, then
                if not callable(dtype_or_func):
                    errmsg = ("The input argument `dtype` is neither a"
                              " function nor a dtype (got '%s' instead)")
                    raise TypeError(errmsg % type(dtype_or_func))
                # Set the function
                self.func = dtype_or_func
                # If we don't have a default, try to guess it or set it to
                # None
                if default is None:
                    try:
                        default = self.func('0')
                    except ValueError:
                        default = None
                dtype = self._getdtype(default)

        # find the best match in our mapper
        try:
            self._status, (_, func, default_def) = self._find_map_entry(dtype)
        except LookupError:
            # no match
            self.default = default
            _, func, _ = self._mapper[-1]
            self._status = 0
        else:
            # use the found default only if we did not already have one
            if default is None:
                self.default = default_def
            else:
                self.default = default

        # If the input was a dtype, set the function to the last we saw
        if self.func is None:
            self.func = func

        # If the status is 1 (int), change the function to
        # something more robust.
        if self.func == self._mapper[1][1]:
            if issubclass(dtype.type, np.uint64):
                self.func = np.uint64
            elif issubclass(dtype.type, np.int64):
                self.func = np.int64
            else:
                self.func = lambda x: int(float(x))
        # Store the list of strings corresponding to missing values.
        if missing_values is None:
            self.missing_values = {''}
        else:
            if isinstance(missing_values, str):
                missing_values = missing_values.split(",")
            self.missing_values = set(list(missing_values) + [''])

        self._callingfunction = self._strict_call
        self.type = self._dtypeortype(dtype)
        self._checked = False
        self._initial_default = default

    def _loose_call(self, value):
        try:
            return self.func(value)
        except ValueError:
            return self.default

    def _strict_call(self, value):
        try:

            # We check if we can convert the value using the current function
            new_value = self.func(value)

            # In addition to having to check whether func can convert the
            # value, we also have to make sure that we don't get overflow
            # errors for integers.
            if self.func is int:
                try:
                    np.array(value, dtype=self.type)
                except OverflowError:
                    raise ValueError

            # We're still here so we can now return the new value
            return new_value

        except ValueError:
            if value.strip() in self.missing_values:
                if not self._status:
                    self._checked = False
                return self.default
            raise ValueError(f"Cannot convert string '{value}'")

    def __call__(self, value):
        return self._callingfunction(value)

    def _do_upgrade(self):
        # Raise an exception if we locked the converter...
        if self._locked:
            errmsg = "Converter is locked and cannot be upgraded"
            raise ConverterLockError(errmsg)
        _statusmax = len(self._mapper)
        # Complain if we try to upgrade past the maximum
        _status = self._status
        if _status == _statusmax:
            errmsg = "Could not find a valid conversion function"
            raise ConverterError(errmsg)
        elif _status < _statusmax - 1:
            _status += 1
        self.type, self.func, default = self._mapper[_status]
        self._status = _status
        if self._initial_default is not None:
            self.default = self._initial_default
        else:
            self.default = default

    def upgrade(self, value):
        """
        Find the best converter for a given string, and return the result.

        The supplied string `value` is converted by testing different
        converters in order. First the `func` method of the
        `StringConverter` instance is tried; if this fails, other available
        converters are tried. The order in which these other converters
        are tried is determined by the `_status` attribute of the instance.

        Parameters
        ----------
        value : str
            The string to convert.

        Returns
        -------
        out : any
            The result of converting `value` with the appropriate converter.

        """
        self._checked = True
        try:
            return self._strict_call(value)
        except ValueError:
            self._do_upgrade()
            return self.upgrade(value)

    def iterupgrade(self, value):
        self._checked = True
        if not hasattr(value, '__iter__'):
            value = (value,)
        _strict_call = self._strict_call
        try:
            for _m in value:
                _strict_call(_m)
        except ValueError:
            self._do_upgrade()
            self.iterupgrade(value)

    def update(self, func, default=None, testing_value=None,
               missing_values='', locked=False):
        """
        Set StringConverter attributes directly.

        Parameters
        ----------
        func : function
            Conversion function.
        default : any, optional
            Value to return by default, that is, when the string to be
            converted is flagged as missing. If not given,
            `StringConverter` tries to supply a reasonable default value.
        testing_value : str, optional
            A string representing a standard input value of the converter.
            This string is used to help define a reasonable default
            value.
        missing_values : {sequence of str, None}, optional
            Sequence of strings indicating a missing value. If ``None``, then
            the existing `missing_values` are cleared. The default is ``''``.
        locked : bool, optional
            Whether the StringConverter should be locked to prevent
            automatic upgrade or not. Default is False.

        Notes
        -----
        `update` takes the same parameters as the constructor of
        `StringConverter`, except that `func` does not accept a `dtype`
        whereas `dtype_or_func` in the constructor does.

        """
        self.func = func
        self._locked = locked

        # Don't reset the default to None if we can avoid it
        if default is not None:
            self.default = default
            self.type = self._dtypeortype(self._getdtype(default))
        else:
            try:
                tester = func(testing_value or '1')
            except (TypeError, ValueError):
                tester = None
            self.type = self._dtypeortype(self._getdtype(tester))

        # Add the missing values to the existing set or clear it.
        if missing_values is None:
            # Clear all missing values even though the ctor initializes it to
            # set(['']) when the argument is None.
            self.missing_values = set()
        else:
            if not np.iterable(missing_values):
                missing_values = [missing_values]
            if not all(isinstance(v, str) for v in missing_values):
                raise TypeError("missing_values must be strings or unicode")
            self.missing_values.update(missing_values)


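# A minimal usage sketch (not part of the module; `StringConverter` is a
# private helper used by genfromtxt): converting strings to floats, with
# the empty string treated as a missing value.
#
#   >>> convert = StringConverter(float, default=-9999.0)
#   >>> convert('3.25')
#   3.25
#   >>> convert('')   # missing value -> default
#   -9999.0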
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
    """
    Convenience function to create a `np.dtype` object.

    The function processes the input `dtype` and matches it with the given
    names.

    Parameters
    ----------
    ndtype : var
        Definition of the dtype. Can be any string or dictionary recognized
        by the `np.dtype` function, or a sequence of types.
    names : str or sequence, optional
        Sequence of strings to use as field names for a structured dtype.
        For convenience, `names` can be a string of a comma-separated list
        of names.
    defaultfmt : str, optional
        Format string used to define missing names, such as ``"f%i"``
        (default) or ``"fields_%02i"``.
    validationargs : optional
        A series of optional arguments used to initialize a
        `NameValidator`.

    Examples
    --------
    >>> import numpy as np
    >>> np.lib._iotools.easy_dtype(float)
    dtype('float64')
    >>> np.lib._iotools.easy_dtype("i4, f8")
    dtype([('f0', '<i4'), ('f1', '<f8')])
    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
    dtype([('field_000', '<i4'), ('field_001', '<f8')])

    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])

    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if names is None:
            names = [''] * len(ndtype)
        elif isinstance(names, str):
            names = names.split(",")
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype({"formats": ndtype, "names": names})
    else:
        # Explicit names
        if names is not None:
            validate = NameValidator(**validationargs)
            if isinstance(names, str):
                names = names.split(",")
            # Simple dtype: repeat to match the nb of names
            if ndtype.names is None:
                formats = tuple([ndtype.type] * len(names))
                names = validate(names, defaultfmt=defaultfmt)
                ndtype = np.dtype(list(zip(names, formats)))
            # Structured dtype: just validate the names as needed
            else:
                ndtype.names = validate(names, nbfields=len(ndtype.names),
                                        defaultfmt=defaultfmt)
        # No implicit names
        elif ndtype.names is not None:
            validate = NameValidator(**validationargs)
            # Default initial names : should we change the format ?
            numbered_names = tuple(f"f{i}" for i in range(len(ndtype.names)))
            if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
                ndtype.names = validate([''] * len(ndtype.names),
                                        defaultfmt=defaultfmt)
            # Explicit initial names : just validate
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype
114
lib/python3.11/site-packages/numpy/lib/_iotools.pyi
Normal file
@ -0,0 +1,114 @@
from collections.abc import Callable, Iterable, Sequence
from typing import (
    Any,
    ClassVar,
    Final,
    Literal,
    TypedDict,
    TypeVar,
    Unpack,
    overload,
    type_check_only,
)

import numpy as np
import numpy.typing as npt

_T = TypeVar("_T")

@type_check_only
class _ValidationKwargs(TypedDict, total=False):
    excludelist: Iterable[str] | None
    deletechars: Iterable[str] | None
    case_sensitive: Literal["upper", "lower"] | bool | None
    replace_space: str

###

__docformat__: Final[str] = "restructuredtext en"

class ConverterError(Exception): ...
class ConverterLockError(ConverterError): ...
class ConversionWarning(UserWarning): ...

class LineSplitter:
    delimiter: str | int | Iterable[int] | None
    comments: str
    encoding: str | None

    def __init__(
        self,
        /,
        delimiter: str | bytes | int | Iterable[int] | None = None,
        comments: str | bytes = "#",
        autostrip: bool = True,
        encoding: str | None = None,
    ) -> None: ...
    def __call__(self, /, line: str | bytes) -> list[str]: ...
    def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ...

class NameValidator:
    defaultexcludelist: ClassVar[Sequence[str]]
    defaultdeletechars: ClassVar[Sequence[str]]
    excludelist: list[str]
    deletechars: set[str]
    case_converter: Callable[[str], str]
    replace_space: str

    def __init__(
        self,
        /,
        excludelist: Iterable[str] | None = None,
        deletechars: Iterable[str] | None = None,
        case_sensitive: Literal["upper", "lower"] | bool | None = None,
        replace_space: str = "_",
    ) -> None: ...
    def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...
    def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...

class StringConverter:
    func: Callable[[str], Any] | None
    default: Any
    missing_values: set[str]
    type: np.dtype[np.datetime64] | np.generic

    def __init__(
        self,
        /,
        dtype_or_func: npt.DTypeLike | None = None,
        default: None = None,
        missing_values: Iterable[str] | None = None,
        locked: bool = False,
    ) -> None: ...
    def update(
        self,
        /,
        func: Callable[[str], Any],
        default: object | None = None,
        testing_value: str | None = None,
        missing_values: str = "",
        locked: bool = False,
    ) -> None: ...
    #
    def __call__(self, /, value: str) -> Any: ...
    def upgrade(self, /, value: str) -> Any: ...
    def iterupgrade(self, /, value: Iterable[str] | str) -> None: ...

    #
    @classmethod
    def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ...

@overload
def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ...
@overload
def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ...

#
def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ...
def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ...
def easy_dtype(
    ndtype: npt.DTypeLike,
    names: Iterable[str] | None = None,
    defaultfmt: str = "f%i",
    **validationargs: Unpack[_ValidationKwargs],
) -> np.dtype[np.void]: ...
2024
lib/python3.11/site-packages/numpy/lib/_nanfunctions_impl.py
Normal file
File diff suppressed because it is too large
52
lib/python3.11/site-packages/numpy/lib/_nanfunctions_impl.pyi
Normal file
@ -0,0 +1,52 @@
from numpy._core.fromnumeric import (
    amax,
    amin,
    argmax,
    argmin,
    cumprod,
    cumsum,
    mean,
    prod,
    std,
    sum,
    var,
)
from numpy.lib._function_base_impl import (
    median,
    percentile,
    quantile,
)

__all__ = [
    "nansum",
    "nanmax",
    "nanmin",
    "nanargmax",
    "nanargmin",
    "nanmean",
    "nanmedian",
    "nanpercentile",
    "nanvar",
    "nanstd",
    "nanprod",
    "nancumsum",
    "nancumprod",
    "nanquantile",
]

# NOTE: In reality these functions are not aliases but distinct functions
# with identical signatures.
nanmin = amin
nanmax = amax
nanargmin = argmin
nanargmax = argmax
nansum = sum
nanprod = prod
nancumsum = cumsum
nancumprod = cumprod
nanmean = mean
nanvar = var
nanstd = std
nanmedian = median
nanpercentile = percentile
nanquantile = quantile
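# A minimal doctest-style sketch (not part of the stub): the nan-variants
# share their base function's signature but skip NaN entries.
#
#   >>> import numpy as np
#   >>> float(np.nanmean(np.array([1.0, np.nan, 3.0])))
#   2.0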
2596
lib/python3.11/site-packages/numpy/lib/_npyio_impl.py
Normal file
File diff suppressed because it is too large
301
lib/python3.11/site-packages/numpy/lib/_npyio_impl.pyi
Normal file
@ -0,0 +1,301 @@
import types
import zipfile
from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence
from re import Pattern
from typing import (
    IO,
    Any,
    ClassVar,
    Generic,
    Protocol,
    Self,
    TypeAlias,
    overload,
    type_check_only,
)
from typing import Literal as L

from _typeshed import (
    StrOrBytesPath,
    StrPath,
    SupportsKeysAndGetItem,
    SupportsRead,
    SupportsWrite,
)
from typing_extensions import TypeVar, deprecated, override

import numpy as np
from numpy._core.multiarray import packbits, unpackbits
from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc
from numpy.ma.mrecords import MaskedRecords

from ._datasource import DataSource as DataSource

__all__ = [
    "fromregex",
    "genfromtxt",
    "load",
    "loadtxt",
    "packbits",
    "save",
    "savetxt",
    "savez",
    "savez_compressed",
    "unpackbits",
]

_T_co = TypeVar("_T_co", covariant=True)
_ScalarT = TypeVar("_ScalarT", bound=np.generic)
_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True)

_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes]
_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes]
_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes]
_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str]

@type_check_only
class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]):
    def seek(self, offset: int, whence: int, /) -> object: ...

class BagObj(Generic[_T_co]):
    def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ...
    def __getattribute__(self, key: str, /) -> _T_co: ...
    def __dir__(self) -> list[str]: ...

class NpzFile(Mapping[str, NDArray[_ScalarT_co]]):
    _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5

    zip: zipfile.ZipFile
    fid: IO[str] | None
    files: list[str]
    allow_pickle: bool
    pickle_kwargs: Mapping[str, Any] | None
    f: BagObj[NpzFile[_ScalarT_co]]

    #
    def __init__(
        self,
        /,
        fid: IO[Any],
        own_fid: bool = False,
        allow_pickle: bool = False,
        pickle_kwargs: Mapping[str, object] | None = None,
        *,
        max_header_size: int = 10_000,
    ) -> None: ...
    def __del__(self) -> None: ...
    def __enter__(self) -> Self: ...
    def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ...
    @override
    def __len__(self) -> int: ...
    @override
    def __iter__(self) -> Iterator[str]: ...
    @override
    def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ...
    def close(self) -> None: ...

# NOTE: Returns a `NpzFile` if file is a zip file;
|
||||
# returns an `ndarray`/`memmap` otherwise
|
||||
def load(
|
||||
file: StrOrBytesPath | _SupportsReadSeek[bytes],
|
||||
mmap_mode: L["r+", "r", "w+", "c"] | None = None,
|
||||
allow_pickle: bool = False,
|
||||
fix_imports: bool = True,
|
||||
encoding: L["ASCII", "latin1", "bytes"] = "ASCII",
|
||||
*,
|
||||
max_header_size: int = 10_000,
|
||||
) -> Any: ...
|
||||
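A hedged usage sketch for the NOTE above (the file name "bundle.npz" is
illustrative only): a zip input yields a mapping-like `NpzFile`, which is why
the stub exposes `__enter__`/`__exit__` and `__getitem__`:

    import numpy as np

    np.savez("bundle.npz", a=np.arange(3))   # hypothetical file name
    with np.load("bundle.npz") as npz:       # zip file -> NpzFile (a Mapping)
        a = npz["a"]                         # __getitem__ returns an ndarray
    # np.load("single.npy") would instead return the array (or memmap) itself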

@overload
def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ...
@overload
@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")
def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ...
@overload
@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")
def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ...

#
def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...

#
def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...
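A short sketch of the `savez` signature above: positional arrays are stored
under generated names ("arr_0", "arr_1", ...) while keyword arrays keep their
keyword; `savez_compressed` takes the same arguments but writes a compressed
archive. The file name is again illustrative:

    import numpy as np

    x, y = np.arange(4), np.eye(2)
    np.savez("pair.npz", x, named=y)      # stored as "arr_0" and "named"
    with np.load("pair.npz") as npz:
        print(sorted(npz.files))          # ['arr_0', 'named']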

# File-like objects only have to implement `__iter__` and,
# optionally, `encoding`
@overload
def loadtxt(
    fname: _FName,
    dtype: None = None,
    comments: str | Sequence[str] | None = "#",
    delimiter: str | None = None,
    converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,
    skiprows: int = 0,
    usecols: int | Sequence[int] | None = None,
    unpack: bool = False,
    ndmin: L[0, 1, 2] = 0,
    encoding: str | None = None,
    max_rows: int | None = None,
    *,
    quotechar: str | None = None,
    like: _SupportsArrayFunc | None = None,
) -> NDArray[np.float64]: ...
@overload
def loadtxt(
    fname: _FName,
    dtype: _DTypeLike[_ScalarT],
    comments: str | Sequence[str] | None = "#",
    delimiter: str | None = None,
    converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,
    skiprows: int = 0,
    usecols: int | Sequence[int] | None = None,
    unpack: bool = False,
    ndmin: L[0, 1, 2] = 0,
    encoding: str | None = None,
    max_rows: int | None = None,
    *,
    quotechar: str | None = None,
    like: _SupportsArrayFunc | None = None,
) -> NDArray[_ScalarT]: ...
@overload
def loadtxt(
    fname: _FName,
    dtype: DTypeLike,
    comments: str | Sequence[str] | None = "#",
    delimiter: str | None = None,
    converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,
    skiprows: int = 0,
    usecols: int | Sequence[int] | None = None,
    unpack: bool = False,
    ndmin: L[0, 1, 2] = 0,
    encoding: str | None = None,
    max_rows: int | None = None,
    *,
    quotechar: str | None = None,
    like: _SupportsArrayFunc | None = None,
) -> NDArray[Any]: ...
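A sketch of how the three `loadtxt` overloads above resolve: omitting `dtype`
selects the `float64` overload, a concrete scalar type selects the generic
`_ScalarT` one, and a loose dtype spec falls back to `Any`:

    import io
    import numpy as np

    buf = io.StringIO("1 2\n3 4")
    a = np.loadtxt(buf)                  # NDArray[np.float64]
    buf.seek(0)
    b = np.loadtxt(buf, dtype=np.int64)  # NDArray[np.int64]
    buf.seek(0)
    c = np.loadtxt(buf, dtype="i8")      # NDArray[Any] (string dtype spec)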

def savetxt(
    fname: _FNameWrite,
    X: ArrayLike,
    fmt: str | Sequence[str] = "%.18e",
    delimiter: str = " ",
    newline: str = "\n",
    header: str = "",
    footer: str = "",
    comments: str = "# ",
    encoding: str | None = None,
) -> None: ...

@overload
def fromregex(
    file: _FNameRead,
    regexp: str | bytes | Pattern[Any],
    dtype: _DTypeLike[_ScalarT],
    encoding: str | None = None,
) -> NDArray[_ScalarT]: ...
@overload
def fromregex(
    file: _FNameRead,
    regexp: str | bytes | Pattern[Any],
    dtype: DTypeLike,
    encoding: str | None = None,
) -> NDArray[Any]: ...
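A minimal, self-contained `fromregex` sketch matching the stub above; each
regex group fills one field of the structured dtype (the input values are
invented for illustration):

    import io
    import numpy as np

    text = io.StringIO("id=1 val=2.5\nid=2 val=3.0")
    dt = np.dtype([("id", np.int64), ("val", np.float64)])
    out = np.fromregex(text, r"id=(\d+) val=([\d.]+)", dt)
    print(out["id"], out["val"])   # [1 2] [2.5 3. ]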

@overload
def genfromtxt(
    fname: _FName,
    dtype: None = None,
    comments: str = ...,
    delimiter: str | int | Iterable[int] | None = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: Mapping[int | str, Callable[[str], Any]] | None = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: Sequence[int] | None = ...,
    names: L[True] | str | Collection[str] | None = ...,
    excludelist: Sequence[str] | None = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L["upper", "lower"] = ...,
    defaultfmt: str = ...,
    unpack: bool | None = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: int | None = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[Any]: ...
@overload
def genfromtxt(
    fname: _FName,
    dtype: _DTypeLike[_ScalarT],
    comments: str = ...,
    delimiter: str | int | Iterable[int] | None = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: Mapping[int | str, Callable[[str], Any]] | None = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: Sequence[int] | None = ...,
    names: L[True] | str | Collection[str] | None = ...,
    excludelist: Sequence[str] | None = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L["upper", "lower"] = ...,
    defaultfmt: str = ...,
    unpack: bool | None = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: int | None = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def genfromtxt(
    fname: _FName,
    dtype: DTypeLike,
    comments: str = ...,
    delimiter: str | int | Iterable[int] | None = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: Mapping[int | str, Callable[[str], Any]] | None = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: Sequence[int] | None = ...,
    names: L[True] | str | Collection[str] | None = ...,
    excludelist: Sequence[str] | None = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L["upper", "lower"] = ...,
    defaultfmt: str = ...,
    unpack: bool | None = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: int | None = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[Any]: ...

@overload
def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ...
@overload
def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ...

@overload
def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ...
@overload
def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ...
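A sketch of what the `usemask` literal overloads above encode: `usemask=False`
produces a plain record-style result, `usemask=True` a masked one. Since the
rec-functions are thin wrappers over `genfromtxt`, the equivalent `genfromtxt`
calls are shown here (an assumption for illustration, not the wrappers
themselves):

    import io
    import numpy as np

    plain = np.genfromtxt(io.StringIO("a,b\n1,2\n3,4"),
                          delimiter=",", names=True, usemask=False)
    masked = np.genfromtxt(io.StringIO("a,b\n1,\n3,4"),
                           delimiter=",", names=True, usemask=True)
    print(masked["b"])   # [-- 4.0]: the missing value is masked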
1465
lib/python3.11/site-packages/numpy/lib/_polynomial_impl.py
Normal file
File diff suppressed because it is too large
316
lib/python3.11/site-packages/numpy/lib/_polynomial_impl.pyi
Normal file
@ -0,0 +1,316 @@
from typing import (
    Any,
    NoReturn,
    SupportsIndex,
    SupportsInt,
    TypeAlias,
    TypeVar,
    overload,
)
from typing import (
    Literal as L,
)

import numpy as np
from numpy import (
    complex128,
    complexfloating,
    float64,
    floating,
    int32,
    int64,
    object_,
    poly1d,
    signedinteger,
    unsignedinteger,
)
from numpy._typing import (
    ArrayLike,
    NDArray,
    _ArrayLikeBool_co,
    _ArrayLikeComplex_co,
    _ArrayLikeFloat_co,
    _ArrayLikeInt_co,
    _ArrayLikeObject_co,
    _ArrayLikeUInt_co,
)

_T = TypeVar("_T")

_2Tup: TypeAlias = tuple[_T, _T]
_5Tup: TypeAlias = tuple[
    _T,
    NDArray[float64],
    NDArray[int32],
    NDArray[float64],
    NDArray[float64],
]

__all__ = [
    "poly",
    "roots",
    "polyint",
    "polyder",
    "polyadd",
    "polysub",
    "polymul",
    "polydiv",
    "polyval",
    "poly1d",
    "polyfit",
]

def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ...

# Returns either a float or complex array depending on the input values.
# See `np.linalg.eigvals`.
def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ...

@overload
def polyint(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
    k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ...,
) -> poly1d: ...
@overload
def polyint(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
    k: _ArrayLikeFloat_co | None = ...,
) -> NDArray[floating]: ...
@overload
def polyint(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
    k: _ArrayLikeComplex_co | None = ...,
) -> NDArray[complexfloating]: ...
@overload
def polyint(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
    k: _ArrayLikeObject_co | None = ...,
) -> NDArray[object_]: ...

@overload
def polyder(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
) -> poly1d: ...
@overload
def polyder(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[floating]: ...
@overload
def polyder(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[complexfloating]: ...
@overload
def polyder(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[object_]: ...

@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: float | None = ...,
    full: L[False] = ...,
    w: _ArrayLikeFloat_co | None = ...,
    cov: L[False] = ...,
) -> NDArray[float64]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: float | None = ...,
    full: L[False] = ...,
    w: _ArrayLikeFloat_co | None = ...,
    cov: L[False] = ...,
) -> NDArray[complex128]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: float | None = ...,
    full: L[False] = ...,
    w: _ArrayLikeFloat_co | None = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: float | None = ...,
    full: L[False] = ...,
    w: _ArrayLikeFloat_co | None = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[complex128]]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: float | None = ...,
    full: L[True] = ...,
    w: _ArrayLikeFloat_co | None = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: float | None = ...,
    full: L[True] = ...,
    w: _ArrayLikeFloat_co | None = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[complex128]]: ...
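A sketch of the `_5Tup` shape returned by the `full=True` overloads above:
the coefficients come back together with the residuals, rank, singular values
and the effective `rcond`:

    import numpy as np

    x = np.linspace(0.0, 1.0, 8)
    y = 2.0 * x + 1.0
    coef, residuals, rank, sv, rcond = np.polyfit(x, y, 1, full=True)
    print(coef)   # approximately [2. 1.]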

@overload
def polyval(
    p: _ArrayLikeBool_co,
    x: _ArrayLikeBool_co,
) -> NDArray[int64]: ...
@overload
def polyval(
    p: _ArrayLikeUInt_co,
    x: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger]: ...
@overload
def polyval(
    p: _ArrayLikeInt_co,
    x: _ArrayLikeInt_co,
) -> NDArray[signedinteger]: ...
@overload
def polyval(
    p: _ArrayLikeFloat_co,
    x: _ArrayLikeFloat_co,
) -> NDArray[floating]: ...
@overload
def polyval(
    p: _ArrayLikeComplex_co,
    x: _ArrayLikeComplex_co,
) -> NDArray[complexfloating]: ...
@overload
def polyval(
    p: _ArrayLikeObject_co,
    x: _ArrayLikeObject_co,
) -> NDArray[object_]: ...

@overload
def polyadd(
    a1: poly1d,
    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    a2: poly1d,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeBool_co,
    a2: _ArrayLikeBool_co,
) -> NDArray[np.bool]: ...
@overload
def polyadd(
    a1: _ArrayLikeUInt_co,
    a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger]: ...
@overload
def polyadd(
    a1: _ArrayLikeInt_co,
    a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger]: ...
@overload
def polyadd(
    a1: _ArrayLikeFloat_co,
    a2: _ArrayLikeFloat_co,
) -> NDArray[floating]: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co,
    a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating]: ...
@overload
def polyadd(
    a1: _ArrayLikeObject_co,
    a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...

@overload
def polysub(
    a1: poly1d,
    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polysub(
    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    a2: poly1d,
) -> poly1d: ...
@overload
def polysub(
    a1: _ArrayLikeBool_co,
    a2: _ArrayLikeBool_co,
) -> NoReturn: ...
@overload
def polysub(
    a1: _ArrayLikeUInt_co,
    a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger]: ...
@overload
def polysub(
    a1: _ArrayLikeInt_co,
    a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger]: ...
@overload
def polysub(
    a1: _ArrayLikeFloat_co,
    a2: _ArrayLikeFloat_co,
) -> NDArray[floating]: ...
@overload
def polysub(
    a1: _ArrayLikeComplex_co,
    a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating]: ...
@overload
def polysub(
    a1: _ArrayLikeObject_co,
    a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...

# NOTE: Not an alias, but they do have the same signature (that we can reuse)
polymul = polyadd

@overload
def polydiv(
    u: poly1d,
    v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    v: poly1d,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeFloat_co,
    v: _ArrayLikeFloat_co,
) -> _2Tup[NDArray[floating]]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co,
    v: _ArrayLikeComplex_co,
) -> _2Tup[NDArray[complexfloating]]: ...
@overload
def polydiv(
    u: _ArrayLikeObject_co,
    v: _ArrayLikeObject_co,
) -> _2Tup[NDArray[Any]]: ...
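A quick sketch of the `_2Tup` quotient/remainder pair that `polydiv` returns
per the stub above:

    import numpy as np

    q, r = np.polydiv([1.0, 0.0, -1.0], [1.0, 1.0])  # (x**2 - 1) / (x + 1)
    print(q, r)   # quotient [ 1. -1.], remainder approximately [0.]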
642
lib/python3.11/site-packages/numpy/lib/_scimath_impl.py
Normal file
@ -0,0 +1,642 @@
"""
Wrapper functions for more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.

For example, for functions like `log` with branch cuts, the versions in this
module provide the mathematically valid answers in the complex plane::

  >>> import math
  >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
  True

Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.

"""
import numpy._core.numeric as nx
import numpy._core.numerictypes as nt
from numpy._core.numeric import any, asarray
from numpy._core.overrides import array_function_dispatch, set_module
from numpy.lib._type_check_impl import isreal

__all__ = [
    'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
    'arctanh'
]


_ln2 = nx.log(2.0)


def _tocomplex(arr):
    """Convert its input `arr` to a complex array.

    The input is returned as a complex array of the smallest type that will fit
    the original data: types like single, byte, short, etc. become csingle,
    while others become cdouble.

    A copy of the input is always made.

    Parameters
    ----------
    arr : array

    Returns
    -------
    array
        An array with the same input data as the input but in complex form.

    Examples
    --------
    >>> import numpy as np

    First, consider an input of type short:

    >>> a = np.array([1,2,3],np.short)

    >>> ac = np.lib.scimath._tocomplex(a); ac
    array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)

    >>> ac.dtype
    dtype('complex64')

    If the input is of type double, the output is correspondingly of the
    complex double type as well:

    >>> b = np.array([1,2,3],np.double)

    >>> bc = np.lib.scimath._tocomplex(b); bc
    array([1.+0.j, 2.+0.j, 3.+0.j])

    >>> bc.dtype
    dtype('complex128')

    Note that even if the input was complex to begin with, a copy is still
    made, since the astype() method always copies:

    >>> c = np.array([1,2,3],np.csingle)

    >>> cc = np.lib.scimath._tocomplex(c); cc
    array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)

    >>> c *= 2; c
    array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)

    >>> cc
    array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
    """
    if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
                                   nt.ushort, nt.csingle)):
        return arr.astype(nt.csingle)
    else:
        return arr.astype(nt.cdouble)


def _fix_real_lt_zero(x):
    """Convert `x` to complex if it has real, negative components.

    Otherwise, output is just the array version of the input (via asarray).

    Parameters
    ----------
    x : array_like

    Returns
    -------
    array

    Examples
    --------
    >>> import numpy as np
    >>> np.lib.scimath._fix_real_lt_zero([1,2])
    array([1, 2])

    >>> np.lib.scimath._fix_real_lt_zero([-1,2])
    array([-1.+0.j,  2.+0.j])

    """
    x = asarray(x)
    if any(isreal(x) & (x < 0)):
        x = _tocomplex(x)
    return x


def _fix_int_lt_zero(x):
    """Convert `x` to double if it has real, negative components.

    Otherwise, output is just the array version of the input (via asarray).

    Parameters
    ----------
    x : array_like

    Returns
    -------
    array

    Examples
    --------
    >>> import numpy as np
    >>> np.lib.scimath._fix_int_lt_zero([1,2])
    array([1, 2])

    >>> np.lib.scimath._fix_int_lt_zero([-1,2])
    array([-1.,  2.])
    """
    x = asarray(x)
    if any(isreal(x) & (x < 0)):
        x = x * 1.0
    return x


def _fix_real_abs_gt_1(x):
    """Convert `x` to complex if it has real components x_i with abs(x_i)>1.

    Otherwise, output is just the array version of the input (via asarray).

    Parameters
    ----------
    x : array_like

    Returns
    -------
    array

    Examples
    --------
    >>> import numpy as np
    >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
    array([0, 1])

    >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
    array([0.+0.j, 2.+0.j])
    """
    x = asarray(x)
    if any(isreal(x) & (abs(x) > 1)):
        x = _tocomplex(x)
    return x


def _unary_dispatcher(x):
    return (x,)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_unary_dispatcher)
def sqrt(x):
    """
    Compute the square root of x.

    For negative input elements, a complex value is returned
    (unlike `numpy.sqrt` which returns NaN).

    Parameters
    ----------
    x : array_like
        The input value(s).

    Returns
    -------
    out : ndarray or scalar
        The square root of `x`. If `x` was a scalar, so is `out`,
        otherwise an array is returned.

    See Also
    --------
    numpy.sqrt

    Examples
    --------
    For real, non-negative inputs this works just like `numpy.sqrt`:

    >>> import numpy as np

    >>> np.emath.sqrt(1)
    1.0
    >>> np.emath.sqrt([1, 4])
    array([1., 2.])

    But it automatically handles negative inputs:

    >>> np.emath.sqrt(-1)
    1j
    >>> np.emath.sqrt([-1,4])
    array([0.+1.j, 2.+0.j])

    Different results are expected because
    floating-point 0.0 and -0.0 are distinct.

    For more control, explicitly use complex() as follows:

    >>> np.emath.sqrt(complex(-4.0, 0.0))
    2j
    >>> np.emath.sqrt(complex(-4.0, -0.0))
    -2j
    """
    x = _fix_real_lt_zero(x)
    return nx.sqrt(x)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_unary_dispatcher)
def log(x):
    """
    Compute the natural logarithm of `x`.

    Return the "principal value" (for a description of this, see `numpy.log`)
    of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
    returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
    complex principal value is returned.

    Parameters
    ----------
    x : array_like
        The value(s) whose log is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log of the `x` value(s). If `x` was a scalar, so is `out`,
        otherwise an array is returned.

    See Also
    --------
    numpy.log

    Notes
    -----
    For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
    (note, however, that otherwise `numpy.log` and this `log` are identical,
    i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
    notably, the complex principal value if ``x.imag != 0``).

    Examples
    --------
    >>> import numpy as np
    >>> np.emath.log(np.exp(1))
    1.0

    Negative arguments are handled "correctly" (recall that
    ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):

    >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
    True

    """
    x = _fix_real_lt_zero(x)
    return nx.log(x)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_unary_dispatcher)
def log10(x):
    """
    Compute the logarithm base 10 of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
    is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
    returns ``inf``). Otherwise, the complex principal value is returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose log base 10 is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
        otherwise an array object is returned.

    See Also
    --------
    numpy.log10

    Notes
    -----
    For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
    (note, however, that otherwise `numpy.log10` and this `log10` are
    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
    and, notably, the complex principal value if ``x.imag != 0``).

    Examples
    --------
    >>> import numpy as np

    (We set the printing precision so the example can be auto-tested)

    >>> np.set_printoptions(precision=4)

    >>> np.emath.log10(10**1)
    1.0

    >>> np.emath.log10([-10**1, -10**2, 10**2])
    array([1.+1.3644j, 2.+1.3644j, 2.+0.j    ])

    """
    x = _fix_real_lt_zero(x)
    return nx.log10(x)


def _logn_dispatcher(n, x):
    return (n, x,)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_logn_dispatcher)
def logn(n, x):
    """
    Take log base n of x.

    If `x` contains negative inputs, the answer is computed and returned in the
    complex domain.

    Parameters
    ----------
    n : array_like
        The integer base(s) in which the log is taken.
    x : array_like
        The value(s) whose log base `n` is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base `n` of the `x` value(s). If `x` was a scalar, so is
        `out`, otherwise an array is returned.

    Examples
    --------
    >>> import numpy as np
    >>> np.set_printoptions(precision=4)

    >>> np.emath.logn(2, [4, 8])
    array([2., 3.])
    >>> np.emath.logn(2, [-4, -8, 8])
    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])

    """
    x = _fix_real_lt_zero(x)
    n = _fix_real_lt_zero(n)
    return nx.log(x) / nx.log(n)
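A tiny check of the identity used in the body above, log_n(x) = log(x)/log(n),
including the complex branch taken for negative input:

    import numpy as np

    print(np.emath.logn(2, 8))    # 3.0
    print(np.emath.logn(2, -8))   # (3+4.5324j): log(-8) is complex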


@set_module('numpy.lib.scimath')
@array_function_dispatch(_unary_dispatcher)
def log2(x):
    """
    Compute the logarithm base 2 of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
    a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
    ``inf``). Otherwise, the complex principal value is returned.

    Parameters
    ----------
    x : array_like
        The value(s) whose log base 2 is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
        otherwise an array is returned.

    See Also
    --------
    numpy.log2

    Notes
    -----
    For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
    (note, however, that otherwise `numpy.log2` and this `log2` are
    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
    and, notably, the complex principal value if ``x.imag != 0``).

    Examples
    --------

    We set the printing precision so the example can be auto-tested:

    >>> np.set_printoptions(precision=4)

    >>> np.emath.log2(8)
    3.0
    >>> np.emath.log2([-4, -8, 8])
    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])

    """
    x = _fix_real_lt_zero(x)
    return nx.log2(x)


def _power_dispatcher(x, p):
    return (x, p)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_power_dispatcher)
def power(x, p):
    """
    Return x to the power p, (x**p).

    If `x` contains negative values, the output is converted to the
    complex domain.

    Parameters
    ----------
    x : array_like
        The input value(s).
    p : array_like of ints
        The power(s) to which `x` is raised. If `x` contains multiple values,
        `p` has to either be a scalar, or contain the same number of values
        as `x`. In the latter case, the result is
        ``x[0]**p[0], x[1]**p[1], ...``.

    Returns
    -------
    out : ndarray or scalar
        The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
        otherwise an array is returned.

    See Also
    --------
    numpy.power

    Examples
    --------
    >>> import numpy as np
    >>> np.set_printoptions(precision=4)

    >>> np.emath.power(2, 2)
    4

    >>> np.emath.power([2, 4], 2)
    array([ 4, 16])

    >>> np.emath.power([2, 4], -2)
    array([0.25  , 0.0625])

    >>> np.emath.power([-2, 4], 2)
    array([ 4.-0.j, 16.+0.j])

    >>> np.emath.power([2, 4], [2, 4])
    array([  4, 256])

    """
    x = _fix_real_lt_zero(x)
    p = _fix_int_lt_zero(p)
    return nx.power(x, p)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_unary_dispatcher)
def arccos(x):
    """
    Compute the inverse cosine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
    `abs(x) <= 1`, this is a real number in the closed interval
    :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arccos is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
        is `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.arccos

    Notes
    -----
    For an arccos() that returns ``NAN`` when real `x` is not in the
    interval ``[-1,1]``, use `numpy.arccos`.

    Examples
    --------
    >>> import numpy as np
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arccos(1) # a scalar is returned
    0.0

    >>> np.emath.arccos([1,2])
    array([0.-0.j   , 0.-1.317j])

    """
    x = _fix_real_abs_gt_1(x)
    return nx.arccos(x)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_unary_dispatcher)
def arcsin(x):
    """
    Compute the inverse sine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
    `abs(x) <= 1`, this is a real number in the closed interval
    :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
    returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arcsin is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
        is `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.arcsin

    Notes
    -----
    For an arcsin() that returns ``NAN`` when real `x` is not in the
    interval ``[-1,1]``, use `numpy.arcsin`.

    Examples
    --------
    >>> import numpy as np
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arcsin(0)
    0.0

    >>> np.emath.arcsin([0,1])
    array([0.    , 1.5708])

    """
    x = _fix_real_abs_gt_1(x)
    return nx.arcsin(x)


@set_module('numpy.lib.scimath')
@array_function_dispatch(_unary_dispatcher)
def arctanh(x):
    """
    Compute the inverse hyperbolic tangent of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
    ``abs(x) < 1``, this is a real number. If ``abs(x) > 1``, or if `x` is
    complex, the result is complex. Finally, ``x = 1`` returns ``inf`` and
    ``x = -1`` returns ``-inf``.

    Parameters
    ----------
    x : array_like
        The value(s) whose arctanh is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
        a scalar so is `out`, otherwise an array is returned.

    See Also
    --------
    numpy.arctanh

    Notes
    -----
    For an arctanh() that returns ``NAN`` when real `x` is not in the
    interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
    return +/-inf for ``x = +/-1``).

    Examples
    --------
    >>> import numpy as np
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arctanh(0.5)
    0.5493061443340549

    >>> from numpy.testing import suppress_warnings
    >>> with suppress_warnings() as sup:
    ...     sup.filter(RuntimeWarning)
    ...     np.emath.arctanh(np.eye(2))
    array([[inf,  0.],
           [ 0., inf]])
    >>> np.emath.arctanh([1j])
    array([0.+0.7854j])

    """
    x = _fix_real_abs_gt_1(x)
    return nx.arctanh(x)
93
lib/python3.11/site-packages/numpy/lib/_scimath_impl.pyi
Normal file
@ -0,0 +1,93 @@
from typing import Any, overload

from numpy import complexfloating
from numpy._typing import (
    NDArray,
    _ArrayLikeComplex_co,
    _ArrayLikeFloat_co,
    _ComplexLike_co,
    _FloatLike_co,
)

__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"]

@overload
def sqrt(x: _FloatLike_co) -> Any: ...
@overload
def sqrt(x: _ComplexLike_co) -> complexfloating: ...
@overload
def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def log(x: _FloatLike_co) -> Any: ...
@overload
def log(x: _ComplexLike_co) -> complexfloating: ...
@overload
def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def log10(x: _FloatLike_co) -> Any: ...
@overload
def log10(x: _ComplexLike_co) -> complexfloating: ...
@overload
def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def log2(x: _FloatLike_co) -> Any: ...
@overload
def log2(x: _ComplexLike_co) -> complexfloating: ...
@overload
def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
@overload
def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating: ...
@overload
def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
@overload
def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating: ...
@overload
def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def arccos(x: _FloatLike_co) -> Any: ...
@overload
def arccos(x: _ComplexLike_co) -> complexfloating: ...
@overload
def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def arcsin(x: _FloatLike_co) -> Any: ...
@overload
def arcsin(x: _ComplexLike_co) -> complexfloating: ...
@overload
def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...

@overload
def arctanh(x: _FloatLike_co) -> Any: ...
@overload
def arctanh(x: _ComplexLike_co) -> complexfloating: ...
@overload
def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
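A sketch of the behavior these paired overloads describe: the scalar `Any`
return reflects that a real input may produce either a real or a complex
result depending on its sign, while array input maps to array output:

    import numpy as np

    print(np.emath.sqrt(4.0))          # 2.0, real result
    print(np.emath.sqrt(-4.0))         # 2j, complex result from the same call
    print(np.emath.sqrt([4.0, -1.0]))  # [2.+0.j 0.+1.j], array overload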
1301
lib/python3.11/site-packages/numpy/lib/_shape_base_impl.py
Normal file
File diff suppressed because it is too large
235
lib/python3.11/site-packages/numpy/lib/_shape_base_impl.pyi
Normal file
@ -0,0 +1,235 @@
from collections.abc import Callable, Sequence
from typing import (
    Any,
    Concatenate,
    ParamSpec,
    Protocol,
    SupportsIndex,
    TypeVar,
    overload,
    type_check_only,
)

from typing_extensions import deprecated

import numpy as np
from numpy import (
    _CastingKind,
    complexfloating,
    floating,
    generic,
    integer,
    object_,
    signedinteger,
    ufunc,
    unsignedinteger,
)
from numpy._typing import (
    ArrayLike,
    DTypeLike,
    NDArray,
    _ArrayLike,
    _ArrayLikeBool_co,
    _ArrayLikeComplex_co,
    _ArrayLikeFloat_co,
    _ArrayLikeInt_co,
    _ArrayLikeObject_co,
    _ArrayLikeUInt_co,
    _ShapeLike,
)

__all__ = [
    "column_stack",
    "row_stack",
    "dstack",
    "array_split",
    "split",
    "hsplit",
    "vsplit",
    "dsplit",
    "apply_over_axes",
    "expand_dims",
    "apply_along_axis",
    "kron",
    "tile",
    "take_along_axis",
    "put_along_axis",
]

_P = ParamSpec("_P")
_ScalarT = TypeVar("_ScalarT", bound=generic)

# Signature of `__array_wrap__`
@type_check_only
class _ArrayWrap(Protocol):
    def __call__(
        self,
        array: NDArray[Any],
        context: tuple[ufunc, tuple[Any, ...], int] | None = ...,
        return_scalar: bool = ...,
        /,
    ) -> Any: ...

@type_check_only
class _SupportsArrayWrap(Protocol):
    @property
    def __array_wrap__(self) -> _ArrayWrap: ...

###

def take_along_axis(
    arr: _ScalarT | NDArray[_ScalarT],
    indices: NDArray[integer],
    axis: int | None = ...,
) -> NDArray[_ScalarT]: ...

def put_along_axis(
    arr: NDArray[_ScalarT],
    indices: NDArray[integer],
    values: ArrayLike,
    axis: int | None,
) -> None: ...

@overload
def apply_along_axis(
    func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]],
    axis: SupportsIndex,
    arr: ArrayLike,
    *args: _P.args,
    **kwargs: _P.kwargs,
) -> NDArray[_ScalarT]: ...
@overload
def apply_along_axis(
    func1d: Callable[Concatenate[NDArray[Any], _P], Any],
    axis: SupportsIndex,
    arr: ArrayLike,
    *args: _P.args,
    **kwargs: _P.kwargs,
) -> NDArray[Any]: ...
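A sketch of what `Concatenate[NDArray[Any], _P]` in the overloads above buys:
extra arguments to `apply_along_axis` are checked against the callback's own
signature. The helper below is hypothetical:

    import numpy as np

    def clipped_sum(row: np.ndarray, lo: float, hi: float) -> np.float64:
        # `row` is each 1-D slice; `lo`/`hi` arrive via *args
        return np.clip(row, lo, hi).sum()

    m = np.arange(6.0).reshape(2, 3)
    print(np.apply_along_axis(clipped_sum, 1, m, 0.0, 3.0))  # [3. 9.]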

def apply_over_axes(
    func: Callable[[NDArray[Any], int], NDArray[_ScalarT]],
    a: ArrayLike,
    axes: int | Sequence[int],
) -> NDArray[_ScalarT]: ...

@overload
def expand_dims(
    a: _ArrayLike[_ScalarT],
    axis: _ShapeLike,
) -> NDArray[_ScalarT]: ...
@overload
def expand_dims(
    a: ArrayLike,
    axis: _ShapeLike,
) -> NDArray[Any]: ...

# Deprecated in NumPy 2.0, 2023-08-18
@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.")
def row_stack(
    tup: Sequence[ArrayLike],
    *,
    dtype: DTypeLike | None = None,
    casting: _CastingKind = "same_kind",
) -> NDArray[Any]: ...

#
@overload
def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ...
@overload
def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...

@overload
def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ...
@overload
def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...

@overload
def array_split(
    ary: _ArrayLike[_ScalarT],
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[_ScalarT]]: ...
@overload
def array_split(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[Any]]: ...

@overload
def split(
    ary: _ArrayLike[_ScalarT],
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[_ScalarT]]: ...
@overload
def split(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[Any]]: ...

@overload
def hsplit(
    ary: _ArrayLike[_ScalarT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_ScalarT]]: ...
@overload
def hsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...

@overload
def vsplit(
    ary: _ArrayLike[_ScalarT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_ScalarT]]: ...
@overload
def vsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...

@overload
def dsplit(
    ary: _ArrayLike[_ScalarT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_ScalarT]]: ...
@overload
def dsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...

@overload
def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
@overload
def get_array_wrap(*args: object) -> _ArrayWrap | None: ...

@overload
def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
@overload
def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
@overload
def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...

@overload
def tile(
    A: _ArrayLike[_ScalarT],
    reps: int | Sequence[int],
) -> NDArray[_ScalarT]: ...
@overload
def tile(
    A: ArrayLike,
    reps: int | Sequence[int],
) -> NDArray[Any]: ...
549
lib/python3.11/site-packages/numpy/lib/_stride_tricks_impl.py
Normal file
@ -0,0 +1,549 @@
"""
Utilities that manipulate strides to achieve desirable effects.

An explanation of strides can be found in :ref:`arrays.ndarray`.

"""
import numpy as np
from numpy._core.numeric import normalize_axis_tuple
from numpy._core.overrides import array_function_dispatch, set_module

__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']


class DummyArray:
    """Dummy object that just exists to hang __array_interface__ dictionaries
    and possibly keep alive a reference to a base array.
    """

    def __init__(self, interface, base=None):
        self.__array_interface__ = interface
        self.base = base


def _maybe_view_as_subclass(original_array, new_array):
    if type(original_array) is not type(new_array):
        # if input was an ndarray subclass and subclasses were OK,
        # then view the result as that subclass.
        new_array = new_array.view(type=type(original_array))
        # Since we have done something akin to a view from original_array, we
        # should let the subclass finalize (if it has it implemented, i.e., is
        # not None).
        if new_array.__array_finalize__:
            new_array.__array_finalize__(original_array)
    return new_array


@set_module("numpy.lib.stride_tricks")
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
    """
    Create a view into the array with the given shape and strides.

    .. warning:: This function has to be used with extreme care, see notes.

    Parameters
    ----------
    x : ndarray
        Array from which to create the new view.
    shape : sequence of int, optional
        The shape of the new array. Defaults to ``x.shape``.
    strides : sequence of int, optional
        The strides of the new array. Defaults to ``x.strides``.
    subok : bool, optional
        If True, subclasses are preserved.
    writeable : bool, optional
        If set to False, the returned array will always be readonly.
        Otherwise it will be writable if the original array was. It
        is advisable to set this to False if possible (see Notes).

    Returns
    -------
    view : ndarray

    See also
    --------
    broadcast_to : broadcast an array to a given shape.
    reshape : reshape an array.
    lib.stride_tricks.sliding_window_view :
        user-friendly and safe function for creating sliding window views.

    Notes
    -----
    ``as_strided`` creates a view into the array given the exact strides
    and shape. This means it manipulates the internal data structure of
    ndarray and, if done incorrectly, the array elements can point to
    invalid memory and can corrupt results or crash your program.
    It is advisable to always use the original ``x.strides`` when
    calculating new strides to avoid reliance on a contiguous memory
    layout.

    Furthermore, arrays created with this function often contain
    self-overlapping memory, so that two elements are identical.
    Vectorized write operations on such arrays will typically be
    unpredictable. They may even give different results for small, large,
    or transposed arrays.

    Since writing to these arrays has to be tested and done with great
    care, you may want to use ``writeable=False`` to avoid accidental write
    operations.

    For these reasons it is advisable to avoid ``as_strided`` when
    possible.
    """
    # first convert input to array, possibly keeping subclass
    x = np.array(x, copy=None, subok=subok)
    interface = dict(x.__array_interface__)
    if shape is not None:
        interface['shape'] = tuple(shape)
    if strides is not None:
        interface['strides'] = tuple(strides)

    array = np.asarray(DummyArray(interface, base=x))
    # The route via `__interface__` does not preserve structured
    # dtypes. Since dtype should remain unchanged, we set it explicitly.
    array.dtype = x.dtype

    view = _maybe_view_as_subclass(x, array)

    if view.flags.writeable and not writeable:
        view.flags.writeable = False

    return view
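A hedged sketch of the safe pattern the Notes above recommend: derive the new
strides from `x.strides` and pass `writeable=False`. This hand-builds the same
overlapping-window layout that `sliding_window_view` below automates:

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    x = np.arange(6)
    win = 3
    view = as_strided(x, shape=(len(x) - win + 1, win),
                      strides=(x.strides[0], x.strides[0]),
                      writeable=False)   # read-only: rows share memory
    print(view)   # [[0 1 2] [1 2 3] [2 3 4] [3 4 5]]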
|
||||
|
||||
def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
|
||||
subok=None, writeable=None):
|
||||
return (x,)
|
||||
|
||||
|
||||
@array_function_dispatch(
|
||||
_sliding_window_view_dispatcher, module="numpy.lib.stride_tricks"
|
||||
)
|
||||
def sliding_window_view(x, window_shape, axis=None, *,
|
||||
subok=False, writeable=False):
|
||||
"""
|
||||
Create a sliding window view into the array with the given window shape.
|
||||
|
||||
Also known as rolling or moving window, the window slides across all
|
||||
dimensions of the array and extracts subsets of the array at all window
|
||||
positions.
|
||||
|
||||
.. versionadded:: 1.20.0
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
Array to create the sliding window view from.
|
||||
window_shape : int or tuple of int
|
||||
Size of window over each axis that takes part in the sliding window.
|
||||
If `axis` is not present, must have same length as the number of input
|
||||
array dimensions. Single integers `i` are treated as if they were the
|
||||
tuple `(i,)`.
|
||||
axis : int or tuple of int, optional
|
||||
Axis or axes along which the sliding window is applied.
|
||||
By default, the sliding window is applied to all axes and
|
||||
`window_shape[i]` will refer to axis `i` of `x`.
|
||||
If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
|
||||
the axis `axis[i]` of `x`.
|
||||
Single integers `i` are treated as if they were the tuple `(i,)`.
|
||||
subok : bool, optional
|
||||
        If True, sub-classes will be passed-through, otherwise the returned
        array will be forced to be a base-class array (default).
    writeable : bool, optional
        When true, allow writing to the returned view. The default is false,
        as this should be used with caution: the returned view contains the
        same memory location multiple times, so writing to one location will
        cause others to change.

    Returns
    -------
    view : ndarray
        Sliding window view of the array. The sliding window dimensions are
        inserted at the end, and the original dimensions are trimmed as
        required by the size of the sliding window.
        That is, ``view.shape = x_shape_trimmed + window_shape``, where
        ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
        than the corresponding window size.

    See Also
    --------
    lib.stride_tricks.as_strided: A lower-level and less safe routine for
        creating arbitrary views from custom shape and strides.
    broadcast_to: broadcast an array to a given shape.

    Notes
    -----
    For many applications using a sliding window view can be convenient, but
    potentially very slow. Often specialized solutions exist, for example:

    - `scipy.signal.fftconvolve`

    - filtering functions in `scipy.ndimage`

    - moving window functions provided by
      `bottleneck <https://github.com/pydata/bottleneck>`_.

    As a rough estimate, a sliding window approach with an input size of `N`
    and a window size of `W` will scale as `O(N*W)`, where frequently a
    specialized algorithm can achieve `O(N)`. That means that the sliding
    window variant for a window size of 100 can be about 100 times slower
    than a more specialized version.

    Nevertheless, for small window sizes, when no custom algorithm exists,
    or as a prototyping and developing tool, this function can be a good
    solution.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy.lib.stride_tricks import sliding_window_view
    >>> x = np.arange(6)
    >>> x.shape
    (6,)
    >>> v = sliding_window_view(x, 3)
    >>> v.shape
    (4, 3)
    >>> v
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4],
           [3, 4, 5]])

    This also works in more dimensions, e.g.

    >>> i, j = np.ogrid[:3, :4]
    >>> x = 10*i + j
    >>> x.shape
    (3, 4)
    >>> x
    array([[ 0,  1,  2,  3],
           [10, 11, 12, 13],
           [20, 21, 22, 23]])
    >>> shape = (2,2)
    >>> v = sliding_window_view(x, shape)
    >>> v.shape
    (2, 3, 2, 2)
    >>> v
    array([[[[ 0,  1],
             [10, 11]],
            [[ 1,  2],
             [11, 12]],
            [[ 2,  3],
             [12, 13]]],
           [[[10, 11],
             [20, 21]],
            [[11, 12],
             [21, 22]],
            [[12, 13],
             [22, 23]]]])

    The axis can be specified explicitly:

    >>> v = sliding_window_view(x, 3, 0)
    >>> v.shape
    (1, 4, 3)
    >>> v
    array([[[ 0, 10, 20],
            [ 1, 11, 21],
            [ 2, 12, 22],
            [ 3, 13, 23]]])

    The same axis can be used several times. In that case, every use reduces
    the corresponding original dimension:

    >>> v = sliding_window_view(x, (2, 3), (1, 1))
    >>> v.shape
    (3, 1, 2, 3)
    >>> v
    array([[[[ 0,  1,  2],
             [ 1,  2,  3]]],
           [[[10, 11, 12],
             [11, 12, 13]]],
           [[[20, 21, 22],
             [21, 22, 23]]]])

    Combining with stepped slicing (`::step`), this can be used to take
    sliding views which skip elements:

    >>> x = np.arange(7)
    >>> sliding_window_view(x, 5)[:, ::2]
    array([[0, 2, 4],
           [1, 3, 5],
           [2, 4, 6]])

    or views which move by multiple elements:

    >>> x = np.arange(7)
    >>> sliding_window_view(x, 3)[::2, :]
    array([[0, 1, 2],
           [2, 3, 4],
           [4, 5, 6]])

    A common application of `sliding_window_view` is the calculation of
    running statistics. The simplest example is the
    `moving average <https://en.wikipedia.org/wiki/Moving_average>`_:

    >>> x = np.arange(6)
    >>> x.shape
    (6,)
    >>> v = sliding_window_view(x, 3)
    >>> v.shape
    (4, 3)
    >>> v
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4],
           [3, 4, 5]])
    >>> moving_average = v.mean(axis=-1)
    >>> moving_average
    array([1., 2., 3., 4.])

    Note that a sliding window approach is often **not** optimal (see Notes).
    """
    window_shape = (tuple(window_shape)
                    if np.iterable(window_shape)
                    else (window_shape,))
    # first convert input to array, possibly keeping subclass
    x = np.array(x, copy=None, subok=subok)

    window_shape_array = np.array(window_shape)
    if np.any(window_shape_array < 0):
        raise ValueError('`window_shape` cannot contain negative values')

    if axis is None:
        axis = tuple(range(x.ndim))
        if len(window_shape) != len(axis):
            raise ValueError(f'Since axis is `None`, must provide '
                             f'window_shape for all dimensions of `x`; '
                             f'got {len(window_shape)} window_shape elements '
                             f'and `x.ndim` is {x.ndim}.')
    else:
        axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
        if len(window_shape) != len(axis):
            raise ValueError(f'Must provide matching length window_shape and '
                             f'axis; got {len(window_shape)} window_shape '
                             f'elements and {len(axis)} axes elements.')

    out_strides = x.strides + tuple(x.strides[ax] for ax in axis)

    # note: same axis can be windowed repeatedly
    x_shape_trimmed = list(x.shape)
    for ax, dim in zip(axis, window_shape):
        if x_shape_trimmed[ax] < dim:
            raise ValueError(
                'window shape cannot be larger than input array shape')
        x_shape_trimmed[ax] -= dim - 1
    out_shape = tuple(x_shape_trimmed) + window_shape
    return as_strided(x, strides=out_strides, shape=out_shape,
                      subok=subok, writeable=writeable)


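# Illustrative sketch, not part of the upstream module: the docstring's
# O(N*W) note can be checked by comparing the windowed mean against an
# O(N) cumulative-sum moving average. The helper name is hypothetical.
def _demo_moving_average_equivalence():
    x = np.arange(1000, dtype=np.float64)
    w = 100
    # O(N*W): reduce over the trailing window axis of the view.
    slow = sliding_window_view(x, w).mean(axis=-1)
    # O(N): the same result from a running cumulative sum.
    c = np.cumsum(np.concatenate(([0.0], x)))
    fast = (c[w:] - c[:-w]) / w
    assert np.allclose(slow, fast)

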
def _broadcast_to(array, shape, subok, readonly):
    shape = tuple(shape) if np.iterable(shape) else (shape,)
    array = np.array(array, copy=None, subok=subok)
    if not shape and array.shape:
        raise ValueError('cannot broadcast a non-scalar to a scalar array')
    if any(size < 0 for size in shape):
        raise ValueError('all elements of broadcast shape must be non-'
                         'negative')
    extras = []
    it = np.nditer(
        (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
        op_flags=['readonly'], itershape=shape, order='C')
    with it:
        # never really has writebackifcopy semantics
        broadcast = it.itviews[0]
    result = _maybe_view_as_subclass(array, broadcast)
    # In a future version this will go away
    if not readonly and array.flags._writeable_no_warn:
        result.flags.writeable = True
        result.flags._warn_on_write = True
    return result


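# Illustrative sketch, not upstream: the nditer-based path above yields a
# zero-copy view, read-only by default, with stride 0 on broadcast axes.
# The helper name is hypothetical.
def _demo_broadcast_view_properties():
    x = np.arange(3)
    b = np.broadcast_to(x, (4, 3))
    assert np.shares_memory(b, x)            # a view, no new element storage
    assert not b.flags.writeable             # read-only by default
    assert b.strides == (0, x.strides[0])    # rows repeat via a 0 stride

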
def _broadcast_to_dispatcher(array, shape, subok=None):
    return (array,)


@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
    """Broadcast an array to a new shape.

    Parameters
    ----------
    array : array_like
        The array to broadcast.
    shape : tuple or int
        The shape of the desired array. A single integer ``i`` is interpreted
        as ``(i,)``.
    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned array will be forced to be a base-class array (default).

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location.

    Raises
    ------
    ValueError
        If the array is not compatible with the new shape according to NumPy's
        broadcasting rules.

    See Also
    --------
    broadcast
    broadcast_arrays
    broadcast_shapes

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([1, 2, 3])
    >>> np.broadcast_to(x, (3, 3))
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    """
    return _broadcast_to(array, shape, subok=subok, readonly=True)


def _broadcast_shape(*args):
    """Returns the shape of the arrays that would result from broadcasting the
    supplied arrays against each other.
    """
    # use the old-iterator because np.nditer does not handle size 0 arrays
    # consistently
    b = np.broadcast(*args[:32])
    # unfortunately, it cannot handle 32 or more arguments directly
    for pos in range(32, len(args), 31):
        # ironically, np.broadcast does not properly handle np.broadcast
        # objects (it treats them as scalars)
        # use broadcasting to avoid allocating the full array
        b = broadcast_to(0, b.shape)
        b = np.broadcast(b, *args[pos:(pos + 31)])
    return b.shape


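# Illustrative sketch, not upstream: np.broadcast accepts at most 32
# operands, which is why _broadcast_shape above folds extra arguments in
# groups of 31 around a zero-stride placeholder. The helper name is
# hypothetical.
def _demo_broadcast_shape_many_args():
    shapes = [(1,)] * 40 + [(5,)]
    # size-0 structured dtype: the probe arrays carry no element data
    arrays = [np.empty(s, dtype=np.dtype([])) for s in shapes]
    assert _broadcast_shape(*arrays) == (5,)

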
_size0_dtype = np.dtype([])


@set_module('numpy')
def broadcast_shapes(*args):
    """
    Broadcast the input shapes into a single shape.

    :ref:`Learn more about broadcasting here <basics.broadcasting>`.

    .. versionadded:: 1.20.0

    Parameters
    ----------
    *args : tuples of ints, or ints
        The shapes to be broadcast against each other.

    Returns
    -------
    tuple
        Broadcasted shape.

    Raises
    ------
    ValueError
        If the shapes are not compatible and cannot be broadcast according
        to NumPy's broadcasting rules.

    See Also
    --------
    broadcast
    broadcast_arrays
    broadcast_to

    Examples
    --------
    >>> import numpy as np
    >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
    (3, 2)

    >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
    (5, 6, 7)
    """
    arrays = [np.empty(x, dtype=_size0_dtype) for x in args]
    return _broadcast_shape(*arrays)


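# Illustrative sketch, not upstream: because `_size0_dtype` has itemsize 0,
# the probe arrays built by broadcast_shapes cost no element storage even
# for huge shapes. The helper name is hypothetical.
def _demo_size0_probe():
    probe = np.empty((10**6, 10**6), dtype=_size0_dtype)
    assert probe.nbytes == 0
    assert broadcast_shapes((10**6, 1), (1, 10**6)) == (10**6, 10**6)

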
def _broadcast_arrays_dispatcher(*args, subok=None):
    return args


@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, subok=False):
    """
    Broadcast any number of arrays against each other.

    Parameters
    ----------
    *args : array_likes
        The arrays to broadcast.

    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned arrays will be forced to be a base-class array (default).

    Returns
    -------
    broadcasted : tuple of arrays
        These arrays are views on the original arrays. They are typically
        not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location. If you need
        to write to the arrays, make copies first. While you can set the
        ``writeable`` flag True, writing to a single output value may end up
        changing more than one location in the output array.

        .. deprecated:: 1.17
            The output is currently marked so that if written to, a
            deprecation warning will be emitted. A future version will set
            the ``writeable`` flag False so writing to it will raise an
            error.

    See Also
    --------
    broadcast
    broadcast_to
    broadcast_shapes

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([[1,2,3]])
    >>> y = np.array([[4],[5]])
    >>> np.broadcast_arrays(x, y)
    (array([[1, 2, 3],
            [1, 2, 3]]),
     array([[4, 4, 4],
            [5, 5, 5]]))

    Here is a useful idiom for getting contiguous copies instead of
    non-contiguous views.

    >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
    [array([[1, 2, 3],
            [1, 2, 3]]),
     array([[4, 4, 4],
            [5, 5, 5]])]

    """
    # nditer is not used here to avoid the limit of 32 arrays.
    # Otherwise, something like the following one-liner would suffice:
    # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
    #                  order='C').itviews

    args = [np.array(_m, copy=None, subok=subok) for _m in args]

    shape = _broadcast_shape(*args)

    result = [array if array.shape == shape
              else _broadcast_to(array, shape, subok=subok, readonly=False)
              for array in args]
    return tuple(result)
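

# Illustrative sketch, not upstream: outputs of broadcast_arrays may alias
# each other, so write through copies as the docstring advises. The helper
# name is hypothetical.
def _demo_broadcast_arrays_copies():
    x = np.array([[1, 2, 3]])
    y = np.array([[4], [5]])
    bx, by = (np.array(a) for a in broadcast_arrays(x, y))
    bx[0, 0] = 99          # safe: bx is a contiguous copy
    assert x[0, 0] == 1    # the original stays untouched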
@ -0,0 +1,74 @@
from collections.abc import Iterable
from typing import Any, SupportsIndex, TypeVar, overload

from numpy import generic
from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike

__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"]

_ScalarT = TypeVar("_ScalarT", bound=generic)

class DummyArray:
    __array_interface__: dict[str, Any]
    base: NDArray[Any] | None
    def __init__(
        self,
        interface: dict[str, Any],
        base: NDArray[Any] | None = ...,
    ) -> None: ...

@overload
def as_strided(
    x: _ArrayLike[_ScalarT],
    shape: Iterable[int] | None = ...,
    strides: Iterable[int] | None = ...,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[_ScalarT]: ...
@overload
def as_strided(
    x: ArrayLike,
    shape: Iterable[int] | None = ...,
    strides: Iterable[int] | None = ...,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[Any]: ...

@overload
def sliding_window_view(
    x: _ArrayLike[_ScalarT],
    window_shape: int | Iterable[int],
    axis: SupportsIndex | None = ...,
    *,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[_ScalarT]: ...
@overload
def sliding_window_view(
    x: ArrayLike,
    window_shape: int | Iterable[int],
    axis: SupportsIndex | None = ...,
    *,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[Any]: ...

@overload
def broadcast_to(
    array: _ArrayLike[_ScalarT],
    shape: int | Iterable[int],
    subok: bool = ...,
) -> NDArray[_ScalarT]: ...
@overload
def broadcast_to(
    array: ArrayLike,
    shape: int | Iterable[int],
    subok: bool = ...,
) -> NDArray[Any]: ...

def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ...

def broadcast_arrays(
    *args: ArrayLike,
    subok: bool = ...,
) -> tuple[NDArray[Any], ...]: ...
1201
lib/python3.11/site-packages/numpy/lib/_twodim_base_impl.py
Normal file
1201
lib/python3.11/site-packages/numpy/lib/_twodim_base_impl.py
Normal file
File diff suppressed because it is too large
438
lib/python3.11/site-packages/numpy/lib/_twodim_base_impl.pyi
Normal file
438
lib/python3.11/site-packages/numpy/lib/_twodim_base_impl.pyi
Normal file
@ -0,0 +1,438 @@
from collections.abc import Callable, Sequence
from typing import (
    Any,
    TypeAlias,
    TypeVar,
    overload,
)
from typing import (
    Literal as L,
)

import numpy as np
from numpy import (
    _OrderCF,
    complex128,
    complexfloating,
    datetime64,
    float64,
    floating,
    generic,
    int_,
    intp,
    object_,
    signedinteger,
    timedelta64,
)
from numpy._typing import (
    ArrayLike,
    DTypeLike,
    NDArray,
    _ArrayLike,
    _ArrayLikeComplex_co,
    _ArrayLikeFloat_co,
    _ArrayLikeInt_co,
    _ArrayLikeObject_co,
    _DTypeLike,
    _SupportsArray,
    _SupportsArrayFunc,
)

__all__ = [
    "diag",
    "diagflat",
    "eye",
    "fliplr",
    "flipud",
    "tri",
    "triu",
    "tril",
    "vander",
    "histogram2d",
    "mask_indices",
    "tril_indices",
    "tril_indices_from",
    "triu_indices",
    "triu_indices_from",
]

###

_T = TypeVar("_T")
_ScalarT = TypeVar("_ScalarT", bound=generic)
_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating)
_InexactT = TypeVar("_InexactT", bound=np.inexact)
_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co)

# The returned array's dtype must be compatible with `np.equal`
_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]]

_Int_co: TypeAlias = np.integer | np.bool
_Float_co: TypeAlias = np.floating | _Int_co
_Number_co: TypeAlias = np.number | np.bool

_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT]
_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co]
_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co]
_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co]
_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co]

###

@overload
def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
@overload
def fliplr(m: ArrayLike) -> NDArray[Any]: ...

@overload
def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
@overload
def flipud(m: ArrayLike) -> NDArray[Any]: ...

@overload
def eye(
    N: int,
    M: int | None = ...,
    k: int = ...,
    dtype: None = ...,
    order: _OrderCF = ...,
    *,
    device: L["cpu"] | None = ...,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[float64]: ...
@overload
def eye(
    N: int,
    M: int | None,
    k: int,
    dtype: _DTypeLike[_ScalarT],
    order: _OrderCF = ...,
    *,
    device: L["cpu"] | None = ...,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def eye(
    N: int,
    M: int | None = ...,
    k: int = ...,
    *,
    dtype: _DTypeLike[_ScalarT],
    order: _OrderCF = ...,
    device: L["cpu"] | None = ...,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[_ScalarT]: ...
@overload
def eye(
    N: int,
    M: int | None = ...,
    k: int = ...,
    dtype: DTypeLike = ...,
    order: _OrderCF = ...,
    *,
    device: L["cpu"] | None = ...,
    like: _SupportsArrayFunc | None = ...,
) -> NDArray[Any]: ...

@overload
def diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ...
@overload
def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...

@overload
def diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ...
@overload
def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...

@overload
def tri(
    N: int,
    M: int | None = ...,
    k: int = ...,
    dtype: None = ...,
    *,
    like: _SupportsArrayFunc | None = ...
) -> NDArray[float64]: ...
@overload
def tri(
    N: int,
    M: int | None,
    k: int,
    dtype: _DTypeLike[_ScalarT],
    *,
    like: _SupportsArrayFunc | None = ...
) -> NDArray[_ScalarT]: ...
@overload
def tri(
    N: int,
    M: int | None = ...,
    k: int = ...,
    *,
    dtype: _DTypeLike[_ScalarT],
    like: _SupportsArrayFunc | None = ...
) -> NDArray[_ScalarT]: ...
@overload
def tri(
    N: int,
    M: int | None = ...,
    k: int = ...,
    dtype: DTypeLike = ...,
    *,
    like: _SupportsArrayFunc | None = ...
) -> NDArray[Any]: ...

@overload
def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ...
@overload
def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ...

@overload
def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ...
@overload
def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ...

@overload
def vander(  # type: ignore[misc]
    x: _ArrayLikeInt_co,
    N: int | None = ...,
    increasing: bool = ...,
) -> NDArray[signedinteger]: ...
@overload
def vander(  # type: ignore[misc]
    x: _ArrayLikeFloat_co,
    N: int | None = ...,
    increasing: bool = ...,
) -> NDArray[floating]: ...
@overload
def vander(
    x: _ArrayLikeComplex_co,
    N: int | None = ...,
    increasing: bool = ...,
) -> NDArray[complexfloating]: ...
@overload
def vander(
    x: _ArrayLikeObject_co,
    N: int | None = ...,
    increasing: bool = ...,
) -> NDArray[object_]: ...

@overload
def histogram2d(
    x: _ArrayLike1D[_ComplexFloatingT],
    y: _ArrayLike1D[_ComplexFloatingT | _Float_co],
    bins: int | Sequence[int] = ...,
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_ComplexFloatingT],
    NDArray[_ComplexFloatingT],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_ComplexFloatingT | _Float_co],
    y: _ArrayLike1D[_ComplexFloatingT],
    bins: int | Sequence[int] = ...,
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_ComplexFloatingT],
    NDArray[_ComplexFloatingT],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_InexactT],
    y: _ArrayLike1D[_InexactT | _Int_co],
    bins: int | Sequence[int] = ...,
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_InexactT],
    NDArray[_InexactT],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_InexactT | _Int_co],
    y: _ArrayLike1D[_InexactT],
    bins: int | Sequence[int] = ...,
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_InexactT],
    NDArray[_InexactT],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DInt_co | Sequence[float],
    y: _ArrayLike1DInt_co | Sequence[float],
    bins: int | Sequence[int] = ...,
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[float64],
    NDArray[float64],
]: ...
@overload
def histogram2d(
    x: Sequence[complex],
    y: Sequence[complex],
    bins: int | Sequence[int] = ...,
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[complex128 | float64],
    NDArray[complex128 | float64],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_NumberCoT],
    NDArray[_NumberCoT],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_InexactT],
    y: _ArrayLike1D[_InexactT],
    bins: Sequence[_ArrayLike1D[_NumberCoT] | int],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_NumberCoT | _InexactT],
    NDArray[_NumberCoT | _InexactT],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DInt_co | Sequence[float],
    y: _ArrayLike1DInt_co | Sequence[float],
    bins: Sequence[_ArrayLike1D[_NumberCoT] | int],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_NumberCoT | float64],
    NDArray[_NumberCoT | float64],
]: ...
@overload
def histogram2d(
    x: Sequence[complex],
    y: Sequence[complex],
    bins: Sequence[_ArrayLike1D[_NumberCoT] | int],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_NumberCoT | complex128 | float64],
    NDArray[_NumberCoT | complex128 | float64],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[bool]],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.bool],
    NDArray[np.bool],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[int]],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.int_ | np.bool],
    NDArray[np.int_ | np.bool],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[float]],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.float64 | np.int_ | np.bool],
    NDArray[np.float64 | np.int_ | np.bool],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[complex]],
    range: _ArrayLike2DFloat_co | None = ...,
    density: bool | None = ...,
    weights: _ArrayLike1DFloat_co | None = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
    NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
]: ...

# NOTE: we're assuming/demanding here the `mask_func` returns
# an ndarray of shape `(n, n)`; otherwise there is the possibility
# of the output tuple having more or less than 2 elements
@overload
def mask_indices(
    n: int,
    mask_func: _MaskFunc[int],
    k: int = ...,
) -> tuple[NDArray[intp], NDArray[intp]]: ...
@overload
def mask_indices(
    n: int,
    mask_func: _MaskFunc[_T],
    k: _T,
) -> tuple[NDArray[intp], NDArray[intp]]: ...

def tril_indices(
    n: int,
    k: int = ...,
    m: int | None = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...

def tril_indices_from(
    arr: NDArray[Any],
    k: int = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...

def triu_indices(
    n: int,
    k: int = ...,
    m: int | None = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...

def triu_indices_from(
    arr: NDArray[Any],
    k: int = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...
699
lib/python3.11/site-packages/numpy/lib/_type_check_impl.py
Normal file
699
lib/python3.11/site-packages/numpy/lib/_type_check_impl.py
Normal file
@ -0,0 +1,699 @@
"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py

"""
import functools

__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
           'isreal', 'nan_to_num', 'real', 'real_if_close',
           'typename', 'mintypecode',
           'common_type']

import numpy._core.numeric as _nx
from numpy._core import getlimits, overrides
from numpy._core.numeric import asanyarray, asarray, isnan, zeros
from numpy._utils import set_module

from ._ufunclike_impl import isneginf, isposinf

array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'


@set_module('numpy')
def mintypecode(typechars, typeset='GDFgdf', default='d'):
    """
    Return the character for the minimum-size type to which given types can
    be safely cast.

    The returned type character must represent the smallest size dtype such
    that an array of the returned type can handle the data from an array of
    all types in `typechars` (or if `typechars` is an array, then its
    dtype.char).

    Parameters
    ----------
    typechars : list of str or array_like
        If a list of strings, each string should represent a dtype.
        If array_like, the character representation of the array dtype is used.
    typeset : str or list of str, optional
        The set of characters that the returned character is chosen from.
        The default set is 'GDFgdf'.
    default : str, optional
        The default character; this is returned if none of the characters in
        `typechars` matches a character in `typeset`.

    Returns
    -------
    typechar : str
        The character representing the minimum-size type that was found.

    See Also
    --------
    dtype

    Examples
    --------
    >>> import numpy as np
    >>> np.mintypecode(['d', 'f', 'S'])
    'd'
    >>> x = np.array([1.1, 2-3.j])
    >>> np.mintypecode(x)
    'D'

    >>> np.mintypecode('abceh', default='G')
    'G'

    """
    typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
                 for t in typechars)
    intersection = {t for t in typecodes if t in typeset}
    if not intersection:
        return default
    if 'F' in intersection and 'd' in intersection:
        return 'D'
    return min(intersection, key=_typecodes_by_elsize.index)


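# Illustrative sketch, not upstream: the special case above promotes the
# pair {single complex 'F', double 'd'} to double complex 'D', since
# neither code alone can hold the other without losing precision. The
# helper name is hypothetical.
def _demo_mintypecode_promotion():
    assert mintypecode(['F', 'd']) == 'D'   # neither input suffices alone
    assert mintypecode(['f']) == 'f'        # already the minimal match
    assert mintypecode(['S']) == 'd'        # nothing in typeset -> default

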
def _real_dispatcher(val):
    return (val,)


@array_function_dispatch(_real_dispatcher)
def real(val):
    """
    Return the real part of the complex argument.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray or scalar
        The real component of the complex argument. If `val` is real, the type
        of `val` is used for the output. If `val` has complex elements, the
        returned type is float.

    See Also
    --------
    real_if_close, imag, angle

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([1+2j, 3+4j, 5+6j])
    >>> a.real
    array([1., 3., 5.])
    >>> a.real = 9
    >>> a
    array([9.+2.j, 9.+4.j, 9.+6.j])
    >>> a.real = np.array([9, 8, 7])
    >>> a
    array([9.+2.j, 8.+4.j, 7.+6.j])
    >>> np.real(1 + 1j)
    1.0

    """
    try:
        return val.real
    except AttributeError:
        return asanyarray(val).real


def _imag_dispatcher(val):
    return (val,)


@array_function_dispatch(_imag_dispatcher)
def imag(val):
    """
    Return the imaginary part of the complex argument.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray or scalar
        The imaginary component of the complex argument. If `val` is real,
        the type of `val` is used for the output. If `val` has complex
        elements, the returned type is float.

    See Also
    --------
    real, angle, real_if_close

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([1+2j, 3+4j, 5+6j])
    >>> a.imag
    array([2., 4., 6.])
    >>> a.imag = np.array([8, 10, 12])
    >>> a
    array([1. +8.j, 3.+10.j, 5.+12.j])
    >>> np.imag(1 + 1j)
    1.0

    """
    try:
        return val.imag
    except AttributeError:
        return asanyarray(val).imag


def _is_type_dispatcher(x):
    return (x,)


@array_function_dispatch(_is_type_dispatcher)
def iscomplex(x):
    """
    Returns a bool array, where True if the input element is complex.

    What is tested is whether the input has a non-zero imaginary part, not if
    the input type is complex.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray of bools
        Output array.

    See Also
    --------
    isreal
    iscomplexobj : Return True if x is a complex type or an array of complex
                   numbers.

    Examples
    --------
    >>> import numpy as np
    >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
    array([ True, False, False, False, False,  True])

    """
    ax = asanyarray(x)
    if issubclass(ax.dtype.type, _nx.complexfloating):
        return ax.imag != 0
    res = zeros(ax.shape, bool)
    return res[()]   # convert to scalar if needed


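# Illustrative sketch, not upstream: `iscomplex` tests values while
# `iscomplexobj` tests the dtype, so they disagree on a zero-imaginary
# complex number. The helper name is hypothetical.
def _demo_iscomplex_vs_iscomplexobj():
    z = _nx.array([1 + 0j])
    assert not iscomplex(z)[0]   # the imaginary part is zero
    assert iscomplexobj(z)       # but the dtype is complex

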
@array_function_dispatch(_is_type_dispatcher)
def isreal(x):
    """
    Returns a bool array, where True if the input element is real.

    If an element has a complex type with zero imaginary part, the return
    value for that element is True.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray, bool
        Boolean array of same shape as `x`.

    Notes
    -----
    `isreal` may behave unexpectedly for string or object arrays (see
    examples).

    See Also
    --------
    iscomplex
    isrealobj : Return True if x is not a complex type.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
    >>> np.isreal(a)
    array([False,  True,  True,  True,  True, False])

    The function does not work on string arrays.

    >>> a = np.array([2j, "a"], dtype="U")
    >>> np.isreal(a)  # Warns about non-elementwise comparison
    False

    Returns True for all elements in input array of ``dtype=object`` even if
    any of the elements is complex.

    >>> a = np.array([1, "2", 3+4j], dtype=object)
    >>> np.isreal(a)
    array([ True,  True,  True])

    isreal should not be used with object arrays:

    >>> a = np.array([1+2j, 2+1j], dtype=object)
    >>> np.isreal(a)
    array([ True,  True])

    """
    return imag(x) == 0


@array_function_dispatch(_is_type_dispatcher)
def iscomplexobj(x):
    """
    Check for a complex type or an array of complex numbers.

    The type of the input is checked, not the value. Even if the input
    has an imaginary part equal to zero, `iscomplexobj` evaluates to True.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    iscomplexobj : bool
        The return value, True if `x` is of a complex type or has at least
        one complex element.

    See Also
    --------
    isrealobj, iscomplex

    Examples
    --------
    >>> import numpy as np
    >>> np.iscomplexobj(1)
    False
    >>> np.iscomplexobj(1+0j)
    True
    >>> np.iscomplexobj([3, 1+0j, True])
    True

    """
    try:
        dtype = x.dtype
        type_ = dtype.type
    except AttributeError:
        type_ = asarray(x).dtype.type
    return issubclass(type_, _nx.complexfloating)


@array_function_dispatch(_is_type_dispatcher)
def isrealobj(x):
    """
    Return True if x is not a complex type or an array of complex numbers.

    The type of the input is checked, not the value. So even if the input
    has an imaginary part equal to zero, `isrealobj` evaluates to False
    if the data type is complex.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    y : bool
        The return value, False if `x` is of a complex type.

    See Also
    --------
    iscomplexobj, isreal

    Notes
    -----
    The function is only meant for arrays with numerical values but it
    accepts all other objects. Since it assumes array input, the return
    value of other objects may be True.

    >>> np.isrealobj('A string')
    True
    >>> np.isrealobj(False)
    True
    >>> np.isrealobj(None)
    True

    Examples
    --------
    >>> import numpy as np
    >>> np.isrealobj(1)
    True
    >>> np.isrealobj(1+0j)
    False
    >>> np.isrealobj([3, 1+0j, True])
    False

    """
    return not iscomplexobj(x)

#-----------------------------------------------------------------------------

def _getmaxmin(t):
    from numpy._core import getlimits
    f = getlimits.finfo(t)
    return f.max, f.min


def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
    return (x,)


@array_function_dispatch(_nan_to_num_dispatcher)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
    """
    Replace NaN with zero and infinity with large finite numbers (default
    behaviour) or with the numbers defined by the user using the `nan`,
    `posinf` and/or `neginf` keywords.

    If `x` is inexact, NaN is replaced by zero or by the user defined value in
    `nan` keyword, infinity is replaced by the largest finite floating point
    values representable by ``x.dtype`` or by the user defined value in
    `posinf` keyword and -infinity is replaced by the most negative finite
    floating point values representable by ``x.dtype`` or by the user defined
    value in `neginf` keyword.

    For complex dtypes, the above is applied to each of the real and
    imaginary components of `x` separately.

    If `x` is not inexact, then no replacements are made.

    Parameters
    ----------
    x : scalar or array_like
        Input data.
    copy : bool, optional
        Whether to create a copy of `x` (True) or to replace values
        in-place (False). The in-place operation only occurs if
        casting to an array does not require a copy.
        Default is True.
    nan : int, float, optional
        Value to be used to fill NaN values. If no value is passed
        then NaN values will be replaced with 0.0.
    posinf : int, float, optional
        Value to be used to fill positive infinity values. If no value is
        passed then positive infinity values will be replaced with a very
        large number.
    neginf : int, float, optional
        Value to be used to fill negative infinity values. If no value is
        passed then negative infinity values will be replaced with a very
        small (or negative) number.

    Returns
    -------
    out : ndarray
        `x`, with the non-finite values replaced. If `copy` is False, this may
        be `x` itself.

    See Also
    --------
    isinf : Shows which elements are positive or negative infinity.
    isneginf : Shows which elements are negative infinity.
    isposinf : Shows which elements are positive infinity.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are finite (not NaN, not infinity)

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> import numpy as np
    >>> np.nan_to_num(np.inf)
    1.7976931348623157e+308
    >>> np.nan_to_num(-np.inf)
    -1.7976931348623157e+308
    >>> np.nan_to_num(np.nan)
    0.0
    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
    >>> np.nan_to_num(x)
    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
           -1.28000000e+002,  1.28000000e+002])
    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
    array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
           -1.2800000e+02,  1.2800000e+02])
    >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
    >>> np.nan_to_num(y)
    array([ 1.79769313e+308 +0.00000000e+000j, # may vary
            0.00000000e+000 +0.00000000e+000j,
            0.00000000e+000 +1.79769313e+308j])
    >>> np.nan_to_num(y, nan=111111, posinf=222222)
    array([222222.+111111.j, 111111.     +0.j, 111111.+222222.j])
    """
    x = _nx.array(x, subok=True, copy=copy)
    xtype = x.dtype.type

    isscalar = (x.ndim == 0)

    if not issubclass(xtype, _nx.inexact):
        return x[()] if isscalar else x

    iscomplex = issubclass(xtype, _nx.complexfloating)

    dest = (x.real, x.imag) if iscomplex else (x,)
    maxf, minf = _getmaxmin(x.real.dtype)
    if posinf is not None:
        maxf = posinf
    if neginf is not None:
        minf = neginf
    for d in dest:
        idx_nan = isnan(d)
        idx_posinf = isposinf(d)
        idx_neginf = isneginf(d)
        _nx.copyto(d, nan, where=idx_nan)
        _nx.copyto(d, maxf, where=idx_posinf)
        _nx.copyto(d, minf, where=idx_neginf)
    return x[()] if isscalar else x

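# Illustrative sketch, not upstream: for complex input the replacement runs
# on the real and imaginary views separately, as the `dest` tuple above
# shows. The helper name is hypothetical.
def _demo_nan_to_num_complex():
    z = _nx.array([complex(float("nan"), float("inf"))])
    out = nan_to_num(z, nan=1.0, posinf=2.0)
    assert out[0] == complex(1.0, 2.0)   # real NaN -> 1.0, imag +inf -> 2.0
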
#-----------------------------------------------------------------------------

def _real_if_close_dispatcher(a, tol=None):
    return (a,)


@array_function_dispatch(_real_if_close_dispatcher)
def real_if_close(a, tol=100):
    """
    If input is complex with all imaginary parts close to zero, return
    real parts.

    "Close to zero" is defined as `tol` * (machine epsilon of the type for
    `a`).

    Parameters
    ----------
    a : array_like
        Input array.
    tol : float
        Tolerance in machine epsilons for the complex part of the elements
        in the array. If the tolerance is <=1, then the absolute tolerance
        is used.

    Returns
    -------
    out : ndarray
        If `a` is real, the type of `a` is used for the output. If `a`
        has complex elements, the returned type is float.

    See Also
    --------
    real, imag, angle

    Notes
    -----
    Machine epsilon varies from machine to machine and between data types
    but Python floats on most platforms have a machine epsilon equal to
    2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
    out the machine epsilon for floats.

    Examples
    --------
    >>> import numpy as np
    >>> np.finfo(float).eps
    2.2204460492503131e-16 # may vary

    >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
    array([2.1, 5.2])
    >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
    array([2.1+4.e-13j, 5.2+3.e-15j])

    """
    a = asanyarray(a)
    type_ = a.dtype.type
    if not issubclass(type_, _nx.complexfloating):
        return a
    if tol > 1:
        f = getlimits.finfo(type_)
        tol = f.eps * tol
    if _nx.all(_nx.absolute(a.imag) < tol):
        a = a.real
    return a


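# Illustrative sketch, not upstream: with tol > 1 the cutoff becomes
# tol * finfo(dtype).eps, roughly 2.2e-13 for complex128 at tol=1000.
# The helper name is hypothetical.
def _demo_real_if_close_threshold():
    eps = getlimits.finfo(_nx.complex128).eps   # float64 eps, ~2.22e-16
    z = _nx.array([1.0 + (500 * eps) * 1j])
    assert isrealobj(real_if_close(z, tol=1000))    # 500*eps < 1000*eps
    assert iscomplexobj(real_if_close(z, tol=100))  # 500*eps > 100*eps

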
#-----------------------------------------------------------------------------

_namefromtype = {'S1': 'character',
                 '?': 'bool',
                 'b': 'signed char',
                 'B': 'unsigned char',
                 'h': 'short',
                 'H': 'unsigned short',
                 'i': 'integer',
                 'I': 'unsigned integer',
                 'l': 'long integer',
                 'L': 'unsigned long integer',
                 'q': 'long long integer',
                 'Q': 'unsigned long long integer',
                 'f': 'single precision',
                 'd': 'double precision',
                 'g': 'long precision',
                 'F': 'complex single precision',
                 'D': 'complex double precision',
                 'G': 'complex long double precision',
                 'S': 'string',
                 'U': 'unicode',
                 'V': 'void',
                 'O': 'object'
                 }

@set_module('numpy')
def typename(char):
    """
    Return a description for the given data type code.

    Parameters
    ----------
    char : str
        Data type code.

    Returns
    -------
    out : str
        Description of the input data type code.

    See Also
    --------
    dtype

    Examples
    --------
    >>> import numpy as np
    >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
    ...              'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
    >>> for typechar in typechars:
    ...     print(typechar, ' : ', np.typename(typechar))
    ...
    S1  :  character
    ?  :  bool
    B  :  unsigned char
    D  :  complex double precision
    G  :  complex long double precision
    F  :  complex single precision
    I  :  unsigned integer
    H  :  unsigned short
    L  :  unsigned long integer
    O  :  object
    Q  :  unsigned long long integer
    S  :  string
    U  :  unicode
    V  :  void
    b  :  signed char
    d  :  double precision
    g  :  long precision
    f  :  single precision
    i  :  integer
    h  :  short
    l  :  long integer
    q  :  long long integer

    """
    return _namefromtype[char]

#-----------------------------------------------------------------------------


# determine the "minimum common type" for a group of arrays.
array_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble],
              [None, _nx.complex64, _nx.complex128, _nx.clongdouble]]
array_precision = {_nx.float16: 0,
                   _nx.float32: 1,
                   _nx.float64: 2,
                   _nx.longdouble: 3,
                   _nx.complex64: 1,
                   _nx.complex128: 2,
                   _nx.clongdouble: 3}


def _common_type_dispatcher(*arrays):
    return arrays


@array_function_dispatch(_common_type_dispatcher)
def common_type(*arrays):
    """
    Return a scalar type which is common to the input arrays.

    The return type will always be an inexact (i.e. floating point) scalar
    type, even if all the arrays are integer arrays. If one of the inputs is
    an integer array, the minimum precision type that is returned is a
    64-bit floating point dtype.

    All input arrays except int64 and uint64 can be safely cast to the
    returned dtype without loss of information.

    Parameters
    ----------
    array1, array2, ... : ndarrays
        Input arrays.

    Returns
    -------
    out : data type code
        Data type code.

    See Also
    --------
    dtype, mintypecode

    Examples
    --------
    >>> np.common_type(np.arange(2, dtype=np.float32))
    <class 'numpy.float32'>
    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
    <class 'numpy.float64'>
    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
    <class 'numpy.complex128'>

    """
    is_complex = False
    precision = 0
    for a in arrays:
        t = a.dtype.type
        if iscomplexobj(a):
            is_complex = True
        if issubclass(t, _nx.integer):
            p = 2  # array_precision[_nx.double]
        else:
            p = array_precision.get(t)
            if p is None:
                raise TypeError("can't get common type for non-numeric array")
        precision = max(precision, p)
    if is_complex:
        return array_type[1][precision]
    else:
        return array_type[0][precision]
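

# Illustrative sketch, not upstream: precision climbs the array_type table
# above, and any complex input switches to the complex row. The helper name
# is hypothetical.
def _demo_common_type_table():
    f32 = zeros(2, dtype=_nx.float32)
    ints = zeros(2, dtype=int)
    c64 = zeros(2, dtype=_nx.complex64)
    assert common_type(f32) is _nx.float32
    assert common_type(f32, ints) is _nx.float64   # integers count as double
    assert common_type(f32, c64) is _nx.complex64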
350
lib/python3.11/site-packages/numpy/lib/_type_check_impl.pyi
Normal file
350
lib/python3.11/site-packages/numpy/lib/_type_check_impl.pyi
Normal file
@ -0,0 +1,350 @@
|
||||
from collections.abc import Container, Iterable
|
||||
from typing import Any, Protocol, TypeAlias, overload, type_check_only
|
||||
from typing import Literal as L
|
||||
|
||||
from _typeshed import Incomplete
|
||||
from typing_extensions import TypeVar
|
||||
|
||||
import numpy as np
|
||||
from numpy._typing import (
|
||||
ArrayLike,
|
||||
NDArray,
|
||||
_16Bit,
|
||||
_32Bit,
|
||||
_64Bit,
|
||||
_ArrayLike,
|
||||
_NestedSequence,
|
||||
_ScalarLike_co,
|
||||
_SupportsArray,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"common_type",
|
||||
"imag",
|
||||
"iscomplex",
|
||||
"iscomplexobj",
|
||||
"isreal",
|
||||
"isrealobj",
|
||||
"mintypecode",
|
||||
"nan_to_num",
|
||||
"real",
|
||||
"real_if_close",
|
||||
"typename",
|
||||
]
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_T_co = TypeVar("_T_co", covariant=True)
|
||||
_ScalarT = TypeVar("_ScalarT", bound=np.generic)
|
||||
_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
|
||||
_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool)
|
||||
|
||||
_FloatMax32: TypeAlias = np.float32 | np.float16
|
||||
_ComplexMax128: TypeAlias = np.complex128 | np.complex64
|
||||
_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer
|
||||
_Real: TypeAlias = np.floating | np.integer
|
||||
_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16
|
||||
_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer
|
||||
|
||||
@type_check_only
|
||||
class _HasReal(Protocol[_T_co]):
|
||||
@property
|
||||
def real(self, /) -> _T_co: ...
|
||||
|
||||
@type_check_only
|
||||
class _HasImag(Protocol[_T_co]):
|
||||
@property
|
||||
def imag(self, /) -> _T_co: ...
|
||||
|
||||
@type_check_only
|
||||
class _HasDType(Protocol[_ScalarT_co]):
|
||||
@property
|
||||
def dtype(self, /) -> np.dtype[_ScalarT_co]: ...
|
||||
|
||||
###
|
||||
|
||||
def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[str] = "GDFgdf", default: str = "d") -> str: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap]
|
||||
@overload
|
||||
def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...
|
||||
@overload
|
||||
def real(val: ArrayLike) -> NDArray[Any]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap]
|
||||
@overload
|
||||
def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...
|
||||
@overload
|
||||
def imag(val: ArrayLike) -> NDArray[Any]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def iscomplex(x: _ScalarLike_co) -> np.bool: ...
|
||||
@overload
|
||||
def iscomplex(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ...
|
||||
@overload
|
||||
def iscomplex(x: ArrayLike) -> np.bool | NDArray[np.bool]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def isreal(x: _ScalarLike_co) -> np.bool: ...
|
||||
@overload
|
||||
def isreal(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ...
|
||||
@overload
|
||||
def isreal(x: ArrayLike) -> np.bool | NDArray[np.bool]: ...
|
||||
|
||||
#
|
||||
def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ...
|
||||
def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def nan_to_num(
|
||||
x: _ScalarT,
|
||||
copy: bool = True,
|
||||
nan: float = 0.0,
|
||||
posinf: float | None = None,
|
||||
neginf: float | None = None,
|
||||
) -> _ScalarT: ...
|
||||
@overload
|
||||
def nan_to_num(
|
||||
x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]],
|
||||
copy: bool = True,
|
||||
nan: float = 0.0,
|
||||
posinf: float | None = None,
|
||||
neginf: float | None = None,
|
||||
) -> NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def nan_to_num(
|
||||
x: _SupportsArray[np.dtype[_ScalarT]],
|
||||
copy: bool = True,
|
||||
nan: float = 0.0,
|
||||
posinf: float | None = None,
|
||||
neginf: float | None = None,
|
||||
) -> _ScalarT | NDArray[_ScalarT]: ...
|
||||
@overload
|
||||
def nan_to_num(
|
||||
x: _NestedSequence[ArrayLike],
|
||||
copy: bool = True,
|
||||
nan: float = 0.0,
|
||||
posinf: float | None = None,
|
||||
neginf: float | None = None,
|
||||
) -> NDArray[Incomplete]: ...
|
||||
@overload
|
||||
def nan_to_num(
|
||||
x: ArrayLike,
|
||||
copy: bool = True,
|
||||
nan: float = 0.0,
|
||||
posinf: float | None = None,
|
||||
neginf: float | None = None,
|
||||
) -> Incomplete: ...
|
||||
|
||||
# NOTE: The [overload-overlap] mypy error is a false positive
|
||||
@overload
|
||||
def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap]
|
||||
@overload
|
||||
def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ...
|
||||
@overload
|
||||
def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ...
|
||||
@overload
|
||||
def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ...
|
||||
@overload
|
||||
def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def typename(char: L['S1']) -> L['character']: ...
|
||||
@overload
|
||||
def typename(char: L['?']) -> L['bool']: ...
|
||||
@overload
|
||||
def typename(char: L['b']) -> L['signed char']: ...
|
||||
@overload
|
||||
def typename(char: L['B']) -> L['unsigned char']: ...
|
||||
@overload
|
||||
def typename(char: L['h']) -> L['short']: ...
|
||||
@overload
|
||||
def typename(char: L['H']) -> L['unsigned short']: ...
|
||||
@overload
|
||||
def typename(char: L['i']) -> L['integer']: ...
|
||||
@overload
|
||||
def typename(char: L['I']) -> L['unsigned integer']: ...
|
||||
@overload
|
||||
def typename(char: L['l']) -> L['long integer']: ...
|
||||
@overload
|
||||
def typename(char: L['L']) -> L['unsigned long integer']: ...
|
||||
@overload
|
||||
def typename(char: L['q']) -> L['long long integer']: ...
|
||||
@overload
|
||||
def typename(char: L['Q']) -> L['unsigned long long integer']: ...
|
||||
@overload
|
||||
def typename(char: L['f']) -> L['single precision']: ...
|
||||
@overload
|
||||
def typename(char: L['d']) -> L['double precision']: ...
|
||||
@overload
|
||||
def typename(char: L['g']) -> L['long precision']: ...
|
||||
@overload
|
||||
def typename(char: L['F']) -> L['complex single precision']: ...
|
||||
@overload
|
||||
def typename(char: L['D']) -> L['complex double precision']: ...
|
||||
@overload
|
||||
def typename(char: L['G']) -> L['complex long double precision']: ...
|
||||
@overload
def typename(char: L['S']) -> L['string']: ...
@overload
def typename(char: L['U']) -> L['unicode']: ...
@overload
def typename(char: L['V']) -> L['void']: ...
@overload
def typename(char: L['O']) -> L['object']: ...

# NOTE: The [overload-overlap] mypy errors are false positives
@overload
def common_type() -> type[np.float16]: ...
@overload
def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ...  # type: ignore[overload-overlap]
@overload
def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ...  # type: ignore[overload-overlap]
@overload
def common_type(  # type: ignore[overload-overlap]
    a0: _HasDType[np.float64 | np.integer],
    /,
    *ai: _HasDType[_RealMax64],
) -> type[np.float64]: ...
@overload
def common_type(  # type: ignore[overload-overlap]
    a0: _HasDType[np.longdouble],
    /,
    *ai: _HasDType[_Real],
) -> type[np.longdouble]: ...
@overload
def common_type(  # type: ignore[overload-overlap]
    a0: _HasDType[np.complex64],
    /,
    *ai: _HasDType[_InexactMax32],
) -> type[np.complex64]: ...
@overload
def common_type(  # type: ignore[overload-overlap]
    a0: _HasDType[np.complex128],
    /,
    *ai: _HasDType[_NumberMax64],
) -> type[np.complex128]: ...
@overload
def common_type(  # type: ignore[overload-overlap]
    a0: _HasDType[np.clongdouble],
    /,
    *ai: _HasDType[np.number],
) -> type[np.clongdouble]: ...
@overload
def common_type(  # type: ignore[overload-overlap]
    a0: _HasDType[_FloatMax32],
    array1: _HasDType[np.float32],
    /,
    *ai: _HasDType[_FloatMax32],
) -> type[np.float32]: ...
@overload
def common_type(
    a0: _HasDType[_RealMax64],
    array1: _HasDType[np.float64 | np.integer],
    /,
    *ai: _HasDType[_RealMax64],
) -> type[np.float64]: ...
@overload
def common_type(
    a0: _HasDType[_Real],
    array1: _HasDType[np.longdouble],
    /,
    *ai: _HasDType[_Real],
) -> type[np.longdouble]: ...
@overload
def common_type(  # type: ignore[overload-overlap]
    a0: _HasDType[_InexactMax32],
    array1: _HasDType[np.complex64],
    /,
    *ai: _HasDType[_InexactMax32],
) -> type[np.complex64]: ...
@overload
def common_type(
    a0: _HasDType[np.float64],
    array1: _HasDType[_ComplexMax128],
    /,
    *ai: _HasDType[_NumberMax64],
) -> type[np.complex128]: ...
@overload
def common_type(
    a0: _HasDType[_ComplexMax128],
    array1: _HasDType[np.float64],
    /,
    *ai: _HasDType[_NumberMax64],
) -> type[np.complex128]: ...
@overload
def common_type(
    a0: _HasDType[_NumberMax64],
    array1: _HasDType[np.complex128],
    /,
    *ai: _HasDType[_NumberMax64],
) -> type[np.complex128]: ...
@overload
def common_type(
    a0: _HasDType[_ComplexMax128],
    array1: _HasDType[np.complex128 | np.integer],
    /,
    *ai: _HasDType[_NumberMax64],
) -> type[np.complex128]: ...
@overload
def common_type(
    a0: _HasDType[np.complex128 | np.integer],
    array1: _HasDType[_ComplexMax128],
    /,
    *ai: _HasDType[_NumberMax64],
) -> type[np.complex128]: ...
@overload
def common_type(
    a0: _HasDType[_Real],
    /,
    *ai: _HasDType[_Real],
) -> type[np.floating]: ...
@overload
def common_type(
    a0: _HasDType[np.number],
    array1: _HasDType[np.clongdouble],
    /,
    *ai: _HasDType[np.number],
) -> type[np.clongdouble]: ...
@overload
def common_type(
    a0: _HasDType[np.longdouble],
    array1: _HasDType[np.complexfloating],
    /,
    *ai: _HasDType[np.number],
) -> type[np.clongdouble]: ...
@overload
def common_type(
    a0: _HasDType[np.complexfloating],
    array1: _HasDType[np.longdouble],
    /,
    *ai: _HasDType[np.number],
) -> type[np.clongdouble]: ...
@overload
def common_type(
    a0: _HasDType[np.complexfloating],
    array1: _HasDType[np.number],
    /,
    *ai: _HasDType[np.number],
) -> type[np.complexfloating]: ...
@overload
def common_type(
    a0: _HasDType[np.number],
    array1: _HasDType[np.complexfloating],
    /,
    *ai: _HasDType[np.number],
) -> type[np.complexfloating]: ...
@overload
def common_type(
    a0: _HasDType[np.number],
    array1: _HasDType[np.number],
    /,
    *ai: _HasDType[np.number],
) -> type[Any]: ...
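Taken together, these overloads encode the scalar-promotion ladder of `np.common_type`: integers count as `float64`, and float/complex inputs widen to the largest precision present. A minimal sketch of the behavior the stubs describe, assuming a NumPy 2.x runtime:

import numpy as np

# float32-only inputs stay float32
assert np.common_type(np.arange(3, dtype=np.float32)) is np.float32
# integer arrays count as float64, so mixing them with float32 widens the result
assert np.common_type(np.arange(3, dtype=np.float32), np.arange(2)) is np.float64
# any complex input forces a complex result at the combined precision
assert np.common_type(np.arange(4), np.array([45, 6.j])) is np.complex128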
207
lib/python3.11/site-packages/numpy/lib/_ufunclike_impl.py
Normal file
@@ -0,0 +1,207 @@
"""
Module of functions that are like ufuncs in acting on arrays and optionally
storing results in an output array.

"""
__all__ = ['fix', 'isneginf', 'isposinf']

import numpy._core.numeric as nx
from numpy._core.overrides import array_function_dispatch


def _dispatcher(x, out=None):
    return (x, out)


@array_function_dispatch(_dispatcher, verify=False, module='numpy')
def fix(x, out=None):
    """
    Round to nearest integer towards zero.

    Round an array of floats element-wise to nearest integer towards zero.
    The rounded values have the same data-type as the input.

    Parameters
    ----------
    x : array_like
        An array to be rounded
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have
        a shape that the input broadcasts to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    out : ndarray of floats
        An array with the same dimensions and data-type as the input.
        If second argument is not supplied then a new array is returned
        with the rounded values.

        If a second argument is supplied the result is stored there.
        The return value ``out`` is then a reference to that array.

    See Also
    --------
    rint, trunc, floor, ceil
    around : Round to given number of decimals

    Examples
    --------
    >>> import numpy as np
    >>> np.fix(3.14)
    3.0
    >>> np.fix(3)
    3
    >>> np.fix([2.1, 2.9, -2.1, -2.9])
    array([ 2.,  2., -2., -2.])

    """
    # promote back to an array if flattened
    res = nx.asanyarray(nx.ceil(x, out=out))
    res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))

    # when no out argument is passed and no subclasses are involved, flatten
    # scalars
    if out is None and type(res) is nx.ndarray:
        res = res[()]
    return res


@array_function_dispatch(_dispatcher, verify=False, module='numpy')
def isposinf(x, out=None):
    """
    Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : array_like
        The input array.
    out : array_like, optional
        A location into which the result is stored. If provided, it must have a
        shape that the input broadcasts to. If not provided or None, a
        freshly-allocated boolean array is returned.

    Returns
    -------
    out : ndarray
        A boolean array with the same dimensions as the input.
        If second argument is not supplied then a boolean array is returned
        with values True where the corresponding element of the input is
        positive infinity and values False where the element of the input is
        not positive infinity.

        If a second argument is supplied the result is stored there. If the
        type of that array is a numeric type the result is represented as zeros
        and ones, if the type is boolean then as False and True.
        The return value `out` is then a reference to that array.

    See Also
    --------
    isinf, isneginf, isfinite, isnan

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754).

    Errors result if the second argument is also supplied when x is a scalar
    input, if first and second arguments have different shapes, or if the
    first argument has complex values.

    Examples
    --------
    >>> import numpy as np
    >>> np.isposinf(np.inf)
    True
    >>> np.isposinf(-np.inf)
    False
    >>> np.isposinf([-np.inf, 0., np.inf])
    array([False, False,  True])

    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([2, 2, 2])
    >>> np.isposinf(x, y)
    array([0, 0, 1])
    >>> y
    array([0, 0, 1])

    """
    is_inf = nx.isinf(x)
    try:
        signbit = ~nx.signbit(x)
    except TypeError as e:
        dtype = nx.asanyarray(x).dtype
        raise TypeError(f'This operation is not supported for {dtype} values '
                        'because it would be ambiguous.') from e
    else:
        return nx.logical_and(is_inf, signbit, out)


@array_function_dispatch(_dispatcher, verify=False, module='numpy')
def isneginf(x, out=None):
    """
    Test element-wise for negative infinity, return result as bool array.

    Parameters
    ----------
    x : array_like
        The input array.
    out : array_like, optional
        A location into which the result is stored. If provided, it must have a
        shape that the input broadcasts to. If not provided or None, a
        freshly-allocated boolean array is returned.

    Returns
    -------
    out : ndarray
        A boolean array with the same dimensions as the input.
        If second argument is not supplied then a numpy boolean array is
        returned with values True where the corresponding element of the
        input is negative infinity and values False where the element of
        the input is not negative infinity.

        If a second argument is supplied the result is stored there. If the
        type of that array is a numeric type the result is represented as
        zeros and ones, if the type is boolean then as False and True. The
        return value `out` is then a reference to that array.

    See Also
    --------
    isinf, isposinf, isnan, isfinite

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754).

    Errors result if the second argument is also supplied when x is a scalar
    input, if first and second arguments have different shapes, or if the
    first argument has complex values.

    Examples
    --------
    >>> import numpy as np
    >>> np.isneginf(-np.inf)
    True
    >>> np.isneginf(np.inf)
    False
    >>> np.isneginf([-np.inf, 0., np.inf])
    array([ True, False, False])

    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([2, 2, 2])
    >>> np.isneginf(x, y)
    array([1, 0, 0])
    >>> y
    array([1, 0, 0])

    """
    is_inf = nx.isinf(x)
    try:
        signbit = nx.signbit(x)
    except TypeError as e:
        dtype = nx.asanyarray(x).dtype
        raise TypeError(f'This operation is not supported for {dtype} values '
                        'because it would be ambiguous.') from e
    else:
        return nx.logical_and(is_inf, signbit, out)
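A short usage sketch of the two behaviors the docstrings above describe, i.e. allocating a fresh result versus writing into a caller-supplied `out` (nothing here beyond the public `np.fix`/`np.isposinf` API):

import numpy as np

x = np.array([2.1, 2.9, -2.1, -2.9])
print(np.fix(x))                  # array([ 2.,  2., -2., -2.]), freshly allocated

y = np.zeros(3, dtype=int)        # integer out: booleans land as 0/1
np.isposinf(np.array([-np.inf, 0.0, np.inf]), y)
print(y)                          # array([0, 0, 1])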
67
lib/python3.11/site-packages/numpy/lib/_ufunclike_impl.pyi
Normal file
@@ -0,0 +1,67 @@
from typing import Any, TypeVar, overload

import numpy as np
from numpy import floating, object_
from numpy._typing import (
    NDArray,
    _ArrayLikeFloat_co,
    _ArrayLikeObject_co,
    _FloatLike_co,
)

__all__ = ["fix", "isneginf", "isposinf"]

_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])

@overload
def fix(  # type: ignore[misc]
    x: _FloatLike_co,
    out: None = ...,
) -> floating: ...
@overload
def fix(
    x: _ArrayLikeFloat_co,
    out: None = ...,
) -> NDArray[floating]: ...
@overload
def fix(
    x: _ArrayLikeObject_co,
    out: None = ...,
) -> NDArray[object_]: ...
@overload
def fix(
    x: _ArrayLikeFloat_co | _ArrayLikeObject_co,
    out: _ArrayT,
) -> _ArrayT: ...

@overload
def isposinf(  # type: ignore[misc]
    x: _FloatLike_co,
    out: None = ...,
) -> np.bool: ...
@overload
def isposinf(
    x: _ArrayLikeFloat_co,
    out: None = ...,
) -> NDArray[np.bool]: ...
@overload
def isposinf(
    x: _ArrayLikeFloat_co,
    out: _ArrayT,
) -> _ArrayT: ...

@overload
def isneginf(  # type: ignore[misc]
    x: _FloatLike_co,
    out: None = ...,
) -> np.bool: ...
@overload
def isneginf(
    x: _ArrayLikeFloat_co,
    out: None = ...,
) -> NDArray[np.bool]: ...
@overload
def isneginf(
    x: _ArrayLikeFloat_co,
    out: _ArrayT,
) -> _ArrayT: ...
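For illustration, a hypothetical snippet showing what these stubs promise to a static type checker such as mypy; the function name `halfway` is made up for the example:

import numpy as np
import numpy.typing as npt

def halfway(x: npt.NDArray[np.float64]) -> npt.NDArray[np.floating]:
    # array-like input with no out= selects the NDArray[floating] overload
    return np.fix(x / 2)

flag = np.isposinf(np.inf)  # scalar input selects the np.bool overload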
299
lib/python3.11/site-packages/numpy/lib/_user_array_impl.py
Normal file
@@ -0,0 +1,299 @@
"""
Container class for backward compatibility with NumArray.

The user_array.container class exists for backward compatibility with NumArray
and is not meant to be used in new code. If you need to create an array
container class, we recommend either creating a class that wraps an ndarray
or subclasses ndarray.

"""
from numpy._core import (
    absolute,
    add,
    arange,
    array,
    asarray,
    bitwise_and,
    bitwise_or,
    bitwise_xor,
    divide,
    equal,
    greater,
    greater_equal,
    invert,
    left_shift,
    less,
    less_equal,
    multiply,
    not_equal,
    power,
    remainder,
    reshape,
    right_shift,
    shape,
    sin,
    sqrt,
    subtract,
    transpose,
)
from numpy._core.overrides import set_module


@set_module("numpy.lib.user_array")
class container:
    """
    container(data, dtype=None, copy=True)

    Standard container-class for easy multiple-inheritance.

    Methods
    -------
    copy
    byteswap
    astype

    """
    def __init__(self, data, dtype=None, copy=True):
        self.array = array(data, dtype, copy=copy)

    def __repr__(self):
        if self.ndim > 0:
            return self.__class__.__name__ + repr(self.array)[len("array"):]
        else:
            return self.__class__.__name__ + "(" + repr(self.array) + ")"

    def __array__(self, t=None):
        if t:
            return self.array.astype(t)
        return self.array

    # Array as sequence
    def __len__(self):
        return len(self.array)

    def __getitem__(self, index):
        return self._rc(self.array[index])

    def __setitem__(self, index, value):
        self.array[index] = asarray(value, self.dtype)

    def __abs__(self):
        return self._rc(absolute(self.array))

    def __neg__(self):
        return self._rc(-self.array)

    def __add__(self, other):
        return self._rc(self.array + asarray(other))

    __radd__ = __add__

    def __iadd__(self, other):
        add(self.array, other, self.array)
        return self

    def __sub__(self, other):
        return self._rc(self.array - asarray(other))

    def __rsub__(self, other):
        return self._rc(asarray(other) - self.array)

    def __isub__(self, other):
        subtract(self.array, other, self.array)
        return self

    def __mul__(self, other):
        return self._rc(multiply(self.array, asarray(other)))

    __rmul__ = __mul__

    def __imul__(self, other):
        multiply(self.array, other, self.array)
        return self

    def __mod__(self, other):
        return self._rc(remainder(self.array, other))

    def __rmod__(self, other):
        return self._rc(remainder(other, self.array))

    def __imod__(self, other):
        remainder(self.array, other, self.array)
        return self

    def __divmod__(self, other):
        return (self._rc(divide(self.array, other)),
                self._rc(remainder(self.array, other)))

    def __rdivmod__(self, other):
        return (self._rc(divide(other, self.array)),
                self._rc(remainder(other, self.array)))

    def __pow__(self, other):
        return self._rc(power(self.array, asarray(other)))

    def __rpow__(self, other):
        return self._rc(power(asarray(other), self.array))

    def __ipow__(self, other):
        power(self.array, other, self.array)
        return self

    def __lshift__(self, other):
        return self._rc(left_shift(self.array, other))

    def __rshift__(self, other):
        return self._rc(right_shift(self.array, other))

    def __rlshift__(self, other):
        return self._rc(left_shift(other, self.array))

    def __rrshift__(self, other):
        return self._rc(right_shift(other, self.array))

    def __ilshift__(self, other):
        left_shift(self.array, other, self.array)
        return self

    def __irshift__(self, other):
        right_shift(self.array, other, self.array)
        return self

    def __and__(self, other):
        return self._rc(bitwise_and(self.array, other))

    def __rand__(self, other):
        return self._rc(bitwise_and(other, self.array))

    def __iand__(self, other):
        bitwise_and(self.array, other, self.array)
        return self

    def __xor__(self, other):
        return self._rc(bitwise_xor(self.array, other))

    def __rxor__(self, other):
        return self._rc(bitwise_xor(other, self.array))

    def __ixor__(self, other):
        bitwise_xor(self.array, other, self.array)
        return self

    def __or__(self, other):
        return self._rc(bitwise_or(self.array, other))

    def __ror__(self, other):
        return self._rc(bitwise_or(other, self.array))

    def __ior__(self, other):
        bitwise_or(self.array, other, self.array)
        return self

    def __pos__(self):
        return self._rc(self.array)

    def __invert__(self):
        return self._rc(invert(self.array))

    def _scalarfunc(self, func):
        if self.ndim == 0:
            return func(self[0])
        else:
            raise TypeError(
                "only rank-0 arrays can be converted to Python scalars.")

    def __complex__(self):
        return self._scalarfunc(complex)

    def __float__(self):
        return self._scalarfunc(float)

    def __int__(self):
        return self._scalarfunc(int)

    def __hex__(self):
        return self._scalarfunc(hex)

    def __oct__(self):
        return self._scalarfunc(oct)

    def __lt__(self, other):
        return self._rc(less(self.array, other))

    def __le__(self, other):
        return self._rc(less_equal(self.array, other))

    def __eq__(self, other):
        return self._rc(equal(self.array, other))

    def __ne__(self, other):
        return self._rc(not_equal(self.array, other))

    def __gt__(self, other):
        return self._rc(greater(self.array, other))

    def __ge__(self, other):
        return self._rc(greater_equal(self.array, other))

    def copy(self):
        ""
        return self._rc(self.array.copy())

    def tobytes(self):
        ""
        return self.array.tobytes()

    def byteswap(self):
        ""
        return self._rc(self.array.byteswap())

    def astype(self, typecode):
        ""
        return self._rc(self.array.astype(typecode))

    def _rc(self, a):
        if len(shape(a)) == 0:
            return a
        else:
            return self.__class__(a)

    def __array_wrap__(self, *args):
        return self.__class__(args[0])

    def __setattr__(self, attr, value):
        if attr == 'array':
            object.__setattr__(self, attr, value)
            return
        try:
            self.array.__setattr__(attr, value)
        except AttributeError:
            object.__setattr__(self, attr, value)

    # Only called after other approaches fail.
    def __getattr__(self, attr):
        if (attr == 'array'):
            return object.__getattribute__(self, attr)
        return self.array.__getattribute__(attr)


#############################################################
# Test of class container
#############################################################
if __name__ == '__main__':
    temp = reshape(arange(10000), (100, 100))

    ua = container(temp)
    # new object created begin test
    print(dir(ua))
    print(shape(ua), ua.shape)  # I have changed Numeric.py

    ua_small = ua[:3, :5]
    print(ua_small)
    # this did not change ua[0,0], which is not normal behavior
    ua_small[0, 0] = 10
    print(ua_small[0, 0], ua[0, 0])
    print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
    print(less(ua_small, 103), type(less(ua_small, 103)))
    print(type(ua_small * reshape(arange(15), shape(ua_small))))
    print(reshape(ua_small, (5, 3)))
    print(transpose(ua_small))
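A quick sketch of the intended use: subclass `container` and let it delegate arithmetic to the wrapped ndarray, re-wrapping results via `_rc`. The import below uses the private module path from this commit; released NumPy exposed the class as `numpy.lib.user_array.container`, so treat the path as an assumption:

import numpy as np
from numpy.lib._user_array_impl import container  # path per this commit

class MyArray(container):
    pass  # inherits all operators; non-scalar results are re-wrapped as MyArray

a = MyArray(np.arange(6).reshape(2, 3))
b = a + 1
print(type(b).__name__)  # MyArray
print(b.shape)           # (2, 3); attribute access falls through to the ndarray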
225
lib/python3.11/site-packages/numpy/lib/_user_array_impl.pyi
Normal file
@@ -0,0 +1,225 @@
from types import EllipsisType
from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload

from _typeshed import Incomplete
from typing_extensions import TypeVar, override

import numpy as np
import numpy.typing as npt
from numpy._typing import (
    _AnyShape,
    _ArrayLike,
    _ArrayLikeBool_co,
    _ArrayLikeInt_co,
    _DTypeLike,
)

###

_ScalarT = TypeVar("_ScalarT", bound=np.generic)
_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])
_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True)
_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)

_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]])
_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]])
_RealContainerT = TypeVar(
    "_RealContainerT",
    bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]],
)
_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]])

_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool]

_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None
_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...]
_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice
_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...]

###

class container(Generic[_ShapeT_co, _DTypeT_co]):
    array: np.ndarray[_ShapeT_co, _DTypeT_co]

    @overload
    def __init__(
        self,
        /,
        data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co],
        dtype: None = None,
        copy: bool = True,
    ) -> None: ...
    @overload
    def __init__(
        self: container[Any, np.dtype[_ScalarT]],
        /,
        data: _ArrayLike[_ScalarT],
        dtype: None = None,
        copy: bool = True,
    ) -> None: ...
    @overload
    def __init__(
        self: container[Any, np.dtype[_ScalarT]],
        /,
        data: npt.ArrayLike,
        dtype: _DTypeLike[_ScalarT],
        copy: bool = True,
    ) -> None: ...
    @overload
    def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ...

    #
    def __complex__(self, /) -> complex: ...
    def __float__(self, /) -> float: ...
    def __int__(self, /) -> int: ...
    def __hex__(self, /) -> str: ...
    def __oct__(self, /) -> str: ...

    #
    @override
    def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
    @override
    def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]

    #
    def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...
    def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...
    def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...
    def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...

    #
    def __len__(self, /) -> int: ...

    # keep in sync with np.ndarray
    @overload
    def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ...
    @overload
    def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ...
    @overload
    def __getitem__(self, key: _ToIndices, /) -> Any: ...
    @overload
    def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ...
    @overload
    def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype]: ...

    # keep in sync with np.ndarray
    @overload
    def __setitem__(self, index: _ToIndices, value: object, /) -> None: ...
    @overload
    def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ...

    # keep in sync with np.ndarray
    @overload
    def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ...  # type: ignore[overload-overlap]
    @overload
    def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ...
    @overload
    def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ...
    @overload
    def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ...
    @overload
    def __abs__(self: _RealContainerT, /) -> _RealContainerT: ...

    #
    def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ...  # noqa: PYI019
    def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ...  # noqa: PYI019
    def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ...  # noqa: PYI019

    # TODO(jorenham): complete these binary ops

    #
    def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __iadd__(self, other: npt.ArrayLike, /) -> Self: ...

    #
    def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __isub__(self, other: npt.ArrayLike, /) -> Self: ...

    #
    def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __imul__(self, other: npt.ArrayLike, /) -> Self: ...

    #
    def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __imod__(self, other: npt.ArrayLike, /) -> Self: ...

    #
    def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ...
    def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ...

    #
    def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ...
    def __ipow__(self, other: npt.ArrayLike, /) -> Self: ...

    #
    def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
    def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
    def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ...

    #
    def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
    def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
    def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ...

    #
    @overload
    def __and__(
        self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /
    ) -> container[_AnyShape, np.dtype[np.bool]]: ...
    @overload
    def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...
    __rand__ = __and__
    @overload
    def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...
    @overload
    def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ...

    #
    @overload
    def __xor__(
        self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /
    ) -> container[_AnyShape, np.dtype[np.bool]]: ...
    @overload
    def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...
    __rxor__ = __xor__
    @overload
    def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...
    @overload
    def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ...

    #
    @overload
    def __or__(
        self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /
    ) -> container[_AnyShape, np.dtype[np.bool]]: ...
    @overload
    def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...
    __ror__ = __or__
    @overload
    def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...
    @overload
    def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ...

    #
    @overload
    def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ...
    @overload
    def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ...

    #
    @overload
    def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ...
    @overload
    def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ...

    #
    def copy(self, /) -> Self: ...
    def tobytes(self, /) -> bytes: ...
    def byteswap(self, /) -> Self: ...
    def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ...
779
lib/python3.11/site-packages/numpy/lib/_utils_impl.py
Normal file
@@ -0,0 +1,779 @@
import functools
import os
import platform
import sys
import textwrap
import types
import warnings

import numpy as np
from numpy._core import ndarray
from numpy._utils import set_module

__all__ = [
    'get_include', 'info', 'show_runtime'
]


@set_module('numpy')
def show_runtime():
    """
    Print information about various resources in the system
    including available intrinsic support and BLAS/LAPACK library
    in use

    .. versionadded:: 1.24.0

    See Also
    --------
    show_config : Show libraries in the system on which NumPy was built.

    Notes
    -----
    1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
       library if available.
    2. SIMD related information is derived from ``__cpu_features__``,
       ``__cpu_baseline__`` and ``__cpu_dispatch__``

    """
    from pprint import pprint

    from numpy._core._multiarray_umath import (
        __cpu_baseline__,
        __cpu_dispatch__,
        __cpu_features__,
    )
    config_found = [{
        "numpy_version": np.__version__,
        "python": sys.version,
        "uname": platform.uname(),
    }]
    features_found, features_not_found = [], []
    for feature in __cpu_dispatch__:
        if __cpu_features__[feature]:
            features_found.append(feature)
        else:
            features_not_found.append(feature)
    config_found.append({
        "simd_extensions": {
            "baseline": __cpu_baseline__,
            "found": features_found,
            "not_found": features_not_found
        }
    })
    try:
        from threadpoolctl import threadpool_info
        config_found.extend(threadpool_info())
    except ImportError:
        print("WARNING: `threadpoolctl` not found in system!"
              " Install it by `pip install threadpoolctl`."
              " Once installed, try `np.show_runtime` again"
              " for more detailed build information")
    pprint(config_found)


@set_module('numpy')
def get_include():
    """
    Return the directory that contains the NumPy \\*.h header files.

    Extension modules that need to compile against NumPy may need to use this
    function to locate the appropriate include directory.

    Notes
    -----
    When using ``setuptools``, for example in ``setup.py``::

        import numpy as np
        ...
        Extension('extension_name', ...
                  include_dirs=[np.get_include()])
        ...

    Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using
    that is likely preferred for build systems other than ``setuptools``::

        $ numpy-config --cflags
        -I/path/to/site-packages/numpy/_core/include

        # Or rely on pkg-config:
        $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir)
        $ pkg-config --cflags
        -I/path/to/site-packages/numpy/_core/include

    Examples
    --------
    >>> np.get_include()
    '.../site-packages/numpy/core/include'  # may vary

    """
    import numpy
    if numpy.show_config is None:
        # running from numpy source directory
        d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include')
    else:
        # using installed numpy core headers
        import numpy._core as _core
        d = os.path.join(os.path.dirname(_core.__file__), 'include')
    return d


class _Deprecate:
    """
    Decorator class to deprecate old functions.

    Refer to `deprecate` for details.

    See Also
    --------
    deprecate

    """

    def __init__(self, old_name=None, new_name=None, message=None):
        self.old_name = old_name
        self.new_name = new_name
        self.message = message

    def __call__(self, func, *args, **kwargs):
        """
        Decorator call.  Refer to ``decorate``.

        """
        old_name = self.old_name
        new_name = self.new_name
        message = self.message

        if old_name is None:
            old_name = func.__name__
        if new_name is None:
            depdoc = f"`{old_name}` is deprecated!"
        else:
            depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!"

        if message is not None:
            depdoc += "\n" + message

        @functools.wraps(func)
        def newfunc(*args, **kwds):
            warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
            return func(*args, **kwds)

        newfunc.__name__ = old_name
        doc = func.__doc__
        if doc is None:
            doc = depdoc
        else:
            lines = doc.expandtabs().split('\n')
            indent = _get_indent(lines[1:])
            if lines[0].lstrip():
                # Indent the original first line to let inspect.cleandoc()
                # dedent the docstring despite the deprecation notice.
                doc = indent * ' ' + doc
            else:
                # Remove the same leading blank lines as cleandoc() would.
                skip = len(lines[0]) + 1
                for line in lines[1:]:
                    if len(line) > indent:
                        break
                    skip += len(line) + 1
                doc = doc[skip:]
            depdoc = textwrap.indent(depdoc, ' ' * indent)
            doc = f'{depdoc}\n\n{doc}'
        newfunc.__doc__ = doc

        return newfunc


def _get_indent(lines):
    """
    Determines the leading whitespace that could be removed from all the lines.
    """
    indent = sys.maxsize
    for line in lines:
        content = len(line.lstrip())
        if content:
            indent = min(indent, len(line) - content)
    if indent == sys.maxsize:
        indent = 0
    return indent


def deprecate(*args, **kwargs):
    """
    Issues a DeprecationWarning, adds warning to `old_name`'s
    docstring, rebinds ``old_name.__name__`` and returns the new
    function object.

    This function may also be used as a decorator.

    .. deprecated:: 2.0
        Use `~warnings.warn` with :exc:`DeprecationWarning` instead.

    Parameters
    ----------
    func : function
        The function to be deprecated.
    old_name : str, optional
        The name of the function to be deprecated. Default is None, in
        which case the name of `func` is used.
    new_name : str, optional
        The new name for the function. Default is None, in which case the
        deprecation message is that `old_name` is deprecated. If given, the
        deprecation message is that `old_name` is deprecated and `new_name`
        should be used instead.
    message : str, optional
        Additional explanation of the deprecation. Displayed in the
        docstring after the warning.

    Returns
    -------
    old_func : function
        The deprecated function.

    Examples
    --------
    Note that ``olduint`` returns a value after printing Deprecation
    Warning:

    >>> olduint = np.lib.utils.deprecate(np.uint)
    DeprecationWarning: `uint64` is deprecated! # may vary
    >>> olduint(6)
    6

    """
    # Deprecate may be run as a function or as a decorator
    # If run as a function, we initialise the decorator class
    # and execute its __call__ method.

    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`deprecate` is deprecated, "
        "use `warn` with `DeprecationWarning` instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    if args:
        fn = args[0]
        args = args[1:]

        return _Deprecate(*args, **kwargs)(fn)
    else:
        return _Deprecate(*args, **kwargs)


def deprecate_with_doc(msg):
    """
    Deprecates a function and includes the deprecation in its docstring.

    .. deprecated:: 2.0
        Use `~warnings.warn` with :exc:`DeprecationWarning` instead.

    This function is used as a decorator. It returns an object that can be
    used to issue a DeprecationWarning, by passing the to-be decorated
    function as argument, this adds warning to the to-be decorated function's
    docstring and returns the new function object.

    See Also
    --------
    deprecate : Decorate a function such that it issues a
                :exc:`DeprecationWarning`

    Parameters
    ----------
    msg : str
        Additional explanation of the deprecation. Displayed in the
        docstring after the warning.

    Returns
    -------
    obj : object

    """
    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`deprecate` is deprecated, "
        "use `warn` with `DeprecationWarning` instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    return _Deprecate(message=msg)


#-----------------------------------------------------------------------------


# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.

# combine name and arguments and split to multiple lines of width
# characters. End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
    firstwidth = len(name)
    k = firstwidth
    newstr = name
    sepstr = ", "
    arglist = arguments.split(sepstr)
    for argument in arglist:
        if k == firstwidth:
            addstr = ""
        else:
            addstr = sepstr
        k = k + len(argument) + len(addstr)
        if k > width:
            k = firstwidth + 1 + len(argument)
            newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument
        else:
            newstr = newstr + addstr + argument
    return newstr


_namedict = None
_dictlist = None

# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
    module = __import__(module, globals(), locals(), [])
    thedict = {module.__name__: module.__dict__}
    dictlist = [module.__name__]
    totraverse = [module.__dict__]
    while True:
        if len(totraverse) == 0:
            break
        thisdict = totraverse.pop(0)
        for x in thisdict.keys():
            if isinstance(thisdict[x], types.ModuleType):
                modname = thisdict[x].__name__
                if modname not in dictlist:
                    moddict = thisdict[x].__dict__
                    dictlist.append(modname)
                    totraverse.append(moddict)
                    thedict[modname] = moddict
    return thedict, dictlist


def _info(obj, output=None):
    """Provide information about ndarray obj.

    Parameters
    ----------
    obj : ndarray
        Must be ndarray, not checked.
    output
        Where printed output goes.

    Notes
    -----
    Copied over from the numarray module prior to its removal.
    Adapted somewhat as only numpy is an option now.

    Called by info.

    """
    extra = ""
    tic = ""
    bp = lambda x: x
    cls = getattr(obj, '__class__', type(obj))
    nm = getattr(cls, '__name__', cls)
    strides = obj.strides
    endian = obj.dtype.byteorder

    if output is None:
        output = sys.stdout

    print("class: ", nm, file=output)
    print("shape: ", obj.shape, file=output)
    print("strides: ", strides, file=output)
    print("itemsize: ", obj.itemsize, file=output)
    print("aligned: ", bp(obj.flags.aligned), file=output)
    print("contiguous: ", bp(obj.flags.contiguous), file=output)
    print("fortran: ", obj.flags.fortran, file=output)
    print(
        f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}",
        file=output
    )
    print("byteorder: ", end=' ', file=output)
    if endian in ['|', '=']:
        print(f"{tic}{sys.byteorder}{tic}", file=output)
        byteswap = False
    elif endian == '>':
        print(f"{tic}big{tic}", file=output)
        byteswap = sys.byteorder != "big"
    else:
        print(f"{tic}little{tic}", file=output)
        byteswap = sys.byteorder != "little"
    print("byteswap: ", bp(byteswap), file=output)
    print(f"type: {obj.dtype}", file=output)


@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
    """
    Get help information for an array, function, class, or module.

    Parameters
    ----------
    object : object or str, optional
        Input object or name to get information about. If `object` is
        an `ndarray` instance, information about the array is printed.
        If `object` is a numpy object, its docstring is given. If it is
        a string, available modules are searched for matching objects.
        If None, information about `info` itself is returned.
    maxwidth : int, optional
        Printing width.
    output : file like object, optional
        File like object that the output is written to, default is
        ``None``, in which case ``sys.stdout`` will be used.
        The object has to be opened in 'w' or 'a' mode.
    toplevel : str, optional
        Start search at this level.

    Notes
    -----
    When used interactively with an object, ``np.info(obj)`` is equivalent
    to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
    prompt.

    Examples
    --------
    >>> np.info(np.polyval) # doctest: +SKIP
       polyval(p, x)
         Evaluate the polynomial p at x.
         ...

    When using a string for `object` it is possible to get multiple results.

    >>> np.info('fft') # doctest: +SKIP
         *** Found in numpy ***
    Core FFT routines
    ...
         *** Found in numpy.fft ***
     fft(a, n=None, axis=-1)
    ...
         *** Repeat reference found in numpy.fft.fftpack ***
         *** Total of 3 references found. ***

    When the argument is an array, information about the array is printed.

    >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
    >>> np.info(a)
    class:  ndarray
    shape:  (2, 3)
    strides:  (24, 8)
    itemsize:  8
    aligned:  True
    contiguous:  True
    fortran:  False
    data pointer: 0x562b6e0d2860  # may vary
    byteorder:  little
    byteswap:  False
    type: complex64

    """
    global _namedict, _dictlist
    # Local import to speed up numpy's import time.
    import inspect
    import pydoc

    if (hasattr(object, '_ppimport_importer') or
            hasattr(object, '_ppimport_module')):
        object = object._ppimport_module
    elif hasattr(object, '_ppimport_attr'):
        object = object._ppimport_attr

    if output is None:
        output = sys.stdout

    if object is None:
        info(info)
    elif isinstance(object, ndarray):
        _info(object, output=output)
    elif isinstance(object, str):
        if _namedict is None:
            _namedict, _dictlist = _makenamedict(toplevel)
        numfound = 0
        objlist = []
        for namestr in _dictlist:
            try:
                obj = _namedict[namestr][object]
                if id(obj) in objlist:
                    print(f"\n     *** Repeat reference found in {namestr} *** ",
                          file=output
                          )
                else:
                    objlist.append(id(obj))
                    print(f"     *** Found in {namestr} ***", file=output)
                    info(obj)
                    print("-" * maxwidth, file=output)
                numfound += 1
            except KeyError:
                pass
        if numfound == 0:
            print(f"Help for {object} not found.", file=output)
        else:
            print("\n     "
                  "*** Total of %d references found. ***" % numfound,
                  file=output
                  )

    elif inspect.isfunction(object) or inspect.ismethod(object):
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            arguments = "()"

        if len(name + arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments

        print(" " + argstr + "\n", file=output)
        print(inspect.getdoc(object), file=output)

    elif inspect.isclass(object):
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            arguments = "()"

        if len(name + arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments

        print(" " + argstr + "\n", file=output)
        doc1 = inspect.getdoc(object)
        if doc1 is None:
            if hasattr(object, '__init__'):
                print(inspect.getdoc(object.__init__), file=output)
        else:
            print(inspect.getdoc(object), file=output)

        methods = pydoc.allmethods(object)

        public_methods = [meth for meth in methods if meth[0] != '_']
        if public_methods:
            print("\n\nMethods:\n", file=output)
            for meth in public_methods:
                thisobj = getattr(object, meth, None)
                if thisobj is not None:
                    methstr, other = pydoc.splitdoc(
                        inspect.getdoc(thisobj) or "None"
                    )
                print(f"  {meth}  --  {methstr}", file=output)

    elif hasattr(object, '__doc__'):
        print(inspect.getdoc(object), file=output)


def safe_eval(source):
    """
    Protected string evaluation.

    .. deprecated:: 2.0
        Use `ast.literal_eval` instead.

    Evaluate a string containing a Python literal expression without
    allowing the execution of arbitrary non-literal code.

    .. warning::

        This function is identical to :py:meth:`ast.literal_eval` and
        has the same security implications.  It may not always be safe
        to evaluate large input strings.

    Parameters
    ----------
    source : str
        The string to evaluate.

    Returns
    -------
    obj : object
        The result of evaluating `source`.

    Raises
    ------
    SyntaxError
        If the code has invalid Python syntax, or if it contains
        non-literal code.

    Examples
    --------
    >>> np.safe_eval('1')
    1
    >>> np.safe_eval('[1, 2, 3]')
    [1, 2, 3]
    >>> np.safe_eval('{"foo": ("bar", 10.0)}')
    {'foo': ('bar', 10.0)}

    >>> np.safe_eval('import os')
    Traceback (most recent call last):
      ...
    SyntaxError: invalid syntax

    >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
    Traceback (most recent call last):
      ...
    ValueError: malformed node or string: <_ast.Call object at 0x...>

    """
    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`safe_eval` is deprecated. Use `ast.literal_eval` instead. "
        "Be aware of security implications, such as memory exhaustion "
        "based attacks (deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    # Local import to speed up numpy's import time.
    import ast
    return ast.literal_eval(source)


def _median_nancheck(data, result, axis):
    """
    Utility function to check median result from data for NaN values at the end
    and return NaN in that case. Input result can also be a MaskedArray.

    Parameters
    ----------
    data : array
        Sorted input data to median function
    result : Array or MaskedArray
        Result of median function.
    axis : int
        Axis along which the median was computed.

    Returns
    -------
    result : scalar or ndarray
        Median or NaN in axes which contained NaN in the input. If the input
        was an array, NaN will be inserted in-place. If a scalar, either the
        input itself or a scalar NaN.
    """
    if data.size == 0:
        return result
    potential_nans = data.take(-1, axis=axis)
    n = np.isnan(potential_nans)
    # masked NaN values are ok, although for masked the copyto may fail for
    # unmasked ones (this was always broken) when the result is a scalar.
    if np.ma.isMaskedArray(n):
        n = n.filled(False)

    if not n.any():
        return result

    # Without given output, it is possible that the current result is a
    # numpy scalar, which is not writeable. If so, just return nan.
    if isinstance(result, np.generic):
        return potential_nans

    # Otherwise copy NaNs (if there are any)
    np.copyto(result, potential_nans, where=n)
    return result


def _opt_info():
    """
    Returns a string containing the CPU features supported
    by the current build.

    The format of the string can be explained as follows:
        - Dispatched features supported by the running machine end with `*`.
        - Dispatched features not supported by the running machine
          end with `?`.
        - Remaining features represent the baseline.

    Returns:
        str: A formatted string indicating the supported CPU features.
    """
    from numpy._core._multiarray_umath import (
        __cpu_baseline__,
        __cpu_dispatch__,
        __cpu_features__,
    )

    if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
        return ''

    enabled_features = ' '.join(__cpu_baseline__)
    for feature in __cpu_dispatch__:
        if __cpu_features__[feature]:
            enabled_features += f" {feature}*"
        else:
            enabled_features += f" {feature}?"

    return enabled_features


def drop_metadata(dtype, /):
    """
    Returns the dtype unchanged if it contained no metadata or a copy of the
    dtype if it (or any of its structure dtypes) contained metadata.

    This utility is used by `np.save` and `np.savez` to drop metadata before
    saving.

    .. note::

        Due to its limitation this function may move to a more appropriate
        home or change in the future and is considered semi-public API only.

    .. warning::

        This function does not preserve more strange things like record dtypes
        and user dtypes may simply return the wrong thing. If you need to be
        sure about the latter, check the result with:
        ``np.can_cast(new_dtype, dtype, casting="no")``.

    """
    if dtype.fields is not None:
        found_metadata = dtype.metadata is not None

        names = []
        formats = []
        offsets = []
        titles = []
        for name, field in dtype.fields.items():
            field_dt = drop_metadata(field[0])
            if field_dt is not field[0]:
                found_metadata = True

            names.append(name)
            formats.append(field_dt)
            offsets.append(field[1])
            titles.append(None if len(field) < 3 else field[2])

        if not found_metadata:
            return dtype

        structure = {
            'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles,
            'itemsize': dtype.itemsize}

        # NOTE: Could pass (dtype.type, structure) to preserve record dtypes...
        return np.dtype(structure, align=dtype.isalignedstruct)
    elif dtype.subdtype is not None:
        # subarray dtype
        subdtype, shape = dtype.subdtype
        new_subdtype = drop_metadata(subdtype)
        if dtype.metadata is None and new_subdtype is subdtype:
            return dtype

        return np.dtype((new_subdtype, shape))
    else:
        # Normal unstructured dtype
        if dtype.metadata is None:
            return dtype
        # Note that `dt.str` doesn't round-trip e.g. for user-dtypes.
        return np.dtype(dtype.str)
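A small demonstration of the contract described above: the same dtype object comes back when there is nothing to strip, and a metadata-free copy otherwise (importing from the private module shown in this commit):

import numpy as np
from numpy.lib._utils_impl import drop_metadata

plain = np.dtype(np.float64)
assert drop_metadata(plain) is plain           # no metadata: returned as-is

tagged = np.dtype(np.float64, metadata={"unit": "m"})
clean = drop_metadata(tagged)
assert tagged.metadata == {"unit": "m"}
assert clean.metadata is None and clean == tagged  # copy with metadata dropped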
10
lib/python3.11/site-packages/numpy/lib/_utils_impl.pyi
Normal file
@@ -0,0 +1,10 @@
from _typeshed import SupportsWrite

from numpy._typing import DTypeLike

__all__ = ["get_include", "info", "show_runtime"]

def get_include() -> str: ...
def show_runtime() -> None: ...
def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ...
def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ...
154
lib/python3.11/site-packages/numpy/lib/_version.py
Normal file
@@ -0,0 +1,154 @@
"""Utility to compare (NumPy) version strings.

The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.

"""
import re

__all__ = ['NumpyVersion']


class NumpyVersion:
    """Parse and compare numpy version strings.

    NumPy has the following versioning scheme (numbers given are examples; they
    can be > 9 in principle):

    - Released version: '1.8.0', '1.8.1', etc.
    - Alpha: '1.8.0a1', '1.8.0a2', etc.
    - Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
    - Development versions after a1: '1.8.0a1.dev-f1234afa',
                                     '1.8.0b2.dev-f1234afa',
                                     '1.8.1rc1.dev-f1234afa', etc.
    - Development versions (no git hash available): '1.8.0.dev-Unknown'

    Comparing needs to be done against a valid version string or other
    `NumpyVersion` instance. Note that all development versions of the same
    (pre-)release compare equal.

    Parameters
    ----------
    vstring : str
        NumPy version string (``np.__version__``).

    Examples
    --------
    >>> from numpy.lib import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
    ...     print('skip')
    >>> # skip

    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
    Traceback (most recent call last):
        ...
    ValueError: Not a valid numpy version string

    """

    __module__ = "numpy.lib"

    def __init__(self, vstring):
        self.vstring = vstring
        ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
        if not ver_main:
            raise ValueError("Not a valid numpy version string")

        self.version = ver_main.group()
        self.major, self.minor, self.bugfix = [int(x) for x in
                                               self.version.split('.')]
        if len(vstring) == ver_main.end():
            self.pre_release = 'final'
        else:
            alpha = re.match(r'a\d', vstring[ver_main.end():])
            beta = re.match(r'b\d', vstring[ver_main.end():])
            rc = re.match(r'rc\d', vstring[ver_main.end():])
            pre_rel = [m for m in [alpha, beta, rc] if m is not None]
            if pre_rel:
                self.pre_release = pre_rel[0].group()
            else:
                self.pre_release = ''

        self.is_devversion = bool(re.search(r'.dev', vstring))

    def _compare_version(self, other):
        """Compare major.minor.bugfix"""
        if self.major == other.major:
            if self.minor == other.minor:
                if self.bugfix == other.bugfix:
                    vercmp = 0
                elif self.bugfix > other.bugfix:
                    vercmp = 1
                else:
                    vercmp = -1
            elif self.minor > other.minor:
                vercmp = 1
            else:
                vercmp = -1
        elif self.major > other.major:
            vercmp = 1
        else:
            vercmp = -1

        return vercmp

    def _compare_pre_release(self, other):
        """Compare alpha/beta/rc/final."""
        if self.pre_release == other.pre_release:
            vercmp = 0
        elif self.pre_release == 'final':
            vercmp = 1
        elif other.pre_release == 'final':
            vercmp = -1
        elif self.pre_release > other.pre_release:
            vercmp = 1
        else:
            vercmp = -1

        return vercmp

    def _compare(self, other):
        if not isinstance(other, (str, NumpyVersion)):
            raise ValueError("Invalid object to compare with NumpyVersion.")

        if isinstance(other, str):
            other = NumpyVersion(other)

        vercmp = self._compare_version(other)
        if vercmp == 0:
            # Same x.y.z version, check for alpha/beta/rc
            vercmp = self._compare_pre_release(other)
            if vercmp == 0:
                # Same version and same pre-release, check if dev version
                if self.is_devversion is other.is_devversion:
                    vercmp = 0
                elif self.is_devversion:
                    vercmp = -1
                else:
                    vercmp = 1

        return vercmp

    def __lt__(self, other):
        return self._compare(other) < 0

    def __le__(self, other):
        return self._compare(other) <= 0

    def __eq__(self, other):
        return self._compare(other) == 0

    def __ne__(self, other):
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        return f"NumpyVersion({self.vstring})"
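A few comparisons illustrating the ordering the class implements (illustrative only, not part of the committed file):

import numpy as np
from numpy.lib import NumpyVersion

# alpha < beta < rc < final, and .dev builds sort below their release
assert NumpyVersion('1.8.0a2') < '1.8.0b1'
assert NumpyVersion('1.8.0rc1') < '1.8.0'
assert NumpyVersion('1.8.0.dev-f1234afa') < '1.8.0'

# typical feature gate
if NumpyVersion(np.__version__) >= '2.0.0':
    print('NumPy 2.x API available')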
17
lib/python3.11/site-packages/numpy/lib/_version.pyi
Normal file
17
lib/python3.11/site-packages/numpy/lib/_version.pyi
Normal file
@ -0,0 +1,17 @@
__all__ = ["NumpyVersion"]

class NumpyVersion:
    vstring: str
    version: str
    major: int
    minor: int
    bugfix: int
    pre_release: str
    is_devversion: bool
    def __init__(self, vstring: str) -> None: ...
    def __lt__(self, other: str | NumpyVersion) -> bool: ...
    def __le__(self, other: str | NumpyVersion) -> bool: ...
    def __eq__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
    def __ne__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
    def __gt__(self, other: str | NumpyVersion) -> bool: ...
    def __ge__(self, other: str | NumpyVersion) -> bool: ...
7
lib/python3.11/site-packages/numpy/lib/array_utils.py
Normal file
7
lib/python3.11/site-packages/numpy/lib/array_utils.py
Normal file
@ -0,0 +1,7 @@
from ._array_utils_impl import (  # noqa: F401
    __all__,
    __doc__,
    byte_bounds,
    normalize_axis_index,
    normalize_axis_tuple,
)
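The two axis helpers re-exported above normalize user-facing axis arguments; a short sketch (illustrative only, not part of the committed file):

import numpy as np
from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple

assert normalize_axis_index(-1, 3) == 2            # -1 -> last of 3 axes
assert normalize_axis_tuple((0, -1), 3) == (0, 2)  # normalizes and checks duplicates
try:
    normalize_axis_index(3, 3)                     # out of range
except np.exceptions.AxisError as e:
    print(e)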
12
lib/python3.11/site-packages/numpy/lib/array_utils.pyi
Normal file
12
lib/python3.11/site-packages/numpy/lib/array_utils.pyi
Normal file
@ -0,0 +1,12 @@
from ._array_utils_impl import (
    __all__ as __all__,
)
from ._array_utils_impl import (
    byte_bounds as byte_bounds,
)
from ._array_utils_impl import (
    normalize_axis_index as normalize_axis_index,
)
from ._array_utils_impl import (
    normalize_axis_tuple as normalize_axis_tuple,
)
24
lib/python3.11/site-packages/numpy/lib/format.py
Normal file
24
lib/python3.11/site-packages/numpy/lib/format.py
Normal file
@ -0,0 +1,24 @@
from ._format_impl import (  # noqa: F401
    ARRAY_ALIGN,
    BUFFER_SIZE,
    EXPECTED_KEYS,
    GROWTH_AXIS_MAX_DIGITS,
    MAGIC_LEN,
    MAGIC_PREFIX,
    __all__,
    __doc__,
    descr_to_dtype,
    drop_metadata,
    dtype_to_descr,
    header_data_from_array_1_0,
    isfileobj,
    magic,
    open_memmap,
    read_array,
    read_array_header_1_0,
    read_array_header_2_0,
    read_magic,
    write_array,
    write_array_header_1_0,
    write_array_header_2_0,
)
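A small .npy round trip through the re-exported format API (illustrative only, not part of the committed file; uses an in-memory buffer rather than a real file):

import io

import numpy as np
from numpy.lib import format as npy_format

buf = io.BytesIO()
arr = np.arange(6).reshape(2, 3)
npy_format.write_array(buf, arr)              # serialize as .npy
buf.seek(0)
assert npy_format.read_magic(buf) == (1, 0)   # format version from the header
buf.seek(0)
assert np.array_equal(npy_format.read_array(buf), arr)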
66
lib/python3.11/site-packages/numpy/lib/format.pyi
Normal file
66
lib/python3.11/site-packages/numpy/lib/format.pyi
Normal file
@ -0,0 +1,66 @@
from ._format_impl import (
    ARRAY_ALIGN as ARRAY_ALIGN,
)
from ._format_impl import (
    BUFFER_SIZE as BUFFER_SIZE,
)
from ._format_impl import (
    EXPECTED_KEYS as EXPECTED_KEYS,
)
from ._format_impl import (
    GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS,
)
from ._format_impl import (
    MAGIC_LEN as MAGIC_LEN,
)
from ._format_impl import (
    MAGIC_PREFIX as MAGIC_PREFIX,
)
from ._format_impl import (
    __all__ as __all__,
)
from ._format_impl import (
    __doc__ as __doc__,
)
from ._format_impl import (
    descr_to_dtype as descr_to_dtype,
)
from ._format_impl import (
    drop_metadata as drop_metadata,
)
from ._format_impl import (
    dtype_to_descr as dtype_to_descr,
)
from ._format_impl import (
    header_data_from_array_1_0 as header_data_from_array_1_0,
)
from ._format_impl import (
    isfileobj as isfileobj,
)
from ._format_impl import (
    magic as magic,
)
from ._format_impl import (
    open_memmap as open_memmap,
)
from ._format_impl import (
    read_array as read_array,
)
from ._format_impl import (
    read_array_header_1_0 as read_array_header_1_0,
)
from ._format_impl import (
    read_array_header_2_0 as read_array_header_2_0,
)
from ._format_impl import (
    read_magic as read_magic,
)
from ._format_impl import (
    write_array as write_array,
)
from ._format_impl import (
    write_array_header_1_0 as write_array_header_1_0,
)
from ._format_impl import (
    write_array_header_2_0 as write_array_header_2_0,
)
95
lib/python3.11/site-packages/numpy/lib/introspect.py
Normal file
95
lib/python3.11/site-packages/numpy/lib/introspect.py
Normal file
@ -0,0 +1,95 @@
"""
Introspection helper functions.
"""

__all__ = ['opt_func_info']


def opt_func_info(func_name=None, signature=None):
    """
    Returns a dictionary containing the currently supported CPU dispatched
    features for all optimized functions.

    Parameters
    ----------
    func_name : str (optional)
        Regular expression to filter by function name.

    signature : str (optional)
        Regular expression to filter by data type.

    Returns
    -------
    dict
        A dictionary where keys are optimized function names and values are
        nested dictionaries indicating supported targets based on data types.

    Examples
    --------
    Retrieve dispatch information for functions named 'add' or 'abs' and
    data types 'float64' or 'complex64':

    >>> import numpy as np
    >>> dict = np.lib.introspect.opt_func_info(
    ...     func_name="add|abs", signature="float64|complex64"
    ... )
    >>> import json
    >>> print(json.dumps(dict, indent=2))
    {
      "absolute": {
        "dd": {
          "current": "SSE41",
          "available": "SSE41 baseline(SSE SSE2 SSE3)"
        },
        "Ff": {
          "current": "FMA3__AVX2",
          "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
        },
        "Dd": {
          "current": "FMA3__AVX2",
          "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
        }
      },
      "add": {
        "ddd": {
          "current": "FMA3__AVX2",
          "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
        },
        "FFF": {
          "current": "FMA3__AVX2",
          "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
        }
      }
    }

    """
    import re

    from numpy._core._multiarray_umath import __cpu_targets_info__ as targets
    from numpy._core._multiarray_umath import dtype

    if func_name is not None:
        func_pattern = re.compile(func_name)
        matching_funcs = {
            k: v for k, v in targets.items()
            if func_pattern.search(k)
        }
    else:
        matching_funcs = targets

    if signature is not None:
        sig_pattern = re.compile(signature)
        matching_sigs = {}
        for k, v in matching_funcs.items():
            matching_chars = {}
            for chars, targets in v.items():
                if any(
                    sig_pattern.search(c) or sig_pattern.search(dtype(c).name)
                    for c in chars
                ):
                    matching_chars[chars] = targets
            if matching_chars:
                matching_sigs[k] = matching_chars
    else:
        matching_sigs = matching_funcs
    return matching_sigs
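A short call sketch (illustrative only, not part of the committed file; the reported targets depend on the CPU NumPy was built for and is running on):

import numpy as np

# Dispatch targets for sine/cosine loops, all signatures.
info = np.lib.introspect.opt_func_info(func_name="sin|cos")
for name, sigs in sorted(info.items()):
    print(name, sorted(sigs))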
3
lib/python3.11/site-packages/numpy/lib/introspect.pyi
Normal file
3
lib/python3.11/site-packages/numpy/lib/introspect.pyi
Normal file
@ -0,0 +1,3 @@
__all__ = ["opt_func_info"]

def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ...
180
lib/python3.11/site-packages/numpy/lib/mixins.py
Normal file
180
lib/python3.11/site-packages/numpy/lib/mixins.py
Normal file
@ -0,0 +1,180 @@
"""
Mixin classes for custom array types that don't inherit from ndarray.
"""

__all__ = ['NDArrayOperatorsMixin']


def _disables_array_ufunc(obj):
    """True when __array_ufunc__ is set to None."""
    try:
        return obj.__array_ufunc__ is None
    except AttributeError:
        return False


def _binary_method(ufunc, name):
    """Implement a forward binary method with a ufunc, e.g., __add__."""
    def func(self, other):
        if _disables_array_ufunc(other):
            return NotImplemented
        return ufunc(self, other)
    func.__name__ = f'__{name}__'
    return func


def _reflected_binary_method(ufunc, name):
    """Implement a reflected binary method with a ufunc, e.g., __radd__."""
    def func(self, other):
        if _disables_array_ufunc(other):
            return NotImplemented
        return ufunc(other, self)
    func.__name__ = f'__r{name}__'
    return func


def _inplace_binary_method(ufunc, name):
    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
    def func(self, other):
        return ufunc(self, other, out=(self,))
    func.__name__ = f'__i{name}__'
    return func


def _numeric_methods(ufunc, name):
    """Implement forward, reflected and inplace binary methods with a ufunc."""
    return (_binary_method(ufunc, name),
            _reflected_binary_method(ufunc, name),
            _inplace_binary_method(ufunc, name))


def _unary_method(ufunc, name):
    """Implement a unary special method with a ufunc."""
    def func(self):
        return ufunc(self)
    func.__name__ = f'__{name}__'
    return func


class NDArrayOperatorsMixin:
    """Mixin defining all operator special methods using __array_ufunc__.

    This class implements the special methods for almost all of Python's
    builtin operators defined in the `operator` module, including comparisons
    (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
    deferring to the ``__array_ufunc__`` method, which subclasses must
    implement.

    It is useful for writing classes that do not inherit from `numpy.ndarray`,
    but that should support arithmetic and numpy universal functions like
    arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`.

    As a trivial example, consider this implementation of an ``ArrayLike``
    class that simply wraps a NumPy array and ensures that the result of any
    arithmetic operation is also an ``ArrayLike`` object:

    >>> import numbers
    >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
    ...     def __init__(self, value):
    ...         self.value = np.asarray(value)
    ...
    ...     # One might also consider adding the built-in list type to this
    ...     # list, to support operations like np.add(array_like, list)
    ...     _HANDLED_TYPES = (np.ndarray, numbers.Number)
    ...
    ...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
    ...         out = kwargs.get('out', ())
    ...         for x in inputs + out:
    ...             # Only support operations with instances of
    ...             # _HANDLED_TYPES. Use ArrayLike instead of type(self)
    ...             # for isinstance to allow subclasses that don't
    ...             # override __array_ufunc__ to handle ArrayLike objects.
    ...             if not isinstance(
    ...                 x, self._HANDLED_TYPES + (ArrayLike,)
    ...             ):
    ...                 return NotImplemented
    ...
    ...         # Defer to the implementation of the ufunc
    ...         # on unwrapped values.
    ...         inputs = tuple(x.value if isinstance(x, ArrayLike) else x
    ...                        for x in inputs)
    ...         if out:
    ...             kwargs['out'] = tuple(
    ...                 x.value if isinstance(x, ArrayLike) else x
    ...                 for x in out)
    ...         result = getattr(ufunc, method)(*inputs, **kwargs)
    ...
    ...         if type(result) is tuple:
    ...             # multiple return values
    ...             return tuple(type(self)(x) for x in result)
    ...         elif method == 'at':
    ...             # no return value
    ...             return None
    ...         else:
    ...             # one return value
    ...             return type(self)(result)
    ...
    ...     def __repr__(self):
    ...         return '%s(%r)' % (type(self).__name__, self.value)

    In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
    the result is always another ``ArrayLike``:

    >>> x = ArrayLike([1, 2, 3])
    >>> x - 1
    ArrayLike(array([0, 1, 2]))
    >>> 1 - x
    ArrayLike(array([ 0, -1, -2]))
    >>> np.arange(3) - x
    ArrayLike(array([-1, -1, -1]))
    >>> x - np.arange(3)
    ArrayLike(array([1, 1, 1]))

    Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
    with arbitrary, unrecognized types. This ensures that interactions with
    ArrayLike preserve a well-defined casting hierarchy.

    """
    from numpy._core import umath as um

    __slots__ = ()
    # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
    # overrides NEP.

    # comparisons don't have reflected and in-place versions
    __lt__ = _binary_method(um.less, 'lt')
    __le__ = _binary_method(um.less_equal, 'le')
    __eq__ = _binary_method(um.equal, 'eq')
    __ne__ = _binary_method(um.not_equal, 'ne')
    __gt__ = _binary_method(um.greater, 'gt')
    __ge__ = _binary_method(um.greater_equal, 'ge')

    # numeric methods
    __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
    __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
    __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
    __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
        um.matmul, 'matmul')
    __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
        um.true_divide, 'truediv')
    __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
        um.floor_divide, 'floordiv')
    __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
    __divmod__ = _binary_method(um.divmod, 'divmod')
    __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
    # __idivmod__ does not exist
    # TODO: handle the optional third argument for __pow__?
    __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
    __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
        um.left_shift, 'lshift')
    __rshift__, __rrshift__, __irshift__ = _numeric_methods(
        um.right_shift, 'rshift')
    __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
    __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
    __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')

    # unary methods
    __neg__ = _unary_method(um.negative, 'neg')
    __pos__ = _unary_method(um.positive, 'pos')
    __abs__ = _unary_method(um.absolute, 'abs')
    __invert__ = _unary_method(um.invert, 'invert')
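A bare-bones subclass showing the dispatch path the mixin relies on (illustrative only, not part of the committed file; a stripped-down variant of the ArrayLike recipe in the docstring above):

import numpy as np

class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap inputs, apply the ufunc, and re-wrap the result.
        unwrapped = tuple(x.value if isinstance(x, Wrapped) else x
                          for x in inputs)
        return Wrapped(getattr(ufunc, method)(*unwrapped, **kwargs))

w = Wrapped([1, 2, 3])
print((w + 1).value)      # [2 3 4]  (__add__ -> um.add -> __array_ufunc__)
print(abs(w - 5).value)   # [4 3 2]  (__abs__ -> um.absolute)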
75
lib/python3.11/site-packages/numpy/lib/mixins.pyi
Normal file
75
lib/python3.11/site-packages/numpy/lib/mixins.pyi
Normal file
@ -0,0 +1,75 @@
from abc import ABC, abstractmethod
from typing import Any
from typing import Literal as L

from numpy import ufunc

__all__ = ["NDArrayOperatorsMixin"]

# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,
# even though it's reliant on subclasses implementing `__array_ufunc__`

# NOTE: The accepted input- and output-types of the various dunders are
# completely dependent on how `__array_ufunc__` is implemented.
# As such, only little type safety can be provided here.

class NDArrayOperatorsMixin(ABC):
    @abstractmethod
    def __array_ufunc__(
        self,
        ufunc: ufunc,
        method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"],
        *inputs: Any,
        **kwargs: Any,
    ) -> Any: ...
    def __lt__(self, other: Any) -> Any: ...
    def __le__(self, other: Any) -> Any: ...
    def __eq__(self, other: Any) -> Any: ...
    def __ne__(self, other: Any) -> Any: ...
    def __gt__(self, other: Any) -> Any: ...
    def __ge__(self, other: Any) -> Any: ...
    def __add__(self, other: Any) -> Any: ...
    def __radd__(self, other: Any) -> Any: ...
    def __iadd__(self, other: Any) -> Any: ...
    def __sub__(self, other: Any) -> Any: ...
    def __rsub__(self, other: Any) -> Any: ...
    def __isub__(self, other: Any) -> Any: ...
    def __mul__(self, other: Any) -> Any: ...
    def __rmul__(self, other: Any) -> Any: ...
    def __imul__(self, other: Any) -> Any: ...
    def __matmul__(self, other: Any) -> Any: ...
    def __rmatmul__(self, other: Any) -> Any: ...
    def __imatmul__(self, other: Any) -> Any: ...
    def __truediv__(self, other: Any) -> Any: ...
    def __rtruediv__(self, other: Any) -> Any: ...
    def __itruediv__(self, other: Any) -> Any: ...
    def __floordiv__(self, other: Any) -> Any: ...
    def __rfloordiv__(self, other: Any) -> Any: ...
    def __ifloordiv__(self, other: Any) -> Any: ...
    def __mod__(self, other: Any) -> Any: ...
    def __rmod__(self, other: Any) -> Any: ...
    def __imod__(self, other: Any) -> Any: ...
    def __divmod__(self, other: Any) -> Any: ...
    def __rdivmod__(self, other: Any) -> Any: ...
    def __pow__(self, other: Any) -> Any: ...
    def __rpow__(self, other: Any) -> Any: ...
    def __ipow__(self, other: Any) -> Any: ...
    def __lshift__(self, other: Any) -> Any: ...
    def __rlshift__(self, other: Any) -> Any: ...
    def __ilshift__(self, other: Any) -> Any: ...
    def __rshift__(self, other: Any) -> Any: ...
    def __rrshift__(self, other: Any) -> Any: ...
    def __irshift__(self, other: Any) -> Any: ...
    def __and__(self, other: Any) -> Any: ...
    def __rand__(self, other: Any) -> Any: ...
    def __iand__(self, other: Any) -> Any: ...
    def __xor__(self, other: Any) -> Any: ...
    def __rxor__(self, other: Any) -> Any: ...
    def __ixor__(self, other: Any) -> Any: ...
    def __or__(self, other: Any) -> Any: ...
    def __ror__(self, other: Any) -> Any: ...
    def __ior__(self, other: Any) -> Any: ...
    def __neg__(self) -> Any: ...
    def __pos__(self) -> Any: ...
    def __abs__(self) -> Any: ...
    def __invert__(self) -> Any: ...
1
lib/python3.11/site-packages/numpy/lib/npyio.py
Normal file
1
lib/python3.11/site-packages/numpy/lib/npyio.py
Normal file
@ -0,0 +1 @@
from ._npyio_impl import DataSource, NpzFile, __doc__  # noqa: F401
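A quick look at `NpzFile`, one of the names re-exported above (illustrative only, not part of the committed file; `np.load` on a zipped archive returns an `NpzFile`):

import io

import numpy as np

buf = io.BytesIO()
np.savez(buf, a=np.arange(3), b=np.eye(2))    # zip of .npy members
buf.seek(0)
with np.load(buf) as npz:                     # numpy.lib.npyio.NpzFile
    print(npz.files)                          # ['a', 'b']
    print(npz['a'])                           # [0 1 2]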
9
lib/python3.11/site-packages/numpy/lib/npyio.pyi
Normal file
9
lib/python3.11/site-packages/numpy/lib/npyio.pyi
Normal file
@ -0,0 +1,9 @@
from numpy.lib._npyio_impl import (
    DataSource as DataSource,
)
from numpy.lib._npyio_impl import (
    NpzFile as NpzFile,
)
from numpy.lib._npyio_impl import (
    __doc__ as __doc__,
)
1681
lib/python3.11/site-packages/numpy/lib/recfunctions.py
Normal file
1681
lib/python3.11/site-packages/numpy/lib/recfunctions.py
Normal file
File diff suppressed because it is too large
435
lib/python3.11/site-packages/numpy/lib/recfunctions.pyi
Normal file
435
lib/python3.11/site-packages/numpy/lib/recfunctions.pyi
Normal file
@ -0,0 +1,435 @@
from collections.abc import Callable, Iterable, Mapping, Sequence
from typing import Any, Literal, TypeAlias, overload

from _typeshed import Incomplete
from typing_extensions import TypeVar

import numpy as np
import numpy.typing as npt
from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid
from numpy.ma.mrecords import MaskedRecords

__all__ = [
    "append_fields",
    "apply_along_fields",
    "assign_fields_by_name",
    "drop_fields",
    "find_duplicates",
    "flatten_descr",
    "get_fieldstructure",
    "get_names",
    "get_names_flat",
    "join_by",
    "merge_arrays",
    "rec_append_fields",
    "rec_drop_fields",
    "rec_join",
    "recursive_fill_fields",
    "rename_fields",
    "repack_fields",
    "require_fields",
    "stack_arrays",
    "structured_to_unstructured",
    "unstructured_to_structured",
]

_T = TypeVar("_T")
_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])
_ScalarT = TypeVar("_ScalarT", bound=np.generic)
_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any])
_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void])
_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType)

_OneOrMany: TypeAlias = _T | Iterable[_T]
_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T]

_NestedNames: TypeAlias = tuple[str | _NestedNames, ...]
_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_
_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType

_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"]

###

def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ...

#
def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ...
def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ...

#
@overload
def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ...
@overload
def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ...

#
def get_fieldstructure(
    adtype: np.dtype[np.void],
    lastname: str | None = None,
    parents: dict[str, list[str]] | None = None,
) -> dict[str, list[str]]: ...

#
@overload
def merge_arrays(
    seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype],
    fill_value: float = -1,
    flatten: bool = False,
    usemask: bool = False,
    asrecarray: bool = False,
) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def merge_arrays(
    seqarrays: Sequence[npt.ArrayLike] | np.void,
    fill_value: float = -1,
    flatten: bool = False,
    usemask: bool = False,
    asrecarray: bool = False,
) -> np.recarray[_AnyShape, np.dtype[np.void]]: ...

#
@overload
def drop_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    drop_names: str | Iterable[str],
    usemask: bool = True,
    asrecarray: Literal[False] = False,
) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def drop_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    drop_names: str | Iterable[str],
    usemask: bool,
    asrecarray: Literal[True],
) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def drop_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    drop_names: str | Iterable[str],
    usemask: bool = True,
    *,
    asrecarray: Literal[True],
) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...

#
@overload
def rename_fields(
    base: MaskedRecords[_ShapeT, np.dtype[np.void]],
    namemapper: Mapping[str, str],
) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...
@overload
def rename_fields(
    base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
    namemapper: Mapping[str, str],
) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...
@overload
def rename_fields(
    base: np.recarray[_ShapeT, np.dtype[np.void]],
    namemapper: Mapping[str, str],
) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def rename_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    namemapper: Mapping[str, str],
) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...

#
@overload
def append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None,
    fill_value: int,
    usemask: Literal[False],
    asrecarray: Literal[False] = False,
) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None = None,
    fill_value: int = -1,
    *,
    usemask: Literal[False],
    asrecarray: Literal[False] = False,
) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None,
    fill_value: int,
    usemask: Literal[False],
    asrecarray: Literal[True],
) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None = None,
    fill_value: int = -1,
    *,
    usemask: Literal[False],
    asrecarray: Literal[True],
) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
@overload
def append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None = None,
    fill_value: int = -1,
    usemask: Literal[True] = True,
    asrecarray: Literal[False] = False,
) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...
@overload
def append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None,
    fill_value: int,
    usemask: Literal[True],
    asrecarray: Literal[True],
) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...
@overload
def append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None = None,
    fill_value: int = -1,
    usemask: Literal[True] = True,
    *,
    asrecarray: Literal[True],
) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...

#
def rec_drop_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    drop_names: str | Iterable[str],
) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...

#
def rec_append_fields(
    base: np.ndarray[_ShapeT, np.dtype[np.void]],
    names: _OneOrMany[str],
    data: _OneOrMany[npt.NDArray[Any]],
    dtypes: _BuiltinSequence[np.dtype] | None = None,
) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...

# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented,
# e.g. using a `TypeVar` with constraints.
# https://github.com/numpy/numtype/issues/92
@overload
def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ...
@overload
def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ...
@overload
def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ...

# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1)
@overload
def structured_to_unstructured(
    arr: npt.NDArray[np.void],
    dtype: _DTypeLike[_ScalarT],
    copy: bool = False,
    casting: np._CastingKind = "unsafe",
) -> npt.NDArray[_ScalarT]: ...
@overload
def structured_to_unstructured(
    arr: npt.NDArray[np.void],
    dtype: npt.DTypeLike | None = None,
    copy: bool = False,
    casting: np._CastingKind = "unsafe",
) -> npt.NDArray[Any]: ...

#
@overload
def unstructured_to_structured(
    arr: npt.NDArray[Any],
    dtype: npt.DTypeLike,
    names: None = None,
    align: bool = False,
    copy: bool = False,
    casting: str = "unsafe",
) -> npt.NDArray[np.void]: ...
@overload
def unstructured_to_structured(
    arr: npt.NDArray[Any],
    dtype: None,
    names: _OneOrMany[str],
    align: bool = False,
    copy: bool = False,
    casting: str = "unsafe",
) -> npt.NDArray[np.void]: ...

#
def apply_along_fields(
    func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]],
    arr: np.ndarray[_ShapeT, np.dtype[np.void]],
) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...

#
def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ...

#
def require_fields(
    array: np.ndarray[_ShapeT, np.dtype[np.void]],
    required_dtype: _DTypeLikeVoid,
) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...

# TODO(jorenham): Attempt shape-typing
@overload
def stack_arrays(
    arrays: _ArrayT,
    defaults: Mapping[str, object] | None = None,
    usemask: bool = True,
    asrecarray: bool = False,
    autoconvert: bool = False,
) -> _ArrayT: ...
@overload
def stack_arrays(
    arrays: Sequence[npt.NDArray[Any]],
    defaults: Mapping[str, Incomplete] | None,
    usemask: Literal[False],
    asrecarray: Literal[False] = False,
    autoconvert: bool = False,
) -> npt.NDArray[np.void]: ...
@overload
def stack_arrays(
    arrays: Sequence[npt.NDArray[Any]],
    defaults: Mapping[str, Incomplete] | None = None,
    *,
    usemask: Literal[False],
    asrecarray: Literal[False] = False,
    autoconvert: bool = False,
) -> npt.NDArray[np.void]: ...
@overload
def stack_arrays(
    arrays: Sequence[npt.NDArray[Any]],
    defaults: Mapping[str, Incomplete] | None = None,
    *,
    usemask: Literal[False],
    asrecarray: Literal[True],
    autoconvert: bool = False,
) -> np.recarray[_AnyShape, np.dtype[np.void]]: ...
@overload
def stack_arrays(
    arrays: Sequence[npt.NDArray[Any]],
    defaults: Mapping[str, Incomplete] | None = None,
    usemask: Literal[True] = True,
    asrecarray: Literal[False] = False,
    autoconvert: bool = False,
) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ...
@overload
def stack_arrays(
    arrays: Sequence[npt.NDArray[Any]],
    defaults: Mapping[str, Incomplete] | None,
    usemask: Literal[True],
    asrecarray: Literal[True],
    autoconvert: bool = False,
) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ...
@overload
def stack_arrays(
    arrays: Sequence[npt.NDArray[Any]],
    defaults: Mapping[str, Incomplete] | None = None,
    usemask: Literal[True] = True,
    *,
    asrecarray: Literal[True],
    autoconvert: bool = False,
) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ...

#
@overload
def find_duplicates(
    a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
    key: str | None = None,
    ignoremask: bool = True,
    return_index: Literal[False] = False,
) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...
@overload
def find_duplicates(
    a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
    key: str | None,
    ignoremask: bool,
    return_index: Literal[True],
) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ...
@overload
def find_duplicates(
    a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
    key: str | None = None,
    ignoremask: bool = True,
    *,
    return_index: Literal[True],
) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ...

#
@overload
def join_by(
    key: str | Sequence[str],
    r1: npt.NDArray[np.void],
    r2: npt.NDArray[np.void],
    jointype: _JoinType = "inner",
    r1postfix: str = "1",
    r2postfix: str = "2",
    defaults: Mapping[str, object] | None = None,
    *,
    usemask: Literal[False],
    asrecarray: Literal[False] = False,
) -> np.ndarray[tuple[int], np.dtype[np.void]]: ...
@overload
def join_by(
    key: str | Sequence[str],
    r1: npt.NDArray[np.void],
    r2: npt.NDArray[np.void],
    jointype: _JoinType = "inner",
    r1postfix: str = "1",
    r2postfix: str = "2",
    defaults: Mapping[str, object] | None = None,
    *,
    usemask: Literal[False],
    asrecarray: Literal[True],
) -> np.recarray[tuple[int], np.dtype[np.void]]: ...
@overload
def join_by(
    key: str | Sequence[str],
    r1: npt.NDArray[np.void],
    r2: npt.NDArray[np.void],
    jointype: _JoinType = "inner",
    r1postfix: str = "1",
    r2postfix: str = "2",
    defaults: Mapping[str, object] | None = None,
    usemask: Literal[True] = True,
    asrecarray: Literal[False] = False,
) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ...
@overload
def join_by(
    key: str | Sequence[str],
    r1: npt.NDArray[np.void],
    r2: npt.NDArray[np.void],
    jointype: _JoinType = "inner",
    r1postfix: str = "1",
    r2postfix: str = "2",
    defaults: Mapping[str, object] | None = None,
    usemask: Literal[True] = True,
    *,
    asrecarray: Literal[True],
) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ...

#
def rec_join(
    key: str | Sequence[str],
    r1: npt.NDArray[np.void],
    r2: npt.NDArray[np.void],
    jointype: _JoinType = "inner",
    r1postfix: str = "1",
    r2postfix: str = "2",
    defaults: Mapping[str, object] | None = None,
) -> np.recarray[tuple[int], np.dtype[np.void]]: ...
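A round trip through the two conversion functions stubbed above (illustrative only, not part of the committed file):

import numpy as np
from numpy.lib import recfunctions as rfn

pts = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=[('x', 'f8'), ('y', 'f8')])
flat = rfn.structured_to_unstructured(pts)    # plain (2, 2) float64 array
assert flat.shape == (2, 2)
back = rfn.unstructured_to_structured(flat, dtype=pts.dtype)
assert back.dtype == pts.dtype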
13
lib/python3.11/site-packages/numpy/lib/scimath.py
Normal file
13
lib/python3.11/site-packages/numpy/lib/scimath.py
Normal file
@ -0,0 +1,13 @@
from ._scimath_impl import (  # noqa: F401
    __all__,
    __doc__,
    arccos,
    arcsin,
    arctanh,
    log,
    log2,
    log10,
    logn,
    power,
    sqrt,
)
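What distinguishes these re-exports from the top-level ufuncs (illustrative only, not part of the committed file): they promote to complex where the real-valued versions would return nan:

import numpy as np
from numpy.lib import scimath

print(np.sqrt(-1))         # nan, with a RuntimeWarning
print(scimath.sqrt(-1))    # 1j
print(scimath.log(-np.e))  # (1+3.141592653589793j)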
30
lib/python3.11/site-packages/numpy/lib/scimath.pyi
Normal file
30
lib/python3.11/site-packages/numpy/lib/scimath.pyi
Normal file
@ -0,0 +1,30 @@
from ._scimath_impl import (
    __all__ as __all__,
)
from ._scimath_impl import (
    arccos as arccos,
)
from ._scimath_impl import (
    arcsin as arcsin,
)
from ._scimath_impl import (
    arctanh as arctanh,
)
from ._scimath_impl import (
    log as log,
)
from ._scimath_impl import (
    log2 as log2,
)
from ._scimath_impl import (
    log10 as log10,
)
from ._scimath_impl import (
    logn as logn,
)
from ._scimath_impl import (
    power as power,
)
from ._scimath_impl import (
    sqrt as sqrt,
)
1
lib/python3.11/site-packages/numpy/lib/stride_tricks.py
Normal file
1
lib/python3.11/site-packages/numpy/lib/stride_tricks.py
Normal file
@ -0,0 +1 @@
from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view  # noqa: F401
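A minimal demonstration of the re-exported `sliding_window_view` (illustrative only, not part of the committed file; the result is a zero-copy view):

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(6)
print(sliding_window_view(x, 3))
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]
#  [3 4 5]]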
6
lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi
Normal file
6
lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi
Normal file
@ -0,0 +1,6 @@
from numpy.lib._stride_tricks_impl import (
    as_strided as as_strided,
)
from numpy.lib._stride_tricks_impl import (
    sliding_window_view as sliding_window_view,
)
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py2-objarr.npy
Normal file
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py2-objarr.npy
Normal file
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py2-objarr.npz
Normal file
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py2-objarr.npz
Normal file
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py3-objarr.npy
Normal file
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py3-objarr.npy
Normal file
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py3-objarr.npz
Normal file
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/py3-objarr.npz
Normal file
Binary file not shown.
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/python3.npy
Normal file
BIN
lib/python3.11/site-packages/numpy/lib/tests/data/python3.npy
Normal file
Binary file not shown.
Binary file not shown.
352
lib/python3.11/site-packages/numpy/lib/tests/test__datasource.py
Normal file
352
lib/python3.11/site-packages/numpy/lib/tests/test__datasource.py
Normal file
@ -0,0 +1,352 @@
import os
import urllib.request as urllib_request
from shutil import rmtree
from tempfile import NamedTemporaryFile, mkdtemp, mkstemp
from urllib.error import URLError
from urllib.parse import urlparse

import pytest

import numpy.lib._datasource as datasource
from numpy.testing import assert_, assert_equal, assert_raises


def urlopen_stub(url, data=None):
    '''Stub to replace urlopen for testing.'''
    if url == valid_httpurl():
        tmpfile = NamedTemporaryFile(prefix='urltmp_')
        return tmpfile
    else:
        raise URLError('Name or service not known')


# setup and teardown
old_urlopen = None


def setup_module():
    global old_urlopen

    old_urlopen = urllib_request.urlopen
    urllib_request.urlopen = urlopen_stub


def teardown_module():
    urllib_request.urlopen = old_urlopen


# A valid website for more robust testing
http_path = 'http://www.google.com/'
http_file = 'index.html'

http_fakepath = 'http://fake.abc.web/site/'
http_fakefile = 'fake.txt'

malicious_files = ['/etc/shadow', '../../shadow',
                   '..\\system.dat', 'c:\\windows\\system.dat']

magic_line = b'three is the magic number'


# Utility functions used by many tests
def valid_textfile(filedir):
    # Generate and return a valid temporary file.
    fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
    os.close(fd)
    return path


def invalid_textfile(filedir):
    # Generate and return an invalid filename.
    fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
    os.close(fd)
    os.remove(path)
    return path


def valid_httpurl():
    return http_path + http_file


def invalid_httpurl():
    return http_fakepath + http_fakefile


def valid_baseurl():
    return http_path


def invalid_baseurl():
    return http_fakepath


def valid_httpfile():
    return http_file


def invalid_httpfile():
    return http_fakefile


class TestDataSourceOpen:
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.ds = datasource.DataSource(self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.ds

    def test_ValidHTTP(self):
        fh = self.ds.open(valid_httpurl())
        assert_(fh)
        fh.close()

    def test_InvalidHTTP(self):
        url = invalid_httpurl()
        assert_raises(OSError, self.ds.open, url)
        try:
            self.ds.open(url)
        except OSError as e:
            # Regression test for bug fixed in r4342.
            assert_(e.errno is None)

    def test_InvalidHTTPCacheURLError(self):
        assert_raises(URLError, self.ds._cache, invalid_httpurl())

    def test_ValidFile(self):
        local_file = valid_textfile(self.tmpdir)
        fh = self.ds.open(local_file)
        assert_(fh)
        fh.close()

    def test_InvalidFile(self):
        invalid_file = invalid_textfile(self.tmpdir)
        assert_raises(OSError, self.ds.open, invalid_file)

    def test_ValidGzipFile(self):
        try:
            import gzip
        except ImportError:
            # We don't have the gzip capabilities to test.
            pytest.skip()
        # Test datasource's internal file_opener for Gzip files.
        filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
        fp = gzip.open(filepath, 'w')
        fp.write(magic_line)
        fp.close()
        fp = self.ds.open(filepath)
        result = fp.readline()
        fp.close()
        assert_equal(magic_line, result)

    def test_ValidBz2File(self):
        try:
            import bz2
        except ImportError:
            # We don't have the bz2 capabilities to test.
            pytest.skip()
        # Test datasource's internal file_opener for BZip2 files.
        filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
        fp = bz2.BZ2File(filepath, 'w')
        fp.write(magic_line)
        fp.close()
        fp = self.ds.open(filepath)
        result = fp.readline()
        fp.close()
        assert_equal(magic_line, result)


class TestDataSourceExists:
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.ds = datasource.DataSource(self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.ds

    def test_ValidHTTP(self):
        assert_(self.ds.exists(valid_httpurl()))

    def test_InvalidHTTP(self):
        assert_equal(self.ds.exists(invalid_httpurl()), False)

    def test_ValidFile(self):
        # Test valid file in destpath
        tmpfile = valid_textfile(self.tmpdir)
        assert_(self.ds.exists(tmpfile))
        # Test valid local file not in destpath
        localdir = mkdtemp()
        tmpfile = valid_textfile(localdir)
        assert_(self.ds.exists(tmpfile))
        rmtree(localdir)

    def test_InvalidFile(self):
        tmpfile = invalid_textfile(self.tmpdir)
        assert_equal(self.ds.exists(tmpfile), False)


class TestDataSourceAbspath:
    def setup_method(self):
        self.tmpdir = os.path.abspath(mkdtemp())
        self.ds = datasource.DataSource(self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.ds

    def test_ValidHTTP(self):
        scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
        local_path = os.path.join(self.tmpdir, netloc,
                                  upath.strip(os.sep).strip('/'))
        assert_equal(local_path, self.ds.abspath(valid_httpurl()))

    def test_ValidFile(self):
        tmpfile = valid_textfile(self.tmpdir)
        tmpfilename = os.path.split(tmpfile)[-1]
        # Test with filename only
        assert_equal(tmpfile, self.ds.abspath(tmpfilename))
        # Test filename with complete path
        assert_equal(tmpfile, self.ds.abspath(tmpfile))

    def test_InvalidHTTP(self):
        scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
        invalidhttp = os.path.join(self.tmpdir, netloc,
                                   upath.strip(os.sep).strip('/'))
        assert_(invalidhttp != self.ds.abspath(valid_httpurl()))

    def test_InvalidFile(self):
        invalidfile = valid_textfile(self.tmpdir)
        tmpfile = valid_textfile(self.tmpdir)
        tmpfilename = os.path.split(tmpfile)[-1]
        # Test with filename only
        assert_(invalidfile != self.ds.abspath(tmpfilename))
        # Test filename with complete path
        assert_(invalidfile != self.ds.abspath(tmpfile))

    def test_sandboxing(self):
        tmpfile = valid_textfile(self.tmpdir)
        tmpfilename = os.path.split(tmpfile)[-1]

        tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))

        assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir))
        assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir))
        assert_(tmp_path(tmpfile).startswith(self.tmpdir))
        assert_(tmp_path(tmpfilename).startswith(self.tmpdir))
        for fn in malicious_files:
            assert_(tmp_path(http_path + fn).startswith(self.tmpdir))
            assert_(tmp_path(fn).startswith(self.tmpdir))

    def test_windows_os_sep(self):
        orig_os_sep = os.sep
        try:
            os.sep = '\\'
            self.test_ValidHTTP()
            self.test_ValidFile()
            self.test_InvalidHTTP()
            self.test_InvalidFile()
            self.test_sandboxing()
        finally:
            os.sep = orig_os_sep


class TestRepositoryAbspath:
    def setup_method(self):
        self.tmpdir = os.path.abspath(mkdtemp())
        self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.repos

    def test_ValidHTTP(self):
        scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
        local_path = os.path.join(self.repos._destpath, netloc,
                                  upath.strip(os.sep).strip('/'))
        filepath = self.repos.abspath(valid_httpfile())
        assert_equal(local_path, filepath)

    def test_sandboxing(self):
        tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
        assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))
        for fn in malicious_files:
            assert_(tmp_path(http_path + fn).startswith(self.tmpdir))
            assert_(tmp_path(fn).startswith(self.tmpdir))

    def test_windows_os_sep(self):
        orig_os_sep = os.sep
        try:
            os.sep = '\\'
            self.test_ValidHTTP()
            self.test_sandboxing()
        finally:
            os.sep = orig_os_sep


class TestRepositoryExists:
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.repos

    def test_ValidFile(self):
        # Create local temp file
        tmpfile = valid_textfile(self.tmpdir)
        assert_(self.repos.exists(tmpfile))

    def test_InvalidFile(self):
        tmpfile = invalid_textfile(self.tmpdir)
        assert_equal(self.repos.exists(tmpfile), False)

    def test_RemoveHTTPFile(self):
        assert_(self.repos.exists(valid_httpurl()))

    def test_CachedHTTPFile(self):
        localfile = valid_httpurl()
        # Create a locally cached temp file with a URL-based
        # directory structure. This is similar to what Repository.open
        # would do.
        scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
        local_path = os.path.join(self.repos._destpath, netloc)
        os.mkdir(local_path, 0o0700)
        tmpfile = valid_textfile(local_path)
        assert_(self.repos.exists(tmpfile))


class TestOpenFunc:
    def setup_method(self):
        self.tmpdir = mkdtemp()

    def teardown_method(self):
        rmtree(self.tmpdir)

    def test_DataSourceOpen(self):
        local_file = valid_textfile(self.tmpdir)
        # Test case where destpath is passed in
        fp = datasource.open(local_file, destpath=self.tmpdir)
        assert_(fp)
        fp.close()
        # Test case where default destpath is used
        fp = datasource.open(local_file)
        assert_(fp)
        fp.close()


def test_del_attr_handling():
    # DataSource __del__ can be called
    # even if __init__ fails when the
    # Exception object is caught by the
    # caller as happens in refguide_check
    # is_deprecated() function

    ds = datasource.DataSource()
    # simulate failed __init__ by removing key attribute
    # produced within __init__ and expected by __del__
    del ds._istmpdest
    # should not raise an AttributeError if __del__
    # gracefully handles failed __init__:
    ds.__del__()
360
lib/python3.11/site-packages/numpy/lib/tests/test__iotools.py
Normal file
360
lib/python3.11/site-packages/numpy/lib/tests/test__iotools.py
Normal file
@ -0,0 +1,360 @@
|
||||
import time
|
||||
from datetime import date
|
||||
|
||||
import numpy as np
|
||||
from numpy.lib._iotools import (
|
||||
LineSplitter,
|
||||
NameValidator,
|
||||
StringConverter,
|
||||
easy_dtype,
|
||||
flatten_dtype,
|
||||
has_nested_fields,
|
||||
)
|
||||
from numpy.testing import (
|
||||
assert_,
|
||||
assert_allclose,
|
||||
assert_equal,
|
||||
assert_raises,
|
||||
)
|
||||
|
||||
|
||||
class TestLineSplitter:
|
||||
"Tests the LineSplitter class."
|
||||
|
||||
def test_no_delimiter(self):
|
||||
"Test LineSplitter w/o delimiter"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter()(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5'])
|
||||
test = LineSplitter('')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5'])
|
||||
|
||||
def test_space_delimiter(self):
|
||||
"Test space delimiter"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter(' ')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
test = LineSplitter(' ')(strg)
|
||||
assert_equal(test, ['1 2 3 4', '5'])
|
||||
|
||||
def test_tab_delimiter(self):
|
||||
"Test tab delimiter"
|
||||
strg = " 1\t 2\t 3\t 4\t 5 6"
|
||||
test = LineSplitter('\t')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5 6'])
|
||||
strg = " 1 2\t 3 4\t 5 6"
|
||||
test = LineSplitter('\t')(strg)
|
||||
assert_equal(test, ['1 2', '3 4', '5 6'])
|
||||
|
||||
def test_other_delimiter(self):
|
||||
"Test LineSplitter on delimiter"
|
||||
strg = "1,2,3,4,,5"
|
||||
test = LineSplitter(',')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
#
|
||||
strg = " 1,2,3,4,,5 # test"
|
||||
test = LineSplitter(',')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
|
||||
# gh-11028 bytes comment/delimiters should get encoded
|
||||
strg = b" 1,2,3,4,,5 % test"
|
||||
test = LineSplitter(delimiter=b',', comments=b'%')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
|
||||
def test_constant_fixed_width(self):
|
||||
"Test LineSplitter w/ fixed-width fields"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter(3)(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter(20)(strg)
|
||||
assert_equal(test, ['1 3 4 5 6'])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter(30)(strg)
|
||||
assert_equal(test, ['1 3 4 5 6'])
|
||||
|
||||
def test_variable_fixed_width(self):
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter((3, 6, 6, 3))(strg)
|
||||
assert_equal(test, ['1', '3', '4 5', '6'])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter((6, 6, 9))(strg)
|
||||
assert_equal(test, ['1', '3 4', '5 6'])
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNameValidator:
|
||||
|
||||
def test_case_sensitivity(self):
|
||||
"Test case sensitivity"
|
||||
names = ['A', 'a', 'b', 'c']
|
||||
test = NameValidator().validate(names)
|
||||
assert_equal(test, ['A', 'a', 'b', 'c'])
|
||||
test = NameValidator(case_sensitive=False).validate(names)
|
||||
assert_equal(test, ['A', 'A_1', 'B', 'C'])
|
||||
test = NameValidator(case_sensitive='upper').validate(names)
|
||||
assert_equal(test, ['A', 'A_1', 'B', 'C'])
|
||||
test = NameValidator(case_sensitive='lower').validate(names)
|
||||
assert_equal(test, ['a', 'a_1', 'b', 'c'])
|
||||
|
||||
# check exceptions
|
||||
assert_raises(ValueError, NameValidator, case_sensitive='foobar')
|
||||
|
||||
def test_excludelist(self):
|
||||
"Test excludelist"
|
||||
names = ['dates', 'data', 'Other Data', 'mask']
|
||||
validator = NameValidator(excludelist=['dates', 'data', 'mask'])
|
||||
test = validator.validate(names)
|
||||
assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
|
||||
|
||||
def test_missing_names(self):
|
||||
"Test validate missing names"
|
||||
namelist = ('a', 'b', 'c')
|
||||
validator = NameValidator()
|
||||
assert_equal(validator(namelist), ['a', 'b', 'c'])
|
||||
namelist = ('', 'b', 'c')
|
||||
assert_equal(validator(namelist), ['f0', 'b', 'c'])
|
||||
namelist = ('a', 'b', '')
|
||||
assert_equal(validator(namelist), ['a', 'b', 'f0'])
|
||||
namelist = ('', 'f0', '')
|
||||
assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
|
||||
|
||||
def test_validate_nb_names(self):
|
||||
"Test validate nb names"
|
||||
namelist = ('a', 'b', 'c')
|
||||
validator = NameValidator()
|
||||
assert_equal(validator(namelist, nbfields=1), ('a',))
|
||||
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
|
||||
['a', 'b', 'c', 'g0', 'g1'])
|
||||
|
||||
def test_validate_wo_names(self):
|
||||
"Test validate no names"
|
||||
namelist = None
|
||||
validator = NameValidator()
|
||||
assert_(validator(namelist) is None)
|
||||
assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
|
||||
|
||||


# -----------------------------------------------------------------------------


def _bytes_to_date(s):
    return date(*time.strptime(s, "%Y-%m-%d")[:3])


class TestStringConverter:
    "Test StringConverter"

    def test_creation(self):
        "Test creation of a StringConverter"
        converter = StringConverter(int, -99999)
        assert_equal(converter._status, 1)
        assert_equal(converter.default, -99999)

    def test_upgrade(self):
        "Tests the upgrade method."

        converter = StringConverter()
        assert_equal(converter._status, 0)

        # test int
        assert_equal(converter.upgrade('0'), 0)
        assert_equal(converter._status, 1)

        # On systems where long defaults to 32-bit, the statuses will be
        # offset by one, so we check for this here.
        import numpy._core.numeric as nx
        status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)

        # test int > 2**32
        assert_equal(converter.upgrade('17179869184'), 17179869184)
        assert_equal(converter._status, 1 + status_offset)

        # test float
        assert_allclose(converter.upgrade('0.'), 0.0)
        assert_equal(converter._status, 2 + status_offset)

        # test complex
        assert_equal(converter.upgrade('0j'), complex('0j'))
        assert_equal(converter._status, 3 + status_offset)

        # test str
        # note that the longdouble type has been skipped, so the
        # _status increases by 2. Everything should succeed with
        # unicode conversion (8).
        for s in ['a', b'a']:
            res = converter.upgrade(s)
            assert_(type(res) is str)
            assert_equal(res, 'a')
        assert_equal(converter._status, 8 + status_offset)

    def test_missing(self):
        "Tests the use of missing values."
        converter = StringConverter(missing_values=('missing',
                                                    'missed'))
        converter.upgrade('0')
        assert_equal(converter('0'), 0)
        assert_equal(converter(''), converter.default)
        assert_equal(converter('missing'), converter.default)
        assert_equal(converter('missed'), converter.default)
        # 'miss' is not a registered missing value, so conversion must fail
        assert_raises(ValueError, converter, 'miss')

    def test_upgrademapper(self):
        "Tests upgrade_mapper"
        dateparser = _bytes_to_date
        _original_mapper = StringConverter._mapper[:]
        try:
            StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
            convert = StringConverter(dateparser, date(2000, 1, 1))
            test = convert('2001-01-01')
            assert_equal(test, date(2001, 1, 1))
            test = convert('2009-01-01')
            assert_equal(test, date(2009, 1, 1))
            test = convert('')
            assert_equal(test, date(2000, 1, 1))
        finally:
            StringConverter._mapper = _original_mapper

    def test_string_to_object(self):
        "Make sure that string-to-object functions are properly recognized"
        old_mapper = StringConverter._mapper[:]  # copy of list
        conv = StringConverter(_bytes_to_date)
        assert_equal(conv._mapper, old_mapper)
        assert_(hasattr(conv, 'default'))

    def test_keep_default(self):
        "Make sure we don't lose an explicit default"
        converter = StringConverter(None, missing_values='',
                                    default=-999)
        converter.upgrade('3.14159265')
        assert_equal(converter.default, -999)
        assert_equal(converter.type, np.dtype(float))
        #
        converter = StringConverter(
            None, missing_values='', default=0)
        converter.upgrade('3.14159265')
        assert_equal(converter.default, 0)
        assert_equal(converter.type, np.dtype(float))

    def test_keep_default_zero(self):
        "Check that we don't lose a default of 0"
        converter = StringConverter(int, default=0,
                                    missing_values="N/A")
        assert_equal(converter.default, 0)

    def test_keep_missing_values(self):
        "Check that we're not losing missing values"
        converter = StringConverter(int, default=0,
                                    missing_values="N/A")
        assert_equal(
            converter.missing_values, {'', 'N/A'})

    def test_int64_dtype(self):
        "Check that int64 integer types can be specified"
        converter = StringConverter(np.int64, default=0)
        val = "-9223372036854775807"
        assert_(converter(val) == -9223372036854775807)
        val = "9223372036854775807"
        assert_(converter(val) == 9223372036854775807)

    def test_uint64_dtype(self):
        "Check that uint64 integer types can be specified"
        converter = StringConverter(np.uint64, default=0)
        val = "9223372043271415339"
        assert_(converter(val) == 9223372043271415339)
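

# Illustrative sketch (exposition only): StringConverter holds a ladder of
# increasingly general types -- bool, int, float, complex, ..., str -- and
# ``upgrade`` climbs that ladder until a conversion succeeds, which is what
# the ``_status`` assertions above track. Assumes the `StringConverter`
# imported at the top of this module.
def _example_converter_upgrade():
    converter = StringConverter()   # starts at the most specific type (bool)
    converter.upgrade('1')          # bool fails, upgrades to int
    converter.upgrade('1.5')        # int fails, upgrades to float
    return converter('2')           # now converts through float: 2.0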


class TestMiscFunctions:

    def test_has_nested_dtype(self):
        "Test has_nested_fields"
        ndtype = np.dtype(float)
        assert_equal(has_nested_fields(ndtype), False)
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        assert_equal(has_nested_fields(ndtype), False)
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        assert_equal(has_nested_fields(ndtype), True)

    def test_easy_dtype(self):
        "Test easy_dtype"
        # Simple case
        ndtype = float
        assert_equal(easy_dtype(ndtype), np.dtype(float))
        # As string w/o names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', "i4"), ('f1', "f8")]))
        # As string w/o names but different default format
        assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
                     np.dtype([('field_000', "i4"), ('field_001', "f8")]))
        # As string w/ names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (too many)
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (not enough)
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names=", b"),
                     np.dtype([('f0', "i4"), ('b', "f8")]))
        # ... (with different default format)
        assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
                     np.dtype([('a', "i4"), ('f00', "f8")]))
        # As list of tuples w/o names
        ndtype = [('A', int), ('B', float)]
        assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
        # As list of tuples w/ names
        assert_equal(easy_dtype(ndtype, names="a,b"),
                     np.dtype([('a', int), ('b', float)]))
        # As list of tuples w/ not enough names
        assert_equal(easy_dtype(ndtype, names="a"),
                     np.dtype([('a', int), ('f0', float)]))
        # As list of tuples w/ too many names
        assert_equal(easy_dtype(ndtype, names="a,b,c"),
                     np.dtype([('a', int), ('b', float)]))
        # As tuple of types w/o names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', int), ('f1', float), ('f2', float)]))
        # As tuple of types w/ names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', int), ('b', float), ('c', float)]))
        # As simple dtype w/ names
        ndtype = np.dtype(float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
        # As simple dtype w/o names (but multiple fields)
        ndtype = np.dtype(float)
        assert_equal(
            easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
            np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))

    def test_flatten_dtype(self):
        "Testing flatten_dtype"
        # Standard dtype
        dt = np.dtype([("a", "f8"), ("b", "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
        # Recursive dtype
        dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
        # dtype with shaped fields
        dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, int])
        dt_flat = flatten_dtype(dt, True)
        assert_equal(dt_flat, [float] * 2 + [int] * 3)
        # dtype w/ titles
        dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
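

# Illustrative sketch (exposition only): flatten_dtype reduces a possibly
# nested structured dtype to the flat list of its leaf types, optionally
# repeating shaped fields when ``flatten_base`` is true. Assumes the
# `flatten_dtype` imported at the top of this module.
def _example_flatten_dtype():
    dt = np.dtype([("pos", [("x", float), ("y", float)]), ("id", (int, 2))])
    flat = flatten_dtype(dt)               # [float, float, int]
    flat_base = flatten_dtype(dt, True)    # [float, float, int, int]
    return flat, flat_base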
@ -0,0 +1,64 @@
"""Tests for the NumpyVersion class.

"""
from numpy.lib import NumpyVersion
from numpy.testing import assert_, assert_raises


def test_main_versions():
    assert_(NumpyVersion('1.8.0') == '1.8.0')
    for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']:
        assert_(NumpyVersion('1.8.0') < ver)

    for ver in ['1.7.0', '1.7.1', '0.9.9']:
        assert_(NumpyVersion('1.8.0') > ver)


def test_version_1_point_10():
    # regression test for gh-2998.
    assert_(NumpyVersion('1.9.0') < '1.10.0')
    assert_(NumpyVersion('1.11.0') < '1.11.1')
    assert_(NumpyVersion('1.11.0') == '1.11.0')
    assert_(NumpyVersion('1.99.11') < '1.99.12')


def test_alpha_beta_rc():
    assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
    for ver in ['1.8.0', '1.8.0rc2']:
        assert_(NumpyVersion('1.8.0rc1') < ver)

    for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
        assert_(NumpyVersion('1.8.0rc1') > ver)

    assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')


def test_dev_version():
    assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
        assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)

    assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')


def test_dev_a_b_rc_mixed():
    assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
    assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')


def test_dev0_version():
    assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
        assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)

    assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')


def test_dev0_a_b_rc_mixed():
    assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
    assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')


def test_raises():
    for ver in ['1.9', '1,9.0', '1.7.x']:
        assert_raises(ValueError, NumpyVersion, ver)
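

# Illustrative sketch (exposition only): NumpyVersion compares a parsed
# version against plain strings, ordering dev builds before pre-releases
# and pre-releases before finals at the same version number.
def _example_version_ordering():
    v = NumpyVersion('1.9.0rc1')
    # dev < alpha < beta < rc < final at a given x.y.z
    return (v > '1.9.0b2', v < '1.9.0', v > '1.9.0rc1.dev0+deadbeef')
    # expected: (True, True, True)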
@ -0,0 +1,32 @@
import numpy as np
from numpy.lib import array_utils
from numpy.testing import assert_equal


class TestByteBounds:
    def test_byte_bounds(self):
        # pointer difference matches size * itemsize
        # due to contiguity
        a = np.arange(12).reshape(3, 4)
        low, high = array_utils.byte_bounds(a)
        assert_equal(high - low, a.size * a.itemsize)

    def test_unusual_order_positive_stride(self):
        a = np.arange(12).reshape(3, 4)
        b = a.T
        low, high = array_utils.byte_bounds(b)
        assert_equal(high - low, b.size * b.itemsize)

    def test_unusual_order_negative_stride(self):
        a = np.arange(12).reshape(3, 4)
        b = a.T[::-1]
        low, high = array_utils.byte_bounds(b)
        assert_equal(high - low, b.size * b.itemsize)

    def test_strided(self):
        a = np.arange(12)
        b = a[::2]
        low, high = array_utils.byte_bounds(b)
        # b views only every other element, so the last strided-over slot is
        # not part of the extent: high - low spans (b.size * 2 - 1) items
        assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)
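

# Illustrative sketch (exposition only): byte_bounds returns the half-open
# byte span [low, high) actually addressed by an array, so once strides
# skip memory, ``high - low`` reflects the real footprint rather than
# ``size * itemsize``.
def _example_byte_bounds_gap():
    a = np.arange(12)
    b = a[::2]  # 6 elements, but striding spans 11 of the 12 slots
    low, high = array_utils.byte_bounds(b)
    return (high - low) // b.itemsize  # 11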
1415
lib/python3.11/site-packages/numpy/lib/tests/test_arraypad.py
Normal file
File diff suppressed because it is too large
1074
lib/python3.11/site-packages/numpy/lib/tests/test_arraysetops.py
Normal file
File diff suppressed because it is too large
@ -0,0 +1,46 @@
from functools import reduce
from operator import mul

import numpy as np
from numpy.lib import Arrayterator
from numpy.random import randint
from numpy.testing import assert_


def test():
    np.random.seed(np.arange(10))

    # Create a random array
    ndims = randint(5) + 1
    shape = tuple(randint(10) + 1 for dim in range(ndims))
    els = reduce(mul, shape)
    a = np.arange(els)
    a.shape = shape

    buf_size = randint(2 * els)
    b = Arrayterator(a, buf_size)

    # Check that each block has at most ``buf_size`` elements
    for block in b:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that all elements are iterated correctly
    assert_(list(b.flat) == list(a.flat))

    # Slice arrayterator
    start = [randint(dim) for dim in shape]
    stop = [randint(dim) + 1 for dim in shape]
    step = [randint(dim) + 1 for dim in shape]
    slice_ = tuple(slice(*t) for t in zip(start, stop, step))
    c = b[slice_]
    d = a[slice_]

    # Check that each block has at most ``buf_size`` elements
    for block in c:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that the arrayterator is sliced correctly
    assert_(np.all(c.__array__() == d))

    # Check that all elements are iterated correctly
    assert_(list(c.flat) == list(d.flat))
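

# Illustrative sketch (exposition only): Arrayterator wraps an array and
# yields contiguous blocks of at most ``buf_size`` elements, which is why
# the assertions above bound ``len(block.flat)``. Blocks are carved along
# the leading dimensions, so element order is preserved.
def _example_buffered_iteration():
    a = np.arange(12).reshape(3, 4)
    # buf_size=5 holds at most one 4-element row at a time -> 3 row blocks
    blocks = [block.copy() for block in Arrayterator(a, buf_size=5)]
    assert np.concatenate([b.ravel() for b in blocks]).tolist() == list(range(12))
    return [b.shape for b in blocks]  # [(1, 4), (1, 4), (1, 4)]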
1054
lib/python3.11/site-packages/numpy/lib/tests/test_format.py
Normal file
File diff suppressed because it is too large
4573
lib/python3.11/site-packages/numpy/lib/tests/test_function_base.py
Normal file
File diff suppressed because it is too large
855
lib/python3.11/site-packages/numpy/lib/tests/test_histograms.py
Normal file
@ -0,0 +1,855 @@
import pytest

import numpy as np
from numpy import histogram, histogram_bin_edges, histogramdd
from numpy.testing import (
    assert_,
    assert_allclose,
    assert_almost_equal,
    assert_array_almost_equal,
    assert_array_equal,
    assert_array_max_ulp,
    assert_equal,
    assert_raises,
    assert_raises_regex,
    suppress_warnings,
)


class TestHistogram:

    def setup_method(self):
        pass

    def teardown_method(self):
        pass

    def test_simple(self):
        n = 100
        v = np.random.rand(n)
        (a, b) = histogram(v)
        # check if the sum of the bins equals the number of samples
        assert_equal(np.sum(a, axis=0), n)
        # check that the bin counts are evenly spaced when the data is from
        # a linear function
        (a, b) = histogram(np.linspace(0, 10, 100))
        assert_array_equal(a, 10)

    def test_one_bin(self):
        # Ticket 632
        hist, edges = histogram([1, 2, 3, 4], [1, 2])
        assert_array_equal(hist, [2, ])
        assert_array_equal(edges, [1, 2])
        assert_raises(ValueError, histogram, [1, 2], bins=0)
        h, e = histogram([1, 2], bins=1)
        assert_equal(h, np.array([2]))
        assert_allclose(e, np.array([1., 2.]))

    def test_density(self):
        # Check that the integral of the density equals 1.
        n = 100
        v = np.random.rand(n)
        a, b = histogram(v, density=True)
        area = np.sum(a * np.diff(b))
        assert_almost_equal(area, 1)

        # Check with non-constant bin widths
        v = np.arange(10)
        bins = [0, 1, 3, 6, 10]
        a, b = histogram(v, bins, density=True)
        assert_array_equal(a, .1)
        assert_equal(np.sum(a * np.diff(b)), 1)

        # Test that passing False works too
        a, b = histogram(v, bins, density=False)
        assert_array_equal(a, [1, 2, 3, 4])

        # Variable bin widths are especially useful to deal with
        # infinities.
        v = np.arange(10)
        bins = [0, 1, 3, 6, np.inf]
        a, b = histogram(v, bins, density=True)
        assert_array_equal(a, [.1, .1, .1, 0.])

        # Taken from a bug report from N. Becker on the numpy-discussion
        # mailing list Aug. 6, 2010.
        counts, dmy = np.histogram(
            [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
        assert_equal(counts, [.25, 0])

    def test_outliers(self):
        # Check that outliers are not tallied
        a = np.arange(10) + .5

        # Lower outliers
        h, b = histogram(a, range=[0, 9])
        assert_equal(h.sum(), 9)

        # Upper outliers
        h, b = histogram(a, range=[1, 10])
        assert_equal(h.sum(), 9)

        # Normalization
        h, b = histogram(a, range=[1, 9], density=True)
        assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)

        # Weights
        w = np.arange(10) + .5
        h, b = histogram(a, range=[1, 9], weights=w, density=True)
        assert_equal((h * np.diff(b)).sum(), 1)

        h, b = histogram(a, bins=8, range=[1, 9], weights=w)
        assert_equal(h, w[1:-1])

    def test_arr_weights_mismatch(self):
        a = np.arange(10) + .5
        w = np.arange(11) + .5
        with assert_raises_regex(ValueError, "same shape as"):
            h, b = histogram(a, range=[1, 9], weights=w, density=True)

    def test_type(self):
        # Check the type of the returned histogram
        a = np.arange(10) + .5
        h, b = histogram(a)
        assert_(np.issubdtype(h.dtype, np.integer))

        h, b = histogram(a, density=True)
        assert_(np.issubdtype(h.dtype, np.floating))

        h, b = histogram(a, weights=np.ones(10, int))
        assert_(np.issubdtype(h.dtype, np.integer))

        h, b = histogram(a, weights=np.ones(10, float))
        assert_(np.issubdtype(h.dtype, np.floating))

    def test_f32_rounding(self):
        # gh-4799, check that the rounding of the edges works with float32
        x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
        y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
        counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
        assert_equal(counts_hist.sum(), 3.)

    def test_bool_conversion(self):
        # gh-12107
        # Reference integer histogram
        a = np.array([1, 1, 0], dtype=np.uint8)
        int_hist, int_edges = np.histogram(a)

        # Should raise a warning on booleans.
        # Ensure that the histograms are equivalent; we need to suppress
        # the warnings to get the actual outputs
        with suppress_warnings() as sup:
            rec = sup.record(RuntimeWarning, 'Converting input from .*')
            hist, edges = np.histogram([True, True, False])
            # A warning should be issued
            assert_equal(len(rec), 1)
            assert_array_equal(hist, int_hist)
            assert_array_equal(edges, int_edges)

    def test_weights(self):
        v = np.random.rand(100)
        w = np.ones(100) * 5
        a, b = histogram(v)
        na, nb = histogram(v, density=True)
        wa, wb = histogram(v, weights=w)
        nwa, nwb = histogram(v, weights=w, density=True)
        assert_array_almost_equal(a * 5, wa)
        assert_array_almost_equal(na, nwa)

        # Check weights are properly applied.
        v = np.linspace(0, 10, 10)
        w = np.concatenate((np.zeros(5), np.ones(5)))
        wa, wb = histogram(v, bins=np.arange(11), weights=w)
        assert_array_almost_equal(wa, w)

        # Check with integer weights
        wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
        assert_array_equal(wa, [4, 5, 0, 1])
        wa, wb = histogram(
            [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
        assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)

        # Check weights with non-uniform bin widths
        a, b = histogram(
            np.arange(9), [0, 1, 3, 6, 10],
            weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
        assert_almost_equal(a, [.2, .1, .1, .075])

    def test_exotic_weights(self):

        # Test the use of weights that are not integer or floats, but e.g.
        # complex numbers or object types.

        # Complex weights
        values = np.array([1.3, 2.5, 2.3])
        weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])

        # Check with custom bins
        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))

        # Check with even bins
        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))

        # Decimal weights
        from decimal import Decimal
        values = np.array([1.3, 2.5, 2.3])
        weights = np.array([Decimal(1), Decimal(2), Decimal(3)])

        # Check with custom bins
        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])

        # Check with even bins
        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])

    def test_no_side_effects(self):
        # This is a regression test that ensures that values passed to
        # ``histogram`` are unchanged.
        values = np.array([1.3, 2.5, 2.3])
        np.histogram(values, range=[-10, 10], bins=100)
        assert_array_almost_equal(values, [1.3, 2.5, 2.3])

    def test_empty(self):
        a, b = histogram([], bins=([0, 1]))
        assert_array_equal(a, np.array([0]))
        assert_array_equal(b, np.array([0, 1]))

    def test_error_binnum_type(self):
        # Tests that the right error is raised if the bins argument is float
        vals = np.linspace(0.0, 1.0, num=100)
        histogram(vals, 5)
        assert_raises(TypeError, histogram, vals, 2.4)

    def test_finite_range(self):
        # Normal ranges should be fine
        vals = np.linspace(0.0, 1.0, num=100)
        histogram(vals, range=[0.25, 0.75])
        assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75])
        assert_raises(ValueError, histogram, vals, range=[0.25, np.inf])

    def test_invalid_range(self):
        # start of range must be < end of range
        vals = np.linspace(0.0, 1.0, num=100)
        with assert_raises_regex(ValueError, "max must be larger than"):
            np.histogram(vals, range=[0.1, 0.01])

    def test_bin_edge_cases(self):
        # Ensure that floating-point computations correctly place edge cases.
        arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
        hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
        mask = hist > 0
        left_edges = edges[:-1][mask]
        right_edges = edges[1:][mask]
        for x, left, right in zip(arr, left_edges, right_edges):
            assert_(x >= left)
            assert_(x < right)

    def test_last_bin_inclusive_range(self):
        arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
        hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
        assert_equal(hist[-1], 1)

    def test_bin_array_dims(self):
        # gracefully handle bins object > 1 dimension
        vals = np.linspace(0.0, 1.0, num=100)
        bins = np.array([[0, 0.5], [0.6, 1.0]])
        with assert_raises_regex(ValueError, "must be 1d"):
            np.histogram(vals, bins=bins)

    def test_unsigned_monotonicity_check(self):
        # Ensures ValueError is raised if bins not increasing monotonically
        # when bins contain unsigned values (see #9222)
        arr = np.array([2])
        bins = np.array([1, 3, 1], dtype='uint64')
        with assert_raises(ValueError):
            hist, edges = np.histogram(arr, bins=bins)

    def test_object_array_of_0d(self):
        # gh-7864
        assert_raises(ValueError,
                      histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
        assert_raises(ValueError,
                      histogram, [np.array(0.4) for i in range(10)] + [np.inf])

        # these should not crash
        np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002])
        np.histogram([np.array(0.5) for i in range(10)] + [.5])

    def test_some_nan_values(self):
        # gh-7503
        one_nan = np.array([0, 1, np.nan])
        all_nan = np.array([np.nan, np.nan])

        # the internal comparisons with NaN give warnings
        sup = suppress_warnings()
        sup.filter(RuntimeWarning)
        with sup:
            # can't infer range with nan
            assert_raises(ValueError, histogram, one_nan, bins='auto')
            assert_raises(ValueError, histogram, all_nan, bins='auto')

            # explicit range solves the problem
            h, b = histogram(one_nan, bins='auto', range=(0, 1))
            assert_equal(h.sum(), 2)  # nan is not counted
            h, b = histogram(all_nan, bins='auto', range=(0, 1))
            assert_equal(h.sum(), 0)  # nan is not counted

            # as does an explicit set of bins
            h, b = histogram(one_nan, bins=[0, 1])
            assert_equal(h.sum(), 2)  # nan is not counted
            h, b = histogram(all_nan, bins=[0, 1])
            assert_equal(h.sum(), 0)  # nan is not counted

    def test_datetime(self):
        begin = np.datetime64('2000-01-01', 'D')
        offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
        bins = np.array([0, 2, 7, 20])
        dates = begin + offsets
        date_bins = begin + bins

        td = np.dtype('timedelta64[D]')

        # Results should be the same for integer offsets or datetime values.
        # For now, only explicit bins are supported, since linspace does not
        # work on datetimes or timedeltas
        d_count, d_edge = histogram(dates, bins=date_bins)
        t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
        i_count, i_edge = histogram(offsets, bins=bins)

        assert_equal(d_count, i_count)
        assert_equal(t_count, i_count)

        assert_equal((d_edge - begin).astype(int), i_edge)
        assert_equal(t_edge.astype(int), i_edge)

        assert_equal(d_edge.dtype, dates.dtype)
        assert_equal(t_edge.dtype, td)

    def do_signed_overflow_bounds(self, dtype):
        exponent = 8 * np.dtype(dtype).itemsize - 1
        arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
        hist, e = histogram(arr, bins=2)
        assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
        assert_equal(hist, [1, 1])

    def test_signed_overflow_bounds(self):
        self.do_signed_overflow_bounds(np.byte)
        self.do_signed_overflow_bounds(np.short)
        self.do_signed_overflow_bounds(np.intc)
        self.do_signed_overflow_bounds(np.int_)
        self.do_signed_overflow_bounds(np.longlong)

    def do_precision_lower_bound(self, float_small, float_large):
        eps = np.finfo(float_large).eps

        arr = np.array([1.0], float_small)
        range = np.array([1.0 + eps, 2.0], float_large)

        # test is looking for behavior when the bounds change between dtypes
        if range.astype(float_small)[0] != 1:
            return

        # previously crashed
        count, x_loc = np.histogram(arr, bins=1, range=range)
        assert_equal(count, [0])
        assert_equal(x_loc.dtype, float_large)

    def do_precision_upper_bound(self, float_small, float_large):
        eps = np.finfo(float_large).eps

        arr = np.array([1.0], float_small)
        range = np.array([0.0, 1.0 - eps], float_large)

        # test is looking for behavior when the bounds change between dtypes
        if range.astype(float_small)[-1] != 1:
            return

        # previously crashed
        count, x_loc = np.histogram(arr, bins=1, range=range)
        assert_equal(count, [0])

        assert_equal(x_loc.dtype, float_large)

    def do_precision(self, float_small, float_large):
        self.do_precision_lower_bound(float_small, float_large)
        self.do_precision_upper_bound(float_small, float_large)

    def test_precision(self):
        # not looping results in a useful stack trace upon failure
        self.do_precision(np.half, np.single)
        self.do_precision(np.half, np.double)
        self.do_precision(np.half, np.longdouble)
        self.do_precision(np.single, np.double)
        self.do_precision(np.single, np.longdouble)
        self.do_precision(np.double, np.longdouble)

    def test_histogram_bin_edges(self):
        hist, e = histogram([1, 2, 3, 4], [1, 2])
        edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
        assert_array_equal(edges, e)

        arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
        hist, e = histogram(arr, bins=30, range=(-0.5, 5))
        edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
        assert_array_equal(edges, e)

        hist, e = histogram(arr, bins='auto', range=(0, 1))
        edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
        assert_array_equal(edges, e)

    def test_small_value_range(self):
        arr = np.array([1, 1 + 2e-16] * 10)
        with pytest.raises(ValueError, match="Too many bins for data range"):
            histogram(arr, bins=10)

    # @requires_memory(free_bytes=1e10)
    # @pytest.mark.slow
    @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")
    def test_big_arrays(self):
        sample = np.zeros([100000000, 3])
        xbins = 400
        ybins = 400
        zbins = np.arange(16000)
        hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))
        assert_equal(type(hist), type((1, 2)))

    def test_gh_23110(self):
        hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'),
                               bins=2,
                               range=(-1e-308, -2e-313))
        expected_hist = np.array([1, 0])
        assert_array_equal(hist, expected_hist)

    def test_gh_28400(self):
        e = 1 + 1e-12
        Z = [0, 1, 1, 1, 1, 1, e, e, e, e, e, e, 2]
        counts, edges = np.histogram(Z, bins="auto")
        assert len(counts) < 10
        assert edges[0] == Z[0]
        assert edges[-1] == Z[-1]
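

# Illustrative sketch (exposition only): ``density=True`` divides each count
# by ``n_samples * bin_width``, so the histogram integrates to 1 over the
# binned range -- the invariant the density tests above rely on:
#
#     density[i] = counts[i] / (counts.sum() * np.diff(edges)[i])
def _example_density_integral():
    counts, edges = np.histogram(np.arange(10), bins=[0, 1, 3, 6, 10],
                                 density=True)
    # raw counts [1, 2, 3, 4] with widths [1, 2, 3, 4] -> density is 0.1
    # in every bin, and the weighted sum below recovers exactly 1.0
    return np.sum(counts * np.diff(edges))  # 1.0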


class TestHistogramOptimBinNums:
    """
    Provide test coverage when using provided estimators for optimal number of
    bins
    """

    def test_empty(self):
        estimator_list = ['fd', 'scott', 'rice', 'sturges',
                          'doane', 'sqrt', 'auto', 'stone']
        # check it can deal with empty data
        for estimator in estimator_list:
            a, b = histogram([], bins=estimator)
            assert_array_equal(a, np.array([0]))
            assert_array_equal(b, np.array([0, 1]))

    def test_simple(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). All test values have been precomputed and the values
        shouldn't change.
        """
        # Some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
                           'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
                      500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
                            'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
                             'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}

        for testlen, expectedResults in basic_test.items():
            # Create some sort of non uniform data to test with
            # (2 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x = np.concatenate((x1, x2))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator)
                assert_equal(len(a), numbins,
                             err_msg=f"For the {estimator} estimator "
                                     f"with datasize of {testlen}")

    def test_small(self):
        """
        Smaller datasets have the potential to cause issues with the data
        adaptive methods, especially the FD method. All bin numbers have been
        precalculated.
        """
        small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                         'doane': 1, 'sqrt': 1, 'stone': 1},
                     2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
                         'doane': 1, 'sqrt': 2, 'stone': 1},
                     3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
                         'doane': 3, 'sqrt': 2, 'stone': 1}}

        for testlen, expectedResults in small_dat.items():
            testdat = np.arange(testlen).astype(float)
            for estimator, expbins in expectedResults.items():
                a, b = np.histogram(testdat, estimator)
                assert_equal(len(a), expbins,
                             err_msg=f"For the {estimator} estimator "
                                     f"with datasize of {testlen}")

    def test_incorrect_methods(self):
        """
        Check a ValueError is thrown when an unknown string is passed in
        """
        check_list = ['mad', 'freeman', 'histograms', 'IQR']
        for estimator in check_list:
            assert_raises(ValueError, histogram, [1, 2, 3], estimator)

    def test_novariance(self):
        """
        Check that methods handle no variance in data
        Primarily for Scott and FD, as the SD and IQR are both 0 in this case
        """
        novar_dataset = np.ones(100)
        novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                            'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}

        for estimator, numbins in novar_resultdict.items():
            a, b = np.histogram(novar_dataset, estimator)
            assert_equal(len(a), numbins,
                         err_msg=f"{estimator} estimator, No Variance test")

    def test_limited_variance(self):
        """
        Check when IQR is 0, but variance exists, we return a reasonable value.
        """
        lim_var_data = np.ones(1000)
        lim_var_data[:3] = 0
        lim_var_data[-4:] = 100

        edges_auto = histogram_bin_edges(lim_var_data, 'auto')
        assert_equal(edges_auto[0], 0)
        assert_equal(edges_auto[-1], 100.)
        assert len(edges_auto) < 100

        edges_fd = histogram_bin_edges(lim_var_data, 'fd')
        assert_equal(edges_fd, np.array([0, 100]))

        edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
        assert_equal(edges_sturges, np.linspace(0, 100, 12))

    def test_outlier(self):
        """
        Check the FD, Scott and Doane estimators with outliers.

        The FD estimator estimates a smaller binwidth since it's less
        affected by outliers. Since the range is so (artificially) large,
        this means more bins, most of which will be empty, but the data of
        interest usually is unaffected. The Scott estimator is more affected
        and returns fewer bins, despite most of the variance being in one
        area of the data. The Doane estimator lies somewhere between the
        other two.
        """
        xcenter = np.linspace(-10, 10, 50)
        outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))

        outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}

        for estimator, numbins in outlier_resultdict.items():
            a, b = np.histogram(outlier_dataset, estimator)
            assert_equal(len(a), numbins)

    def test_scott_vs_stone(self):
        """Verify that Scott's rule and Stone's rule converge for normally
        distributed data"""

        def nbins_ratio(seed, size):
            rng = np.random.RandomState(seed)
            x = rng.normal(loc=0, scale=2, size=size)
            a, b = (len(np.histogram(x, 'stone')[0]),
                    len(np.histogram(x, 'scott')[0]))
            return a / (a + b)

        ll = [[nbins_ratio(seed, size)
               for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
              for seed in range(10)]

        # the average difference between the two methods decreases as the
        # dataset size increases.
        avg = abs(np.mean(ll, axis=0) - 0.5)
        assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)

    def test_simple_range(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). Adding in a 3rd mixture that will then be
        completely ignored. All test values have been precomputed and
        they shouldn't change.
        """
        # some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {
            50: {'fd': 8, 'scott': 8, 'rice': 15,
                 'sturges': 14, 'auto': 14, 'stone': 8},
            500: {'fd': 15, 'scott': 16, 'rice': 32,
                  'sturges': 20, 'auto': 20, 'stone': 80},
            5000: {'fd': 33, 'scott': 33, 'rice': 69,
                   'sturges': 27, 'auto': 33, 'stone': 80}
        }

        for testlen, expectedResults in basic_test.items():
            # create some sort of non uniform data to test with
            # (3 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x3 = np.linspace(-100, -50, testlen)
            x = np.hstack((x1, x2, x3))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator, range=(-20, 20))
                msg = f"For the {estimator} estimator"
                msg += f" with datasize of {testlen}"
                assert_equal(len(a), numbins, err_msg=msg)

    @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
                                      'stone', 'rice', 'sturges'])
    def test_signed_integer_data(self, bins):
        # Regression test for gh-14379.
        a = np.array([-2, 0, 127], dtype=np.int8)
        hist, edges = np.histogram(a, bins=bins)
        hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
        assert_array_equal(hist, hist32)
        assert_array_equal(edges, edges32)

    @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
                                      'stone', 'rice', 'sturges'])
    def test_integer(self, bins):
        """
        Test that bin width for integer data is at least 1.
        """
        with suppress_warnings() as sup:
            if bins == 'stone':
                sup.filter(RuntimeWarning)
            assert_equal(
                np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins),
                np.arange(9))

    def test_integer_non_auto(self):
        """
        Test that the bin-width>=1 requirement *only* applies to auto binning.
        """
        assert_equal(
            np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16),
            np.arange(17) / 2)
        assert_equal(
            np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]),
            [.1, .2])

    def test_simple_weighted(self):
        """
        Check that weighted data raises a TypeError
        """
        estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
        for estimator in estimator_list:
            assert_raises(TypeError, histogram, [1, 2, 3],
                          estimator, weights=[1, 2, 3])
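

# Illustrative sketch (exposition only): the 'fd' estimator used above picks
# the Freedman-Diaconis bin width, h = 2 * IQR / n**(1/3), and the bin count
# follows from dividing the data range by h; 'auto' takes the smaller of the
# 'fd' and 'sturges' widths (i.e. the larger bin count).
def _example_fd_bin_count():
    x = np.linspace(-10, 10, 5000)
    iqr = np.subtract(*np.percentile(x, [75, 25]))
    h = 2 * iqr / len(x) ** (1 / 3)
    return int(np.ceil((x.max() - x.min()) / h))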


class TestHistogramdd:

    def test_simple(self):
        x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
                      [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
        H, edges = histogramdd(x, (2, 3, 3),
                               range=[[-1, 1], [0, 3], [0, 3]])
        answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
                           [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
        assert_array_equal(H, answer)

        # Check normalization
        ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
        H, edges = histogramdd(x, bins=ed, density=True)
        assert_(np.all(H == answer / 12.))

        # Check that H has the correct shape.
        H, edges = histogramdd(x, (2, 3, 4),
                               range=[[-1, 1], [0, 3], [0, 4]],
                               density=True)
        answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
                           [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
        assert_array_almost_equal(H, answer / 6., 4)
        # Check that a sequence of arrays is accepted and H has the correct
        # shape.
        z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
        H, edges = histogramdd(
            z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
        answer = np.array([[[0, 0], [0, 0], [0, 0]],
                           [[0, 1], [0, 0], [1, 0]],
                           [[0, 1], [0, 0], [0, 0]],
                           [[0, 0], [0, 0], [0, 0]]])
        assert_array_equal(H, answer)

        Z = np.zeros((5, 5, 5))
        Z[list(range(5)), list(range(5)), list(range(5))] = 1.
        H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
        assert_array_equal(H, Z)

    def test_shape_3d(self):
        # All possible permutations for bins of different lengths in 3D.
        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
                (4, 5, 6))
        r = np.random.rand(10, 3)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_shape_4d(self):
        # All possible permutations for bins of different lengths in 4D.
        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
                (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
                (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
                (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
                (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
                (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))

        r = np.random.rand(10, 4)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_weights(self):
        v = np.random.rand(100, 2)
        hist, edges = histogramdd(v)
        n_hist, edges = histogramdd(v, density=True)
        w_hist, edges = histogramdd(v, weights=np.ones(100))
        assert_array_equal(w_hist, hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
        assert_array_equal(w_hist, n_hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
        assert_array_equal(w_hist, 2 * hist)

    def test_identical_samples(self):
        x = np.zeros((10, 2), int)
        hist, edges = histogramdd(x, bins=2)
        assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))

    def test_empty(self):
        a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
        assert_array_max_ulp(a, np.array([[0.]]))
        a, b = np.histogramdd([[], [], []], bins=2)
        assert_array_max_ulp(a, np.zeros((2, 2, 2)))

    def test_bins_errors(self):
        # There are two ways to specify bins. Check for the right errors
        # when mixing those.
        x = np.arange(8).reshape(2, 4)
        assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
        assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
        assert_raises(
            ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
        assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))

    def test_inf_edges(self):
        # Test using +/-inf bin edges works. See #1788.
        with np.errstate(invalid='ignore'):
            x = np.arange(6).reshape(3, 2)
            expected = np.array([[1, 0], [0, 1], [0, 1]])
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
            assert_allclose(h, expected)

    def test_rightmost_binedge(self):
        # Test event very close to rightmost binedge. See Github issue #4266
        x = [0.9999999995]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        x = [1.0]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        x = [1.0000000001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)
        x = [1.0001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)

    def test_finite_range(self):
        vals = np.random.random((100, 3))
        histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])

    def test_equal_edges(self):
        """ Test that adjacent entries in an edge array can be equal """
        x = np.array([0, 1, 2])
        y = np.array([0, 1, 2])
        x_edges = np.array([0, 2, 2])
        y_edges = 1
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        hist_expected = np.array([
            [2.],
            [1.],  # x == 2 falls in the final bin
        ])
        assert_equal(hist, hist_expected)

    def test_edge_dtype(self):
        """ Test that if an edge array is input, its type is preserved """
        x = np.array([0, 10, 20])
        y = x / 10
        x_edges = np.array([0, 5, 15, 20])
        y_edges = x_edges / 10
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        assert_equal(edges[0].dtype, x_edges.dtype)
        assert_equal(edges[1].dtype, y_edges.dtype)

    def test_large_integers(self):
        big = 2**60  # Too large to represent with a full precision float

        x = np.array([0], np.int64)
        x_edges = np.array([-1, +1], np.int64)
        y = big + x
        y_edges = big + x_edges

        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        assert_equal(hist[0, 0], 1)

    def test_density_non_uniform_2d(self):
        # Defines the following grid:
        #
        #    0 2     8
        #   0+-+-----+
        #    + |     +
        #    + |     +
        #   6+-+-----+
        #   8+-+-----+
        x_edges = np.array([0, 2, 8])
        y_edges = np.array([0, 6, 8])
        relative_areas = np.array([
            [3, 9],
            [1, 3]])

        # ensure the number of points in each region is proportional to its
        # area
        x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9)
        y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9)

        # sanity check that the above worked as intended
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
        assert_equal(hist, relative_areas)

        # resulting histogram should be uniform, since counts and areas are
        # proportional
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges),
                                  density=True)
        assert_equal(hist, 1 / (8 * 8))

    def test_density_non_uniform_1d(self):
        # compare to histogram to show the results are the same
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        hist, edges = histogram(v, bins, density=True)
        hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
        assert_equal(hist, hist_dd)
        assert_equal(edges, edges_dd[0])
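

# Illustrative sketch (exposition only): for histogramdd, ``density=True``
# divides each count by the total count times the bin *volume* (the product
# of the per-axis bin widths), generalizing the 1-d normalization above.
def _example_dd_density_volume():
    x = np.array([0.5, 1.5, 2.5])
    y = np.array([0.5, 0.5, 0.5])
    hist, (ex, ey) = np.histogramdd((x, y), bins=([0, 1, 3], [0, 1]),
                                    density=True)
    volumes = np.outer(np.diff(ex), np.diff(ey))
    return np.sum(hist * volumes)  # 1.0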
@ -0,0 +1,568 @@
import pytest

import numpy as np
from numpy.lib._index_tricks_impl import (
    c_,
    diag_indices,
    diag_indices_from,
    fill_diagonal,
    index_exp,
    ix_,
    mgrid,
    ndenumerate,
    ndindex,
    ogrid,
    r_,
    s_,
)
from numpy.testing import (
    assert_,
    assert_almost_equal,
    assert_array_almost_equal,
    assert_array_equal,
    assert_equal,
    assert_raises,
    assert_raises_regex,
)


class TestRavelUnravelIndex:
    def test_basic(self):
        assert_equal(np.unravel_index(2, (2, 2)), (1, 0))

        # test that new shape argument works properly
        assert_equal(np.unravel_index(indices=2,
                                      shape=(2, 2)),
                     (1, 0))

        # test that an invalid second keyword argument
        # is properly handled, including the old name `dims`.
        with assert_raises(TypeError):
            np.unravel_index(indices=2, hape=(2, 2))

        with assert_raises(TypeError):
            np.unravel_index(2, hape=(2, 2))

        with assert_raises(TypeError):
            np.unravel_index(254, ims=(17, 94))

        with assert_raises(TypeError):
            np.unravel_index(254, dims=(17, 94))

        assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
        assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
        assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
        assert_raises(ValueError, np.unravel_index, -1, (2, 2))
        assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
        assert_raises(ValueError, np.unravel_index, 4, (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
        assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))

        assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)),
                     [2, 1, 4])
        assert_equal(
            np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4)

        arr = np.array([[3, 6, 6], [4, 5, 1]])
        assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
        assert_equal(
            np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
        assert_equal(
            np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
        assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
                     [12, 13, 13])
        assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)

        assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
                     [[3, 6, 6], [4, 5, 1]])
        assert_equal(
            np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
            [[3, 6, 6], [4, 5, 1]])
        assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])

    def test_empty_indices(self):
        msg1 = 'indices must be integral: the provided empty sequence was'
        msg2 = 'only int indices permitted'
        assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))
        assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))
        assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),
                            (10, 3, 5))
        assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)),
                     [[], [], []])
        assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),
                            (10, 3))
        assert_raises_regex(TypeError, msg1, np.ravel_multi_index,
                            ([], ['abc']), (10, 3))
        assert_raises_regex(TypeError, msg2, np.ravel_multi_index,
                            (np.array([]), np.array([])), (5, 3))
        assert_equal(np.ravel_multi_index(
                (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)),
                [])
        assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),
                                          (5, 3)), [])

    def test_big_indices(self):
        # ravel_multi_index for big indices (issue #7546)
        if np.intp == np.int64:
            arr = ([1, 29], [3, 5], [3, 117], [19, 2],
                   [2379, 1284], [2, 2], [0, 1])
            assert_equal(
                np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
                [5627771580, 117259570957])

        # test unravel_index for big indices (issue #9538)
        assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1))

        # test overflow checking for too big array (issue #7546)
        dummy_arr = ([0], [0])
        half_max = np.iinfo(np.intp).max // 2
        assert_equal(
            np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
        assert_raises(ValueError,
                      np.ravel_multi_index, dummy_arr, (half_max + 1, 2))
        assert_equal(
            np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
        assert_raises(ValueError,
                      np.ravel_multi_index, dummy_arr, (half_max + 1, 2),
                      order='F')

    def test_dtypes(self):
        # Test with different data types
        for dtype in [np.int16, np.uint16, np.int32,
                      np.uint32, np.int64, np.uint64]:
            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
            shape = (5, 8)
            uncoords = 8 * coords[0] + coords[1]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0] + 5 * coords[1]
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))

            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
                dtype=dtype)
            shape = (5, 8, 10)
            uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2])
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))

    def test_clipmodes(self):
        # Test clipmodes
        assert_equal(
            np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
            np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
        assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
                                          mode=('wrap', 'raise', 'clip',
                                                'raise')),
                     np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
        assert_raises(
            ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))

    def test_writeability(self):
        # gh-7269
        x, y = np.unravel_index([1, 2, 3], (4, 5))
        assert_(x.flags.writeable)
        assert_(y.flags.writeable)

    def test_0d(self):
        # gh-580
        x = np.unravel_index(0, ())
        assert_equal(x, ())

        assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())
        assert_raises_regex(
            ValueError, "out of bounds", np.unravel_index, [1], ())

    @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
    def test_empty_array_ravel(self, mode):
        res = np.ravel_multi_index(
            np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
        assert res.shape == (0,)

        with assert_raises(ValueError):
            np.ravel_multi_index(
                np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)

    def test_empty_array_unravel(self):
        res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
        # res is a tuple of three empty arrays
        assert len(res) == 3
        assert all(a.shape == (0,) for a in res)

        with assert_raises(ValueError):
            np.unravel_index([1], (2, 1, 0))
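

# Illustrative sketch (exposition only): ravel_multi_index and unravel_index
# are inverses; in C order the flat index for (i, j) in an (R, C) array is
# ``i * C + j``, which is what the hand-computed ``uncoords`` values in
# test_dtypes spell out.
def _example_ravel_round_trip():
    shape = (17, 94)
    flat = np.ravel_multi_index((2, 66), shape)  # 2 * 94 + 66 == 254
    return np.unravel_index(flat, shape)         # (2, 66)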
class TestGrid:
|
||||
def test_basic(self):
|
||||
a = mgrid[-1:1:10j]
|
||||
b = mgrid[-1:1:0.1]
|
||||
assert_(a.shape == (10,))
|
||||
assert_(b.shape == (20,))
|
||||
assert_(a[0] == -1)
|
||||
assert_almost_equal(a[-1], 1)
|
||||
assert_(b[0] == -1)
|
||||
assert_almost_equal(b[1] - b[0], 0.1, 11)
|
||||
assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11)
|
||||
assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11)
|
||||
|
||||
def test_linspace_equivalence(self):
|
||||
y, st = np.linspace(2, 10, retstep=True)
|
||||
assert_almost_equal(st, 8 / 49.0)
|
||||
assert_array_almost_equal(y, mgrid[2:10:50j], 13)
|
||||
|
||||
def test_nd(self):
|
||||
c = mgrid[-1:1:10j, -2:2:10j]
|
||||
d = mgrid[-1:1:0.1, -2:2:0.2]
|
||||
assert_(c.shape == (2, 10, 10))
|
||||
assert_(d.shape == (2, 20, 20))
|
||||
assert_array_equal(c[0][0, :], -np.ones(10, 'd'))
|
||||
assert_array_equal(c[1][:, 0], -2 * np.ones(10, 'd'))
|
||||
assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
|
||||
assert_array_almost_equal(c[1][:, -1], 2 * np.ones(10, 'd'), 11)
|
||||
assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
|
||||
0.1 * np.ones(20, 'd'), 11)
|
||||
assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
|
||||
0.2 * np.ones(20, 'd'), 11)
|
||||
|
||||
def test_sparse(self):
|
||||
grid_full = mgrid[-1:1:10j, -2:2:10j]
|
||||
grid_sparse = ogrid[-1:1:10j, -2:2:10j]
|
||||
|
||||
# sparse grids can be made dense by broadcasting
|
||||
grid_broadcast = np.broadcast_arrays(*grid_sparse)
|
||||
for f, b in zip(grid_full, grid_broadcast):
|
||||
assert_equal(f, b)
|
||||
|
||||
    @pytest.mark.parametrize("start, stop, step, expected", [
        (None, 10, 10j, (200, 10)),
        (-10, 20, None, (1800, 30)),
    ])
    def test_mgrid_size_none_handling(self, start, stop, step, expected):
        # regression test None value handling for
        # start and step values used by mgrid;
        # internally, this aims to cover previously
        # unexplored code paths in nd_grid()
        grid = mgrid[start:stop:step, start:stop:step]
        # need a smaller grid to explore one of the untested code paths
        grid_small = mgrid[start:stop:step]
        assert_equal(grid.size, expected[0])
        assert_equal(grid_small.size, expected[1])

    def test_accepts_npfloating(self):
        # regression test for #16466
        grid64 = mgrid[0.1:0.33:0.1, ]
        grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ]
        assert_array_almost_equal(grid64, grid32)
        # At some point this was float64, but NEP 50 changed it:
        assert grid32.dtype == np.float32
        assert grid64.dtype == np.float64

        # different code path for single slice
        grid64 = mgrid[0.1:0.33:0.1]
        grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)]
        assert_(grid32.dtype == np.float64)
        assert_array_almost_equal(grid64, grid32)

    def test_accepts_longdouble(self):
        # regression tests for #16945
        grid64 = mgrid[0.1:0.33:0.1, ]
        grid128 = mgrid[
            np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1),
        ]
        assert_(grid128.dtype == np.longdouble)
        assert_array_almost_equal(grid64, grid128)

        grid128c_a = mgrid[0:np.longdouble(1):3.4j]
        grid128c_b = mgrid[0:np.longdouble(1):3.4j, ]
        assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble)
        assert_array_equal(grid128c_a, grid128c_b[0])

        # different code path for single slice
        grid64 = mgrid[0.1:0.33:0.1]
        grid128 = mgrid[
            np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1)
        ]
        assert_(grid128.dtype == np.longdouble)
        assert_array_almost_equal(grid64, grid128)

    def test_accepts_npcomplexfloating(self):
        # Related to #16466
        assert_array_almost_equal(
            mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ]
        )

        # different code path for single slice
        assert_array_almost_equal(
            mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)]
        )

        # Related to #16945
        grid64_a = mgrid[0.1:0.3:3.3j]
        grid64_b = mgrid[0.1:0.3:3.3j, ][0]
        assert_(grid64_a.dtype == grid64_b.dtype == np.float64)
        assert_array_equal(grid64_a, grid64_b)

        grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)]
        grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0]
        assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble)
        assert_array_equal(grid128_a, grid128_b)


class TestConcatenator:
    def test_1d(self):
        assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
        b = np.ones(5)
        c = r_[b, 0, 0, b]
        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])

    def test_mixed_type(self):
        g = r_[10.1, 1:10]
        assert_(g.dtype == 'f8')

    def test_more_mixed_type(self):
        g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
        assert_(g.dtype == 'f8')

    def test_complex_step(self):
        # Regression test for #12262
        g = r_[0:36:100j]
        assert_(g.shape == (100,))

        # Related to #16466
        g = r_[0:36:np.complex64(100j)]
        assert_(g.shape == (100,))

    def test_2d(self):
        b = np.random.rand(5, 5)
        c = np.random.rand(5, 5)
        d = r_['1', b, c]  # append columns
        assert_(d.shape == (5, 10))
        assert_array_equal(d[:, :5], b)
        assert_array_equal(d[:, 5:], c)
        d = r_[b, c]
        assert_(d.shape == (10, 5))
        assert_array_equal(d[:5, :], b)
        assert_array_equal(d[5:, :], c)
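
    # The string directive used in test_2d above is part of r_'s
    # mini-language: a leading string selects the concatenation axis. A small
    # illustration with hypothetical shapes:
    #
    # >>> np.r_['1', np.zeros((2, 2)), np.ones((2, 1))].shape
    # (2, 3)
    # >>> np.r_[np.zeros((2, 2)), np.ones((1, 2))].shape   # default: axis 0
    # (3, 2)
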
    def test_0d(self):
        assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
        assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
        assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])


class TestNdenumerate:
    def test_basic(self):
        a = np.array([[1, 2], [3, 4]])
        assert_equal(list(ndenumerate(a)),
                     [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])


class TestIndexExpression:
    def test_regression_1(self):
        # ticket #1196
        a = np.arange(2)
        assert_equal(a[:-1], a[s_[:-1]])
        assert_equal(a[:-1], a[index_exp[:-1]])

    def test_simple_1(self):
        a = np.random.rand(4, 5, 6)

        assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]])
        assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])


class TestIx_:
    def test_regression_1(self):
        # Test empty untyped inputs create outputs of indexing type, gh-5804
        a, = np.ix_(range(0))
        assert_equal(a.dtype, np.intp)

        a, = np.ix_([])
        assert_equal(a.dtype, np.intp)

        # but if the type is specified, don't change it
        a, = np.ix_(np.array([], dtype=np.float32))
        assert_equal(a.dtype, np.float32)

    def test_shape_and_dtype(self):
        sizes = (4, 5, 3, 2)
        # Test both lists and arrays
        for func in (range, np.arange):
            arrays = np.ix_(*[func(sz) for sz in sizes])
            for k, (a, sz) in enumerate(zip(arrays, sizes)):
                assert_equal(a.shape[k], sz)
                assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
                assert_(np.issubdtype(a.dtype, np.integer))

    def test_bool(self):
        bool_a = [True, False, True, True]
        int_a, = np.nonzero(bool_a)
        assert_equal(np.ix_(bool_a)[0], int_a)

    def test_1d_only(self):
        idx2d = [[1, 2, 3], [4, 5, 6]]
        assert_raises(ValueError, np.ix_, idx2d)

    def test_repeated_input(self):
        length_of_vector = 5
        x = np.arange(length_of_vector)
        out = ix_(x, x)
        assert_equal(out[0].shape, (length_of_vector, 1))
        assert_equal(out[1].shape, (1, length_of_vector))
        # check that input shape is not modified
        assert_equal(x.shape, (length_of_vector,))


def test_c_():
    a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
    assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])


class TestFillDiagonal:
    def test_basic(self):
        a = np.zeros((3, 3), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5]])
        )

    def test_tall_matrix(self):
        a = np.zeros((10, 3), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0]])
        )

    def test_tall_matrix_wrap(self):
        a = np.zeros((10, 3), int)
        fill_diagonal(a, 5, True)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [5, 0, 0],
                         [0, 5, 0]])
        )

    def test_wide_matrix(self):
        a = np.zeros((3, 10), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
        )

    def test_operate_4d_array(self):
        a = np.zeros((3, 3, 3, 3), int)
        fill_diagonal(a, 4)
        i = np.array([0, 1, 2])
        assert_equal(np.where(a != 0), (i, i, i, i))

    def test_low_dim_handling(self):
        # raise error with low dimensionality
        a = np.zeros(3, int)
        with assert_raises_regex(ValueError, "at least 2-d"):
            fill_diagonal(a, 5)

    def test_hetero_shape_handling(self):
        # raise error with high dimensionality and shape mismatch
        a = np.zeros((3, 3, 7, 3), int)
        with assert_raises_regex(ValueError, "equal length"):
            fill_diagonal(a, 2)


def test_diag_indices():
    di = diag_indices(4)
    a = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12],
                  [13, 14, 15, 16]])
    a[di] = 100
    assert_array_equal(
        a, np.array([[100, 2, 3, 4],
                     [5, 100, 7, 8],
                     [9, 10, 100, 12],
                     [13, 14, 15, 100]])
    )

    # Now, we create indices to manipulate a 3-d array:
    d3 = diag_indices(2, 3)

    # And use it to set the diagonal of a zeros array to 1:
    a = np.zeros((2, 2, 2), int)
    a[d3] = 1
    assert_array_equal(
        a, np.array([[[1, 0],
                      [0, 0]],
                     [[0, 0],
                      [0, 1]]])
    )


class TestDiagIndicesFrom:

    def test_diag_indices_from(self):
        x = np.random.random((4, 4))
        r, c = diag_indices_from(x)
        assert_array_equal(r, np.arange(4))
        assert_array_equal(c, np.arange(4))

    def test_error_small_input(self):
        x = np.ones(7)
        with assert_raises_regex(ValueError, "at least 2-d"):
            diag_indices_from(x)

    def test_error_shape_mismatch(self):
        x = np.zeros((3, 3, 2, 3), int)
        with assert_raises_regex(ValueError, "equal length"):
            diag_indices_from(x)


def test_ndindex():
    x = list(ndindex(1, 2, 3))
    expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
    assert_array_equal(x, expected)

    x = list(ndindex((1, 2, 3)))
    assert_array_equal(x, expected)

    # Test use of scalars and tuples
    x = list(ndindex((3,)))
    assert_array_equal(x, list(ndindex(3)))

    # Make sure size argument is optional
    x = list(ndindex())
    assert_equal(x, [()])

    x = list(ndindex(()))
    assert_equal(x, [()])

    # Make sure 0-sized ndindex works correctly
    x = list(ndindex(*[0]))
    assert_equal(x, [])
2848
lib/python3.11/site-packages/numpy/lib/tests/test_io.py
Normal file
File diff suppressed because it is too large
1101
lib/python3.11/site-packages/numpy/lib/tests/test_loadtxt.py
Normal file
File diff suppressed because it is too large
215
lib/python3.11/site-packages/numpy/lib/tests/test_mixins.py
Normal file
@@ -0,0 +1,215 @@
import numbers
import operator

import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises

# NOTE: This class should be kept as an exact copy of the example from the
# docstring for NDArrayOperatorsMixin.

class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)

    # One might also consider adding the built-in list type to this
    # list, to support operations like np.add(array_like, list)
    _HANDLED_TYPES = (np.ndarray, numbers.Number)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        out = kwargs.get('out', ())
        for x in inputs + out:
            # Only support operations with instances of _HANDLED_TYPES.
            # Use ArrayLike instead of type(self) for isinstance to
            # allow subclasses that don't override __array_ufunc__ to
            # handle ArrayLike objects.
            if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
                return NotImplemented

        # Defer to the implementation of the ufunc on unwrapped values.
        inputs = tuple(x.value if isinstance(x, ArrayLike) else x
                       for x in inputs)
        if out:
            kwargs['out'] = tuple(
                x.value if isinstance(x, ArrayLike) else x
                for x in out)
        result = getattr(ufunc, method)(*inputs, **kwargs)

        if type(result) is tuple:
            # multiple return values
            return tuple(type(self)(x) for x in result)
        elif method == 'at':
            # no return value
            return None
        else:
            # one return value
            return type(self)(result)

    def __repr__(self):
        return f'{type(self).__name__}({self.value!r})'
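

# A small usage sketch of the mixin pattern defined above (hypothetical
# values, not part of the original example): because ArrayLike implements
# __array_ufunc__ and inherits the operator methods from
# NDArrayOperatorsMixin, both operators and ufuncs unwrap, compute, and
# re-wrap:
#
# >>> x = ArrayLike(np.array([1., 4., 9.]))
# >>> x + 1
# ArrayLike(array([ 2.,  5., 10.]))
# >>> np.sqrt(x)
# ArrayLike(array([1., 2., 3.]))

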
def wrap_array_like(result):
    if type(result) is tuple:
        return tuple(ArrayLike(r) for r in result)
    else:
        return ArrayLike(result)


def _assert_equal_type_and_value(result, expected, err_msg=None):
    assert_equal(type(result), type(expected), err_msg=err_msg)
    if isinstance(result, tuple):
        assert_equal(len(result), len(expected), err_msg=err_msg)
        for result_item, expected_item in zip(result, expected):
            _assert_equal_type_and_value(result_item, expected_item, err_msg)
    else:
        assert_equal(result.value, expected.value, err_msg=err_msg)
        assert_equal(getattr(result.value, 'dtype', None),
                     getattr(expected.value, 'dtype', None), err_msg=err_msg)


_ALL_BINARY_OPERATORS = [
    operator.lt,
    operator.le,
    operator.eq,
    operator.ne,
    operator.gt,
    operator.ge,
    operator.add,
    operator.sub,
    operator.mul,
    operator.truediv,
    operator.floordiv,
    operator.mod,
    divmod,
    pow,
    operator.lshift,
    operator.rshift,
    operator.and_,
    operator.xor,
    operator.or_,
]


class TestNDArrayOperatorsMixin:

    def test_array_like_add(self):

        def check(result):
            _assert_equal_type_and_value(result, ArrayLike(0))

        check(ArrayLike(0) + 0)
        check(0 + ArrayLike(0))

        check(ArrayLike(0) + np.array(0))
        check(np.array(0) + ArrayLike(0))

        check(ArrayLike(np.array(0)) + 0)
        check(0 + ArrayLike(np.array(0)))

        check(ArrayLike(np.array(0)) + np.array(0))
        check(np.array(0) + ArrayLike(np.array(0)))

    def test_inplace(self):
        array_like = ArrayLike(np.array([0]))
        array_like += 1
        _assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))

        array = np.array([0])
        array += ArrayLike(1)
        _assert_equal_type_and_value(array, ArrayLike(np.array([1])))

    def test_opt_out(self):

        class OptOut:
            """Object that opts out of __array_ufunc__."""
            __array_ufunc__ = None

            def __add__(self, other):
                return self

            def __radd__(self, other):
                return self

        array_like = ArrayLike(1)
        opt_out = OptOut()

        # supported operations
        assert_(array_like + opt_out is opt_out)
        assert_(opt_out + array_like is opt_out)

        # not supported
        with assert_raises(TypeError):
            # don't use the Python default, array_like = array_like + opt_out
            array_like += opt_out
        with assert_raises(TypeError):
            array_like - opt_out
        with assert_raises(TypeError):
            opt_out - array_like

    def test_subclass(self):

        class SubArrayLike(ArrayLike):
            """Should take precedence over ArrayLike."""

        x = ArrayLike(0)
        y = SubArrayLike(1)
        _assert_equal_type_and_value(x + y, y)
        _assert_equal_type_and_value(y + x, y)

    def test_object(self):
        x = ArrayLike(0)
        obj = object()
        with assert_raises(TypeError):
            x + obj
        with assert_raises(TypeError):
            obj + x
        with assert_raises(TypeError):
            x += obj

    def test_unary_methods(self):
        array = np.array([-1, 0, 1, 2])
        array_like = ArrayLike(array)
        for op in [operator.neg,
                   operator.pos,
                   abs,
                   operator.invert]:
            _assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))

    def test_forward_binary_methods(self):
        array = np.array([-1, 0, 1, 2])
        array_like = ArrayLike(array)
        for op in _ALL_BINARY_OPERATORS:
            expected = wrap_array_like(op(array, 1))
            actual = op(array_like, 1)
            err_msg = f'failed for operator {op}'
            _assert_equal_type_and_value(expected, actual, err_msg=err_msg)

    def test_reflected_binary_methods(self):
        for op in _ALL_BINARY_OPERATORS:
            expected = wrap_array_like(op(2, 1))
            actual = op(2, ArrayLike(1))
            err_msg = f'failed for operator {op}'
            _assert_equal_type_and_value(expected, actual, err_msg=err_msg)

    def test_matmul(self):
        array = np.array([1, 2], dtype=np.float64)
        array_like = ArrayLike(array)
        expected = ArrayLike(np.float64(5))
        _assert_equal_type_and_value(expected, np.matmul(array_like, array))
        _assert_equal_type_and_value(
            expected, operator.matmul(array_like, array))
        _assert_equal_type_and_value(
            expected, operator.matmul(array, array_like))

    def test_ufunc_at(self):
        array = ArrayLike(np.array([1, 2, 3, 4]))
        assert_(np.negative.at(array, np.array([0, 1])) is None)
        _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))

    def test_ufunc_two_outputs(self):
        mantissa, exponent = np.frexp(2 ** -3)
        expected = (ArrayLike(mantissa), ArrayLike(exponent))
        _assert_equal_type_and_value(
            np.frexp(ArrayLike(2 ** -3)), expected)
        _assert_equal_type_and_value(
            np.frexp(ArrayLike(np.array(2 ** -3))), expected)
1438
lib/python3.11/site-packages/numpy/lib/tests/test_nanfunctions.py
Normal file
File diff suppressed because it is too large
376
lib/python3.11/site-packages/numpy/lib/tests/test_packbits.py
Normal file
@@ -0,0 +1,376 @@
from itertools import chain

import pytest

import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises


def test_packbits():
    # Copied from the docstring.
    a = [[[1, 0, 1], [0, 1, 0]],
         [[1, 1, 0], [0, 0, 1]]]
    for dt in '?bBhHiIlLqQ':
        arr = np.array(a, dtype=dt)
        b = np.packbits(arr, axis=-1)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]]))

    assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
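

# The expected values above can be checked by hand: packing is big-endian per
# byte by default and pads the last byte with zeros on the right, so the row
# [1, 0, 1] becomes the bits 10100000, i.e. 128 + 32 = 160:
#
# >>> np.packbits([1, 0, 1])
# array([160], dtype=uint8)

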
def test_packbits_empty():
    shapes = [
        (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
        (0, 0, 20), (0, 0, 0),
    ]
    for dt in '?bBhHiIlLqQ':
        for shape in shapes:
            a = np.empty(shape, dtype=dt)
            b = np.packbits(a)
            assert_equal(b.dtype, np.uint8)
            assert_equal(b.shape, (0,))


def test_packbits_empty_with_axis():
    # Original shapes and lists of packed shapes for different axes.
    shapes = [
        ((0,), [(0,)]),
        ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
        ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
        ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
        ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
        ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
        ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
        ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
    ]
    for dt in '?bBhHiIlLqQ':
        for in_shape, out_shapes in shapes:
            for ax, out_shape in enumerate(out_shapes):
                a = np.empty(in_shape, dtype=dt)
                b = np.packbits(a, axis=ax)
                assert_equal(b.dtype, np.uint8)
                assert_equal(b.shape, out_shape)


@pytest.mark.parametrize('bitorder', ('little', 'big'))
def test_packbits_large(bitorder):
    # test data large enough for 16 byte vectorization
    a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
                  0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
                  1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
                  1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
                  1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
                  1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1,
                  1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
                  0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
                  1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
                  1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1,
                  1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
                  0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
                  1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
                  1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
                  1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
    a = a.repeat(3)
    for dtype in '?bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        b = np.packbits(arr, axis=None, bitorder=bitorder)
        assert_equal(b.dtype, np.uint8)
        r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
             113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
             227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63,
             224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112,
             63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1,
             255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
             199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255,
             142, 56, 227, 129, 248, 227, 129, 199, 31, 128]
        if bitorder == 'big':
            assert_array_equal(b, r)
        # equal for size being multiple of 8
        assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)

        # check last byte of different remainders (16 byte vectorization)
        b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
        assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0,
                               199, 198, 196, 192])

    arr = arr.reshape(36, 25)
    b = np.packbits(arr, axis=0)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
                            199, 206, 204, 204, 140, 140, 136, 136, 8, 40,
                            105, 107, 75, 74, 88],
                           [72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
                            83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
                            41, 104, 122, 90, 18],
                           [113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
                            150, 150, 146, 210, 210, 246, 255, 255, 223,
                            151, 21, 17, 17, 131, 163],
                           [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92,
                            92, 78, 110, 39, 181, 149, 220, 222, 218, 218,
                            202, 234, 170, 168],
                           [0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
                            240, 208, 144, 128, 160, 224, 240, 208, 144,
                            144, 176, 240, 224, 192, 128]])

    b = np.packbits(arr, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[252, 127, 192, 0],
                           [ 7, 252, 15, 128],
                           [240, 0, 28, 0],
                           [255, 128, 0, 128],
                           [192, 31, 255, 128],
                           [142, 63, 0, 0],
                           [255, 240, 7, 0],
                           [ 7, 224, 14, 0],
                           [126, 0, 224, 0],
                           [255, 255, 199, 0],
                           [ 56, 28, 126, 0],
                           [113, 248, 227, 128],
                           [227, 142, 63, 0],
                           [ 0, 28, 112, 0],
                           [ 15, 248, 3, 128],
                           [ 28, 126, 56, 0],
                           [ 56, 255, 241, 128],
                           [240, 7, 224, 0],
                           [227, 129, 192, 128],
                           [255, 255, 254, 0],
                           [126, 0, 224, 0],
                           [ 3, 241, 248, 0],
                           [ 0, 255, 241, 128],
                           [128, 0, 255, 128],
                           [224, 1, 255, 128],
                           [248, 252, 126, 0],
                           [ 0, 7, 3, 128],
                           [224, 113, 248, 0],
                           [ 0, 252, 127, 128],
                           [142, 63, 224, 0],
                           [224, 14, 63, 0],
                           [ 7, 3, 128, 0],
                           [113, 255, 255, 128],
                           [ 28, 113, 199, 0],
                           [ 7, 227, 142, 0],
                           [ 14, 56, 252, 0]])

    arr = arr.T.copy()
    b = np.packbits(arr, axis=0)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
                            56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
                            126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
                            7, 113, 28, 7, 14],
                           [127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
                            28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
                            0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
                            3, 255, 113, 227, 56],
                           [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
                            227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
                            248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
                            128, 255, 199, 142, 252],
                           [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0,
                            0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128,
                            128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])

    b = np.packbits(arr, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[190, 72, 113, 214, 0],
                           [186, 216, 120, 210, 128],
                           [178, 248, 248, 210, 128],
                           [178, 241, 216, 64, 192],
                           [150, 227, 152, 68, 80],
                           [215, 195, 24, 5, 112],
                           [ 87, 202, 60, 5, 48],
                           [ 83, 90, 52, 1, 160],
                           [ 83, 90, 182, 72, 160],
                           [195, 83, 150, 88, 224],
                           [199, 83, 150, 92, 240],
                           [206, 119, 150, 92, 208],
                           [204, 127, 146, 78, 144],
                           [204, 109, 210, 110, 128],
                           [140, 73, 210, 39, 160],
                           [140, 64, 246, 181, 224],
                           [136, 208, 255, 149, 240],
                           [136, 244, 255, 220, 208],
                           [ 8, 189, 223, 222, 144],
                           [ 40, 45, 151, 218, 144],
                           [105, 41, 21, 218, 176],
                           [107, 104, 17, 202, 240],
                           [ 75, 122, 17, 234, 224],
                           [ 74, 90, 131, 170, 192],
                           [ 88, 18, 163, 168, 128]])

    # result is the same if input is multiplied with a nonzero value
    for dtype in 'bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        rnd = np.random.randint(low=np.iinfo(dtype).min,
                                high=np.iinfo(dtype).max, size=arr.size,
                                dtype=dtype)
        rnd[rnd == 0] = 1
        arr *= rnd.astype(dtype)
        b = np.packbits(arr, axis=-1)
        assert_array_equal(np.unpackbits(b)[:-4], a)

    assert_raises(TypeError, np.packbits, np.array(a, dtype=float))


def test_packbits_very_large():
    # Test with some larger arrays, gh-8637. The code path is covered
    # earlier, but a larger array makes a crash on the bug more likely.
    for s in range(950, 1050):
        for dt in '?bBhHiIlLqQ':
            x = np.ones((200, s), dtype=bool)
            np.packbits(x, axis=1)


def test_unpackbits():
    # Copied from the docstring.
    a = np.array([[2], [7], [23]], dtype=np.uint8)
    b = np.unpackbits(a, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0],
                                    [0, 0, 0, 0, 0, 1, 1, 1],
                                    [0, 0, 0, 1, 0, 1, 1, 1]]))


def test_pack_unpack_order():
    a = np.array([[2], [7], [23]], dtype=np.uint8)
    b = np.unpackbits(a, axis=1)
    assert_equal(b.dtype, np.uint8)
    b_little = np.unpackbits(a, axis=1, bitorder='little')
    b_big = np.unpackbits(a, axis=1, bitorder='big')
    assert_array_equal(b, b_big)
    assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
    assert_array_equal(b[:, ::-1], b_little)
    assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
    assert_raises(ValueError, np.unpackbits, a, bitorder='r')
    assert_raises(TypeError, np.unpackbits, a, bitorder=10)
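

# The bitorder convention tested above, spelled out on a single byte: 'big'
# (the default) yields the most-significant bit first, 'little' the reverse:
#
# >>> np.unpackbits(np.array([2], dtype=np.uint8), bitorder='big')
# array([0, 0, 0, 0, 0, 0, 1, 0], dtype=uint8)
# >>> np.unpackbits(np.array([2], dtype=np.uint8), bitorder='little')
# array([0, 1, 0, 0, 0, 0, 0, 0], dtype=uint8)

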
def test_unpackbits_empty():
    a = np.empty((0,), dtype=np.uint8)
    b = np.unpackbits(a)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, np.empty((0,)))


def test_unpackbits_empty_with_axis():
    # Lists of packed shapes for different axes and unpacked shapes.
    shapes = [
        ([(0,)], (0,)),
        ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
        ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
        ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
        ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
        ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
        ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
        ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
    ]
    for in_shapes, out_shape in shapes:
        for ax, in_shape in enumerate(in_shapes):
            a = np.empty(in_shape, dtype=np.uint8)
            b = np.unpackbits(a, axis=ax)
            assert_equal(b.dtype, np.uint8)
            assert_equal(b.shape, out_shape)


def test_unpackbits_large():
    # test all possible numbers via comparison to already tested packbits
    d = np.arange(277, dtype=np.uint8)
    assert_array_equal(np.packbits(np.unpackbits(d)), d)
    assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2])
    d = np.tile(d, (3, 1))
    assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
    d = d.T.copy()
    assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)


class TestCount:
    x = np.array([
        [1, 0, 1, 0, 0, 1, 0],
        [0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 1],
        [1, 1, 0, 0, 0, 1, 1],
        [1, 0, 1, 0, 1, 0, 1],
        [0, 0, 1, 1, 1, 0, 0],
        [0, 1, 0, 1, 0, 1, 0],
    ], dtype=np.uint8)
    padded1 = np.zeros(57, dtype=np.uint8)
    padded1[:49] = x.ravel()
    padded1b = np.zeros(57, dtype=np.uint8)
    padded1b[:49] = x[::-1].copy().ravel()
    padded2 = np.zeros((9, 9), dtype=np.uint8)
    padded2[:7, :7] = x

    @pytest.mark.parametrize('bitorder', ('little', 'big'))
    @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
    def test_roundtrip(self, bitorder, count):
        if count < 0:
            # one extra zero of padding
            cutoff = count - 1
        else:
            cutoff = count
        # test complete invertibility of packbits and unpackbits with count
        packed = np.packbits(self.x, bitorder=bitorder)
        unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, self.padded1[:cutoff])

    @pytest.mark.parametrize('kwargs', [
        {}, {'count': None},
    ])
    def test_count(self, kwargs):
        packed = np.packbits(self.x)
        unpacked = np.unpackbits(packed, **kwargs)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, self.padded1[:-1])
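
    # The count parameter exercised in these tests trims the unpacked output:
    # a positive count keeps that many leading bits, a negative count drops
    # that many trailing bits (which is how the zero padding is stripped):
    #
    # >>> packed = np.packbits([1, 0, 1])        # one byte, bits 10100000
    # >>> np.unpackbits(packed, count=3)
    # array([1, 0, 1], dtype=uint8)
    # >>> np.unpackbits(packed, count=-5)        # drop the 5 padding bits
    # array([1, 0, 1], dtype=uint8)
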
    @pytest.mark.parametrize('bitorder', ('little', 'big'))
    # delta==-1 when count<0 because one extra zero of padding
    @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
    def test_roundtrip_axis(self, bitorder, count):
        if count < 0:
            # one extra zero of padding
            cutoff = count - 1
        else:
            cutoff = count
        packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
        unpacked0 = np.unpackbits(packed0, axis=0, count=count,
                                  bitorder=bitorder)
        assert_equal(unpacked0.dtype, np.uint8)
        assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])

        packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
        unpacked1 = np.unpackbits(packed1, axis=1, count=count,
                                  bitorder=bitorder)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])

    @pytest.mark.parametrize('kwargs', [
        {}, {'count': None},
        {'bitorder': 'little'},
        {'bitorder': 'little', 'count': None},
        {'bitorder': 'big'},
        {'bitorder': 'big', 'count': None},
    ])
    def test_axis_count(self, kwargs):
        packed0 = np.packbits(self.x, axis=0)
        unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
        assert_equal(unpacked0.dtype, np.uint8)
        if kwargs.get('bitorder', 'big') == 'big':
            assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
        else:
            assert_array_equal(unpacked0[::-1, :],
                               self.padded2[:-1, :self.x.shape[1]])

        packed1 = np.packbits(self.x, axis=1)
        unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
        assert_equal(unpacked1.dtype, np.uint8)
        if kwargs.get('bitorder', 'big') == 'big':
            assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
        else:
            assert_array_equal(unpacked1[:, ::-1],
                               self.padded2[:self.x.shape[0], :-1])

    def test_bad_count(self):
        packed0 = np.packbits(self.x, axis=0)
        assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
        packed1 = np.packbits(self.x, axis=1)
        assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
        packed = np.packbits(self.x)
        assert_raises(ValueError, np.unpackbits, packed, count=-57)
320
lib/python3.11/site-packages/numpy/lib/tests/test_polynomial.py
Normal file
@@ -0,0 +1,320 @@
import pytest

import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
    assert_,
    assert_allclose,
    assert_almost_equal,
    assert_array_almost_equal,
    assert_array_equal,
    assert_equal,
    assert_raises,
)

# `poly1d` has some support for `np.bool` and `np.timedelta64`,
# but it is limited and they are therefore excluded here
TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O"


class TestPolynomial:
    def test_poly1d_str_and_repr(self):
        p = np.poly1d([1., 2, 3])
        assert_equal(repr(p), 'poly1d([1., 2., 3.])')
        assert_equal(str(p),
                     '   2\n'
                     '1 x + 2 x + 3')

        q = np.poly1d([3., 2, 1])
        assert_equal(repr(q), 'poly1d([3., 2., 1.])')
        assert_equal(str(q),
                     '   2\n'
                     '3 x + 2 x + 1')

        r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])
        assert_equal(str(r),
                     '            3      2\n'
                     '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')

        assert_equal(str(np.poly1d([-3, -2, -1])),
                     '    2\n'
                     '-3 x - 2 x - 1')

    def test_poly1d_resolution(self):
        p = np.poly1d([1., 2, 3])
        q = np.poly1d([3., 2, 1])
        assert_equal(p(0), 3.0)
        assert_equal(p(5), 38.0)
        assert_equal(q(0), 1.0)
        assert_equal(q(5), 86.0)

    def test_poly1d_math(self):
        # here we use some simple coeffs to make calculations easier
        p = np.poly1d([1., 2, 4])
        q = np.poly1d([4., 2, 1])
        assert_equal(p / q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))
        assert_equal(p.integ(), np.poly1d([1 / 3, 1., 4., 0.]))
        assert_equal(p.integ(1), np.poly1d([1 / 3, 1., 4., 0.]))

        p = np.poly1d([1., 2, 3])
        q = np.poly1d([3., 2, 1])
        assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))
        assert_equal(p + q, np.poly1d([4., 4., 4.]))
        assert_equal(p - q, np.poly1d([-2., 0., 2.]))
        assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))
        assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))
        assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))
        assert_equal(p.deriv(), np.poly1d([2., 2.]))
        assert_equal(p.deriv(2), np.poly1d([2.]))
        assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),
                     (np.poly1d([1., -1.]), np.poly1d([0.])))
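
    # The division identity behind the first assertion above, stated
    # explicitly: polynomial division returns a (quotient, remainder) pair
    # with p == q * d + r, e.g. for the coefficients used in test_poly1d_math:
    #
    # >>> q, r = np.polydiv(np.poly1d([1., 2, 4]), np.poly1d([4., 2, 1]))
    # >>> q.coeffs, r.coeffs
    # (array([0.25]), array([1.5 , 3.75]))
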
    @pytest.mark.parametrize("type_code", TYPE_CODES)
    def test_poly1d_misc(self, type_code: str) -> None:
        dtype = np.dtype(type_code)
        ar = np.array([1, 2, 3], dtype=dtype)
        p = np.poly1d(ar)

        # `__eq__`
        assert_equal(np.asarray(p), ar)
        assert_equal(np.asarray(p).dtype, dtype)
        assert_equal(len(p), 2)

        # `__getitem__`
        comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0}
        for index, ref in comparison_dct.items():
            scalar = p[index]
            assert_equal(scalar, ref)
            if dtype == np.object_:
                assert isinstance(scalar, int)
            else:
                assert_equal(scalar.dtype, dtype)

    def test_poly1d_variable_arg(self):
        q = np.poly1d([1., 2, 3], variable='y')
        assert_equal(str(q),
                     '   2\n'
                     '1 y + 2 y + 3')
        q = np.poly1d([1., 2, 3], variable='lambda')
        assert_equal(str(q),
                     '        2\n'
                     '1 lambda + 2 lambda + 3')

    def test_poly(self):
        assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
                                  [1, -3, -2, 6])

        # From matlab docs
        A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
        assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])

        # Should produce real output for perfect conjugates
        assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))
        assert_(np.isrealobj(np.poly([0 + 1j, -0 + -1j, 1 + 2j,
                                      1 - 2j, 1. + 3.5j, 1 - 3.5j])))
        assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j, 1 + 3j, 1 - 3.j])))
        assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j])))
        assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
        assert_(np.isrealobj(np.poly([1j, -1j])))
        assert_(np.isrealobj(np.poly([1, -1])))

        assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))

        np.random.seed(42)
        a = np.random.randn(100) + 1j * np.random.randn(100)
        assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
    def test_roots(self):
        assert_array_equal(np.roots([1, 0, 0]), [0, 0])

        # Testing for larger root values. The expected precision is adapted
        # to the magnitude of the root, to account for numerical error.
        for i in np.logspace(10, 25, num=1000, base=10):
            tgt = np.array([-1, 1, i])
            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))

        for i in np.logspace(10, 25, num=1000, base=10):
            tgt = np.array([-1, 1.01, i])
            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))

    def test_str_leading_zeros(self):
        p = np.poly1d([4, 3, 2, 1])
        p[3] = 0
        assert_equal(str(p),
                     "   2\n"
                     "3 x + 2 x + 1")

        p = np.poly1d([1, 2])
        p[0] = 0
        p[1] = 0
        assert_equal(str(p), " \n0")

    def test_polyfit(self):
        c = np.array([3., 2., 1.])
        x = np.linspace(0, 2, 7)
        y = np.polyval(c, x)
        err = [1, -1, 1, -1, 1, -1, 1]
        weights = np.arange(8, 1, -1)**2 / 7.0

        # Check exception when too few points for variance estimate. Note that
        # the estimate requires the number of data points to exceed
        # degree + 1
        assert_raises(ValueError, np.polyfit,
                      [1], [1], deg=0, cov=True)

        # check 1D case
        m, cov = np.polyfit(x, y + err, 2, cov=True)
        est = [3.8571, 0.2857, 1.619]
        assert_almost_equal(est, m, decimal=4)
        val0 = [[ 1.4694, -2.9388,  0.8163],
                [-2.9388,  6.3673, -2.1224],
                [ 0.8163, -2.1224,  1.161 ]]  # noqa: E202
        assert_almost_equal(val0, cov, decimal=4)

        m2, cov2 = np.polyfit(x, y + err, 2, w=weights, cov=True)
        assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
        val = [[ 4.3964, -5.0052,  0.4878],
               [-5.0052,  6.8067, -0.9089],
               [ 0.4878, -0.9089,  0.3337]]
        assert_almost_equal(val, cov2, decimal=4)

        m3, cov3 = np.polyfit(x, y + err, 2, w=weights, cov="unscaled")
        assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
        val = [[ 0.1473, -0.1677,  0.0163],
               [-0.1677,  0.228 , -0.0304],  # noqa: E203
               [ 0.0163, -0.0304,  0.0112]]
        assert_almost_equal(val, cov3, decimal=4)

        # check 2D (n,1) case
        y = y[:, np.newaxis]
        c = c[:, np.newaxis]
        assert_almost_equal(c, np.polyfit(x, y, 2))
        # check 2D (n,2) case
        yy = np.concatenate((y, y), axis=1)
        cc = np.concatenate((c, c), axis=1)
        assert_almost_equal(cc, np.polyfit(x, yy, 2))

        m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
        assert_almost_equal(est, m[:, 0], decimal=4)
        assert_almost_equal(est, m[:, 1], decimal=4)
        assert_almost_equal(val0, cov[:, :, 0], decimal=4)
        assert_almost_equal(val0, cov[:, :, 1], decimal=4)

        # check order 1 (deg=0) case, where the analytic results are simple
        np.random.seed(123)
        y = np.random.normal(size=(4, 10000))
        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
        # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
        assert_allclose(mean.std(), 0.5, atol=0.01)
        assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
        # Without scaling, since reduced chi2 is 1, the result should be the same.
        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),
                               deg=0, cov="unscaled")
        assert_allclose(mean.std(), 0.5, atol=0.01)
        assert_almost_equal(np.sqrt(cov.mean()), 0.5)
        # If we estimate our errors wrong, no change with scaling:
        w = np.full(y.shape[0], 1. / 0.5)
        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)
        assert_allclose(mean.std(), 0.5, atol=0.01)
        assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
        # But if we do not scale, our estimate for the error in the mean will
        # differ.
        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled")
        assert_allclose(mean.std(), 0.5, atol=0.01)
        assert_almost_equal(np.sqrt(cov.mean()), 0.25)

    def test_objects(self):
        from decimal import Decimal
        p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
        p2 = p * Decimal('1.333333333333333')
        assert_(p2[1] == Decimal("3.9999999999999990"))
        p2 = p.deriv()
        assert_(p2[1] == Decimal('8.0'))
        p2 = p.integ()
        assert_(p2[3] == Decimal("1.333333333333333333333333333"))
        assert_(p2[2] == Decimal('1.5'))
        assert_(np.issubdtype(p2.coeffs.dtype, np.object_))
        p = np.poly([Decimal(1), Decimal(2)])
        assert_equal(np.poly([Decimal(1), Decimal(2)]),
                     [1, Decimal(-3), Decimal(2)])

    def test_complex(self):
        p = np.poly1d([3j, 2j, 1j])
        p2 = p.integ()
        assert_((p2.coeffs == [1j, 1j, 1j, 0]).all())
        p2 = p.deriv()
        assert_((p2.coeffs == [6j, 2j]).all())

    def test_integ_coeffs(self):
        p = np.poly1d([3, 2, 1])
        p2 = p.integ(3, k=[9, 7, 6])
        assert_(
            (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]).all())

    def test_zero_dims(self):
        try:
            np.poly(np.zeros((0, 0)))
        except ValueError:
            pass

    def test_poly_int_overflow(self):
        """
        Regression test for gh-5096.
        """
        v = np.arange(1, 21)
        assert_almost_equal(np.poly(v), np.poly(np.diag(v)))

    def test_zero_poly_dtype(self):
        """
        Regression test for gh-16354.
        """
        z = np.array([0, 0, 0])
        p = np.poly1d(z.astype(np.int64))
        assert_equal(p.coeffs.dtype, np.int64)

        p = np.poly1d(z.astype(np.float32))
        assert_equal(p.coeffs.dtype, np.float32)

        p = np.poly1d(z.astype(np.complex64))
        assert_equal(p.coeffs.dtype, np.complex64)

    def test_poly_eq(self):
        p = np.poly1d([1, 2, 3])
        p2 = np.poly1d([1, 2, 4])
        assert_equal(p == None, False)  # noqa: E711
        assert_equal(p != None, True)  # noqa: E711
        assert_equal(p == p, True)
        assert_equal(p == p2, False)
        assert_equal(p != p2, True)

    def test_polydiv(self):
        b = np.poly1d([2, 6, 6, 1])
        a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1])
        q, r = np.polydiv(b, a)
        assert_equal(q.coeffs.dtype, np.complex128)
        assert_equal(r.coeffs.dtype, np.complex128)
        assert_equal(q * a + r, b)

        c = [1, 2, 3]
        d = np.poly1d([1, 2, 3])
        s, t = np.polydiv(c, d)
        assert isinstance(s, np.poly1d)
        assert isinstance(t, np.poly1d)
        u, v = np.polydiv(d, c)
        assert isinstance(u, np.poly1d)
        assert isinstance(v, np.poly1d)

    def test_poly_coeffs_mutable(self):
        """ Coefficients should be modifiable """
        p = np.poly1d([1, 2, 3])

        p.coeffs += 1
        assert_equal(p.coeffs, [2, 3, 4])

        p.coeffs[2] += 10
        assert_equal(p.coeffs, [2, 3, 14])

        # this never used to be allowed - let's not add features to deprecated
        # APIs
        assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1))
1052
lib/python3.11/site-packages/numpy/lib/tests/test_recfunctions.py
Normal file
File diff suppressed because it is too large
231
lib/python3.11/site-packages/numpy/lib/tests/test_regression.py
Normal file
@@ -0,0 +1,231 @@
import os

import numpy as np
from numpy.testing import (
    _assert_valid_refcount,
    assert_,
    assert_array_almost_equal,
    assert_array_equal,
    assert_equal,
    assert_raises,
)


class TestRegression:
    def test_poly1d(self):
        # Ticket #28
        assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
                     np.poly1d([-1, 1]))

    def test_cov_parameters(self):
        # Ticket #91
        x = np.random.random((3, 3))
        y = x.copy()
        np.cov(x, rowvar=True)
        np.cov(y, rowvar=False)
        assert_array_equal(x, y)

    def test_mem_digitize(self):
        # Ticket #95
        for i in range(100):
            np.digitize([1, 2, 3, 4], [1, 3])
            np.digitize([0, 1, 2, 3, 4], [1, 3])

    def test_unique_zero_sized(self):
        # Ticket #205
        assert_array_equal([], np.unique(np.array([])))

    def test_mem_vectorise(self):
        # Ticket #325
        vt = np.vectorize(lambda *args: args)
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)),
           np.zeros((1, 1, 2)), np.zeros((2, 2)))

    def test_mgrid_single_element(self):
        # Ticket #339
        assert_array_equal(np.mgrid[0:0:1j], [0])
        assert_array_equal(np.mgrid[0:0], [])

    def test_refcount_vectorize(self):
        # Ticket #378
        def p(x, y):
            return 123
        v = np.vectorize(p)
        _assert_valid_refcount(v)

    def test_poly1d_nan_roots(self):
        # Ticket #396
        p = np.poly1d([np.nan, np.nan, 1], r=False)
        assert_raises(np.linalg.LinAlgError, getattr, p, "r")

    def test_mem_polymul(self):
        # Ticket #448
        np.polymul([], [1.])

    def test_mem_string_concat(self):
        # Ticket #469
        x = np.array([])
        np.append(x, 'asdasd\tasdasd')

    def test_poly_div(self):
        # Ticket #553
        u = np.poly1d([1, 2, 3])
        v = np.poly1d([1, 2, 3, 4, 5])
        q, r = np.polydiv(u, v)
        assert_equal(q * v + r, u)

    def test_poly_eq(self):
        # Ticket #554
        x = np.poly1d([1, 2, 3])
        y = np.poly1d([3, 4])
        assert_(x != y)
        assert_(x == x)

    def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)

    def test_polydiv_type(self):
        # Make polydiv work for complex types
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == float, msg)

    def test_histogramdd_too_many_bins(self):
        # Ticket 928.
        assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)

    def test_polyint_type(self):
        # Ticket #944
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        assert_(np.polyint(x).dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        assert_(np.polyint(x).dtype == float, msg)

    def test_ndenumerate_crash(self):
        # Ticket 1140
        # Shouldn't crash:
        list(np.ndenumerate(np.array([[]])))

    def test_large_fancy_indexing(self):
        # Large enough to fail on 64-bit.
        nbits = np.dtype(np.intp).itemsize * 8
        thesize = int((2**nbits)**(1.0 / 5.0) + 1)

        def dp():
            n = 3
            a = np.ones((n,) * 5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)] = 0

        def dp2():
            n = 3
            a = np.ones((n,) * 5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)]

        assert_raises(ValueError, dp)
        assert_raises(ValueError, dp2)

    def test_void_coercion(self):
        dt = np.dtype([('a', 'f4'), ('b', 'i4')])
        x = np.zeros((1,), dt)
        assert_(np.r_[x, x].dtype == dt)

    def test_include_dirs(self):
        # As a sanity check, just test that get_include
        # includes something reasonable. Somewhat
        # related to ticket #1405.
        include_dirs = [np.get_include()]
        for path in include_dirs:
            assert_(isinstance(path, str))
            assert_(path != '')

    def test_polyder_return_type(self):
        # Ticket #1249
        assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
        assert_(isinstance(np.polyder([1], 0), np.ndarray))
        assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
        assert_(isinstance(np.polyder([1], 1), np.ndarray))

    def test_append_fields_dtype_list(self):
        # Ticket #1676
        from numpy.lib.recfunctions import append_fields

        base = np.array([1, 2, 3], dtype=np.int32)
        names = ['a', 'b', 'c']
        data = np.eye(3).astype(np.int32)
        dlist = [np.float64, np.int32, np.int32]
        try:
            append_fields(base, names, data, dlist)
        except Exception:
            raise AssertionError

    def test_loadtxt_fields_subarrays(self):
        # For ticket #1936
        from io import StringIO

        dt = [("a", 'u1', 2), ("b", 'u1', 2)]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))

        dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))

        dt = [("a", 'u1', (2, 2))]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))

        dt = [("a", 'u1', (2, 3, 2))]
        x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
        data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
        assert_equal(x, np.array(data, dtype=dt))

    def test_nansum_with_boolean(self):
        # gh-2978
        a = np.zeros(2, dtype=bool)
        try:
            np.nansum(a)
        except Exception:
            raise AssertionError

    def test_py3_compat(self):
        # gh-2561
        # Test if the oldstyle class test is bypassed in python3
        class C:
            """Old-style class in python2, normal class in python3"""
            pass

        out = open(os.devnull, 'w')
        try:
            np.info(C(), output=out)
        except AttributeError:
            raise AssertionError
        finally:
            out.close()
813
lib/python3.11/site-packages/numpy/lib/tests/test_shape_base.py
Normal file
@@ -0,0 +1,813 @@
import functools
import sys

import pytest

import numpy as np
from numpy import (
    apply_along_axis,
    apply_over_axes,
    array_split,
    column_stack,
    dsplit,
    dstack,
    expand_dims,
    hsplit,
    kron,
    put_along_axis,
    split,
    take_along_axis,
    tile,
    vsplit,
)
from numpy.exceptions import AxisError
from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises

IS_64BIT = sys.maxsize > 2**32


def _add_keepdims(func):
    """ hack in keepdims behavior into a function taking an axis """
    @functools.wraps(func)
    def wrapped(a, axis, **kwargs):
        res = func(a, axis=axis, **kwargs)
        if axis is None:
            axis = 0  # res is now a scalar, so we can insert this anywhere
        return np.expand_dims(res, axis=axis)
    return wrapped
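

# A quick sketch of why the keepdims hack above is needed (hypothetical
# values): argmax with an axis drops that axis, but take_along_axis requires
# the index array to keep it as a length-1 dimension:
#
# >>> a = np.arange(12).reshape(3, 4)
# >>> np.argmax(a, axis=1).shape
# (3,)
# >>> idx = _add_keepdims(np.argmax)(a, axis=1)
# >>> idx.shape
# (3, 1)
# >>> np.take_along_axis(a, idx, axis=1).ravel()
# array([ 3,  7, 11])

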
class TestTakeAlongAxis:
    def test_argequivalent(self):
        """ Test it translates from arg<func> to <func> """
        from numpy.random import rand
        a = rand(3, 4, 5)

        funcs = [
            (np.sort, np.argsort, {}),
            (_add_keepdims(np.min), _add_keepdims(np.argmin), {}),
            (_add_keepdims(np.max), _add_keepdims(np.argmax), {}),
            #(np.partition, np.argpartition, dict(kth=2)),
        ]

        for func, argfunc, kwargs in funcs:
            for axis in list(range(a.ndim)) + [None]:
                a_func = func(a, axis=axis, **kwargs)
                ai_func = argfunc(a, axis=axis, **kwargs)
                assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))

    def test_invalid(self):
        """ Test it errors when indices has too few dimensions """
        a = np.ones((10, 10))
        ai = np.ones((10, 2), dtype=np.intp)

        # sanity check
        take_along_axis(a, ai, axis=1)

        # not enough indices
        assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
        # bool arrays not allowed
        assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
        # float arrays not allowed
        assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
        # invalid axis
        assert_raises(AxisError, take_along_axis, a, ai, axis=10)
        # invalid indices
        assert_raises(ValueError, take_along_axis, a, ai, axis=None)

    def test_empty(self):
        """ Test everything is ok with empty results, even with inserted dims """
        a = np.ones((3, 4, 5))
        ai = np.ones((3, 0, 5), dtype=np.intp)

        actual = take_along_axis(a, ai, axis=1)
        assert_equal(actual.shape, ai.shape)

    def test_broadcast(self):
        """ Test that non-indexing dimensions are broadcast in both directions """
        a = np.ones((3, 4, 1))
        ai = np.ones((1, 2, 5), dtype=np.intp)
        actual = take_along_axis(a, ai, axis=1)
        assert_equal(actual.shape, (3, 2, 5))


class TestPutAlongAxis:
    def test_replace_max(self):
        a_base = np.array([[10, 30, 20], [60, 40, 50]])

        for axis in list(range(a_base.ndim)) + [None]:
            # we mutate this in the loop
            a = a_base.copy()

            # replace the max with a small value
            i_max = _add_keepdims(np.argmax)(a, axis=axis)
            put_along_axis(a, i_max, -99, axis=axis)

# find the new minimum, which should max
|
||||
i_min = _add_keepdims(np.argmin)(a, axis=axis)
|
||||
|
||||
assert_equal(i_min, i_max)
|
||||
|
||||
def test_broadcast(self):
|
||||
""" Test that non-indexing dimensions are broadcast in both directions """
|
||||
a = np.ones((3, 4, 1))
|
||||
ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
|
||||
put_along_axis(a, ai, 20, axis=1)
|
||||
assert_equal(take_along_axis(a, ai, axis=1), 20)
|
||||
|
||||
def test_invalid(self):
|
||||
""" Test invalid inputs """
|
||||
a_base = np.array([[10, 30, 20], [60, 40, 50]])
|
||||
indices = np.array([[0], [1]])
|
||||
values = np.array([[2], [1]])
|
||||
|
||||
# sanity check
|
||||
a = a_base.copy()
|
||||
put_along_axis(a, indices, values, axis=0)
|
||||
assert np.all(a == [[2, 2, 2], [1, 1, 1]])
|
||||
|
||||
# invalid indices
|
||||
a = a_base.copy()
|
||||
with assert_raises(ValueError) as exc:
|
||||
put_along_axis(a, indices, values, axis=None)
|
||||
assert "single dimension" in str(exc.exception)


class TestApplyAlongAxis:
    def test_simple(self):
        a = np.ones((20, 10), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))

    def test_simple101(self):
        a = np.ones((10, 101), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))

    def test_3d(self):
        a = np.arange(27).reshape((3, 3, 3))
        assert_array_equal(apply_along_axis(np.sum, 0, a),
                           [[27, 30, 33], [36, 39, 42], [45, 48, 51]])

    def test_preserve_subclass(self):
        def double(row):
            return row * 2

        class MyNDArray(np.ndarray):
            pass

        m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
        expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)

        result = apply_along_axis(double, 0, m)
        assert_(isinstance(result, MyNDArray))
        assert_array_equal(result, expected)

        result = apply_along_axis(double, 1, m)
        assert_(isinstance(result, MyNDArray))
        assert_array_equal(result, expected)

    def test_subclass(self):
        class MinimalSubclass(np.ndarray):
            data = 1

        def minimal_function(array):
            return array.data

        a = np.zeros((6, 3)).view(MinimalSubclass)

        assert_array_equal(
            apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
        )

    def test_scalar_array(self, cls=np.ndarray):
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(np.sum, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

    def test_0d_array(self, cls=np.ndarray):
        def sum_to_0d(x):
            """ Sum x, returning a 0d array of the same class """
            assert_equal(x.ndim, 1)
            return np.squeeze(np.sum(x, keepdims=True))
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(sum_to_0d, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

        res = apply_along_axis(sum_to_0d, 1, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))

    def test_axis_insertion(self, cls=np.ndarray):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            return (x[::-1] * x[1:, None]).view(cls)

        a2d = np.arange(6 * 3).reshape((6, 3))

        # 2d insertion along first axis
        actual = apply_along_axis(f1to2, 0, a2d)
        expected = np.stack([
            f1to2(a2d[:, i]) for i in range(a2d.shape[1])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 2d insertion along last axis
        actual = apply_along_axis(f1to2, 1, a2d)
        expected = np.stack([
            f1to2(a2d[i, :]) for i in range(a2d.shape[0])
        ], axis=0).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 3d insertion along middle axis
        a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3))

        actual = apply_along_axis(f1to2, 1, a3d)
        expected = np.stack([
            np.stack([
                f1to2(a3d[i, :, j]) for i in range(a3d.shape[0])
            ], axis=0)
            for j in range(a3d.shape[2])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)
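        # In each case apply_along_axis removes the iterated axis and splices
        # the dimensions of the function's return value in at that position.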

    def test_subclass_preservation(self):
        class MinimalSubclass(np.ndarray):
            pass
        self.test_scalar_array(MinimalSubclass)
        self.test_0d_array(MinimalSubclass)
        self.test_axis_insertion(MinimalSubclass)

    def test_axis_insertion_ma(self):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            res = x[::-1] * x[1:, None]
            return np.ma.masked_where(res % 5 == 0, res)
        a = np.arange(6 * 3).reshape((6, 3))
        res = apply_along_axis(f1to2, 0, a)
        assert_(isinstance(res, np.ma.masked_array))
        assert_equal(res.ndim, 3)
        assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask)
        assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask)
        assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask)

    def test_tuple_func1d(self):
        def sample_1d(x):
            return x[1], x[0]
        res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
        assert_array_equal(res, np.array([[2, 1], [4, 3]]))

    def test_empty(self):
        # can't apply_along_axis when there's no chance to call the function
        def never_call(x):
            assert_(False)  # should never be reached

        a = np.empty((0, 0))
        assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
        assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)

        # but it's sometimes ok with some non-zero dimensions
        def empty_to_1(x):
            assert_(len(x) == 0)
            return 1

        a = np.empty((10, 0))
        actual = np.apply_along_axis(empty_to_1, 1, a)
        assert_equal(actual, np.ones(10))
        assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)

    def test_with_iterable_object(self):
        # from issue 5248
        d = np.array([
            [{1, 11}, {2, 22}, {3, 33}],
            [{4, 44}, {5, 55}, {6, 66}]
        ])
        actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
        expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])

        assert_equal(actual, expected)

        # issue 8642 - assert_equal doesn't detect this!
        for i in np.ndindex(actual.shape):
            assert_equal(type(actual[i]), type(expected[i]))


class TestApplyOverAxes:
    def test_simple(self):
        a = np.arange(24).reshape(2, 3, 4)
        aoa_a = apply_over_axes(np.sum, a, [0, 2])
        assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))


class TestExpandDims:
    def test_functionality(self):
        s = (2, 3, 4, 5)
        a = np.empty(s)
        for axis in range(-5, 4):
            b = expand_dims(a, axis)
            assert_(b.shape[axis] == 1)
            assert_(np.squeeze(b).shape == s)

    def test_axis_tuple(self):
        a = np.empty((3, 3, 3))
        assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
        assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
        assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
        assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)

    def test_axis_out_of_range(self):
        s = (2, 3, 4, 5)
        a = np.empty(s)
        assert_raises(AxisError, expand_dims, a, -6)
        assert_raises(AxisError, expand_dims, a, 5)

        a = np.empty((3, 3, 3))
        assert_raises(AxisError, expand_dims, a, (0, -6))
        assert_raises(AxisError, expand_dims, a, (0, 5))

    def test_repeated_axis(self):
        a = np.empty((3, 3, 3))
        assert_raises(ValueError, expand_dims, a, axis=(1, 1))

    def test_subclasses(self):
        a = np.arange(10).reshape((2, 5))
        a = np.ma.array(a, mask=a % 3 == 0)

        expanded = np.expand_dims(a, axis=1)
        assert_(isinstance(expanded, np.ma.MaskedArray))
        assert_equal(expanded.shape, (2, 1, 5))
        assert_equal(expanded.mask.shape, (2, 1, 5))


class TestArraySplit:
    def test_integer_0_split(self):
        a = np.arange(10)
        assert_raises(ValueError, array_split, a, 0)

    def test_integer_split(self):
        a = np.arange(10)
        res = array_split(a, 1)
        desired = [np.arange(10)]
        compare_results(res, desired)

        res = array_split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

        res = array_split(a, 3)
        desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
        compare_results(res, desired)

        res = array_split(a, 4)
        desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
                   np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 5)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 6)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 7)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 8)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
                   np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
                   np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 9)
        desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
                   np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
                   np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 10)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 11)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10), np.array([])]
        compare_results(res, desired)
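        # The pattern: splitting l elements into n parts makes the first
        # l % n sub-arrays of size l//n + 1 and the remaining ones of size
        # l//n, which is what all the cases above spell out for l = 10.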

    def test_integer_split_2D_rows(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=0)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

        # Same thing for manual splits:
        res = array_split(a, [0, 1], axis=0)
        tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
               np.array([np.arange(10)])]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

    def test_integer_split_2D_cols(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=-1)
        desired = [np.array([np.arange(4), np.arange(4)]),
                   np.array([np.arange(4, 7), np.arange(4, 7)]),
                   np.array([np.arange(7, 10), np.arange(7, 10)])]
        compare_results(res, desired)

    def test_integer_split_2D_default(self):
        """ This will fail if we change default axis """
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)
        # perhaps should check higher dimensions

    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
    def test_integer_split_2D_rows_greater_max_int32(self):
        a = np.broadcast_to([0], (1 << 32, 2))
        res = array_split(a, 4)
        chunk = np.broadcast_to([0], (1 << 30, 2))
        tgt = [chunk] * 4
        for i in range(len(tgt)):
            assert_equal(res[i].shape, tgt[i].shape)

    def test_index_split_simple(self):
        a = np.arange(10)
        indices = [1, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_low_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_high_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7, 10, 12]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10), np.array([]), np.array([])]
        compare_results(res, desired)


class TestSplit:
    # The split function is essentially the same as array_split,
    # except that it tests whether splitting will result in an
    # equal split. Only test for this case.

    def test_equal_split(self):
        a = np.arange(10)
        res = split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

    def test_unequal_split(self):
        a = np.arange(10)
        assert_raises(ValueError, split, a, 3)


class TestColumnStack:
    def test_non_iterable(self):
        assert_raises(TypeError, column_stack, 1)

    def test_1D_arrays(self):
        # example from docstring
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        expected = np.array([[1, 2],
                             [2, 3],
                             [3, 4]])
        actual = np.column_stack((a, b))
        assert_equal(actual, expected)

    def test_2D_arrays(self):
        # same as hstack 2D docstring example
        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        expected = np.array([[1, 2],
                             [2, 3],
                             [3, 4]])
        actual = np.column_stack((a, b))
        assert_equal(actual, expected)

    def test_generator(self):
        with pytest.raises(TypeError, match="arrays to stack must be"):
            column_stack(np.arange(3) for _ in range(2))


class TestDstack:
    def test_non_iterable(self):
        assert_raises(TypeError, dstack, 1)

    def test_0D_array(self):
        a = np.array(1)
        b = np.array(2)
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = np.array([1])
        b = np.array([2])
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = np.array([[1], [2]])
        b = np.array([[1], [2]])
        res = dstack([a, b])
        desired = np.array([[[1, 1]], [[2, 2]]])
        assert_array_equal(res, desired)

    def test_2D_array2(self):
        a = np.array([1, 2])
        b = np.array([1, 2])
        res = dstack([a, b])
        desired = np.array([[[1, 1], [2, 2]]])
        assert_array_equal(res, desired)

    def test_generator(self):
        with pytest.raises(TypeError, match="arrays to stack must be"):
            dstack(np.arange(3) for _ in range(2))


# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
class TestHsplit:
    """Only testing for integer splits."""
    def test_non_iterable(self):
        assert_raises(ValueError, hsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        try:
            hsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        res = hsplit(a, 2)
        desired = [np.array([1, 2]), np.array([3, 4])]
        compare_results(res, desired)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = hsplit(a, 2)
        desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
        compare_results(res, desired)


class TestVsplit:
    """Only testing for integer splits."""
    def test_non_iterable(self):
        assert_raises(ValueError, vsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, vsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        try:
            vsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = vsplit(a, 2)
        desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
        compare_results(res, desired)


class TestDsplit:
    # Only testing for integer splits.
    def test_non_iterable(self):
        assert_raises(ValueError, dsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, dsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        assert_raises(ValueError, dsplit, a, 2)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        try:
            dsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_3D_array(self):
        a = np.array([[[1, 2, 3, 4],
                       [1, 2, 3, 4]],
                      [[1, 2, 3, 4],
                       [1, 2, 3, 4]]])
        res = dsplit(a, 2)
        desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
                   np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
        compare_results(res, desired)
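        # (hsplit, vsplit and dsplit are thin wrappers around split with the
        # axis fixed to 1 (or 0 for 1-D input), 0, and 2 respectively, hence
        # only these smoke tests.)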


class TestSqueeze:
    def test_basic(self):
        from numpy.random import rand

        a = rand(20, 10, 10, 1, 1)
        b = rand(20, 1, 10, 1, 20)
        c = rand(1, 1, 20, 10)
        assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
        assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
        assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))

        # Squeezing to 0-dim should still give an ndarray
        a = [[[1.5]]]
        res = np.squeeze(a)
        assert_equal(res, 1.5)
        assert_equal(res.ndim, 0)
        assert_equal(type(res), np.ndarray)


class TestKron:
    def test_basic(self):
        # Using 0-dimensional ndarray
        a = np.array(1)
        b = np.array([[1, 2], [3, 4]])
        k = np.array([[1, 2], [3, 4]])
        assert_array_equal(np.kron(a, b), k)
        a = np.array([[1, 2], [3, 4]])
        b = np.array(1)
        assert_array_equal(np.kron(a, b), k)

        # Using 1-dimensional ndarray
        a = np.array([3])
        b = np.array([[1, 2], [3, 4]])
        k = np.array([[3, 6], [9, 12]])
        assert_array_equal(np.kron(a, b), k)
        a = np.array([[1, 2], [3, 4]])
        b = np.array([3])
        assert_array_equal(np.kron(a, b), k)

        # Using 3-dimensional ndarray
        a = np.array([[[1]], [[2]]])
        b = np.array([[1, 2], [3, 4]])
        k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
        assert_array_equal(np.kron(a, b), k)
        a = np.array([[1, 2], [3, 4]])
        b = np.array([[[1]], [[2]]])
        k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
        assert_array_equal(np.kron(a, b), k)

    def test_return_type(self):
        class myarray(np.ndarray):
            __array_priority__ = 1.0

        a = np.ones([2, 2])
        ma = myarray(a.shape, a.dtype, a.data)
        assert_equal(type(kron(a, a)), np.ndarray)
        assert_equal(type(kron(ma, ma)), myarray)
        assert_equal(type(kron(a, ma)), myarray)
        assert_equal(type(kron(ma, a)), myarray)

    @pytest.mark.parametrize(
        "array_class", [np.asarray, np.asmatrix]
    )
    def test_kron_smoke(self, array_class):
        a = array_class(np.ones([3, 3]))
        b = array_class(np.ones([3, 3]))
        k = array_class(np.ones([9, 9]))

        assert_array_equal(np.kron(a, b), k)

    def test_kron_ma(self):
        x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
        k = np.ma.array(np.diag([1, 4, 4, 16]),
                        mask=~np.array(np.identity(4), dtype=bool))

        assert_array_equal(k, np.kron(x, x))

    @pytest.mark.parametrize(
        "shape_a,shape_b", [
            ((1, 1), (1, 1)),
            ((1, 2, 3), (4, 5, 6)),
            ((2, 2), (2, 2, 2)),
            ((1, 0), (1, 1)),
            ((2, 0, 2), (2, 2)),
            ((2, 0, 0, 2), (2, 0, 2)),
        ])
    def test_kron_shape(self, shape_a, shape_b):
        a = np.ones(shape_a)
        b = np.ones(shape_b)
        normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a
        normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b
        expected_shape = np.multiply(normalised_shape_a, normalised_shape_b)

        k = np.kron(a, b)
        assert np.array_equal(
            k.shape, expected_shape), "Unexpected shape from kron"
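        # i.e. kron's result shape is the elementwise product of the two
        # shapes once the shorter one has been left-padded with ones.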


class TestTile:
    def test_basic(self):
        a = np.array([0, 1, 2])
        b = [[1, 2], [3, 4]]
        assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
        assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
        assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
        assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
        assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],
                                       [1, 2, 1, 2], [3, 4, 3, 4]])

    def test_tile_one_repetition_on_array_gh4679(self):
        a = np.arange(5)
        b = tile(a, 1)
        b += 2
        assert_equal(a, np.arange(5))

    def test_empty(self):
        a = np.array([[[]]])
        b = np.array([[], []])
        c = tile(b, 2).shape
        d = tile(a, (3, 2, 5)).shape
        assert_equal(c, (2, 0))
        assert_equal(d, (3, 2, 0))

    def test_kroncompare(self):
        from numpy.random import randint

        reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
        shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
        for s in shape:
            b = randint(0, 10, size=s)
            for r in reps:
                a = np.ones(r, b.dtype)
                large = tile(b, r)
                klarge = kron(a, b)
                assert_equal(large, klarge)
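                # tiling by r is equivalent to kron with an all-ones array of
                # shape r, which is the identity this test relies on.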


class TestMayShareMemory:
    def test_basic(self):
        d = np.ones((50, 60))
        d2 = np.ones((30, 60, 6))
        assert_(np.may_share_memory(d, d))
        assert_(np.may_share_memory(d, d[::-1]))
        assert_(np.may_share_memory(d, d[::2]))
        assert_(np.may_share_memory(d, d[1:, ::-1]))

        assert_(not np.may_share_memory(d[::-1], d2))
        assert_(not np.may_share_memory(d[::2], d2))
        assert_(not np.may_share_memory(d[1:, ::-1], d2))
        assert_(np.may_share_memory(d2[1:, ::-1], d2))


# Utility
def compare_results(res, desired):
    """Compare lists of arrays."""
    for x, y in zip(res, desired, strict=False):
        assert_array_equal(x, y)
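    # zip(..., strict=False) tolerates length mismatches, so callers are
    # responsible for passing sequences of equal length.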
@@ -0,0 +1,656 @@
import pytest

import numpy as np
from numpy._core._rational_tests import rational
from numpy.lib._stride_tricks_impl import (
    _broadcast_shape,
    as_strided,
    broadcast_arrays,
    broadcast_shapes,
    broadcast_to,
    sliding_window_view,
)
from numpy.testing import (
    assert_,
    assert_array_equal,
    assert_equal,
    assert_raises,
    assert_raises_regex,
    assert_warns,
)


def assert_shapes_correct(input_shapes, expected_shape):
    # Broadcast a list of arrays with the given input shapes and check the
    # common output shape.

    inarrays = [np.zeros(s) for s in input_shapes]
    outarrays = broadcast_arrays(*inarrays)
    outshapes = [a.shape for a in outarrays]
    expected = [expected_shape] * len(inarrays)
    assert_equal(outshapes, expected)


def assert_incompatible_shapes_raise(input_shapes):
    # Broadcast a list of arrays with the given (incompatible) input shapes
    # and check that they raise a ValueError.

    inarrays = [np.zeros(s) for s in input_shapes]
    assert_raises(ValueError, broadcast_arrays, *inarrays)


def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
    # Broadcast two shapes against each other and check that the data layout
    # is the same as if a ufunc did the broadcasting.

    x0 = np.zeros(shape0, dtype=int)
    # Note that multiply.reduce's identity element is 1.0, so when shape1==(),
    # this gives the desired n==1.
    n = int(np.multiply.reduce(shape1))
    x1 = np.arange(n).reshape(shape1)
    if transposed:
        x0 = x0.T
        x1 = x1.T
    if flipped:
        x0 = x0[::-1]
        x1 = x1[::-1]
    # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1,
    # the result should be exactly the same as the broadcasted view of x1.
    y = x0 + x1
    b0, b1 = broadcast_arrays(x0, x1)
    assert_array_equal(y, b1)


def test_same():
    x = np.arange(10)
    y = np.arange(10)
    bx, by = broadcast_arrays(x, y)
    assert_array_equal(x, bx)
    assert_array_equal(y, by)


def test_broadcast_kwargs():
    # ensure that a TypeError is appropriately raised when
    # np.broadcast_arrays() is called with any keyword
    # argument other than 'subok'
    x = np.arange(10)
    y = np.arange(10)

    with assert_raises_regex(TypeError, 'got an unexpected keyword'):
        broadcast_arrays(x, y, dtype='float64')


def test_one_off():
    x = np.array([[1, 2, 3]])
    y = np.array([[1], [2], [3]])
    bx, by = broadcast_arrays(x, y)
    bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    by0 = bx0.T
    assert_array_equal(bx0, bx)
    assert_array_equal(by0, by)


def test_same_input_shapes():
    # Check that the final shape is just the input shape.

    data = [
        (),
        (1,),
        (3,),
        (0, 1),
        (0, 3),
        (1, 0),
        (3, 0),
        (1, 3),
        (3, 1),
        (3, 3),
    ]
    for shape in data:
        input_shapes = [shape]
        # Single input.
        assert_shapes_correct(input_shapes, shape)
        # Double input.
        input_shapes2 = [shape, shape]
        assert_shapes_correct(input_shapes2, shape)
        # Triple input.
        input_shapes3 = [shape, shape, shape]
        assert_shapes_correct(input_shapes3, shape)


def test_two_compatible_by_ones_input_shapes():
    # Check that two different input shapes of the same length, but some have
    # ones, broadcast to the correct shape.

    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)


def test_two_compatible_by_prepending_ones_input_shapes():
    # Check that two different input shapes (of different lengths) broadcast
    # to the correct shape.

    data = [
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)


def test_incompatible_shapes_raise_valueerror():
    # Check that a ValueError is raised for incompatible shapes.

    data = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
    ]
    for input_shapes in data:
        assert_incompatible_shapes_raise(input_shapes)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_incompatible_shapes_raise(input_shapes[::-1])


def test_same_as_ufunc():
    # Check that the data layout is the same as if a ufunc did the operation.

    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_same_as_ufunc(input_shapes[1], input_shapes[0])
        # Try them transposed, too.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
        # ... and flipped for non-rank-0 inputs in order to test negative
        # strides.
        if () not in input_shapes:
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)


def test_broadcast_to_succeeds():
    data = [
        [np.array(0), (0,), np.array(0)],
        [np.array(0), (1,), np.zeros(1)],
        [np.array(0), (3,), np.zeros(3)],
        [np.ones(1), (1,), np.ones(1)],
        [np.ones(1), (2,), np.ones(2)],
        [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
        [np.arange(3), (3,), np.arange(3)],
        [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
        [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
        # test if shape is not a tuple
        [np.ones(0), 0, np.ones(0)],
        [np.ones(1), 1, np.ones(1)],
        [np.ones(1), 2, np.ones(2)],
        # these cases with size 0 are strange, but they reproduce the behavior
        # of broadcasting with ufuncs (see test_same_as_ufunc above)
        [np.ones(1), (0,), np.ones(0)],
        [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
        [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
    ]
    for input_array, shape, expected in data:
        actual = broadcast_to(input_array, shape)
        assert_array_equal(expected, actual)


def test_broadcast_to_raises():
    data = [
        [(0,), ()],
        [(1,), ()],
        [(3,), ()],
        [(3,), (1,)],
        [(3,), (2,)],
        [(3,), (4,)],
        [(1, 2), (2, 1)],
        [(1, 1), (1,)],
        [(1,), -1],
        [(1,), (-1,)],
        [(1, 2), (-1, 2)],
    ]
    for orig_shape, target_shape in data:
        arr = np.zeros(orig_shape)
        assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))


def test_broadcast_shape():
    # tests internal _broadcast_shape
    # _broadcast_shape is already exercised indirectly by broadcast_arrays
    # _broadcast_shape is also exercised by the public broadcast_shapes function
    assert_equal(_broadcast_shape(), ())
    assert_equal(_broadcast_shape([1, 2]), (2,))
    assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
    assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))

    # regression tests for gh-5862
    assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
    bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
    assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
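    # (np.broadcast accepts at most 32 arrays at a time, which is why the
    # 32- and 100-input cases above are interesting: the shape computation
    # has to fold its arguments in chunks.)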


def test_broadcast_shapes_succeeds():
    # tests public broadcast_shapes
    data = [
        [[], ()],
        [[()], ()],
        [[(7,)], (7,)],
        [[(1, 2), (2,)], (1, 2)],
        [[(1, 1)], (1, 1)],
        [[(1, 1), (3, 4)], (3, 4)],
        [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
        [[(5, 6, 1)], (5, 6, 1)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
        [[(1,), (3,)], (3,)],
        [[2, (3, 2)], (3, 2)],
    ]
    for input_shapes, target_shape in data:
        assert_equal(broadcast_shapes(*input_shapes), target_shape)

    assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2))
    assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2))

    # regression tests for gh-5862
    assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,))


def test_broadcast_shapes_raises():
    # tests public broadcast_shapes
    data = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
        [(1, 2), (3, 1), (3, 2), (10, 5)],
        [2, (2, 3)],
    ]
    for input_shapes in data:
        assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes))

    bad_args = [(2,)] * 32 + [(3,)] * 32
    assert_raises(ValueError, lambda: broadcast_shapes(*bad_args))


def test_as_strided():
    a = np.array([None])
    a_view = as_strided(a)
    expected = np.array([None])
    assert_array_equal(a_view, expected)

    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    expected = np.array([1, 3])
    assert_array_equal(a_view, expected)

    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
    expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
    assert_array_equal(a_view, expected)

    # Regression test for gh-5081
    dt = np.dtype([('num', 'i4'), ('obj', 'O')])
    a = np.empty((4,), dtype=dt)
    a['num'] = np.arange(1, 5)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    expected_num = [[1, 2, 3, 4]] * 3
    expected_obj = [[None] * 4] * 3
    assert_equal(a_view.dtype, dt)
    assert_array_equal(expected_num, a_view['num'])
    assert_array_equal(expected_obj, a_view['obj'])

    # Make sure that void types without fields are kept unchanged
    a = np.empty((4,), dtype='V4')
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Make sure that the only type that could fail is properly handled
    dt = np.dtype({'names': [''], 'formats': ['V4']})
    a = np.empty((4,), dtype=dt)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Custom dtypes should not be lost (gh-9161)
    r = [rational(i) for i in range(4)]
    a = np.array(r, dtype=rational)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)
    assert_array_equal([r] * 3, a_view)


class TestSlidingWindowView:
    def test_1d(self):
        arr = np.arange(5)
        arr_view = sliding_window_view(arr, 2)
        expected = np.array([[0, 1],
                             [1, 2],
                             [2, 3],
                             [3, 4]])
        assert_array_equal(arr_view, expected)

    def test_2d(self):
        i, j = np.ogrid[:3, :4]
        arr = 10 * i + j
        shape = (2, 2)
        arr_view = sliding_window_view(arr, shape)
        expected = np.array([[[[0, 1], [10, 11]],
                              [[1, 2], [11, 12]],
                              [[2, 3], [12, 13]]],
                             [[[10, 11], [20, 21]],
                              [[11, 12], [21, 22]],
                              [[12, 13], [22, 23]]]])
        assert_array_equal(arr_view, expected)

    def test_2d_with_axis(self):
        i, j = np.ogrid[:3, :4]
        arr = 10 * i + j
        arr_view = sliding_window_view(arr, 3, 0)
        expected = np.array([[[0, 10, 20],
                              [1, 11, 21],
                              [2, 12, 22],
                              [3, 13, 23]]])
        assert_array_equal(arr_view, expected)

    def test_2d_repeated_axis(self):
        i, j = np.ogrid[:3, :4]
        arr = 10 * i + j
        arr_view = sliding_window_view(arr, (2, 3), (1, 1))
        expected = np.array([[[[0, 1, 2],
                               [1, 2, 3]]],
                             [[[10, 11, 12],
                               [11, 12, 13]]],
                             [[[20, 21, 22],
                               [21, 22, 23]]]])
        assert_array_equal(arr_view, expected)

    def test_2d_without_axis(self):
        i, j = np.ogrid[:4, :4]
        arr = 10 * i + j
        shape = (2, 3)
        arr_view = sliding_window_view(arr, shape)
        expected = np.array([[[[0, 1, 2], [10, 11, 12]],
                              [[1, 2, 3], [11, 12, 13]]],
                             [[[10, 11, 12], [20, 21, 22]],
                              [[11, 12, 13], [21, 22, 23]]],
                             [[[20, 21, 22], [30, 31, 32]],
                              [[21, 22, 23], [31, 32, 33]]]])
        assert_array_equal(arr_view, expected)

    def test_errors(self):
        i, j = np.ogrid[:4, :4]
        arr = 10 * i + j
        with pytest.raises(ValueError, match='cannot contain negative values'):
            sliding_window_view(arr, (-1, 3))
        with pytest.raises(
                ValueError,
                match='must provide window_shape for all dimensions of `x`'):
            sliding_window_view(arr, (1,))
        with pytest.raises(
                ValueError,
                match='Must provide matching length window_shape and axis'):
            sliding_window_view(arr, (1, 3, 4), axis=(0, 1))
        with pytest.raises(
                ValueError,
                match='window shape cannot be larger than input array'):
            sliding_window_view(arr, (5, 5))

    def test_writeable(self):
        arr = np.arange(5)
        view = sliding_window_view(arr, 2, writeable=False)
        assert_(not view.flags.writeable)
        with pytest.raises(
                ValueError,
                match='assignment destination is read-only'):
            view[0, 0] = 3
        view = sliding_window_view(arr, 2, writeable=True)
        assert_(view.flags.writeable)
        view[0, 1] = 3
        assert_array_equal(arr, np.array([0, 3, 2, 3, 4]))

    def test_subok(self):
        class MyArray(np.ndarray):
            pass

        arr = np.arange(5).view(MyArray)
        assert_(not isinstance(sliding_window_view(arr, 2,
                                                   subok=False),
                               MyArray))
        assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray))
        # Default behavior
        assert_(not isinstance(sliding_window_view(arr, 2), MyArray))


def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable)
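# NOTE: as_strided_writeable lacks the ``test_`` prefix, so pytest does not
# collect it automatically; presumably it is only run if called explicitly.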


class VerySimpleSubClass(np.ndarray):
    def __new__(cls, *args, **kwargs):
        return np.array(*args, subok=True, **kwargs).view(cls)


class SimpleSubClass(VerySimpleSubClass):
    def __new__(cls, *args, **kwargs):
        self = np.array(*args, subok=True, **kwargs).view(cls)
        self.info = 'simple'
        return self

    def __array_finalize__(self, obj):
        self.info = getattr(obj, 'info', '') + ' finalized'


def test_subclasses():
    # test that subclass is preserved only if subok=True
    a = VerySimpleSubClass([1, 2, 3, 4])
    assert_(type(a) is VerySimpleSubClass)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    assert_(type(a_view) is np.ndarray)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is VerySimpleSubClass)
    # test that if a subclass has __array_finalize__, it is used
    a = SimpleSubClass([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')

    # similar tests for broadcast_arrays
    b = np.arange(len(a)).reshape(-1, 1)
    a_view, b_view = broadcast_arrays(a, b)
    assert_(type(a_view) is np.ndarray)
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)
    a_view, b_view = broadcast_arrays(a, b, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)

    # and for broadcast_to
    shape = (2, 4)
    a_view = broadcast_to(a, shape)
    assert_(type(a_view) is np.ndarray)
    assert_(a_view.shape == shape)
    a_view = broadcast_to(a, shape, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(a_view.shape == shape)


def test_writeable():
    # broadcast_to should return a readonly array
    original = np.array([1, 2, 3])
    result = broadcast_to(original, (2, 3))
    assert_equal(result.flags.writeable, False)
    assert_raises(ValueError, result.__setitem__, slice(None), 0)

    # but the result of broadcast_arrays needs to be writeable, to
    # preserve backwards compatibility
    test_cases = [((False,), broadcast_arrays(original,)),
                  ((True, False), broadcast_arrays(0, original))]
    for is_broadcast, results in test_cases:
        for array_is_broadcast, result in zip(is_broadcast, results):
            # This will change to False in a future version
            if array_is_broadcast:
                with assert_warns(FutureWarning):
                    assert_equal(result.flags.writeable, True)
                with assert_warns(DeprecationWarning):
                    result[:] = 0
                # Warning not emitted, writing to the array resets it
                assert_equal(result.flags.writeable, True)
            else:
                # No warning:
                assert_equal(result.flags.writeable, True)

    for results in [broadcast_arrays(original),
                    broadcast_arrays(0, original)]:
        for result in results:
            # resets the warn_on_write DeprecationWarning
            result.flags.writeable = True
            # check: no warning emitted
            assert_equal(result.flags.writeable, True)
            result[:] = 0

    # keep readonly input readonly
    original.flags.writeable = False
    _, result = broadcast_arrays(0, original)
    assert_equal(result.flags.writeable, False)

    # regression test for GH6491
    shape = (2,)
    strides = [0]
    tricky_array = as_strided(np.array(0), shape, strides)
    other = np.zeros((1,))
    first, second = broadcast_arrays(tricky_array, other)
    assert_(first.shape == second.shape)


def test_writeable_memoryview():
    # The result of broadcast_arrays exports as a non-writeable memoryview
    # because otherwise there is no good way to opt in to the new behaviour
    # (i.e. you would need to set writeable to False explicitly).
    # See gh-13929.
    original = np.array([1, 2, 3])

    test_cases = [((False,), broadcast_arrays(original,)),
                  ((True, False), broadcast_arrays(0, original))]
    for is_broadcast, results in test_cases:
        for array_is_broadcast, result in zip(is_broadcast, results):
            # This will change to False in a future version
            if array_is_broadcast:
                # memoryview(result, writable=True) will give warning but
                # cannot be tested using the python API.
                assert memoryview(result).readonly
            else:
                assert not memoryview(result).readonly


def test_reference_types():
    input_array = np.array('a', dtype=object)
    expected = np.array(['a'] * 3, dtype=object)
    actual = broadcast_to(input_array, (3,))
    assert_array_equal(expected, actual)

    actual, _ = broadcast_arrays(input_array, np.ones(3))
    assert_array_equal(expected, actual)
559
lib/python3.11/site-packages/numpy/lib/tests/test_twodim_base.py
Normal file
@@ -0,0 +1,559 @@
"""Test functions for matrix module."""
import pytest

import numpy as np
from numpy import (
    add,
    arange,
    array,
    diag,
    eye,
    fliplr,
    flipud,
    histogram2d,
    mask_indices,
    ones,
    tri,
    tril_indices,
    tril_indices_from,
    triu_indices,
    triu_indices_from,
    vander,
    zeros,
)
from numpy.testing import (
    assert_,
    assert_array_almost_equal,
    assert_array_equal,
    assert_array_max_ulp,
    assert_equal,
    assert_raises,
)


def get_mat(n):
    data = arange(n)
    data = add.outer(data, data)
    return data


class TestEye:
    def test_basic(self):
        assert_equal(eye(4),
                     array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1]]))

        assert_equal(eye(4, dtype='f'),
                     array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1]], 'f'))

        assert_equal(eye(3) == 1,
                     eye(3, dtype=bool))

    def test_uint64(self):
        # Regression test for gh-9982
        assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]]))
        assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)),
                     array([[0, 1, 0, 0], [0, 0, 1, 0]]))

    def test_diag(self):
        assert_equal(eye(4, k=1),
                     array([[0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1],
                            [0, 0, 0, 0]]))

        assert_equal(eye(4, k=-1),
                     array([[0, 0, 0, 0],
                            [1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0]]))

    def test_2d(self):
        assert_equal(eye(4, 3),
                     array([[1, 0, 0],
                            [0, 1, 0],
                            [0, 0, 1],
                            [0, 0, 0]]))

        assert_equal(eye(3, 4),
                     array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0]]))

    def test_diag2d(self):
        assert_equal(eye(3, 4, k=2),
                     array([[0, 0, 1, 0],
                            [0, 0, 0, 1],
                            [0, 0, 0, 0]]))

        assert_equal(eye(4, 3, k=-2),
                     array([[0, 0, 0],
                            [0, 0, 0],
                            [1, 0, 0],
                            [0, 1, 0]]))

    def test_eye_bounds(self):
        assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
        assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
        assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
        assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
        assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
        assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
        assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
        assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
        assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])

    def test_strings(self):
        assert_equal(eye(2, 2, dtype='S3'),
                     [[b'1', b''], [b'', b'1']])

    def test_bool(self):
        assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])

    def test_order(self):
        mat_c = eye(4, 3, k=-1)
        mat_f = eye(4, 3, k=-1, order='F')
        assert_equal(mat_c, mat_f)
        assert mat_c.flags.c_contiguous
        assert not mat_c.flags.f_contiguous
        assert not mat_f.flags.c_contiguous
        assert mat_f.flags.f_contiguous


class TestDiag:
    def test_vector(self):
        vals = (100 * arange(5)).astype('l')
        b = zeros((5, 5))
        for k in range(5):
            b[k, k] = vals[k]
        assert_equal(diag(vals), b)
        b = zeros((7, 7))
        c = b.copy()
        for k in range(5):
            b[k, k + 2] = vals[k]
            c[k + 2, k] = vals[k]
        assert_equal(diag(vals, k=2), b)
        assert_equal(diag(vals, k=-2), c)

    def test_matrix(self, vals=None):
        if vals is None:
            vals = (100 * get_mat(5) + 1).astype('l')
        b = zeros((5,))
        for k in range(5):
            b[k] = vals[k, k]
        assert_equal(diag(vals), b)
        b = b * 0
        for k in range(3):
            b[k] = vals[k, k + 2]
        assert_equal(diag(vals, 2), b[:3])
        for k in range(3):
            b[k] = vals[k + 2, k]
        assert_equal(diag(vals, -2), b[:3])

    def test_fortran_order(self):
        vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
        self.test_matrix(vals)

    def test_diag_bounds(self):
        A = [[1, 2], [3, 4], [5, 6]]
        assert_equal(diag(A, k=2), [])
        assert_equal(diag(A, k=1), [2])
        assert_equal(diag(A, k=0), [1, 4])
        assert_equal(diag(A, k=-1), [3, 6])
        assert_equal(diag(A, k=-2), [5])
        assert_equal(diag(A, k=-3), [])

    def test_failure(self):
        assert_raises(ValueError, diag, [[[1]]])


class TestFliplr:
    def test_basic(self):
        assert_raises(ValueError, fliplr, ones(4))
        a = get_mat(4)
        b = a[:, ::-1]
        assert_equal(fliplr(a), b)
        a = [[0, 1, 2],
             [3, 4, 5]]
        b = [[2, 1, 0],
             [5, 4, 3]]
        assert_equal(fliplr(a), b)


class TestFlipud:
    def test_basic(self):
        a = get_mat(4)
        b = a[::-1, :]
        assert_equal(flipud(a), b)
        a = [[0, 1, 2],
             [3, 4, 5]]
        b = [[3, 4, 5],
             [0, 1, 2]]
        assert_equal(flipud(a), b)


class TestHistogram2d:
    def test_simple(self):
        x = array(
            [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
        y = array(
            [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
        xedges = np.linspace(0, 1, 10)
        yedges = np.linspace(0, 1, 10)
        H = histogram2d(x, y, (xedges, yedges))[0]
        answer = array(
            [[0, 0, 0, 1, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 1, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [1, 0, 1, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]])
        assert_array_equal(H.T, answer)
        H = histogram2d(x, y, xedges)[0]
        assert_array_equal(H.T, answer)
        H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
        assert_array_equal(H, eye(10, 10))
        assert_array_equal(xedges, np.linspace(0, 9, 11))
        assert_array_equal(yedges, np.linspace(0, 9, 11))

    def test_asym(self):
        x = array([1, 1, 2, 3, 4, 4, 4, 5])
        y = array([1, 3, 2, 0, 1, 2, 3, 4])
        H, xed, yed = histogram2d(
            x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
        answer = array(
            [[0., 0, 0, 0, 0],
             [0, 1, 0, 1, 0],
             [0, 0, 1, 0, 0],
             [1, 0, 0, 0, 0],
             [0, 1, 1, 1, 0],
             [0, 0, 0, 0, 1]])
        assert_array_almost_equal(H, answer / 8., 3)
        assert_array_equal(xed, np.linspace(0, 6, 7))
        assert_array_equal(yed, np.linspace(0, 5, 6))

    def test_density(self):
        x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
        y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
        H, xed, yed = histogram2d(
            x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
        answer = array([[1, 1, .5],
                        [1, 1, .5],
                        [.5, .5, .25]]) / 9.
        assert_array_almost_equal(H, answer, 3)

    def test_all_outliers(self):
        r = np.random.rand(100) + 1. + 1e6  # histogramdd rounds by decimal=6
        H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
        assert_array_equal(H, 0)

    def test_empty(self):
        a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
        assert_array_max_ulp(a, array([[0.]]))

        a, edge1, edge2 = histogram2d([], [], bins=4)
        assert_array_max_ulp(a, np.zeros((4, 4)))

    def test_binparameter_combination(self):
        x = array(
            [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
             0.59944483, 1])
        y = array(
            [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
             0.15886423, 1])
        edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
        H, xe, ye = histogram2d(x, y, (edges, 4))
        answer = array(
            [[2., 0., 0., 0.],
             [0., 1., 0., 0.],
             [0., 0., 0., 0.],
             [0., 0., 0., 0.],
             [0., 1., 0., 0.],
             [1., 0., 0., 0.],
             [0., 1., 0., 0.],
             [0., 0., 0., 0.],
             [0., 0., 0., 0.],
             [0., 0., 0., 1.]])
        assert_array_equal(H, answer)
        assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
        H, xe, ye = histogram2d(x, y, (4, edges))
        answer = array(
            [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
             [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
        assert_array_equal(H, answer)
        assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))

    def test_dispatch(self):
        class ShouldDispatch:
            def __array_function__(self, function, types, args, kwargs):
                return types, args, kwargs

        xy = [1, 2]
        s_d = ShouldDispatch()
        r = histogram2d(s_d, xy)
        # Cannot use assert_equal since that dispatches...
        assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
        r = histogram2d(xy, s_d)
        assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
        r = histogram2d(xy, xy, bins=s_d)
        assert_(r == ((ShouldDispatch,), (xy, xy), {'bins': s_d}))
        r = histogram2d(xy, xy, bins=[s_d, 5])
        assert_(r == ((ShouldDispatch,), (xy, xy), {'bins': [s_d, 5]}))
        assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
        r = histogram2d(xy, xy, weights=s_d)
        assert_(r == ((ShouldDispatch,), (xy, xy), {'weights': s_d}))
|
||||
|
||||
@pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)])
|
||||
def test_bad_length(self, x_len, y_len):
|
||||
x, y = np.ones(x_len), np.ones(y_len)
|
||||
with pytest.raises(ValueError,
|
||||
match='x and y must have the same length.'):
|
||||
histogram2d(x, y)
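
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# the assertions above compare against H.T because histogram2d returns H with
# the x bins along the first axis and the y bins along the second. A minimal,
# self-contained check of that convention:
_x_edges = np.array([0., 1., 2., 3.])
_y_edges = np.array([0., 1., 2., 3.])
_H, _, _ = np.histogram2d([0.5], [2.5], bins=(_x_edges, _y_edges))
assert _H[0, 2] == 1  # the single sample lands in x-bin 0, y-bin 2
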
class TestTri:
    def test_dtype(self):
        out = array([[1, 0, 0],
                     [1, 1, 0],
                     [1, 1, 1]])
        assert_array_equal(tri(3), out)
        assert_array_equal(tri(3, dtype=bool), out.astype(bool))
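
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# tri(N, M=None, k=0) places ones at and below the k-th diagonal, so a
# positive k shifts the filled region above the main diagonal and a negative
# k shifts it below.
assert_array_equal(tri(3, k=1), array([[1., 1., 0.],
                                       [1., 1., 1.],
                                       [1., 1., 1.]]))
assert_array_equal(tri(3, k=-1), array([[0., 0., 0.],
                                        [1., 0., 0.],
                                        [1., 1., 0.]]))
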
def test_tril_triu_ndim2():
    for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
        a = np.ones((2, 2), dtype=dtype)
        b = np.tril(a)
        c = np.triu(a)
        assert_array_equal(b, [[1, 0], [1, 1]])
        assert_array_equal(c, b.T)
        # should return the same dtype as the original array
        assert_equal(b.dtype, a.dtype)
        assert_equal(c.dtype, a.dtype)


def test_tril_triu_ndim3():
    for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
        a = np.array([
            [[1, 1], [1, 1]],
            [[1, 1], [1, 0]],
            [[1, 1], [0, 0]],
        ], dtype=dtype)
        a_tril_desired = np.array([
            [[1, 0], [1, 1]],
            [[1, 0], [1, 0]],
            [[1, 0], [0, 0]],
        ], dtype=dtype)
        a_triu_desired = np.array([
            [[1, 1], [0, 1]],
            [[1, 1], [0, 0]],
            [[1, 1], [0, 0]],
        ], dtype=dtype)
        a_triu_observed = np.triu(a)
        a_tril_observed = np.tril(a)
        assert_array_equal(a_triu_observed, a_triu_desired)
        assert_array_equal(a_tril_observed, a_tril_desired)
        assert_equal(a_triu_observed.dtype, a.dtype)
        assert_equal(a_tril_observed.dtype, a.dtype)


def test_tril_triu_with_inf():
    # Issue 4859
    arr = np.array([[1, 1, np.inf],
                    [1, 1, 1],
                    [np.inf, 1, 1]])
    out_tril = np.array([[1, 0, 0],
                         [1, 1, 0],
                         [np.inf, 1, 1]])
    out_triu = out_tril.T
    assert_array_equal(np.triu(arr), out_triu)
    assert_array_equal(np.tril(arr), out_tril)


def test_tril_triu_dtype():
    # Issue 4916
    # tril and triu should return the same dtype as input
    for c in np.typecodes['All']:
        if c == 'V':
            continue
        arr = np.zeros((3, 3), dtype=c)
        assert_equal(np.triu(arr).dtype, arr.dtype)
        assert_equal(np.tril(arr).dtype, arr.dtype)

    # check special cases
    arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],
                    ['2004-01-01T12:00', '2003-01-03T13:45']],
                   dtype='datetime64')
    assert_equal(np.triu(arr).dtype, arr.dtype)
    assert_equal(np.tril(arr).dtype, arr.dtype)

    arr = np.zeros((3, 3), dtype='f4,f4')
    assert_equal(np.triu(arr).dtype, arr.dtype)
    assert_equal(np.tril(arr).dtype, arr.dtype)


def test_mask_indices():
    # simple test without offset
    iu = mask_indices(3, np.triu)
    a = np.arange(9).reshape(3, 3)
    assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))
    # Now with an offset
    iu1 = mask_indices(3, np.triu, 1)
    assert_array_equal(a[iu1], array([1, 2, 5]))
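
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# mask_indices(n, mask_func, k) evaluates mask_func on an all-ones (n, n)
# array and returns the indices where the result is nonzero, so for
# mask_func=np.triu it agrees with triu_indices.
_iu_mask = mask_indices(3, np.triu, 1)
_iu_direct = triu_indices(3, k=1)
assert_array_equal(_iu_mask[0], _iu_direct[0])
assert_array_equal(_iu_mask[1], _iu_direct[1])
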
def test_tril_indices():
    # indices without and with offset
    il1 = tril_indices(4)
    il2 = tril_indices(4, k=2)
    il3 = tril_indices(4, m=5)
    il4 = tril_indices(4, k=2, m=5)

    a = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12],
                  [13, 14, 15, 16]])
    b = np.arange(1, 21).reshape(4, 5)

    # indexing:
    assert_array_equal(a[il1],
                       array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
    assert_array_equal(b[il3],
                       array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))

    # And for assigning values:
    a[il1] = -1
    assert_array_equal(a,
                       array([[-1, 2, 3, 4],
                              [-1, -1, 7, 8],
                              [-1, -1, -1, 12],
                              [-1, -1, -1, -1]]))
    b[il3] = -1
    assert_array_equal(b,
                       array([[-1, 2, 3, 4, 5],
                              [-1, -1, 8, 9, 10],
                              [-1, -1, -1, 14, 15],
                              [-1, -1, -1, -1, 20]]))
    # These cover almost the whole array (two diagonals right of the main one):
    a[il2] = -10
    assert_array_equal(a,
                       array([[-10, -10, -10, 4],
                              [-10, -10, -10, -10],
                              [-10, -10, -10, -10],
                              [-10, -10, -10, -10]]))
    b[il4] = -10
    assert_array_equal(b,
                       array([[-10, -10, -10, 4, 5],
                              [-10, -10, -10, -10, 10],
                              [-10, -10, -10, -10, -10],
                              [-10, -10, -10, -10, -10]]))
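
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# with m given, tril_indices(n, k, m) describes an n-by-m rectangle, and every
# returned (row, col) pair satisfies col <= row + k.
_rows, _cols = tril_indices(4, k=2, m=5)
assert np.all(_cols <= _rows + 2)
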
class TestTriuIndices:
    def test_triu_indices(self):
        iu1 = triu_indices(4)
        iu2 = triu_indices(4, k=2)
        iu3 = triu_indices(4, m=5)
        iu4 = triu_indices(4, k=2, m=5)

        a = np.array([[1, 2, 3, 4],
                      [5, 6, 7, 8],
                      [9, 10, 11, 12],
                      [13, 14, 15, 16]])
        b = np.arange(1, 21).reshape(4, 5)

        # Both for indexing:
        assert_array_equal(a[iu1],
                           array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
        assert_array_equal(b[iu3],
                           array([1, 2, 3, 4, 5, 7, 8, 9,
                                  10, 13, 14, 15, 19, 20]))

        # And for assigning values:
        a[iu1] = -1
        assert_array_equal(a,
                           array([[-1, -1, -1, -1],
                                  [5, -1, -1, -1],
                                  [9, 10, -1, -1],
                                  [13, 14, 15, -1]]))
        b[iu3] = -1
        assert_array_equal(b,
                           array([[-1, -1, -1, -1, -1],
                                  [6, -1, -1, -1, -1],
                                  [11, 12, -1, -1, -1],
                                  [16, 17, 18, -1, -1]]))

        # These cover almost the whole array (two diagonals right of the
        # main one):
        a[iu2] = -10
        assert_array_equal(a,
                           array([[-1, -1, -10, -10],
                                  [5, -1, -1, -10],
                                  [9, 10, -1, -1],
                                  [13, 14, 15, -1]]))
        b[iu4] = -10
        assert_array_equal(b,
                           array([[-1, -1, -10, -10, -10],
                                  [6, -1, -1, -10, -10],
                                  [11, 12, -1, -1, -10],
                                  [16, 17, 18, -1, -1]]))


class TestTrilIndicesFrom:
    def test_exceptions(self):
        assert_raises(ValueError, tril_indices_from, np.ones((2,)))
        assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
        # assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))


class TestTriuIndicesFrom:
    def test_exceptions(self):
        assert_raises(ValueError, triu_indices_from, np.ones((2,)))
        assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
        # assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))


class TestVander:
    def test_basic(self):
        c = np.array([0, 1, -2, 3])
        v = vander(c)
        powers = np.array([[0, 0, 0, 0, 1],
                           [1, 1, 1, 1, 1],
                           [16, -8, 4, -2, 1],
                           [81, 27, 9, 3, 1]])
        # Check default value of N:
        assert_array_equal(v, powers[:, 1:])
        # Check a range of N values, including 0 and 5 (greater than default)
        m = powers.shape[1]
        for n in range(6):
            v = vander(c, N=n)
            assert_array_equal(v, powers[:, m - n:m])

    def test_dtypes(self):
        c = array([11, -12, 13], dtype=np.int8)
        v = vander(c)
        expected = np.array([[121, 11, 1],
                             [144, -12, 1],
                             [169, 13, 1]])
        assert_array_equal(v, expected)

        c = array([1.0 + 1j, 1.0 - 1j])
        v = vander(c, N=3)
        expected = np.array([[2j, 1 + 1j, 1],
                             [-2j, 1 - 1j, 1]])
        # The data is floating point, but the values are small integers,
        # so assert_array_equal *should* be safe here (rather than, say,
        # assert_array_almost_equal).
        assert_array_equal(v, expected)
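
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# vander's columns hold decreasing powers by default, x**(N-1) down to x**0;
# increasing=True simply reverses the column order.
_c = np.array([2, 3])
assert_array_equal(vander(_c, N=3), np.array([[4, 2, 1],
                                              [9, 3, 1]]))
assert_array_equal(np.vander(_c, N=3, increasing=True),
                   np.array([[1, 2, 4],
                             [1, 3, 9]]))
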
473
lib/python3.11/site-packages/numpy/lib/tests/test_type_check.py
Normal file
@ -0,0 +1,473 @@
import numpy as np
from numpy import (
    common_type,
    iscomplex,
    iscomplexobj,
    isneginf,
    isposinf,
    isreal,
    isrealobj,
    mintypecode,
    nan_to_num,
    real_if_close,
)
from numpy.testing import assert_, assert_array_equal, assert_equal


def assert_all(x):
    assert_(np.all(x), x)


class TestCommonType:
    def test_basic(self):
        ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
        af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
        af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
        af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
        acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex64)
        acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex128)
        assert_(common_type(ai32) == np.float64)
        assert_(common_type(af16) == np.float16)
        assert_(common_type(af32) == np.float32)
        assert_(common_type(af64) == np.float64)
        assert_(common_type(acs) == np.complex64)
        assert_(common_type(acd) == np.complex128)


class TestMintypecode:

    def test_default_1(self):
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype), 'd')
        assert_equal(mintypecode('f'), 'f')
        assert_equal(mintypecode('d'), 'd')
        assert_equal(mintypecode('F'), 'F')
        assert_equal(mintypecode('D'), 'D')

    def test_default_2(self):
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype + 'f'), 'f')
            assert_equal(mintypecode(itype + 'd'), 'd')
            assert_equal(mintypecode(itype + 'F'), 'F')
            assert_equal(mintypecode(itype + 'D'), 'D')
        assert_equal(mintypecode('ff'), 'f')
        assert_equal(mintypecode('fd'), 'd')
        assert_equal(mintypecode('fF'), 'F')
        assert_equal(mintypecode('fD'), 'D')
        assert_equal(mintypecode('df'), 'd')
        assert_equal(mintypecode('dd'), 'd')
        #assert_equal(mintypecode('dF',savespace=1),'F')
        assert_equal(mintypecode('dF'), 'D')
        assert_equal(mintypecode('dD'), 'D')
        assert_equal(mintypecode('Ff'), 'F')
        #assert_equal(mintypecode('Fd',savespace=1),'F')
        assert_equal(mintypecode('Fd'), 'D')
        assert_equal(mintypecode('FF'), 'F')
        assert_equal(mintypecode('FD'), 'D')
        assert_equal(mintypecode('Df'), 'D')
        assert_equal(mintypecode('Dd'), 'D')
        assert_equal(mintypecode('DF'), 'D')
        assert_equal(mintypecode('DD'), 'D')

    def test_default_3(self):
        assert_equal(mintypecode('fdF'), 'D')
        #assert_equal(mintypecode('fdF',savespace=1),'F')
        assert_equal(mintypecode('fdD'), 'D')
        assert_equal(mintypecode('fFD'), 'D')
        assert_equal(mintypecode('dFD'), 'D')

        assert_equal(mintypecode('ifd'), 'd')
        assert_equal(mintypecode('ifF'), 'F')
        assert_equal(mintypecode('ifD'), 'D')
        assert_equal(mintypecode('idF'), 'D')
        #assert_equal(mintypecode('idF',savespace=1),'F')
        assert_equal(mintypecode('idD'), 'D')
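
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# mintypecode picks the smallest typecode from its typeset (default 'GDFgdf')
# to which every input typecode can be cast, which is why any 'D' in the
# input forces 'D' above.
assert mintypecode('fF') == 'F'   # single-precision complex covers both
assert mintypecode('dF') == 'D'   # needs double precision *and* complex
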
class TestIsscalar:

    def test_basic(self):
        assert_(np.isscalar(3))
        assert_(not np.isscalar([3]))
        assert_(not np.isscalar((3,)))
        assert_(np.isscalar(3j))
        assert_(np.isscalar(4.0))


class TestReal:

    def test_real(self):
        y = np.random.rand(10,)
        assert_array_equal(y, np.real(y))

        y = np.array(1)
        out = np.real(y)
        assert_array_equal(y, out)
        assert_(isinstance(out, np.ndarray))

        y = 1
        out = np.real(y)
        assert_equal(y, out)
        assert_(not isinstance(out, np.ndarray))

    def test_cmplx(self):
        y = np.random.rand(10,) + 1j * np.random.rand(10,)
        assert_array_equal(y.real, np.real(y))

        y = np.array(1 + 1j)
        out = np.real(y)
        assert_array_equal(y.real, out)
        assert_(isinstance(out, np.ndarray))

        y = 1 + 1j
        out = np.real(y)
        assert_equal(1.0, out)
        assert_(not isinstance(out, np.ndarray))


class TestImag:

    def test_real(self):
        y = np.random.rand(10,)
        assert_array_equal(0, np.imag(y))

        y = np.array(1)
        out = np.imag(y)
        assert_array_equal(0, out)
        assert_(isinstance(out, np.ndarray))

        y = 1
        out = np.imag(y)
        assert_equal(0, out)
        assert_(not isinstance(out, np.ndarray))

    def test_cmplx(self):
        y = np.random.rand(10,) + 1j * np.random.rand(10,)
        assert_array_equal(y.imag, np.imag(y))

        y = np.array(1 + 1j)
        out = np.imag(y)
        assert_array_equal(y.imag, out)
        assert_(isinstance(out, np.ndarray))

        y = 1 + 1j
        out = np.imag(y)
        assert_equal(1.0, out)
        assert_(not isinstance(out, np.ndarray))


class TestIscomplex:

    def test_fail(self):
        z = np.array([-1, 0, 1])
        res = iscomplex(z)
        assert_(not np.any(res, axis=0))

    def test_pass(self):
        z = np.array([-1j, 1, 0])
        res = iscomplex(z)
        assert_array_equal(res, [1, 0, 0])


class TestIsreal:

    def test_pass(self):
        z = np.array([-1, 0, 1j])
        res = isreal(z)
        assert_array_equal(res, [1, 1, 0])

    def test_fail(self):
        z = np.array([-1j, 1, 0])
        res = isreal(z)
        assert_array_equal(res, [0, 1, 1])


class TestIscomplexobj:

    def test_basic(self):
        z = np.array([-1, 0, 1])
        assert_(not iscomplexobj(z))
        z = np.array([-1j, 0, -1])
        assert_(iscomplexobj(z))

    def test_scalar(self):
        assert_(not iscomplexobj(1.0))
        assert_(iscomplexobj(1 + 0j))

    def test_list(self):
        assert_(iscomplexobj([3, 1 + 0j, True]))
        assert_(not iscomplexobj([3, 1, True]))

    def test_duck(self):
        class DummyComplexArray:
            @property
            def dtype(self):
                return np.dtype(complex)
        dummy = DummyComplexArray()
        assert_(iscomplexobj(dummy))

    def test_pandas_duck(self):
        # This tests a custom np.dtype duck-typed class, such as used by pandas
        # (pandas.core.dtypes)
        class PdComplex(np.complex128):
            pass

        class PdDtype:
            name = 'category'
            names = None
            type = PdComplex
            kind = 'c'
            str = '<c16'
            base = np.dtype('complex128')

        class DummyPd:
            @property
            def dtype(self):
                return PdDtype
        dummy = DummyPd()
        assert_(iscomplexobj(dummy))

    def test_custom_dtype_duck(self):
        class MyArray(list):
            @property
            def dtype(self):
                return complex

        a = MyArray([1 + 0j, 2 + 0j, 3 + 0j])
        assert_(iscomplexobj(a))
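
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# iscomplexobj inspects the object's dtype attribute when present (falling
# back to converting with asarray), so any object exposing a real dtype is
# treated as real without touching its data.
class _DuckReal:
    @property
    def dtype(self):
        return np.dtype(float)

assert_(not iscomplexobj(_DuckReal()))
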
class TestIsrealobj:
    def test_basic(self):
        z = np.array([-1, 0, 1])
        assert_(isrealobj(z))
        z = np.array([-1j, 0, -1])
        assert_(not isrealobj(z))


class TestIsnan:

    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isnan(z) == 0
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((1.,)) / 0.) == 0)

    def test_neginf(self):
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((-1.,)) / 0.) == 0)

    def test_ind(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array((0.,)) / 0.) == 1)

    def test_integer(self):
        assert_all(np.isnan(1) == 0)

    def test_complex(self):
        assert_all(np.isnan(1 + 1j) == 0)

    def test_complex1(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array(0 + 0j) / 0.) == 1)


class TestIsfinite:
    # Fixme, wrong place, isfinite now ufunc

    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isfinite(z) == 1
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((1.,)) / 0.) == 0)

    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((-1.,)) / 0.) == 0)

    def test_ind(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((0.,)) / 0.) == 0)

    def test_integer(self):
        assert_all(np.isfinite(1) == 1)

    def test_complex(self):
        assert_all(np.isfinite(1 + 1j) == 1)

    def test_complex1(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array(1 + 1j) / 0.) == 0)


class TestIsinf:
    # Fixme, wrong place, isinf now ufunc

    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isinf(z) == 0
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((1.,)) / 0.) == 1)

    def test_posinf_scalar(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(1.,) / 0.) == 1)

    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((-1.,)) / 0.) == 1)

    def test_neginf_scalar(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(-1.) / 0.) == 1)

    def test_ind(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((0.,)) / 0.) == 0)


class TestIsposinf:

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = isposinf(np.array((-1., 0, 1)) / 0.)
        assert_(vals[0] == 0)
        assert_(vals[1] == 0)
        assert_(vals[2] == 1)


class TestIsneginf:

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = isneginf(np.array((-1., 0, 1)) / 0.)
        assert_(vals[0] == 1)
        assert_(vals[1] == 0)
        assert_(vals[2] == 0)
class TestNanToNum:

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1)) / 0.)
        assert_all(vals[0] < -1e10)
        assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(np.isfinite(vals[2]))
        assert_equal(type(vals), np.ndarray)

        # perform the same tests but with nan, posinf and neginf keywords
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1)) / 0.,
                              nan=10, posinf=20, neginf=30)
        assert_equal(vals, [30, 10, 20])
        assert_all(np.isfinite(vals[[0, 2]]))
        assert_equal(type(vals), np.ndarray)

        # perform the same test but in-place
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = np.array((-1., 0, 1)) / 0.
        result = nan_to_num(vals, copy=False)

        assert_(result is vals)
        assert_all(vals[0] < -1e10)
        assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(np.isfinite(vals[2]))
        assert_equal(type(vals), np.ndarray)

        # perform the same test but in-place
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = np.array((-1., 0, 1)) / 0.
        result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)

        assert_(result is vals)
        assert_equal(vals, [30, 10, 20])
        assert_all(np.isfinite(vals[[0, 2]]))
        assert_equal(type(vals), np.ndarray)

    def test_array(self):
        vals = nan_to_num([1])
        assert_array_equal(vals, np.array([1], int))
        assert_equal(type(vals), np.ndarray)
        vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
        assert_array_equal(vals, np.array([1], int))
        assert_equal(type(vals), np.ndarray)

    def test_integer(self):
        vals = nan_to_num(1)
        assert_all(vals == 1)
        assert_equal(type(vals), np.int_)
        vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
        assert_all(vals == 1)
        assert_equal(type(vals), np.int_)

    def test_float(self):
        vals = nan_to_num(1.0)
        assert_all(vals == 1.0)
        assert_equal(type(vals), np.float64)
        vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
        assert_all(vals == 1.1)
        assert_equal(type(vals), np.float64)

    def test_complex_good(self):
        vals = nan_to_num(1 + 1j)
        assert_all(vals == 1 + 1j)
        assert_equal(type(vals), np.complex128)
        vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30)
        assert_all(vals == 1 + 1j)
        assert_equal(type(vals), np.complex128)

    def test_complex_bad(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(0 + 1.j) / 0.
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_all(np.isfinite(vals))
        assert_equal(type(vals), np.complex128)

    def test_complex_bad2(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(-1 + 1.j) / 0.
        vals = nan_to_num(v)
        assert_all(np.isfinite(vals))
        assert_equal(type(vals), np.complex128)
        # Fixme
        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
        # !! This is actually (unexpectedly) positive
        # !! inf. Comment out for now, and see if it
        # !! changes
        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))

    def test_do_not_rewrite_previous_keyword(self):
        # This is done to test that when, for instance, nan=np.inf then these
        # values are not rewritten by posinf keyword to the posinf value.
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999)
        assert_all(np.isfinite(vals[[0, 2]]))
        assert_all(vals[0] < -1e10)
        assert_equal(vals[[1, 2]], [np.inf, 999])
        assert_equal(type(vals), np.ndarray)
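
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# nan_to_num applies each replacement per category, so a nan replaced *with*
# np.inf stays np.inf rather than being rewritten by the posinf keyword, as
# the test above relies on.
_v = nan_to_num(np.array([np.nan]), nan=np.inf, posinf=999.0)
assert _v[0] == np.inf
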
class TestRealIfClose:

    def test_basic(self):
        a = np.random.rand(10)
        b = real_if_close(a + 1e-15j)
        assert_all(isrealobj(b))
        assert_array_equal(a, b)
        b = real_if_close(a + 1e-7j)
        assert_all(iscomplexobj(b))
        b = real_if_close(a + 1e-7j, tol=1e-6)
        assert_all(isrealobj(b))
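
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# per the docs, a tol greater than 1 is read as a multiple of the dtype's
# machine epsilon, so tol=1000 on float64 tolerates imaginary parts up to
# roughly 2.2e-13.
_z = np.array([1.0 + 1e-14j])
assert isrealobj(real_if_close(_z, tol=1000))
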
97
lib/python3.11/site-packages/numpy/lib/tests/test_ufunclike.py
Normal file
@ -0,0 +1,97 @@
import numpy as np
from numpy import fix, isneginf, isposinf
from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises
class TestUfunclike:

    def test_isposinf(self):
        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
        out = np.zeros(a.shape, bool)
        tgt = np.array([True, False, False, False, False, False])

        res = isposinf(a)
        assert_equal(res, tgt)
        res = isposinf(a, out)
        assert_equal(res, tgt)
        assert_equal(out, tgt)

        a = a.astype(np.complex128)
        with assert_raises(TypeError):
            isposinf(a)

    def test_isneginf(self):
        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
        out = np.zeros(a.shape, bool)
        tgt = np.array([False, True, False, False, False, False])

        res = isneginf(a)
        assert_equal(res, tgt)
        res = isneginf(a, out)
        assert_equal(res, tgt)
        assert_equal(out, tgt)

        a = a.astype(np.complex128)
        with assert_raises(TypeError):
            isneginf(a)

    def test_fix(self):
        a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
        out = np.zeros(a.shape, float)
        tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]])

        res = fix(a)
        assert_equal(res, tgt)
        res = fix(a, out)
        assert_equal(res, tgt)
        assert_equal(out, tgt)
        assert_equal(fix(3.14), 3)

    def test_fix_with_subclass(self):
        class MyArray(np.ndarray):
            def __new__(cls, data, metadata=None):
                res = np.array(data, copy=True).view(cls)
                res.metadata = metadata
                return res

            def __array_wrap__(self, obj, context=None, return_scalar=False):
                if not isinstance(obj, MyArray):
                    obj = obj.view(MyArray)
                if obj.metadata is None:
                    obj.metadata = self.metadata
                return obj

            def __array_finalize__(self, obj):
                self.metadata = getattr(obj, 'metadata', None)
                return self

        a = np.array([1.1, -1.1])
        m = MyArray(a, metadata='foo')
        f = fix(m)
        assert_array_equal(f, np.array([1, -1]))
        assert_(isinstance(f, MyArray))
        assert_equal(f.metadata, 'foo')

        # check 0d arrays don't decay to scalars
        m0d = m[0, ...]
        m0d.metadata = 'bar'
        f0d = fix(m0d)
        assert_(isinstance(f0d, MyArray))
        assert_equal(f0d.metadata, 'bar')

    def test_scalar(self):
        x = np.inf
        actual = np.isposinf(x)
        expected = np.True_
        assert_equal(actual, expected)
        assert_equal(type(actual), type(expected))

        x = -3.4
        actual = np.fix(x)
        expected = np.float64(-3.0)
        assert_equal(actual, expected)
        assert_equal(type(actual), type(expected))

        out = np.array(0.0)
        actual = np.fix(x, out=out)
        assert_(actual is out)
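
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# fix rounds toward zero, i.e. floor for positive values and ceil for
# negative ones, which is why 1.8 and -1.8 both map to +/-1 above.
assert_equal(np.fix(2.9), 2.0)
assert_equal(np.fix(-2.9), -2.0)
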
80
lib/python3.11/site-packages/numpy/lib/tests/test_utils.py
Normal file
@ -0,0 +1,80 @@
from io import StringIO

import pytest

import numpy as np
import numpy.lib._utils_impl as _utils_impl
from numpy.testing import assert_raises_regex


def test_assert_raises_regex_context_manager():
    with assert_raises_regex(ValueError, 'no deprecation warning'):
        raise ValueError('no deprecation warning')


def test_info_method_heading():
    # info(class) should only print "Methods:" heading if methods exist

    class NoPublicMethods:
        pass

    class WithPublicMethods:
        def first_method():
            pass

    def _has_method_heading(cls):
        out = StringIO()
        np.info(cls, output=out)
        return 'Methods:' in out.getvalue()

    assert _has_method_heading(WithPublicMethods)
    assert not _has_method_heading(NoPublicMethods)


def test_drop_metadata():
    def _compare_dtypes(dt1, dt2):
        return np.can_cast(dt1, dt2, casting='no')

    # structured dtype
    dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])],
                  metadata={'msg': 'titi'})
    dt_m = _utils_impl.drop_metadata(dt)
    assert _compare_dtypes(dt, dt_m) is True
    assert dt_m.metadata is None
    assert dt_m['l1'].metadata is None
    assert dt_m['l1']['l2'].metadata is None

    # alignment
    dt = np.dtype([('x', '<f8'), ('y', '<i4')],
                  align=True,
                  metadata={'msg': 'toto'})
    dt_m = _utils_impl.drop_metadata(dt)
    assert _compare_dtypes(dt, dt_m) is True
    assert dt_m.metadata is None

    # subdtype
    dt = np.dtype('8f',
                  metadata={'msg': 'toto'})
    dt_m = _utils_impl.drop_metadata(dt)
    assert _compare_dtypes(dt, dt_m) is True
    assert dt_m.metadata is None

    # scalar
    dt = np.dtype('uint32',
                  metadata={'msg': 'toto'})
    dt_m = _utils_impl.drop_metadata(dt)
    assert _compare_dtypes(dt, dt_m) is True
    assert dt_m.metadata is None


@pytest.mark.parametrize("dtype",
                         [np.dtype("i,i,i,i")[["f1", "f3"]],
                          np.dtype("f8"),
                          np.dtype("10i")])
def test_drop_metadata_identity_and_copy(dtype):
    # If there is no metadata, the identity is preserved:
    assert _utils_impl.drop_metadata(dtype) is dtype

    # If there is any, it is dropped (subforms are checked above)
    dtype = np.dtype(dtype, metadata={1: 2})
    assert _utils_impl.drop_metadata(dtype).metadata is None
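
# Illustrative sketch (added for clarity; assumes only the public NumPy API):
# metadata is an optional, otherwise-inert mapping attached at dtype
# construction; dropping it yields an equivalent dtype that still casts under
# 'no' casting.
_dt = np.dtype("f8", metadata={"unit": "m"})
assert _dt.metadata == {"unit": "m"}
assert _utils_impl.drop_metadata(_dt).metadata is None
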
1
lib/python3.11/site-packages/numpy/lib/user_array.py
Normal file
@ -0,0 +1 @@
from ._user_array_impl import __doc__, container  # noqa: F401
1
lib/python3.11/site-packages/numpy/lib/user_array.pyi
Normal file
@ -0,0 +1 @@
from ._user_array_impl import container as container