h"]
-_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
-_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
-_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
-_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
-_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
-_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
-_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
-_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
-_UIntCodes = Literal["ulong", "uint", "L", "=L", "<L", ">L"]
-_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
-_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
-_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
-_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
-_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
-_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
-_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
-_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
-_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
-_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
-_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
-_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
-_DT64Codes = Literal["datetime64", "=datetime64", "<datetime64", ">datetime64", "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]", "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]", "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]", "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]", "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]", "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]", "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]", "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]", "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]", "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]", "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]", "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]", "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]", "M", "=M", "<M", ">M", "M8", "=M8", "<M8", ">M8", "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]", "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]", "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]", "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]", "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]", "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]", "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]", "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]", "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]", "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]", "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]", "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]", "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",]
-_TD64Codes = Literal["timedelta64", "=timedelta64", "<timedelta64", ">timedelta64", "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]", "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]", "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]", "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]", "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]", "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]", "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]", "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]", "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]", "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]", "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]", "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]", "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]", "m", "=m", "<m", ">m", "m8", "=m8", "<m8", ">m8", "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]", "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]", "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]", "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]", "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]", "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]", "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]", "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]", "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]", "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]", "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]", "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]", "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",]
diff --git a/typings/numpy/_typing/_dtype_like.pyi b/typings/numpy/_typing/_dtype_like.pyi
deleted file mode 100644
index 54fbfef..0000000
--- a/typings/numpy/_typing/_dtype_like.pyi
+++ /dev/null
@@ -1,50 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import numpy as np
-from collections.abc import Sequence
-from typing import Any, Protocol, Sequence, TypeVar, TypedDict, Union, runtime_checkable
-from ._shape import _ShapeLike
-from ._char_codes import _BoolCodes, _ByteCodes, _BytesCodes, _CDoubleCodes, _CLongDoubleCodes, _CSingleCodes, _Complex128Codes, _Complex64Codes, _DT64Codes, _DoubleCodes, _Float16Codes, _Float32Codes, _Float64Codes, _HalfCodes, _Int16Codes, _Int32Codes, _Int64Codes, _Int8Codes, _IntCCodes, _IntCodes, _IntPCodes, _LongDoubleCodes, _LongLongCodes, _ObjectCodes, _ShortCodes, _SingleCodes, _StrCodes, _TD64Codes, _UByteCodes, _UInt16Codes, _UInt32Codes, _UInt64Codes, _UInt8Codes, _UIntCCodes, _UIntCodes, _UIntPCodes, _ULongLongCodes, _UShortCodes, _VoidCodes
-
-_SCT = TypeVar("_SCT", bound=np.generic)
-_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any])
-_DTypeLikeNested = Any
-class _DTypeDictBase(TypedDict):
- names: Sequence[str]
- formats: Sequence[_DTypeLikeNested]
- ...
-
-
-class _DTypeDict(_DTypeDictBase, total=False):
- offsets: Sequence[int]
- titles: Sequence[Any]
- itemsize: int
- aligned: bool
- ...
-
-
-@runtime_checkable
-class _SupportsDType(Protocol[_DType_co]):
- @property
- def dtype(self) -> _DType_co:
- ...
-
-
-
-_DTypeLike = Union[np.dtype[_SCT], type[_SCT], _SupportsDType[np.dtype[_SCT]],]
-_VoidDTypeLike = Union[tuple[_DTypeLikeNested, int], tuple[_DTypeLikeNested, _ShapeLike], list[Any], _DTypeDict, tuple[_DTypeLikeNested, _DTypeLikeNested],]
-DTypeLike = Union[np.dtype[Any], None, type[Any], _SupportsDType[np.dtype[Any]], str, _VoidDTypeLike,]
-_DTypeLikeBool = Union[type[bool], type[np.bool_], np.dtype[np.bool_], _SupportsDType[np.dtype[np.bool_]], _BoolCodes,]
-_DTypeLikeUInt = Union[type[np.unsignedinteger], np.dtype[np.unsignedinteger], _SupportsDType[np.dtype[np.unsignedinteger]], _UInt8Codes, _UInt16Codes, _UInt32Codes, _UInt64Codes, _UByteCodes, _UShortCodes, _UIntCCodes, _UIntPCodes, _UIntCodes, _ULongLongCodes,]
-_DTypeLikeInt = Union[type[int], type[np.signedinteger], np.dtype[np.signedinteger], _SupportsDType[np.dtype[np.signedinteger]], _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, _ByteCodes, _ShortCodes, _IntCCodes, _IntPCodes, _IntCodes, _LongLongCodes,]
-_DTypeLikeFloat = Union[type[float], type[np.floating], np.dtype[np.floating], _SupportsDType[np.dtype[np.floating]], _Float16Codes, _Float32Codes, _Float64Codes, _HalfCodes, _SingleCodes, _DoubleCodes, _LongDoubleCodes,]
-_DTypeLikeComplex = Union[type[complex], type[np.complexfloating], np.dtype[np.complexfloating], _SupportsDType[np.dtype[np.complexfloating]], _Complex64Codes, _Complex128Codes, _CSingleCodes, _CDoubleCodes, _CLongDoubleCodes,]
-_DTypeLikeTD64 = Union[type[np.timedelta64], np.dtype[np.timedelta64], _SupportsDType[np.dtype[np.timedelta64]], _TD64Codes,]
-_DTypeLikeDT64 = Union[type[np.datetime64], np.dtype[np.datetime64], _SupportsDType[np.dtype[np.datetime64]], _DT64Codes,]
-_DTypeLikeStr = Union[type[str], type[np.str_], np.dtype[np.str_], _SupportsDType[np.dtype[np.str_]], _StrCodes,]
-_DTypeLikeBytes = Union[type[bytes], type[np.bytes_], np.dtype[np.bytes_], _SupportsDType[np.dtype[np.bytes_]], _BytesCodes,]
-_DTypeLikeVoid = Union[type[np.void], np.dtype[np.void], _SupportsDType[np.dtype[np.void]], _VoidCodes, _VoidDTypeLike,]
-_DTypeLikeObject = Union[type, np.dtype[np.object_], _SupportsDType[np.dtype[np.object_]], _ObjectCodes,]
-_DTypeLikeComplex_co = Union[_DTypeLikeBool, _DTypeLikeUInt, _DTypeLikeInt, _DTypeLikeFloat, _DTypeLikeComplex,]
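For context: the `DTypeLike` union modelled by this stub is the set of values that `np.dtype()` accepts at runtime. A minimal sketch of the main branches, using only the public NumPy API (nothing from the private stub itself):

    import numpy as np
    from numpy.typing import DTypeLike

    def as_dtype(spec: DTypeLike) -> np.dtype:
        # np.dtype() consumes every branch of the DTypeLike union
        return np.dtype(spec)

    as_dtype(np.float64)                           # scalar type object
    as_dtype("i8")                                 # character code (an _Int64Codes value)
    as_dtype(("u4", (2, 2)))                       # tuple form, one of the _VoidDTypeLike shapes
    as_dtype({"names": ["a"], "formats": ["f8"]})  # dict form, matching _DTypeDict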
diff --git a/typings/numpy/_typing/_extended_precision.pyi b/typings/numpy/_typing/_extended_precision.pyi
deleted file mode 100644
index 0e6aee8..0000000
--- a/typings/numpy/_typing/_extended_precision.pyi
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import numpy as np
-from . import _128Bit, _256Bit, _80Bit, _96Bit
-
-"""A module with platform-specific extended precision
-`numpy.number` subclasses.
-
-The subclasses are defined here (instead of ``__init__.pyi``) such
-that they can be imported conditionally via numpy's mypy plugin.
-"""
-uint128 = np.unsignedinteger[_128Bit]
-uint256 = np.unsignedinteger[_256Bit]
-int128 = np.signedinteger[_128Bit]
-int256 = np.signedinteger[_256Bit]
-float80 = np.floating[_80Bit]
-float96 = np.floating[_96Bit]
-float128 = np.floating[_128Bit]
-float256 = np.floating[_256Bit]
-complex160 = np.complexfloating[_80Bit, _80Bit]
-complex192 = np.complexfloating[_96Bit, _96Bit]
-complex256 = np.complexfloating[_128Bit, _128Bit]
-complex512 = np.complexfloating[_256Bit, _256Bit]
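For context: these extended-precision aliases exist only on platforms whose C `long double` actually provides the extra width, which is why they are exposed through the mypy plugin rather than declared unconditionally. A small sketch of the usual availability guard (an illustration, not code from this repository):

    import numpy as np

    # np.float128 / np.complex256 are missing on platforms (e.g. Windows/MSVC)
    # where long double is just a 64-bit double, so probe before using them.
    if hasattr(np, "float128"):
        third = np.float128(1) / 3
    else:
        third = np.float64(1) / 3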
diff --git a/typings/numpy/_typing/_nbit.pyi b/typings/numpy/_typing/_nbit.pyi
deleted file mode 100644
index 83e1efd..0000000
--- a/typings/numpy/_typing/_nbit.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-
-"""A module with the precisions of platform-specific `~numpy.number`s."""
-_NBitByte = Any
-_NBitShort = Any
-_NBitIntC = Any
-_NBitIntP = Any
-_NBitInt = Any
-_NBitLongLong = Any
-_NBitHalf = Any
-_NBitSingle = Any
-_NBitDouble = Any
-_NBitLongDouble = Any
diff --git a/typings/numpy/_typing/_nested_sequence.pyi b/typings/numpy/_typing/_nested_sequence.pyi
deleted file mode 100644
index 6cf0234..0000000
--- a/typings/numpy/_typing/_nested_sequence.pyi
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Iterator
-from typing import Any, Protocol, TypeVar, runtime_checkable
-
-"""A module containing the `_NestedSequence` protocol."""
-__all__ = ["_NestedSequence"]
-_T_co = TypeVar("_T_co", covariant=True)
-@runtime_checkable
-class _NestedSequence(Protocol[_T_co]):
- """A protocol for representing nested sequences.
-
- Warning
- -------
- `_NestedSequence` currently does not work in combination with typevars,
- *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
-
- See Also
- --------
- collections.abc.Sequence
- ABCs for read-only and mutable :term:`sequences`.
-
- Examples
- --------
- .. code-block:: python
-
- >>> from __future__ import annotations
-
- >>> from typing import TYPE_CHECKING
- >>> import numpy as np
- >>> from numpy._typing import _NestedSequence
-
- >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
- ... return np.asarray(seq).dtype
-
- >>> a = get_dtype([1.0])
- >>> b = get_dtype([[1.0]])
- >>> c = get_dtype([[[1.0]]])
- >>> d = get_dtype([[[[1.0]]]])
-
- >>> if TYPE_CHECKING:
- ... reveal_locals()
- ... # note: Revealed local types are:
- ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
- ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
- ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
- ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
-
- """
- def __len__(self, /) -> int:
- """Implement ``len(self)``."""
- ...
-
- def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]:
- """Implement ``self[x]``."""
- ...
-
- def __contains__(self, x: object, /) -> bool:
- """Implement ``x in self``."""
- ...
-
- def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
- """Implement ``iter(self)``."""
- ...
-
- def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
- """Implement ``reversed(self)``."""
- ...
-
- def count(self, value: Any, /) -> int:
- """Return the number of occurrences of `value`."""
- ...
-
- def index(self, value: Any, /) -> int:
- """Return the first index of `value`."""
- ...
-
-
-
diff --git a/typings/numpy/_typing/_scalars.pyi b/typings/numpy/_typing/_scalars.pyi
deleted file mode 100644
index b0cd431..0000000
--- a/typings/numpy/_typing/_scalars.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import numpy as np
-from typing import Any, Union
-
-_CharLike_co = Union[str, bytes]
-_BoolLike_co = Union[bool, np.bool_]
-_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger[Any]]
-_IntLike_co = Union[_BoolLike_co, int, np.integer[Any]]
-_FloatLike_co = Union[_IntLike_co, float, np.floating[Any]]
-_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating[Any, Any]]
-_TD64Like_co = Union[_IntLike_co, np.timedelta64]
-_NumberLike_co = Union[int, float, complex, np.number[Any], np.bool_]
-_ScalarLike_co = Union[int, float, complex, str, bytes, np.generic,]
-_VoidLike_co = Union[tuple[Any, ...], np.void]
diff --git a/typings/numpy/_typing/_shape.pyi b/typings/numpy/_typing/_shape.pyi
deleted file mode 100644
index e9121e8..0000000
--- a/typings/numpy/_typing/_shape.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Sequence
-from typing import SupportsIndex, Union
-
-_Shape = tuple[int, ...]
-_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
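For context: `_ShapeLike` is simply "one index or a sequence of indices", i.e. what NumPy shape parameters accept:

    import numpy as np

    np.zeros(3)          # a single SupportsIndex
    np.zeros((2, 3))     # a Sequence[SupportsIndex]
    np.zeros([2, 3, 4])  # lists qualify as sequences too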
diff --git a/typings/numpy/_typing/_ufunc.pyi b/typings/numpy/_typing/_ufunc.pyi
deleted file mode 100644
index 092817e..0000000
--- a/typings/numpy/_typing/_ufunc.pyi
+++ /dev/null
@@ -1,335 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, Generic, Literal, Protocol, SupportsIndex, TypeVar, overload
-from numpy import _CastingKind, _OrderKACF, ufunc
-from numpy.typing import NDArray
-from ._shape import _ShapeLike
-from ._scalars import _ScalarLike_co
-from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
-from ._dtype_like import DTypeLike
-
-"""A module with private type-check-only `numpy.ufunc` subclasses.
-
-The signatures of the ufuncs are too varied to reasonably type
-with a single class. So instead, `ufunc` has been expanded into
-four private subclasses, one for each combination of
-`~ufunc.nin` and `~ufunc.nout`.
-
-"""
-_T = TypeVar("_T")
-_2Tuple = tuple[_T, _T]
-_3Tuple = tuple[_T, _T, _T]
-_4Tuple = tuple[_T, _T, _T, _T]
-_NTypes = TypeVar("_NTypes", bound=int)
-_IDType = TypeVar("_IDType", bound=Any)
-_NameType = TypeVar("_NameType", bound=str)
-class _SupportsArrayUFunc(Protocol):
- def __array_ufunc__(self, ufunc: ufunc, method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"], *inputs: Any, **kwargs: Any) -> Any:
- ...
-
-
-
-class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
- @property
- def __name__(self) -> _NameType:
- ...
-
- @property
- def ntypes(self) -> _NTypes:
- ...
-
- @property
- def identity(self) -> _IDType:
- ...
-
- @property
- def nin(self) -> Literal[1]:
- ...
-
- @property
- def nout(self) -> Literal[1]:
- ...
-
- @property
- def nargs(self) -> Literal[2]:
- ...
-
- @property
- def signature(self) -> None:
- ...
-
- @property
- def reduce(self) -> None:
- ...
-
- @property
- def accumulate(self) -> None:
- ...
-
- @property
- def reduceat(self) -> None:
- ...
-
- @property
- def outer(self) -> None:
- ...
-
- @overload
- def __call__(self, __x1: _ScalarLike_co, out: None = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[None | str] = ..., extobj: list[Any] = ...) -> Any:
- ...
-
- @overload
- def __call__(self, __x1: ArrayLike, out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[None | str] = ..., extobj: list[Any] = ...) -> NDArray[Any]:
- ...
-
- @overload
- def __call__(self, __x1: _SupportsArrayUFunc, out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[None | str] = ..., extobj: list[Any] = ...) -> Any:
- ...
-
- def at(self, a: _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None:
- ...
-
-
-
-class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
- @property
- def __name__(self) -> _NameType:
- ...
-
- @property
- def ntypes(self) -> _NTypes:
- ...
-
- @property
- def identity(self) -> _IDType:
- ...
-
- @property
- def nin(self) -> Literal[2]:
- ...
-
- @property
- def nout(self) -> Literal[1]:
- ...
-
- @property
- def nargs(self) -> Literal[3]:
- ...
-
- @property
- def signature(self) -> None:
- ...
-
- @overload
- def __call__(self, __x1: _ScalarLike_co, __x2: _ScalarLike_co, out: None = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ...) -> Any:
- ...
-
- @overload
- def __call__(self, __x1: ArrayLike, __x2: ArrayLike, out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ...) -> NDArray[Any]:
- ...
-
- def at(self, a: NDArray[Any], indices: _ArrayLikeInt_co, b: ArrayLike, /) -> None:
- ...
-
- def reduce(self, array: ArrayLike, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None | NDArray[Any] = ..., keepdims: bool = ..., initial: Any = ..., where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
- def accumulate(self, array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., out: None | NDArray[Any] = ...) -> NDArray[Any]:
- ...
-
- def reduceat(self, array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., out: None | NDArray[Any] = ...) -> NDArray[Any]:
- ...
-
- @overload
- def outer(self, A: _ScalarLike_co, B: _ScalarLike_co, /, *, out: None = ..., where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ...) -> Any:
- ...
-
- @overload
- def outer(self, A: ArrayLike, B: ArrayLike, /, *, out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ...) -> NDArray[Any]:
- ...
-
-
-
-class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]):
- @property
- def __name__(self) -> _NameType:
- ...
-
- @property
- def ntypes(self) -> _NTypes:
- ...
-
- @property
- def identity(self) -> _IDType:
- ...
-
- @property
- def nin(self) -> Literal[1]:
- ...
-
- @property
- def nout(self) -> Literal[2]:
- ...
-
- @property
- def nargs(self) -> Literal[3]:
- ...
-
- @property
- def signature(self) -> None:
- ...
-
- @property
- def at(self) -> None:
- ...
-
- @property
- def reduce(self) -> None:
- ...
-
- @property
- def accumulate(self) -> None:
- ...
-
- @property
- def reduceat(self) -> None:
- ...
-
- @property
- def outer(self) -> None:
- ...
-
- @overload
- def __call__(self, __x1: _ScalarLike_co, __out1: None = ..., __out2: None = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ...) -> _2Tuple[Any]:
- ...
-
- @overload
- def __call__(self, __x1: ArrayLike, __out1: None | NDArray[Any] = ..., __out2: None | NDArray[Any] = ..., *, out: _2Tuple[NDArray[Any]] = ..., where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ...) -> _2Tuple[NDArray[Any]]:
- ...
-
- @overload
- def __call__(self, __x1: _SupportsArrayUFunc, __out1: None | NDArray[Any] = ..., __out2: None | NDArray[Any] = ..., *, out: _2Tuple[NDArray[Any]] = ..., where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ...) -> _2Tuple[Any]:
- ...
-
-
-
-class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]):
- @property
- def __name__(self) -> _NameType:
- ...
-
- @property
- def ntypes(self) -> _NTypes:
- ...
-
- @property
- def identity(self) -> _IDType:
- ...
-
- @property
- def nin(self) -> Literal[2]:
- ...
-
- @property
- def nout(self) -> Literal[2]:
- ...
-
- @property
- def nargs(self) -> Literal[4]:
- ...
-
- @property
- def signature(self) -> None:
- ...
-
- @property
- def at(self) -> None:
- ...
-
- @property
- def reduce(self) -> None:
- ...
-
- @property
- def accumulate(self) -> None:
- ...
-
- @property
- def reduceat(self) -> None:
- ...
-
- @property
- def outer(self) -> None:
- ...
-
- @overload
- def __call__(self, __x1: _ScalarLike_co, __x2: _ScalarLike_co, __out1: None = ..., __out2: None = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _4Tuple[None | str] = ..., extobj: list[Any] = ...) -> _2Tuple[Any]:
- ...
-
- @overload
- def __call__(self, __x1: ArrayLike, __x2: ArrayLike, __out1: None | NDArray[Any] = ..., __out2: None | NDArray[Any] = ..., *, out: _2Tuple[NDArray[Any]] = ..., where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _4Tuple[None | str] = ..., extobj: list[Any] = ...) -> _2Tuple[NDArray[Any]]:
- ...
-
-
-
-class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
- @property
- def __name__(self) -> _NameType:
- ...
-
- @property
- def ntypes(self) -> _NTypes:
- ...
-
- @property
- def identity(self) -> _IDType:
- ...
-
- @property
- def nin(self) -> Literal[2]:
- ...
-
- @property
- def nout(self) -> Literal[1]:
- ...
-
- @property
- def nargs(self) -> Literal[3]:
- ...
-
- @property
- def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]:
- ...
-
- @property
- def reduce(self) -> None:
- ...
-
- @property
- def accumulate(self) -> None:
- ...
-
- @property
- def reduceat(self) -> None:
- ...
-
- @property
- def outer(self) -> None:
- ...
-
- @property
- def at(self) -> None:
- ...
-
- @overload
- def __call__(self, __x1: ArrayLike, __x2: ArrayLike, out: None = ..., *, casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ..., axes: list[_2Tuple[SupportsIndex]] = ...) -> Any:
- ...
-
- @overload
- def __call__(self, __x1: ArrayLike, __x2: ArrayLike, out: NDArray[Any] | tuple[NDArray[Any]], *, casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., extobj: list[Any] = ..., axes: list[_2Tuple[SupportsIndex]] = ...) -> NDArray[Any]:
- ...
-
-
-
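For context: the four type-check-only subclasses above mirror attribute combinations on concrete ufuncs; NumPy's own stubs type `np.sin` as `_UFunc_Nin1_Nout1`, `np.add` as `_UFunc_Nin2_Nout1`, `np.divmod` as `_UFunc_Nin2_Nout2`, and `np.matmul` as `_GUFunc_Nin2_Nout1`. A quick runtime illustration of the properties the stubs pin down:

    import numpy as np

    print(np.add.nin, np.add.nout, np.add.nargs)  # 2 1 3
    print(np.add.identity)                        # 0
    print(np.add.reduce([1, 2, 3]))               # 6
    print(np.add.outer([1, 2], [10, 20]))         # [[11 21] [12 22]]
    print(np.matmul.signature)                    # (n?,k),(k,m?)->(n?,m?)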
diff --git a/typings/numpy/_utils/__init__.pyi b/typings/numpy/_utils/__init__.pyi
deleted file mode 100644
index 4dac46e..0000000
--- a/typings/numpy/_utils/__init__.pyi
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._convertions import asbytes, asunicode
-
-"""
-This is a module for defining private helpers which do not depend on the
-rest of NumPy.
-
-Everything in here must be self-contained so that it can be
-imported anywhere else without creating circular imports.
-If a utility requires the import of NumPy, it probably belongs
-in ``numpy.core``.
-"""
-def set_module(module): # -> (func: Unknown) -> Unknown:
- """Private decorator for overriding __module__ on a function or class.
-
- Example usage::
-
- @set_module('numpy')
- def example():
- pass
-
- assert example.__module__ == 'numpy'
- """
- ...
-
diff --git a/typings/numpy/_utils/_convertions.pyi b/typings/numpy/_utils/_convertions.pyi
deleted file mode 100644
index e16a31a..0000000
--- a/typings/numpy/_utils/_convertions.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-"""
-A set of methods retained from the np.compat module that
-are still used across the codebase.
-"""
-__all__ = ["asunicode", "asbytes"]
-def asunicode(s): # -> str:
- ...
-
-def asbytes(s): # -> bytes:
- ...
-
diff --git a/typings/numpy/_utils/_inspect.pyi b/typings/numpy/_utils/_inspect.pyi
deleted file mode 100644
index 29a7aa3..0000000
--- a/typings/numpy/_utils/_inspect.pyi
+++ /dev/null
@@ -1,123 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-"""Subset of inspect module from upstream python
-
-We use this instead of upstream because upstream inspect is slow to import, and
-significantly contributes to numpy import times. Importing this copy has almost
-no overhead.
-
-"""
-__all__ = ['getargspec', 'formatargspec']
-def ismethod(object): # -> bool:
- """Return true if the object is an instance method.
-
- Instance method objects provide these attributes:
- __doc__ documentation string
- __name__ name with which this method was defined
- im_class class object in which this method belongs
- im_func function object containing implementation of method
- im_self instance to which this method is bound, or None
-
- """
- ...
-
-def isfunction(object): # -> bool:
- """Return true if the object is a user-defined function.
-
- Function objects provide these attributes:
- __doc__ documentation string
- __name__ name with which this function was defined
- func_code code object containing compiled function bytecode
- func_defaults tuple of any default values for arguments
- func_doc (same as __doc__)
- func_globals global namespace in which this function was defined
- func_name (same as __name__)
-
- """
- ...
-
-def iscode(object): # -> bool:
- """Return true if the object is a code object.
-
- Code objects provide these attributes:
- co_argcount number of arguments (not including * or ** args)
- co_code string of raw compiled bytecode
- co_consts tuple of constants used in the bytecode
- co_filename name of file in which this code object was created
- co_firstlineno number of first line in Python source code
- co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
- co_lnotab encoded mapping of line numbers to bytecode indices
- co_name name with which this code object was defined
- co_names tuple of names of local variables
- co_nlocals number of local variables
- co_stacksize virtual machine stack space required
- co_varnames tuple of names of arguments and local variables
-
- """
- ...
-
-def getargs(co): # -> tuple[list[Unknown], Unknown | None, Unknown | None]:
- """Get information about the arguments accepted by a code object.
-
- Three things are returned: (args, varargs, varkw), where 'args' is
- a list of argument names (possibly containing nested lists), and
- 'varargs' and 'varkw' are the names of the * and ** arguments or None.
-
- """
- ...
-
-def getargspec(func): # -> tuple[list[Unknown], Unknown | None, Unknown | None, Unknown]:
- """Get the names and default values of a function's arguments.
-
- A tuple of four things is returned: (args, varargs, varkw, defaults).
- 'args' is a list of the argument names (it may contain nested lists).
- 'varargs' and 'varkw' are the names of the * and ** arguments or None.
- 'defaults' is an n-tuple of the default values of the last n arguments.
-
- """
- ...
-
-def getargvalues(frame): # -> tuple[list[Unknown], Unknown | None, Unknown | None, Unknown]:
- """Get information about arguments passed into a particular frame.
-
- A tuple of four things is returned: (args, varargs, varkw, locals).
- 'args' is a list of the argument names (it may contain nested lists).
- 'varargs' and 'varkw' are the names of the * and ** arguments or None.
- 'locals' is the locals dictionary of the given frame.
-
- """
- ...
-
-def joinseq(seq): # -> str:
- ...
-
-def strseq(object, convert, join=...):
- """Recursively walk a sequence, stringifying each element.
-
- """
- ...
-
-def formatargspec(args, varargs=..., varkw=..., defaults=..., formatarg=..., formatvarargs=..., formatvarkw=..., formatvalue=..., join=...): # -> LiteralString:
- """Format an argument spec from the 4 values returned by getargspec.
-
- The first four arguments are (args, varargs, varkw, defaults). The
- other four arguments are the corresponding optional formatting functions
- that are called to turn names and values into strings. The ninth
- argument is an optional function to format the sequence of arguments.
-
- """
- ...
-
-def formatargvalues(args, varargs, varkw, locals, formatarg=..., formatvarargs=..., formatvarkw=..., formatvalue=..., join=...): # -> LiteralString:
- """Format an argument spec from the 4 values returned by getargvalues.
-
- The first four arguments are (args, varargs, varkw, locals). The
- next four arguments are the corresponding optional formatting functions
- that are called to turn names and values into strings. The ninth
- argument is an optional function to format the sequence of arguments.
-
- """
- ...
-
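For context: `getargspec`/`formatargspec` round-trip a signature the way the old stdlib `inspect` functions did. A small sketch, assuming the private `numpy._utils._inspect` module is importable at that path:

    from numpy._utils._inspect import formatargspec, getargspec

    def example(a, b=1, *args, **kwargs):
        pass

    args, varargs, varkw, defaults = getargspec(example)
    print(formatargspec(args, varargs, varkw, defaults))  # (a, b=1, *args, **kwargs)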
diff --git a/typings/numpy/array_api/__init__.pyi b/typings/numpy/array_api/__init__.pyi
deleted file mode 100644
index b47d549..0000000
--- a/typings/numpy/array_api/__init__.pyi
+++ /dev/null
@@ -1,152 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import warnings
-from ._constants import e, inf, nan, pi
-from ._creation_functions import arange, asarray, empty, empty_like, eye, from_dlpack, full, full_like, linspace, meshgrid, ones, ones_like, tril, triu, zeros, zeros_like
-from ._data_type_functions import astype, broadcast_arrays, broadcast_to, can_cast, finfo, iinfo, isdtype, result_type
-from ._dtypes import bool, complex128, complex64, float32, float64, int16, int32, int64, int8, uint16, uint32, uint64, uint8
-from ._elementwise_functions import abs, acos, acosh, add, asin, asinh, atan, atan2, atanh, bitwise_and, bitwise_invert, bitwise_left_shift, bitwise_or, bitwise_right_shift, bitwise_xor, ceil, conj, cos, cosh, divide, equal, exp, expm1, floor, floor_divide, greater, greater_equal, imag, isfinite, isinf, isnan, less, less_equal, log, log10, log1p, log2, logaddexp, logical_and, logical_not, logical_or, logical_xor, multiply, negative, not_equal, positive, pow, real, remainder, round, sign, sin, sinh, sqrt, square, subtract, tan, tanh, trunc
-from ._indexing_functions import take
-from . import linalg
-from .linalg import matmul, matrix_transpose, tensordot, vecdot
-from ._manipulation_functions import concat, expand_dims, flip, permute_dims, reshape, roll, squeeze, stack
-from ._searching_functions import argmax, argmin, nonzero, where
-from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values
-from ._sorting_functions import argsort, sort
-from ._statistical_functions import max, mean, min, prod, std, sum, var
-from ._utility_functions import all, any
-
-"""
-A NumPy sub-namespace that conforms to the Python array API standard.
-
-This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
-is still considered experimental, and will issue a warning when imported.
-
-This is a proof-of-concept namespace that wraps the corresponding NumPy
-functions to give a conforming implementation of the Python array API standard
-(https://data-apis.github.io/array-api/latest/). The standard is currently in
-an RFC phase and comments on it are both welcome and encouraged. Comments
-should be made either at https://github.com/data-apis/array-api or at
-https://github.com/data-apis/consortium-feedback/discussions.
-
-NumPy already follows the proposed spec for the most part, so this module
-serves mostly as a thin wrapper around it. However, NumPy also implements a
-lot of behavior that is not included in the spec, so this serves as a
-restricted subset of the API. Only those functions that are part of the spec
-are included in this namespace, and all functions are given with the exact
-signature given in the spec, including the use of position-only arguments, and
-omitting any extra keyword arguments implemented by NumPy but not part of the
-spec. The behavior of some functions is also modified from the NumPy behavior
-to conform to the standard. Note that the underlying array object itself is
-wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
-is implemented in pure Python with no C extensions.
-
-The array API spec is designed as a "minimal API subset" and explicitly allows
-libraries to include behaviors not specified by it. But users of this module
-that intend to write portable code should be aware that only those behaviors
-that are listed in the spec are guaranteed to be implemented across libraries.
-Consequently, the NumPy implementation was chosen to be both conforming and
-minimal, so that users can use this implementation of the array API namespace
-and be sure that behaviors that it defines will be available in conforming
-namespaces from other libraries.
-
-A few notes about the current state of this submodule:
-
-- There is a test suite that tests modules against the array API standard at
- https://github.com/data-apis/array-api-tests. The test suite is still a work
- in progress, but the existing tests pass on this module, with a few
- exceptions:
-
- - DLPack support (see https://github.com/data-apis/array-api/pull/106) is
- not included here, as it requires a full implementation in NumPy proper
- first.
-
- The test suite is not yet complete, and even the tests that exist are not
- guaranteed to give a comprehensive coverage of the spec. Therefore, when
- reviewing and using this submodule, you should refer to the standard
- documents themselves. There are some tests in numpy.array_api.tests, but
- they primarily focus on things that are not tested by the official array API
- test suite.
-
-- There is a custom array object, numpy.array_api.Array, which is returned by
- all functions in this module. All functions in the array API namespace
- implicitly assume that they will only receive this object as input. The only
- way to create instances of this object is to use one of the array creation
- functions. It does not have a public constructor on the object itself. The
- object is a small wrapper class around numpy.ndarray. The main purpose of it
- is to restrict the namespace of the array object to only those dtypes and
- only those methods that are required by the spec, as well as to limit/change
- certain behavior that differs in the spec. In particular:
-
- - The array API namespace does not have scalar objects, only 0-D arrays.
- Operations on Array that would create a scalar in NumPy create a 0-D
- array.
-
- - Indexing: Only a subset of indices supported by NumPy are required by the
- spec. The Array object restricts indexing to only allow those types of
- indices that are required by the spec. See the docstring of the
- numpy.array_api.Array._validate_indices helper function for more
- information.
-
- - Type promotion: Some type promotion rules are different in the spec. In
- particular, the spec does not have any value-based casting. The spec also
- does not require cross-kind casting, like integer -> floating-point. Only
- those promotions that are explicitly required by the array API
- specification are allowed in this module. See NEP 47 for more info.
-
- - Functions do not automatically call asarray() on their input, and will not
- work if the input type is not Array. The exception is array creation
- functions, and Python operators on the Array object, which accept Python
- scalars of the same type as the array dtype.
-
-- All functions include type annotations, corresponding to those given in the
- spec (see _typing.py for definitions of some custom types). These do not
- currently fully pass mypy due to some limitations in mypy.
-
-- Dtype objects are just the NumPy dtype objects, e.g., float64 =
- np.dtype('float64'). The spec does not require any behavior on these dtype
- objects other than that they be accessible by name and be comparable by
- equality, but it was considered too much extra complexity to create custom
- objects to represent dtypes.
-
-- All places where the implementations in this submodule are known to deviate
- from their corresponding functions in NumPy are marked with "# Note:"
- comments.
-
-Still TODO in this module are:
-
-- DLPack support for numpy.ndarray is still in progress. See
- https://github.com/numpy/numpy/pull/19083.
-
-- The copy=False keyword argument to asarray() is not yet implemented. This
- requires support in numpy.asarray() first.
-
-- Some functions are not yet fully tested in the array API test suite, and may
- require updates that are not yet known until the tests are written.
-
-- The spec is still in an RFC phase and may still have minor updates, which
- will need to be reflected here.
-
-- Complex number support in array API spec is planned but not yet finalized,
- as are the fft extension and certain linear algebra functions such as eig
- that require complex dtypes.
-
-"""
-__array_api_version__ = ...
-__all__ = ["__array_api_version__"]
-__all__ += ["e", "inf", "nan", "pi"]
-__all__ += ["asarray", "arange", "empty", "empty_like", "eye", "from_dlpack", "full", "full_like", "linspace", "meshgrid", "ones", "ones_like", "tril", "triu", "zeros", "zeros_like"]
-__all__ += ["astype", "broadcast_arrays", "broadcast_to", "can_cast", "finfo", "iinfo", "result_type"]
-__all__ += ["int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float32", "float64", "bool"]
-__all__ += ["abs", "acos", "acosh", "add", "asin", "asinh", "atan", "atan2", "atanh", "bitwise_and", "bitwise_left_shift", "bitwise_invert", "bitwise_or", "bitwise_right_shift", "bitwise_xor", "ceil", "cos", "cosh", "divide", "equal", "exp", "expm1", "floor", "floor_divide", "greater", "greater_equal", "isfinite", "isinf", "isnan", "less", "less_equal", "log", "log1p", "log2", "log10", "logaddexp", "logical_and", "logical_not", "logical_or", "logical_xor", "multiply", "negative", "not_equal", "positive", "pow", "remainder", "round", "sign", "sin", "sinh", "square", "sqrt", "subtract", "tan", "tanh", "trunc"]
-__all__ += ["take"]
-__all__ += ["linalg"]
-__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"]
-__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"]
-__all__ += ["argmax", "argmin", "nonzero", "where"]
-__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"]
-__all__ += ["argsort", "sort"]
-__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
-__all__ += ["all", "any"]
diff --git a/typings/numpy/array_api/_array_object.pyi b/typings/numpy/array_api/_array_object.pyi
deleted file mode 100644
index 5504c83..0000000
--- a/typings/numpy/array_api/_array_object.pyi
+++ /dev/null
@@ -1,476 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import types
-import numpy.typing as npt
-import numpy as np
-from enum import IntEnum
-from typing import Any, Optional, TYPE_CHECKING, Tuple, Union
-from ._typing import Any, Device, Dtype, PyCapsule
-
-"""
-Wrapper class around the ndarray object for the array API standard.
-
-The array API standard defines some behaviors differently than ndarray, in
-particular, type promotion rules are different (the standard has no
-value-based casting). The standard also specifies a more limited subset of
-array methods and functionalities than are implemented on ndarray. Since the
-goal of the array_api namespace is to be a minimal implementation of the array
-API standard, we need to define a separate wrapper class for the array_api
-namespace.
-
-The standard compliant class is only a wrapper class. It is *not* a subclass
-of ndarray.
-"""
-if TYPE_CHECKING:
- ...
-class Array:
- """
- n-d array object for the array API namespace.
-
- See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
- information.
-
- This is a wrapper around numpy.ndarray that restricts the usage to only
- those things that are required by the array API namespace. Note,
- attributes on this object that start with a single underscore are not part
- of the API specification and should only be used internally. This object
- should not be constructed directly. Rather, use one of the creation
- functions, such as asarray().
-
- """
- _array: np.ndarray[Any, Any]
- def __new__(cls, *args, **kwargs):
- ...
-
- def __str__(self: Array, /) -> str:
- """
- Performs the operation __str__.
- """
- ...
-
- def __repr__(self: Array, /) -> str:
- """
- Performs the operation __repr__.
- """
- ...
-
- def __array__(self, dtype: None | np.dtype[Any] = ...) -> npt.NDArray[Any]:
- """
- Warning: this method is NOT part of the array API spec. Implementers
- of other libraries need not include it, and users should not assume it
- will be present in other implementations.
-
- """
- ...
-
- def __abs__(self: Array, /) -> Array:
- """
- Performs the operation __abs__.
- """
- ...
-
- def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __add__.
- """
- ...
-
- def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __and__.
- """
- ...
-
- def __array_namespace__(self: Array, /, *, api_version: Optional[str] = ...) -> types.ModuleType:
- ...
-
- def __bool__(self: Array, /) -> bool:
- """
- Performs the operation __bool__.
- """
- ...
-
- def __complex__(self: Array, /) -> complex:
- """
- Performs the operation __complex__.
- """
- ...
-
- def __dlpack__(self: Array, /, *, stream: None = ...) -> PyCapsule:
- """
- Performs the operation __dlpack__.
- """
- ...
-
- def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
- """
- Performs the operation __dlpack_device__.
- """
- ...
-
- def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
- """
- Performs the operation __eq__.
- """
- ...
-
- def __float__(self: Array, /) -> float:
- """
- Performs the operation __float__.
- """
- ...
-
- def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __floordiv__.
- """
- ...
-
- def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __ge__.
- """
- ...
-
- def __getitem__(self: Array, key: Union[int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array], /) -> Array:
- """
- Performs the operation __getitem__.
- """
- ...
-
- def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __gt__.
- """
- ...
-
- def __int__(self: Array, /) -> int:
- """
- Performs the operation __int__.
- """
- ...
-
- def __index__(self: Array, /) -> int:
- """
- Performs the operation __index__.
- """
- ...
-
- def __invert__(self: Array, /) -> Array:
- """
- Performs the operation __invert__.
- """
- ...
-
- def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __le__.
- """
- ...
-
- def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
- """
- Performs the operation __lshift__.
- """
- ...
-
- def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __lt__.
- """
- ...
-
- def __matmul__(self: Array, other: Array, /) -> Array:
- """
- Performs the operation __matmul__.
- """
- ...
-
- def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __mod__.
- """
- ...
-
- def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __mul__.
- """
- ...
-
- def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
- """
- Performs the operation __ne__.
- """
- ...
-
- def __neg__(self: Array, /) -> Array:
- """
- Performs the operation __neg__.
- """
- ...
-
- def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __or__.
- """
- ...
-
- def __pos__(self: Array, /) -> Array:
- """
- Performs the operation __pos__.
- """
- ...
-
- def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __pow__.
- """
- ...
-
- def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
- """
- Performs the operation __rshift__.
- """
- ...
-
- def __setitem__(self, key: Union[int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array], value: Union[int, float, bool, Array], /) -> None:
- """
- Performs the operation __setitem__.
- """
- ...
-
- def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __sub__.
- """
- ...
-
- def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
- """
- Performs the operation __truediv__.
- """
- ...
-
- def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __xor__.
- """
- ...
-
- def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __iadd__.
- """
- ...
-
- def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __radd__.
- """
- ...
-
- def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __iand__.
- """
- ...
-
- def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __rand__.
- """
- ...
-
- def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __ifloordiv__.
- """
- ...
-
- def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __rfloordiv__.
- """
- ...
-
- def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
- """
- Performs the operation __ilshift__.
- """
- ...
-
- def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
- """
- Performs the operation __rlshift__.
- """
- ...
-
- def __imatmul__(self: Array, other: Array, /) -> Array:
- """
- Performs the operation __imatmul__.
- """
- ...
-
- def __rmatmul__(self: Array, other: Array, /) -> Array:
- """
- Performs the operation __rmatmul__.
- """
- ...
-
- def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __imod__.
- """
- ...
-
- def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __rmod__.
- """
- ...
-
- def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __imul__.
- """
- ...
-
- def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __rmul__.
- """
- ...
-
- def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __ior__.
- """
- ...
-
- def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __ror__.
- """
- ...
-
- def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __ipow__.
- """
- ...
-
- def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __rpow__.
- """
- ...
-
- def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
- """
- Performs the operation __irshift__.
- """
- ...
-
- def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
- """
- Performs the operation __rrshift__.
- """
- ...
-
- def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __isub__.
- """
- ...
-
- def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
- """
- Performs the operation __rsub__.
- """
- ...
-
- def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
- """
- Performs the operation __itruediv__.
- """
- ...
-
- def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
- """
- Performs the operation __rtruediv__.
- """
- ...
-
- def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __ixor__.
- """
- ...
-
- def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
- """
- Performs the operation __rxor__.
- """
- ...
-
- def to_device(self: Array, device: Device, /, stream: None = ...) -> Array:
- ...
-
- @property
- def dtype(self) -> Dtype:
- """
- Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.
-
- See its docstring for more information.
- """
- ...
-
- @property
- def device(self) -> Device:
- ...
-
- @property
- def mT(self) -> Array:
- ...
-
- @property
- def ndim(self) -> int:
- """
- Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.
-
- See its docstring for more information.
- """
- ...
-
- @property
- def shape(self) -> Tuple[int, ...]:
- """
- Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.
-
- See its docstring for more information.
- """
- ...
-
- @property
- def size(self) -> int:
- """
- Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.
-
- See its docstring for more information.
- """
- ...
-
- @property
- def T(self) -> Array:
- """
- Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
-
- See its docstring for more information.
- """
- ...
-
-
-
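For context: two behaviours of this `Array` wrapper that differ from `ndarray` and commonly surprise users: every result is a 0-D array rather than a scalar, and Python scalars only combine with arrays of a matching kind. A sketch (same NumPy 1.22 to 1.26 caveat as above):

    from numpy import array_api as xp

    a = xp.asarray([1, 2, 3])   # an integer Array
    a + 1                       # fine: Python int with an integer array
    try:
        a + 1.0                 # rejected: float scalar with an integer array
    except TypeError as exc:
        print(exc)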
diff --git a/typings/numpy/array_api/_constants.pyi b/typings/numpy/array_api/_constants.pyi
deleted file mode 100644
index 8c0e4f8..0000000
--- a/typings/numpy/array_api/_constants.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-e = ...
-inf = ...
-nan = ...
-pi = ...
diff --git a/typings/numpy/array_api/_creation_functions.pyi b/typings/numpy/array_api/_creation_functions.pyi
deleted file mode 100644
index 1285930..0000000
--- a/typings/numpy/array_api/_creation_functions.pyi
+++ /dev/null
@@ -1,133 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import numpy as np
-from typing import List, Optional, TYPE_CHECKING, Tuple, Union
-from ._typing import Array, Device, Dtype, NestedSequence, SupportsBufferProtocol
-
-if TYPE_CHECKING:
- ...
-def asarray(obj: Union[Array, bool, int, float, NestedSequence[bool | int | float], SupportsBufferProtocol,], /, *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ..., copy: Optional[Union[bool, np._CopyMode]] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.
-
- See its docstring for more information.
- """
- ...
-
-def arange(start: Union[int, float], /, stop: Optional[Union[int, float]] = ..., step: Union[int, float] = ..., *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arange <numpy.arange>`.
-
- See its docstring for more information.
- """
- ...
-
-def empty(shape: Union[int, Tuple[int, ...]], *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.
-
- See its docstring for more information.
- """
- ...
-
-def empty_like(x: Array, /, *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.empty_like <numpy.empty_like>`.
-
- See its docstring for more information.
- """
- ...
-
-def eye(n_rows: int, n_cols: Optional[int] = ..., /, *, k: int = ..., dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`.
-
- See its docstring for more information.
- """
- ...
-
-def from_dlpack(x: object, /) -> Array:
- ...
-
-def full(shape: Union[int, Tuple[int, ...]], fill_value: Union[int, float], *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.full <numpy.full>`.
-
- See its docstring for more information.
- """
- ...
-
-def full_like(x: Array, /, fill_value: Union[int, float], *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.full_like <numpy.full_like>`.
-
- See its docstring for more information.
- """
- ...
-
-def linspace(start: Union[int, float], stop: Union[int, float], /, num: int, *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ..., endpoint: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linspace <numpy.linspace>`.
-
- See its docstring for more information.
- """
- ...
-
-def meshgrid(*arrays: Array, indexing: str = ...) -> List[Array]:
- """
- Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.
-
- See its docstring for more information.
- """
- ...
-
-def ones(shape: Union[int, Tuple[int, ...]], *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.ones <numpy.ones>`.
-
- See its docstring for more information.
- """
- ...
-
-def ones_like(x: Array, /, *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.ones_like <numpy.ones_like>`.
-
- See its docstring for more information.
- """
- ...
-
-def tril(x: Array, /, *, k: int = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.tril <numpy.tril>`.
-
- See its docstring for more information.
- """
- ...
-
-def triu(x: Array, /, *, k: int = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.triu <numpy.triu>`.
-
- See its docstring for more information.
- """
- ...
-
-def zeros(shape: Union[int, Tuple[int, ...]], *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.zeros <numpy.zeros>`.
-
- See its docstring for more information.
- """
- ...
-
-def zeros_like(x: Array, /, *, dtype: Optional[Dtype] = ..., device: Optional[Device] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/_data_type_functions.pyi b/typings/numpy/array_api/_data_type_functions.pyi
deleted file mode 100644
index a08e6ff..0000000
--- a/typings/numpy/array_api/_data_type_functions.pyi
+++ /dev/null
@@ -1,92 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from dataclasses import dataclass
-from typing import List, TYPE_CHECKING, Tuple, Union
-from ._typing import Dtype
-
-if TYPE_CHECKING:
- ...
-def astype(x: Array, dtype: Dtype, /, *, copy: bool = ...) -> Array:
- ...
-
-def broadcast_arrays(*arrays: Array) -> List[Array]:
- """
- Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.
-
- See its docstring for more information.
- """
- ...
-
-def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`.
-
- See its docstring for more information.
- """
- ...
-
-def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
- """
- Array API compatible wrapper for :py:func:`np.can_cast <numpy.can_cast>`.
-
- See its docstring for more information.
- """
- ...
-
-@dataclass
-class finfo_object:
- bits: int
- eps: float
- max: float
- min: float
- smallest_normal: float
- dtype: Dtype
- ...
-
-
-@dataclass
-class iinfo_object:
- bits: int
- max: int
- min: int
- dtype: Dtype
- ...
-
-
-def finfo(type: Union[Dtype, Array], /) -> finfo_object:
- """
- Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.
-
- See its docstring for more information.
- """
- ...
-
-def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
- """
- Array API compatible wrapper for :py:func:`np.iinfo <numpy.iinfo>`.
-
- See its docstring for more information.
- """
- ...
-
-def isdtype(dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]]) -> bool:
- """
- Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
-
- See
- https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
- for more details
- """
- ...
-
-def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype:
- """
- Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.
-
- See its docstring for more information.
- """
- ...
-
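For context: these data-type helpers follow the standard's promotion table rather than NumPy's value-based casting. A few concrete results (again under NumPy 1.22 to 1.26):

    from numpy import array_api as xp

    print(xp.can_cast(xp.int32, xp.int64))    # True
    print(xp.can_cast(xp.int64, xp.int32))    # False
    print(xp.result_type(xp.int8, xp.uint8))  # int16, per the spec's promotion table
    print(xp.finfo(xp.float32).eps)           # ~1.19e-07
    print(xp.iinfo(xp.int16).max)             # 32767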
diff --git a/typings/numpy/array_api/_dtypes.pyi b/typings/numpy/array_api/_dtypes.pyi
deleted file mode 100644
index 405ab08..0000000
--- a/typings/numpy/array_api/_dtypes.pyi
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-int8 = ...
-int16 = ...
-int32 = ...
-int64 = ...
-uint8 = ...
-uint16 = ...
-uint32 = ...
-uint64 = ...
-float32 = ...
-float64 = ...
-complex64 = ...
-complex128 = ...
-bool = ...
-_all_dtypes = ...
-_boolean_dtypes = ...
-_real_floating_dtypes = ...
-_floating_dtypes = ...
-_complex_floating_dtypes = ...
-_integer_dtypes = ...
-_signed_integer_dtypes = ...
-_unsigned_integer_dtypes = ...
-_integer_or_boolean_dtypes = ...
-_real_numeric_dtypes = ...
-_numeric_dtypes = ...
-_dtype_categories = ...
-_promotion_table = ...
diff --git a/typings/numpy/array_api/_elementwise_functions.pyi b/typings/numpy/array_api/_elementwise_functions.pyi
deleted file mode 100644
index 7958ffa..0000000
--- a/typings/numpy/array_api/_elementwise_functions.pyi
+++ /dev/null
@@ -1,478 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-
-def abs(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.abs `.
-
- See its docstring for more information.
- """
- ...
-
-def acos(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arccos `.
-
- See its docstring for more information.
- """
- ...
-
-def acosh(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arccosh `.
-
- See its docstring for more information.
- """
- ...
-
-def add(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.add `.
-
- See its docstring for more information.
- """
- ...
-
-def asin(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arcsin `.
-
- See its docstring for more information.
- """
- ...
-
-def asinh(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arcsinh `.
-
- See its docstring for more information.
- """
- ...
-
-def atan(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arctan `.
-
- See its docstring for more information.
- """
- ...
-
-def atan2(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arctan2 `.
-
- See its docstring for more information.
- """
- ...
-
-def atanh(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.arctanh `.
-
- See its docstring for more information.
- """
- ...
-
-def bitwise_and(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.bitwise_and `.
-
- See its docstring for more information.
- """
- ...
-
-def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.left_shift `.
-
- See its docstring for more information.
- """
- ...
-
-def bitwise_invert(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.invert `.
-
- See its docstring for more information.
- """
- ...
-
-def bitwise_or(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.bitwise_or `.
-
- See its docstring for more information.
- """
- ...
-
-def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.right_shift `.
-
- See its docstring for more information.
- """
- ...
-
-def bitwise_xor(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.bitwise_xor `.
-
- See its docstring for more information.
- """
- ...
-
-def ceil(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.ceil `.
-
- See its docstring for more information.
- """
- ...
-
-def conj(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.conj `.
-
- See its docstring for more information.
- """
- ...
-
-def cos(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.cos `.
-
- See its docstring for more information.
- """
- ...
-
-def cosh(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.cosh `.
-
- See its docstring for more information.
- """
- ...
-
-def divide(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.divide `.
-
- See its docstring for more information.
- """
- ...
-
-def equal(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.equal `.
-
- See its docstring for more information.
- """
- ...
-
-def exp(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.exp `.
-
- See its docstring for more information.
- """
- ...
-
-def expm1(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.expm1 `.
-
- See its docstring for more information.
- """
- ...
-
-def floor(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.floor `.
-
- See its docstring for more information.
- """
- ...
-
-def floor_divide(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.floor_divide `.
-
- See its docstring for more information.
- """
- ...
-
-def greater(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.greater `.
-
- See its docstring for more information.
- """
- ...
-
-def greater_equal(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.greater_equal `.
-
- See its docstring for more information.
- """
- ...
-
-def imag(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.imag `.
-
- See its docstring for more information.
- """
- ...
-
-def isfinite(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.isfinite `.
-
- See its docstring for more information.
- """
- ...
-
-def isinf(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.isinf `.
-
- See its docstring for more information.
- """
- ...
-
-def isnan(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.isnan `.
-
- See its docstring for more information.
- """
- ...
-
-def less(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.less `.
-
- See its docstring for more information.
- """
- ...
-
-def less_equal(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.less_equal `.
-
- See its docstring for more information.
- """
- ...
-
-def log(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.log `.
-
- See its docstring for more information.
- """
- ...
-
-def log1p(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.log1p `.
-
- See its docstring for more information.
- """
- ...
-
-def log2(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.log2 `.
-
- See its docstring for more information.
- """
- ...
-
-def log10(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.log10 `.
-
- See its docstring for more information.
- """
- ...
-
-def logaddexp(x1: Array, x2: Array) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.logaddexp `.
-
- See its docstring for more information.
- """
- ...
-
-def logical_and(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.logical_and `.
-
- See its docstring for more information.
- """
- ...
-
-def logical_not(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.logical_not `.
-
- See its docstring for more information.
- """
- ...
-
-def logical_or(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.logical_or `.
-
- See its docstring for more information.
- """
- ...
-
-def logical_xor(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.logical_xor `.
-
- See its docstring for more information.
- """
- ...
-
-def multiply(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.multiply `.
-
- See its docstring for more information.
- """
- ...
-
-def negative(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.negative `.
-
- See its docstring for more information.
- """
- ...
-
-def not_equal(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.not_equal `.
-
- See its docstring for more information.
- """
- ...
-
-def positive(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.positive `.
-
- See its docstring for more information.
- """
- ...
-
-def pow(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.power `.
-
- See its docstring for more information.
- """
- ...
-
-def real(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.real `.
-
- See its docstring for more information.
- """
- ...
-
-def remainder(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.remainder `.
-
- See its docstring for more information.
- """
- ...
-
-def round(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.round `.
-
- See its docstring for more information.
- """
- ...
-
-def sign(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.sign `.
-
- See its docstring for more information.
- """
- ...
-
-def sin(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.sin `.
-
- See its docstring for more information.
- """
- ...
-
-def sinh(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.sinh `.
-
- See its docstring for more information.
- """
- ...
-
-def square(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.square `.
-
- See its docstring for more information.
- """
- ...
-
-def sqrt(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.sqrt `.
-
- See its docstring for more information.
- """
- ...
-
-def subtract(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.subtract `.
-
- See its docstring for more information.
- """
- ...
-
-def tan(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.tan `.
-
- See its docstring for more information.
- """
- ...
-
-def tanh(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.tanh `.
-
- See its docstring for more information.
- """
- ...
-
-def trunc(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.trunc `.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/_indexing_functions.pyi b/typings/numpy/array_api/_indexing_functions.pyi
deleted file mode 100644
index 0b94dbe..0000000
--- a/typings/numpy/array_api/_indexing_functions.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from typing import Optional
-
-def take(x: Array, indices: Array, /, *, axis: Optional[int] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.take `.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/_manipulation_functions.pyi b/typings/numpy/array_api/_manipulation_functions.pyi
deleted file mode 100644
index 383f2ff..0000000
--- a/typings/numpy/array_api/_manipulation_functions.pyi
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from typing import List, Optional, Tuple, Union
-
-def concat(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.concatenate `.
-
- See its docstring for more information.
- """
- ...
-
-def expand_dims(x: Array, /, *, axis: int) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.expand_dims `.
-
- See its docstring for more information.
- """
- ...
-
-def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.flip `.
-
- See its docstring for more information.
- """
- ...
-
-def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.transpose `.
-
- See its docstring for more information.
- """
- ...
-
-def reshape(x: Array, /, shape: Tuple[int, ...], *, copy: Optional[bool] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.reshape `.
-
- See its docstring for more information.
- """
- ...
-
-def roll(x: Array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.roll `.
-
- See its docstring for more information.
- """
- ...
-
-def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.squeeze `.
-
- See its docstring for more information.
- """
- ...
-
-def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.stack `.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/_searching_functions.pyi b/typings/numpy/array_api/_searching_functions.pyi
deleted file mode 100644
index caf870a..0000000
--- a/typings/numpy/array_api/_searching_functions.pyi
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from typing import Optional, Tuple
-
-def argmax(x: Array, /, *, axis: Optional[int] = ..., keepdims: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.argmax `.
-
- See its docstring for more information.
- """
- ...
-
-def argmin(x: Array, /, *, axis: Optional[int] = ..., keepdims: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.argmin `.
-
- See its docstring for more information.
- """
- ...
-
-def nonzero(x: Array, /) -> Tuple[Array, ...]:
- """
- Array API compatible wrapper for :py:func:`np.nonzero `.
-
- See its docstring for more information.
- """
- ...
-
-def where(condition: Array, x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.where `.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/_set_functions.pyi b/typings/numpy/array_api/_set_functions.pyi
deleted file mode 100644
index 1784345..0000000
--- a/typings/numpy/array_api/_set_functions.pyi
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from typing import NamedTuple
-
-class UniqueAllResult(NamedTuple):
- values: Array
- indices: Array
- inverse_indices: Array
- counts: Array
- ...
-
-
-class UniqueCountsResult(NamedTuple):
- values: Array
- counts: Array
- ...
-
-
-class UniqueInverseResult(NamedTuple):
- values: Array
- inverse_indices: Array
- ...
-
-
-def unique_all(x: Array, /) -> UniqueAllResult:
- """
- Array API compatible wrapper for :py:func:`np.unique `.
-
- See its docstring for more information.
- """
- ...
-
-def unique_counts(x: Array, /) -> UniqueCountsResult:
- ...
-
-def unique_inverse(x: Array, /) -> UniqueInverseResult:
- """
- Array API compatible wrapper for :py:func:`np.unique `.
-
- See its docstring for more information.
- """
- ...
-
-def unique_values(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.unique `.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/_sorting_functions.pyi b/typings/numpy/array_api/_sorting_functions.pyi
deleted file mode 100644
index c5d242b..0000000
--- a/typings/numpy/array_api/_sorting_functions.pyi
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-
-def argsort(x: Array, /, *, axis: int = ..., descending: bool = ..., stable: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.argsort `.
-
- See its docstring for more information.
- """
- ...
-
-def sort(x: Array, /, *, axis: int = ..., descending: bool = ..., stable: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.sort `.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/_statistical_functions.pyi b/typings/numpy/array_api/_statistical_functions.pyi
deleted file mode 100644
index 9af3d56..0000000
--- a/typings/numpy/array_api/_statistical_functions.pyi
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from typing import Optional, TYPE_CHECKING, Tuple, Union
-from ._typing import Dtype
-
-if TYPE_CHECKING:
- ...
-def max(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., keepdims: bool = ...) -> Array:
- ...
-
-def mean(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., keepdims: bool = ...) -> Array:
- ...
-
-def min(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., keepdims: bool = ...) -> Array:
- ...
-
-def prod(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., dtype: Optional[Dtype] = ..., keepdims: bool = ...) -> Array:
- ...
-
-def std(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., correction: Union[int, float] = ..., keepdims: bool = ...) -> Array:
- ...
-
-def sum(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., dtype: Optional[Dtype] = ..., keepdims: bool = ...) -> Array:
- ...
-
-def var(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., correction: Union[int, float] = ..., keepdims: bool = ...) -> Array:
- ...
-
diff --git a/typings/numpy/array_api/_typing.pyi b/typings/numpy/array_api/_typing.pyi
deleted file mode 100644
index bab0bac..0000000
--- a/typings/numpy/array_api/_typing.pyi
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import sys
-from typing import Any, Literal, Protocol, TypeVar, Union
-from numpy import dtype, float32, float64, int16, int32, int64, int8, uint16, uint32, uint64, uint8
-
-"""
-This file defines the types for type annotations.
-
-These names aren't part of the module namespace, but they are used in the
-annotations in the function signatures. The functions in the module are only
-valid for inputs that match the given type annotations.
-"""
-__all__ = ["Array", "Device", "Dtype", "SupportsDLPack", "SupportsBufferProtocol", "PyCapsule"]
-_T_co = TypeVar("_T_co", covariant=True)
-class NestedSequence(Protocol[_T_co]):
- def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]:
- ...
-
- def __len__(self, /) -> int:
- ...
-
-
-
-Device = Literal["cpu"]
-Dtype = dtype[Union[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64,]]
-if sys.version_info >= (3, 12):
- ...
-else:
- ...
-PyCapsule = Any
-class SupportsDLPack(Protocol):
- def __dlpack__(self, /, *, stream: None = ...) -> PyCapsule:
- ...
-
-
-
diff --git a/typings/numpy/array_api/_utility_functions.pyi b/typings/numpy/array_api/_utility_functions.pyi
deleted file mode 100644
index e5a911f..0000000
--- a/typings/numpy/array_api/_utility_functions.pyi
+++ /dev/null
@@ -1,23 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from typing import Optional, Tuple, Union
-
-def all(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., keepdims: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.all `.
-
- See its docstring for more information.
- """
- ...
-
-def any(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., keepdims: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.any `.
-
- See its docstring for more information.
- """
- ...
-
diff --git a/typings/numpy/array_api/linalg.pyi b/typings/numpy/array_api/linalg.pyi
deleted file mode 100644
index 5634ea3..0000000
--- a/typings/numpy/array_api/linalg.pyi
+++ /dev/null
@@ -1,200 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._array_object import Array
-from typing import Literal, NamedTuple, Optional, Sequence, TYPE_CHECKING, Tuple, Union
-from ._typing import Dtype
-
-if TYPE_CHECKING:
- ...
-class EighResult(NamedTuple):
- eigenvalues: Array
- eigenvectors: Array
- ...
-
-
-class QRResult(NamedTuple):
- Q: Array
- R: Array
- ...
-
-
-class SlogdetResult(NamedTuple):
- sign: Array
- logabsdet: Array
- ...
-
-
-class SVDResult(NamedTuple):
- U: Array
- S: Array
- Vh: Array
- ...
-
-
-def cholesky(x: Array, /, *, upper: bool = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.cholesky `.
-
- See its docstring for more information.
- """
- ...
-
-def cross(x1: Array, x2: Array, /, *, axis: int = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.cross `.
-
- See its docstring for more information.
- """
- ...
-
-def det(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.det `.
-
- See its docstring for more information.
- """
- ...
-
-def diagonal(x: Array, /, *, offset: int = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.diagonal `.
-
- See its docstring for more information.
- """
- ...
-
-def eigh(x: Array, /) -> EighResult:
- """
- Array API compatible wrapper for :py:func:`np.linalg.eigh `.
-
- See its docstring for more information.
- """
- ...
-
-def eigvalsh(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.eigvalsh `.
-
- See its docstring for more information.
- """
- ...
-
-def inv(x: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.inv `.
-
- See its docstring for more information.
- """
- ...
-
-def matmul(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.matmul `.
-
- See its docstring for more information.
- """
- ...
-
-def matrix_norm(x: Array, /, *, keepdims: bool = ..., ord: Optional[Union[int, float, Literal["fro", "nuc"]]] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.norm `.
-
- See its docstring for more information.
- """
- ...
-
-def matrix_power(x: Array, n: int, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.matrix_power `.
-
- See its docstring for more information.
- """
- ...
-
-def matrix_rank(x: Array, /, *, rtol: Optional[Union[float, Array]] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.matrix_rank `.
-
- See its docstring for more information.
- """
- ...
-
-def matrix_transpose(x: Array, /) -> Array:
- ...
-
-def outer(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.outer `.
-
- See its docstring for more information.
- """
- ...
-
-def pinv(x: Array, /, *, rtol: Optional[Union[float, Array]] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.pinv `.
-
- See its docstring for more information.
- """
- ...
-
-def qr(x: Array, /, *, mode: Literal["reduced", "complete"] = ...) -> QRResult:
- """
- Array API compatible wrapper for :py:func:`np.linalg.qr `.
-
- See its docstring for more information.
- """
- ...
-
-def slogdet(x: Array, /) -> SlogdetResult:
- """
- Array API compatible wrapper for :py:func:`np.linalg.slogdet `.
-
- See its docstring for more information.
- """
- ...
-
-def solve(x1: Array, x2: Array, /) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.solve `.
-
- See its docstring for more information.
- """
- ...
-
-def svd(x: Array, /, *, full_matrices: bool = ...) -> SVDResult:
- """
- Array API compatible wrapper for :py:func:`np.linalg.svd `.
-
- See its docstring for more information.
- """
- ...
-
-def svdvals(x: Array, /) -> Union[Array, Tuple[Array, ...]]:
- ...
-
-def tensordot(x1: Array, x2: Array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = ...) -> Array:
- ...
-
-def trace(x: Array, /, *, offset: int = ..., dtype: Optional[Dtype] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.trace `.
-
- See its docstring for more information.
- """
- ...
-
-def vecdot(x1: Array, x2: Array, /, *, axis: int = ...) -> Array:
- ...
-
-def vector_norm(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = ..., keepdims: bool = ..., ord: Optional[Union[int, float]] = ...) -> Array:
- """
- Array API compatible wrapper for :py:func:`np.linalg.norm `.
-
- See its docstring for more information.
- """
- ...
-
-__all__ = ['cholesky', 'cross', 'det', 'diagonal', 'eigh', 'eigvalsh', 'inv', 'matmul', 'matrix_norm', 'matrix_power', 'matrix_rank', 'matrix_transpose', 'outer', 'pinv', 'qr', 'slogdet', 'solve', 'svd', 'svdvals', 'tensordot', 'trace', 'vecdot', 'vector_norm']
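For reference, a minimal sketch of how the linalg namespace stubbed above is exercised, again assuming the provisional numpy.array_api module is available:

    # Minimal sketch; assumes the provisional numpy.array_api module is present.
    import numpy.array_api as xp
    from numpy.array_api import linalg

    a = xp.asarray([[2.0, 0.0], [1.0, 3.0]])
    q, r = linalg.qr(a, mode="reduced")          # QRResult named tuple
    print(linalg.matrix_norm(a, ord="fro"))      # Frobenius norm
    print(linalg.slogdet(a))                     # SlogdetResult(sign=..., logabsdet=...)

The quoted Literal values ("reduced"/"complete", "fro"/"nuc") are what let a type checker narrow the mode and ord keywords correctly.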
diff --git a/typings/numpy/compat/__init__.pyi b/typings/numpy/compat/__init__.pyi
deleted file mode 100644
index a940069..0000000
--- a/typings/numpy/compat/__init__.pyi
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from .._utils import _inspect
-from .._utils._inspect import formatargspec, getargspec
-from . import py3k
-from .py3k import *
-
-"""
-Compatibility module.
-
-This module contains duplicated code from Python itself or 3rd party
-extensions, which may be included for the following reasons:
-
- * compatibility
- * we may only need a small subset of the copied library/module
-
-"""
-__all__ = []
diff --git a/typings/numpy/compat/py3k.pyi b/typings/numpy/compat/py3k.pyi
deleted file mode 100644
index bc1814e..0000000
--- a/typings/numpy/compat/py3k.pyi
+++ /dev/null
@@ -1,110 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-
-"""
-Python 3.X compatibility tools.
-
-While this file was originally intended for Python 2 -> 3 transition,
-it is now used to create a compatibility layer between different
-minor versions of Python 3.
-
-While the active version of numpy may not support a given version of python, we
-allow downstream libraries to continue to use these shims for forward
-compatibility with numpy while they transition their code to newer versions of
-Python.
-"""
-__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
-long = int
-integer_types = ...
-basestring = str
-unicode = str
-bytes = bytes
-def asunicode(s): # -> str:
- ...
-
-def asbytes(s): # -> bytes:
- ...
-
-def asstr(s): # -> str:
- ...
-
-def isfileobj(f): # -> bool:
- ...
-
-def open_latin1(filename, mode=...): # -> IO[Any]:
- ...
-
-def sixu(s):
- ...
-
-strchar = ...
-def getexception(): # -> BaseException | None:
- ...
-
-def asbytes_nested(x): # -> list[Unknown] | bytes:
- ...
-
-def asunicode_nested(x): # -> list[Unknown] | str:
- ...
-
-def is_pathlib_path(obj): # -> bool:
- """
- Check whether obj is a `pathlib.Path` object.
-
- Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
- """
- ...
-
-class contextlib_nullcontext:
- """Context manager that does no additional processing.
-
- Used as a stand-in for a normal context manager, when a particular
- block of code is only sometimes used with a normal context manager:
-
- cm = optional_cm if condition else nullcontext()
- with cm:
- # Perform operation, using optional_cm if condition is True
-
- .. note::
- Prefer using `contextlib.nullcontext` instead of this context manager.
- """
- def __init__(self, enter_result=...) -> None:
- ...
-
- def __enter__(self): # -> None:
- ...
-
- def __exit__(self, *excinfo): # -> None:
- ...
-
-
-
-def npy_load_module(name, fn, info=...): # -> ModuleType:
- """
- Load a module. Uses ``load_module`` which will be deprecated in python
- 3.12. An alternative that uses ``exec_module`` is in
- numpy.distutils.misc_util.exec_mod_from_location
-
- .. versionadded:: 1.11.2
-
- Parameters
- ----------
- name : str
- Full module name.
- fn : str
- Path to module file.
- info : tuple, optional
- Only here for backward compatibility with Python 2.*.
-
- Returns
- -------
- mod : module
-
- """
- ...
-
-os_fspath = ...
-os_PathLike = os.PathLike
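The contextlib_nullcontext docstring above describes the optional-context-manager pattern; a small self-contained sketch of that pattern, written with the standard-library contextlib.nullcontext it recommends (the write_report helper is purely hypothetical):

    # Sketch of the stand-in pattern from the contextlib_nullcontext docstring,
    # using the standard-library contextlib.nullcontext it points to.
    # write_report is a hypothetical helper, not part of numpy.compat.
    import contextlib
    import io

    def write_report(text, path=None):
        buffer = io.StringIO()
        # Open a real file only when a path is given; otherwise enter a no-op
        # context manager that simply yields the in-memory buffer.
        cm = open(path, "w") if path is not None else contextlib.nullcontext(buffer)
        with cm as target:
            target.write(text)
        return None if path is not None else buffer.getvalue()

    print(write_report("hello"))  # 'hello'
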
diff --git a/typings/numpy/conftest.pyi b/typings/numpy/conftest.pyi
deleted file mode 100644
index 943b007..0000000
--- a/typings/numpy/conftest.pyi
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import pytest
-
-"""
-Pytest configuration and fixtures for the Numpy test suite.
-"""
-_old_fpu_mode = ...
-_collect_results = ...
-_pytest_ini = ...
-def pytest_configure(config): # -> None:
- ...
-
-def pytest_addoption(parser): # -> None:
- ...
-
-def pytest_sessionstart(session): # -> None:
- ...
-
-@pytest.hookimpl()
-def pytest_itemcollected(item): # -> None:
- """
- Check FPU precision mode was not changed during test collection.
-
- The clumsy way we do it here is mainly necessary because numpy
- still uses yield tests, which can execute code at test collection
- time.
- """
- ...
-
-@pytest.fixture(scope="function", autouse=True)
-def check_fpu_mode(request): # -> Generator[None, Any, None]:
- """
- Check FPU precision mode was not changed during the test.
- """
- ...
-
-@pytest.fixture(autouse=True)
-def add_np(doctest_namespace): # -> None:
- ...
-
-@pytest.fixture(autouse=True)
-def env_setup(monkeypatch): # -> None:
- ...
-
-@pytest.fixture(params=[True, False])
-def weak_promotion(request): # -> Generator[Unknown, Any, None]:
- """
- Fixture to ensure "legacy" promotion state or change it to use the new
- weak promotion (plus warning). `old_promotion` should be used as a
- parameter in the function.
- """
- ...
-
diff --git a/typings/numpy/core/__init__.pyi b/typings/numpy/core/__init__.pyi
deleted file mode 100644
index 006bc27..0000000
--- a/typings/numpy/core/__init__.pyi
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
diff --git a/typings/numpy/core/_asarray.pyi b/typings/numpy/core/_asarray.pyi
deleted file mode 100644
index c46dee4..0000000
--- a/typings/numpy/core/_asarray.pyi
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Iterable
-from typing import Any, Literal, TypeVar, Union, overload
-from numpy import ndarray
-from numpy._typing import DTypeLike, _SupportsArrayFunc
-
-_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
-_Requirements = Literal["C", "C_CONTIGUOUS", "CONTIGUOUS", "F", "F_CONTIGUOUS", "FORTRAN", "A", "ALIGNED", "W", "WRITEABLE", "O", "OWNDATA"]
-_E = Literal["E", "ENSUREARRAY"]
-_RequirementsWithE = Union[_Requirements, _E]
-@overload
-def require(a: _ArrayType, dtype: None = ..., requirements: None | _Requirements | Iterable[_Requirements] = ..., *, like: _SupportsArrayFunc = ...) -> _ArrayType:
- ...
-
-@overload
-def require(a: object, dtype: DTypeLike = ..., requirements: _E | Iterable[_RequirementsWithE] = ..., *, like: _SupportsArrayFunc = ...) -> ndarray[Any, Any]:
- ...
-
-@overload
-def require(a: object, dtype: DTypeLike = ..., requirements: None | _Requirements | Iterable[_Requirements] = ..., *, like: _SupportsArrayFunc = ...) -> ndarray[Any, Any]:
- ...
-
diff --git a/typings/numpy/core/_internal.pyi b/typings/numpy/core/_internal.pyi
deleted file mode 100644
index f6e37ca..0000000
--- a/typings/numpy/core/_internal.pyi
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import ctypes as ct
-from typing import Any, Generic, TypeVar, overload
-from numpy import ndarray
-from numpy.ctypeslib import c_intp
-
-_CastT = TypeVar("_CastT", bound=ct._CanCastTo)
-_CT = TypeVar("_CT", bound=ct._CData)
-_PT = TypeVar("_PT", bound=None | int)
-class _ctypes(Generic[_PT]):
- @overload
- def __new__(cls, array: ndarray[Any, Any], ptr: None = ...) -> _ctypes[None]:
- ...
-
- @overload
- def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]:
- ...
-
- @property
- def data(self) -> _PT:
- ...
-
- @property
- def shape(self) -> ct.Array[c_intp]:
- ...
-
- @property
- def strides(self) -> ct.Array[c_intp]:
- ...
-
- def data_as(self, obj: type[_CastT]) -> _CastT:
- ...
-
- def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]:
- ...
-
- def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]:
- ...
-
-
-
diff --git a/typings/numpy/core/_type_aliases.pyi b/typings/numpy/core/_type_aliases.pyi
deleted file mode 100644
index a18b74f..0000000
--- a/typings/numpy/core/_type_aliases.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, TypedDict
-from numpy import complexfloating, floating, generic, signedinteger, unsignedinteger
-
-class _SCTypes(TypedDict):
- int: list[type[signedinteger[Any]]]
- uint: list[type[unsignedinteger[Any]]]
- float: list[type[floating[Any]]]
- complex: list[type[complexfloating[Any, Any]]]
- others: list[type]
- ...
-
-
-sctypeDict: dict[int | str, type[generic]]
-sctypes: _SCTypes
diff --git a/typings/numpy/core/_ufunc_config.pyi b/typings/numpy/core/_ufunc_config.pyi
deleted file mode 100644
index 82ff48c..0000000
--- a/typings/numpy/core/_ufunc_config.pyi
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Callable
-from typing import Any, Literal, TypedDict
-from numpy import _SupportsWrite
-
-_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
-_ErrFunc = Callable[[str, int], Any]
-class _ErrDict(TypedDict):
- divide: _ErrKind
- over: _ErrKind
- under: _ErrKind
- invalid: _ErrKind
- ...
-
-
-class _ErrDictOptional(TypedDict, total=False):
- all: None | _ErrKind
- divide: None | _ErrKind
- over: None | _ErrKind
- under: None | _ErrKind
- invalid: None | _ErrKind
- ...
-
-
-def seterr(all: None | _ErrKind = ..., divide: None | _ErrKind = ..., over: None | _ErrKind = ..., under: None | _ErrKind = ..., invalid: None | _ErrKind = ...) -> _ErrDict:
- ...
-
-def geterr() -> _ErrDict:
- ...
-
-def setbufsize(size: int) -> int:
- ...
-
-def getbufsize() -> int:
- ...
-
-def seterrcall(func: None | _ErrFunc | _SupportsWrite[str]) -> None | _ErrFunc | _SupportsWrite[str]:
- ...
-
-def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]:
- ...
-
diff --git a/typings/numpy/core/arrayprint.pyi b/typings/numpy/core/arrayprint.pyi
deleted file mode 100644
index 9a5ebdc..0000000
--- a/typings/numpy/core/arrayprint.pyi
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Callable
-from typing import Any, Literal, SupportsIndex, TypedDict
-from contextlib import _GeneratorContextManager
-from numpy import bool_, clongdouble, complexfloating, datetime64, floating, integer, longdouble, ndarray, timedelta64, void
-from numpy._typing import _CharLike_co, _FloatLike_co
-
-_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
-class _FormatDict(TypedDict, total=False):
- bool: Callable[[bool_], str]
- int: Callable[[integer[Any]], str]
- timedelta: Callable[[timedelta64], str]
- datetime: Callable[[datetime64], str]
- float: Callable[[floating[Any]], str]
- longfloat: Callable[[longdouble], str]
- complexfloat: Callable[[complexfloating[Any, Any]], str]
- longcomplexfloat: Callable[[clongdouble], str]
- void: Callable[[void], str]
- numpystr: Callable[[_CharLike_co], str]
- object: Callable[[object], str]
- all: Callable[[object], str]
- int_kind: Callable[[integer[Any]], str]
- float_kind: Callable[[floating[Any]], str]
- complex_kind: Callable[[complexfloating[Any, Any]], str]
- str_kind: Callable[[_CharLike_co], str]
- ...
-
-
-class _FormatOptions(TypedDict):
- precision: int
- threshold: int
- edgeitems: int
- linewidth: int
- suppress: bool
- nanstr: str
- infstr: str
- formatter: None | _FormatDict
- sign: Literal["-", "+", " "]
- floatmode: _FloatMode
- legacy: Literal[False, "1.13", "1.21"]
- ...
-
-
-def set_printoptions(precision: None | SupportsIndex = ..., threshold: None | int = ..., edgeitems: None | int = ..., linewidth: None | int = ..., suppress: None | bool = ..., nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., sign: Literal[None, "-", "+", " "] = ..., floatmode: None | _FloatMode = ..., *, legacy: Literal[None, False, "1.13", "1.21"] = ...) -> None:
- ...
-
-def get_printoptions() -> _FormatOptions:
- ...
-
-def array2string(a: ndarray[Any, Any], max_line_width: None | int = ..., precision: None | SupportsIndex = ..., suppress_small: None | bool = ..., separator: str = ..., prefix: str = ..., *, formatter: None | _FormatDict = ..., threshold: None | int = ..., edgeitems: None | int = ..., sign: Literal[None, "-", "+", " "] = ..., floatmode: None | _FloatMode = ..., suffix: str = ..., legacy: Literal[None, False, "1.13", "1.21"] = ...) -> str:
- ...
-
-def format_float_scientific(x: _FloatLike_co, precision: None | int = ..., unique: bool = ..., trim: Literal["k", ".", "0", "-"] = ..., sign: bool = ..., pad_left: None | int = ..., exp_digits: None | int = ..., min_digits: None | int = ...) -> str:
- ...
-
-def format_float_positional(x: _FloatLike_co, precision: None | int = ..., unique: bool = ..., fractional: bool = ..., trim: Literal["k", ".", "0", "-"] = ..., sign: bool = ..., pad_left: None | int = ..., pad_right: None | int = ..., min_digits: None | int = ...) -> str:
- ...
-
-def array_repr(arr: ndarray[Any, Any], max_line_width: None | int = ..., precision: None | SupportsIndex = ..., suppress_small: None | bool = ...) -> str:
- ...
-
-def array_str(a: ndarray[Any, Any], max_line_width: None | int = ..., precision: None | SupportsIndex = ..., suppress_small: None | bool = ...) -> str:
- ...
-
-def set_string_function(f: None | Callable[[ndarray[Any, Any]], str], repr: bool = ...) -> None:
- ...
-
-def printoptions(precision: None | SupportsIndex = ..., threshold: None | int = ..., edgeitems: None | int = ..., linewidth: None | int = ..., suppress: None | bool = ..., nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., sign: Literal[None, "-", "+", " "] = ..., floatmode: None | _FloatMode = ..., *, legacy: Literal[None, False, "1.13", "1.21"] = ...) -> _GeneratorContextManager[_FormatOptions]:
- ...
-
diff --git a/typings/numpy/core/defchararray.pyi b/typings/numpy/core/defchararray.pyi
deleted file mode 100644
index 06cf826..0000000
--- a/typings/numpy/core/defchararray.pyi
+++ /dev/null
@@ -1,375 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, Literal as L, TypeVar, overload
-from numpy import _OrderKACF, bool_, bytes_, chararray as chararray, dtype, int_, object_, str_
-from numpy._typing import NDArray, _ArrayLikeBool_co as b_co, _ArrayLikeBytes_co as S_co, _ArrayLikeInt_co as i_co, _ArrayLikeStr_co as U_co
-
-_SCT = TypeVar("_SCT", str_, bytes_)
-_CharArray = chararray[Any, dtype[_SCT]]
-__all__: list[str]
-@overload
-def equal(x1: U_co, x2: U_co) -> NDArray[bool_]:
- ...
-
-@overload
-def equal(x1: S_co, x2: S_co) -> NDArray[bool_]:
- ...
-
-@overload
-def not_equal(x1: U_co, x2: U_co) -> NDArray[bool_]:
- ...
-
-@overload
-def not_equal(x1: S_co, x2: S_co) -> NDArray[bool_]:
- ...
-
-@overload
-def greater_equal(x1: U_co, x2: U_co) -> NDArray[bool_]:
- ...
-
-@overload
-def greater_equal(x1: S_co, x2: S_co) -> NDArray[bool_]:
- ...
-
-@overload
-def less_equal(x1: U_co, x2: U_co) -> NDArray[bool_]:
- ...
-
-@overload
-def less_equal(x1: S_co, x2: S_co) -> NDArray[bool_]:
- ...
-
-@overload
-def greater(x1: U_co, x2: U_co) -> NDArray[bool_]:
- ...
-
-@overload
-def greater(x1: S_co, x2: S_co) -> NDArray[bool_]:
- ...
-
-@overload
-def less(x1: U_co, x2: U_co) -> NDArray[bool_]:
- ...
-
-@overload
-def less(x1: S_co, x2: S_co) -> NDArray[bool_]:
- ...
-
-@overload
-def add(x1: U_co, x2: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def add(x1: S_co, x2: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def multiply(a: U_co, i: i_co) -> NDArray[str_]:
- ...
-
-@overload
-def multiply(a: S_co, i: i_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def mod(a: U_co, value: Any) -> NDArray[str_]:
- ...
-
-@overload
-def mod(a: S_co, value: Any) -> NDArray[bytes_]:
- ...
-
-@overload
-def capitalize(a: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def capitalize(a: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]:
- ...
-
-def decode(a: S_co, encoding: None | str = ..., errors: None | str = ...) -> NDArray[str_]:
- ...
-
-def encode(a: U_co, encoding: None | str = ..., errors: None | str = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def join(sep: U_co, seq: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def join(sep: S_co, seq: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def lower(a: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def lower(a: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def partition(a: U_co, sep: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def partition(a: S_co, sep: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def replace(a: U_co, old: U_co, new: U_co, count: None | i_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def replace(a: S_co, old: S_co, new: S_co, count: None | i_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def rjust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def rjust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def rpartition(a: U_co, sep: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def rsplit(a: U_co, sep: None | U_co = ..., maxsplit: None | i_co = ...) -> NDArray[object_]:
- ...
-
-@overload
-def rsplit(a: S_co, sep: None | S_co = ..., maxsplit: None | i_co = ...) -> NDArray[object_]:
- ...
-
-@overload
-def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def split(a: U_co, sep: None | U_co = ..., maxsplit: None | i_co = ...) -> NDArray[object_]:
- ...
-
-@overload
-def split(a: S_co, sep: None | S_co = ..., maxsplit: None | i_co = ...) -> NDArray[object_]:
- ...
-
-@overload
-def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]:
- ...
-
-@overload
-def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]:
- ...
-
-@overload
-def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def swapcase(a: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def swapcase(a: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def title(a: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def title(a: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def translate(a: U_co, table: U_co, deletechars: None | U_co = ...) -> NDArray[str_]:
- ...
-
-@overload
-def translate(a: S_co, table: S_co, deletechars: None | S_co = ...) -> NDArray[bytes_]:
- ...
-
-@overload
-def upper(a: U_co) -> NDArray[str_]:
- ...
-
-@overload
-def upper(a: S_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def zfill(a: U_co, width: i_co) -> NDArray[str_]:
- ...
-
-@overload
-def zfill(a: S_co, width: i_co) -> NDArray[bytes_]:
- ...
-
-@overload
-def count(a: U_co, sub: U_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def count(a: S_co, sub: S_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def endswith(a: U_co, suffix: U_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def endswith(a: S_co, suffix: S_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def find(a: U_co, sub: U_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def find(a: S_co, sub: S_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def index(a: U_co, sub: U_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def index(a: S_co, sub: S_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-def isalpha(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def isalnum(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def isdecimal(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def isdigit(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def islower(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def isnumeric(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def isspace(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def istitle(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-def isupper(a: U_co | S_co) -> NDArray[bool_]:
- ...
-
-@overload
-def rfind(a: U_co, sub: U_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def rfind(a: S_co, sub: S_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def rindex(a: U_co, sub: U_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def rindex(a: S_co, sub: S_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[int_]:
- ...
-
-@overload
-def startswith(a: U_co, prefix: U_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def startswith(a: S_co, prefix: S_co, start: i_co = ..., end: None | i_co = ...) -> NDArray[bool_]:
- ...
-
-def str_len(A: U_co | S_co) -> NDArray[int_]:
- ...
-
-@overload
-def array(obj: U_co, itemsize: None | int = ..., copy: bool = ..., unicode: L[False] = ..., order: _OrderKACF = ...) -> _CharArray[str_]:
- ...
-
-@overload
-def array(obj: S_co, itemsize: None | int = ..., copy: bool = ..., unicode: L[False] = ..., order: _OrderKACF = ...) -> _CharArray[bytes_]:
- ...
-
-@overload
-def array(obj: object, itemsize: None | int = ..., copy: bool = ..., unicode: L[False] = ..., order: _OrderKACF = ...) -> _CharArray[bytes_]:
- ...
-
-@overload
-def array(obj: object, itemsize: None | int = ..., copy: bool = ..., unicode: L[True] = ..., order: _OrderKACF = ...) -> _CharArray[str_]:
- ...
-
-@overload
-def asarray(obj: U_co, itemsize: None | int = ..., unicode: L[False] = ..., order: _OrderKACF = ...) -> _CharArray[str_]:
- ...
-
-@overload
-def asarray(obj: S_co, itemsize: None | int = ..., unicode: L[False] = ..., order: _OrderKACF = ...) -> _CharArray[bytes_]:
- ...
-
-@overload
-def asarray(obj: object, itemsize: None | int = ..., unicode: L[False] = ..., order: _OrderKACF = ...) -> _CharArray[bytes_]:
- ...
-
-@overload
-def asarray(obj: object, itemsize: None | int = ..., unicode: L[True] = ..., order: _OrderKACF = ...) -> _CharArray[str_]:
- ...
-
diff --git a/typings/numpy/core/einsumfunc.pyi b/typings/numpy/core/einsumfunc.pyi
deleted file mode 100644
index 9984ab9..0000000
--- a/typings/numpy/core/einsumfunc.pyi
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Sequence
-from typing import Any, Literal, TypeVar, Union, overload
-from numpy import _OrderKACF, bool_, dtype, ndarray, number
-from numpy._typing import _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, _DTypeLikeBool, _DTypeLikeComplex, _DTypeLikeComplex_co, _DTypeLikeFloat, _DTypeLikeInt, _DTypeLikeObject, _DTypeLikeUInt
-
-_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, dtype[Union[bool_, number[Any]]]])
-_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any]
-_CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
-_CastingUnsafe = Literal["unsafe"]
-__all__: list[str]
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeBool_co, out: None = ..., dtype: None | _DTypeLikeBool = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeUInt_co, out: None = ..., dtype: None | _DTypeLikeUInt = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeInt_co, out: None = ..., dtype: None | _DTypeLikeInt = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeFloat_co, out: None = ..., dtype: None | _DTypeLikeFloat = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, out: None = ..., dtype: None | _DTypeLikeComplex = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: Any, casting: _CastingUnsafe, dtype: None | _DTypeLikeComplex_co = ..., out: None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, out: _ArrayType, dtype: None | _DTypeLikeComplex_co = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> _ArrayType:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: Any, out: _ArrayType, casting: _CastingUnsafe, dtype: None | _DTypeLikeComplex_co = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = ...) -> _ArrayType:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, out: None = ..., dtype: None | _DTypeLikeObject = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: Any, casting: _CastingUnsafe, dtype: None | _DTypeLikeObject = ..., out: None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = ...) -> Any:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, out: _ArrayType, dtype: None | _DTypeLikeObject = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ...) -> _ArrayType:
- ...
-
-@overload
-def einsum(subscripts: str | _ArrayLikeInt_co, /, *operands: Any, out: _ArrayType, casting: _CastingUnsafe, dtype: None | _DTypeLikeObject = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = ...) -> _ArrayType:
- ...
-
-def einsum_path(subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co | _DTypeLikeObject, optimize: _OptimizeKind = ...) -> tuple[list[Any], str]:
- ...
-
diff --git a/typings/numpy/core/fromnumeric.pyi b/typings/numpy/core/fromnumeric.pyi
deleted file mode 100644
index 59886b5..0000000
--- a/typings/numpy/core/fromnumeric.pyi
+++ /dev/null
@@ -1,489 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Sequence
-from typing import Any, Literal, SupportsIndex, TypeVar, overload
-from numpy import _CastingKind, _ModeKind, _OrderACF, _OrderKACF, _PartitionKind, _SortKind, _SortSide, bool_, complexfloating, float16, floating, generic, int64, int_, intp, number, object_, uint64
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, _BoolLike_co, _ComplexLike_co, _DTypeLike, _IntLike_co, _NumberLike_co, _ScalarLike_co, _Shape, _ShapeLike
-
-_SCT = TypeVar("_SCT", bound=generic)
-_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_)
-_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
-__all__: list[str]
-@overload
-def take(a: _ArrayLike[_SCT], indices: _IntLike_co, axis: None = ..., out: None = ..., mode: _ModeKind = ...) -> _SCT:
- ...
-
-@overload
-def take(a: ArrayLike, indices: _IntLike_co, axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ...) -> Any:
- ...
-
-@overload
-def take(a: _ArrayLike[_SCT], indices: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def take(a: ArrayLike, indices: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def take(a: ArrayLike, indices: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., out: _ArrayType = ..., mode: _ModeKind = ...) -> _ArrayType:
- ...
-
-@overload
-def reshape(a: _ArrayLike[_SCT], newshape: _ShapeLike, order: _OrderACF = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def reshape(a: ArrayLike, newshape: _ShapeLike, order: _OrderACF = ...) -> NDArray[Any]:
- ...
-
-@overload
-def choose(a: _IntLike_co, choices: ArrayLike, out: None = ..., mode: _ModeKind = ...) -> Any:
- ...
-
-@overload
-def choose(a: _ArrayLikeInt_co, choices: _ArrayLike[_SCT], out: None = ..., mode: _ModeKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def choose(a: _ArrayLikeInt_co, choices: ArrayLike, out: None = ..., mode: _ModeKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def choose(a: _ArrayLikeInt_co, choices: ArrayLike, out: _ArrayType = ..., mode: _ModeKind = ...) -> _ArrayType:
- ...
-
-@overload
-def repeat(a: _ArrayLike[_SCT], repeats: _ArrayLikeInt_co, axis: None | SupportsIndex = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None | SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-def put(a: NDArray[Any], ind: _ArrayLikeInt_co, v: ArrayLike, mode: _ModeKind = ...) -> None:
- ...
-
-@overload
-def swapaxes(a: _ArrayLike[_SCT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_SCT]:
- ...
-
-@overload
-def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]:
- ...
-
-@overload
-def transpose(a: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def transpose(a: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def partition(a: _ArrayLike[_SCT], kth: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., kind: _PartitionKind = ..., order: None | str | Sequence[str] = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def partition(a: ArrayLike, kth: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., kind: _PartitionKind = ..., order: None | str | Sequence[str] = ...) -> NDArray[Any]:
- ...
-
-def argpartition(a: ArrayLike, kth: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., kind: _PartitionKind = ..., order: None | str | Sequence[str] = ...) -> NDArray[intp]:
- ...
-
-@overload
-def sort(a: _ArrayLike[_SCT], axis: None | SupportsIndex = ..., kind: None | _SortKind = ..., order: None | str | Sequence[str] = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def sort(a: ArrayLike, axis: None | SupportsIndex = ..., kind: None | _SortKind = ..., order: None | str | Sequence[str] = ...) -> NDArray[Any]:
- ...
-
-def argsort(a: ArrayLike, axis: None | SupportsIndex = ..., kind: None | _SortKind = ..., order: None | str | Sequence[str] = ...) -> NDArray[intp]:
- ...
-
-@overload
-def argmax(a: ArrayLike, axis: None = ..., out: None = ..., *, keepdims: Literal[False] = ...) -> intp:
- ...
-
-@overload
-def argmax(a: ArrayLike, axis: None | SupportsIndex = ..., out: None = ..., *, keepdims: bool = ...) -> Any:
- ...
-
-@overload
-def argmax(a: ArrayLike, axis: None | SupportsIndex = ..., out: _ArrayType = ..., *, keepdims: bool = ...) -> _ArrayType:
- ...
-
-@overload
-def argmin(a: ArrayLike, axis: None = ..., out: None = ..., *, keepdims: Literal[False] = ...) -> intp:
- ...
-
-@overload
-def argmin(a: ArrayLike, axis: None | SupportsIndex = ..., out: None = ..., *, keepdims: bool = ...) -> Any:
- ...
-
-@overload
-def argmin(a: ArrayLike, axis: None | SupportsIndex = ..., out: _ArrayType = ..., *, keepdims: bool = ...) -> _ArrayType:
- ...
-
-@overload
-def searchsorted(a: ArrayLike, v: _ScalarLike_co, side: _SortSide = ..., sorter: None | _ArrayLikeInt_co = ...) -> intp:
- ...
-
-@overload
-def searchsorted(a: ArrayLike, v: ArrayLike, side: _SortSide = ..., sorter: None | _ArrayLikeInt_co = ...) -> NDArray[intp]:
- ...
-
-@overload
-def resize(a: _ArrayLike[_SCT], new_shape: _ShapeLike) -> NDArray[_SCT]:
- ...
-
-@overload
-def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]:
- ...
-
-@overload
-def squeeze(a: _SCT, axis: None | _ShapeLike = ...) -> _SCT:
- ...
-
-@overload
-def squeeze(a: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def squeeze(a: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def diagonal(a: _ArrayLike[_SCT], offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def diagonal(a: ArrayLike, offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-@overload
-def trace(a: ArrayLike, offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike = ..., out: None = ...) -> Any:
- ...
-
-@overload
-def trace(a: ArrayLike, offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def ravel(a: ArrayLike, order: _OrderKACF = ...) -> NDArray[Any]:
- ...
-
-def nonzero(a: ArrayLike) -> tuple[NDArray[intp], ...]:
- ...
-
-def shape(a: ArrayLike) -> _Shape:
- ...
-
-@overload
-def compress(condition: _ArrayLikeBool_co, a: _ArrayLike[_SCT], axis: None | SupportsIndex = ..., out: None = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None | SupportsIndex = ..., out: None = ...) -> NDArray[Any]:
- ...
-
-@overload
-def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None | SupportsIndex = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def clip(a: _SCT, a_min: None | ArrayLike, a_max: None | ArrayLike, out: None = ..., *, dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., extobj: list[Any] = ..., casting: _CastingKind = ...) -> _SCT:
- ...
-
-@overload
-def clip(a: _ScalarLike_co, a_min: None | ArrayLike, a_max: None | ArrayLike, out: None = ..., *, dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., extobj: list[Any] = ..., casting: _CastingKind = ...) -> Any:
- ...
-
-@overload
-def clip(a: _ArrayLike[_SCT], a_min: None | ArrayLike, a_max: None | ArrayLike, out: None = ..., *, dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., extobj: list[Any] = ..., casting: _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def clip(a: ArrayLike, a_min: None | ArrayLike, a_max: None | ArrayLike, out: None = ..., *, dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., extobj: list[Any] = ..., casting: _CastingKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def clip(a: ArrayLike, a_min: None | ArrayLike, a_max: None | ArrayLike, out: _ArrayType = ..., *, dtype: DTypeLike, where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., extobj: list[Any] = ..., casting: _CastingKind = ...) -> Any:
- ...
-
-@overload
-def clip(a: ArrayLike, a_min: None | ArrayLike, a_max: None | ArrayLike, out: _ArrayType, *, dtype: DTypeLike = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., extobj: list[Any] = ..., casting: _CastingKind = ...) -> _ArrayType:
- ...
-
-@overload
-def sum(a: _ArrayLike[_SCT], axis: None = ..., dtype: None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _SCT:
- ...
-
-@overload
-def sum(a: ArrayLike, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def sum(a: ArrayLike, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayType = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def all(a: ArrayLike, axis: None = ..., out: None = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> bool_:
- ...
-
-@overload
-def all(a: ArrayLike, axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def all(a: ArrayLike, axis: None | _ShapeLike = ..., out: _ArrayType = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def any(a: ArrayLike, axis: None = ..., out: None = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> bool_:
- ...
-
-@overload
-def any(a: ArrayLike, axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def any(a: ArrayLike, axis: None | _ShapeLike = ..., out: _ArrayType = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def cumsum(a: _ArrayLike[_SCT], axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def cumsum(a: ArrayLike, axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[Any]:
- ...
-
-@overload
-def cumsum(a: ArrayLike, axis: None | SupportsIndex = ..., dtype: _DTypeLike[_SCT] = ..., out: None = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def cumsum(a: ArrayLike, axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: None = ...) -> NDArray[Any]:
- ...
-
-@overload
-def cumsum(a: ArrayLike, axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def ptp(a: _ArrayLike[_SCT], axis: None = ..., out: None = ..., keepdims: Literal[False] = ...) -> _SCT:
- ...
-
-@overload
-def ptp(a: ArrayLike, axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ...) -> Any:
- ...
-
-@overload
-def ptp(a: ArrayLike, axis: None | _ShapeLike = ..., out: _ArrayType = ..., keepdims: bool = ...) -> _ArrayType:
- ...
-
-@overload
-def amax(a: _ArrayLike[_SCT], axis: None = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _SCT:
- ...
-
-@overload
-def amax(a: ArrayLike, axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def amax(a: ArrayLike, axis: None | _ShapeLike = ..., out: _ArrayType = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def amin(a: _ArrayLike[_SCT], axis: None = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _SCT:
- ...
-
-@overload
-def amin(a: ArrayLike, axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def amin(a: ArrayLike, axis: None | _ShapeLike = ..., out: _ArrayType = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def prod(a: _ArrayLikeBool_co, axis: None = ..., dtype: None = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> int_:
- ...
-
-@overload
-def prod(a: _ArrayLikeUInt_co, axis: None = ..., dtype: None = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> uint64:
- ...
-
-@overload
-def prod(a: _ArrayLikeInt_co, axis: None = ..., dtype: None = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> int64:
- ...
-
-@overload
-def prod(a: _ArrayLikeFloat_co, axis: None = ..., dtype: None = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> floating[Any]:
- ...
-
-@overload
-def prod(a: _ArrayLikeComplex_co, axis: None = ..., dtype: None = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def prod(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def prod(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = ..., dtype: _DTypeLike[_SCT] = ..., out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _SCT:
- ...
-
-@overload
-def prod(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: None | DTypeLike = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def prod(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: None | DTypeLike = ..., out: _ArrayType = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeBool_co, axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[int_]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeUInt_co, axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[uint64]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[int64]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeFloat_co, axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeComplex_co, axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeObject_co, axis: None | SupportsIndex = ..., dtype: None = ..., out: None = ...) -> NDArray[object_]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | SupportsIndex = ..., dtype: _DTypeLike[_SCT] = ..., out: None = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: None = ...) -> NDArray[Any]:
- ...
-
-@overload
-def cumprod(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-def ndim(a: ArrayLike) -> int:
- ...
-
-def size(a: ArrayLike, axis: None | int = ...) -> int:
- ...
-
-@overload
-def around(a: _BoolLike_co, decimals: SupportsIndex = ..., out: None = ...) -> float16:
- ...
-
-@overload
-def around(a: _SCT_uifcO, decimals: SupportsIndex = ..., out: None = ...) -> _SCT_uifcO:
- ...
-
-@overload
-def around(a: _ComplexLike_co | object_, decimals: SupportsIndex = ..., out: None = ...) -> Any:
- ...
-
-@overload
-def around(a: _ArrayLikeBool_co, decimals: SupportsIndex = ..., out: None = ...) -> NDArray[float16]:
- ...
-
-@overload
-def around(a: _ArrayLike[_SCT_uifcO], decimals: SupportsIndex = ..., out: None = ...) -> NDArray[_SCT_uifcO]:
- ...
-
-@overload
-def around(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex = ..., out: None = ...) -> NDArray[Any]:
- ...
-
-@overload
-def around(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def mean(a: _ArrayLikeFloat_co, axis: None = ..., dtype: None = ..., out: None = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> floating[Any]:
- ...
-
-@overload
-def mean(a: _ArrayLikeComplex_co, axis: None = ..., dtype: None = ..., out: None = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def mean(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: None = ..., out: None = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def mean(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = ..., dtype: _DTypeLike[_SCT] = ..., out: None = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> _SCT:
- ...
-
-@overload
-def mean(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def mean(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayType = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def std(a: _ArrayLikeComplex_co, axis: None = ..., dtype: None = ..., out: None = ..., ddof: float = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> floating[Any]:
- ...
-
-@overload
-def std(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: None = ..., out: None = ..., ddof: float = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def std(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = ..., dtype: _DTypeLike[_SCT] = ..., out: None = ..., ddof: float = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> _SCT:
- ...
-
-@overload
-def std(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def std(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayType = ..., ddof: float = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-@overload
-def var(a: _ArrayLikeComplex_co, axis: None = ..., dtype: None = ..., out: None = ..., ddof: float = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> floating[Any]:
- ...
-
-@overload
-def var(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: None = ..., out: None = ..., ddof: float = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def var(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = ..., dtype: _DTypeLike[_SCT] = ..., out: None = ..., ddof: float = ..., keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co = ...) -> _SCT:
- ...
-
-@overload
-def var(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> Any:
- ...
-
-@overload
-def var(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayType = ..., ddof: float = ..., keepdims: bool = ..., *, where: _ArrayLikeBool_co = ...) -> _ArrayType:
- ...
-
-max = ...
-min = ...
-round = ...
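The fromnumeric stubs above drive return-type inference for the reduction and indexing helpers. A minimal sketch of how the overloads are meant to resolve, assuming NumPy is installed and the snippet is checked with pyright or mypy (exact revealed names vary by checker version):

import numpy as np
import numpy.typing as npt

a: npt.NDArray[np.float64] = np.zeros((2, 3), dtype=np.float64)

# No axis/dtype/out: the first `sum` overload preserves the scalar type.
total = np.sum(a)                    # float64 scalar
# The default-axis `argmax` overload returns a scalar index (intp).
idx = np.argmax(a)
# Passing `out=` selects the `_ArrayType`-returning overload.
out = np.empty(3, dtype=np.float64)
summed = np.sum(a, axis=0, out=out)  # same type as `out`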
diff --git a/typings/numpy/core/function_base.pyi b/typings/numpy/core/function_base.pyi
deleted file mode 100644
index 32271f3..0000000
--- a/typings/numpy/core/function_base.pyi
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, Literal as L, SupportsIndex, TypeVar, overload
-from numpy import complexfloating, floating, generic
-from numpy._typing import DTypeLike, NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _DTypeLike
-
-_SCT = TypeVar("_SCT", bound=generic)
-__all__: list[str]
-@overload
-def linspace(start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[False] = ..., dtype: None = ..., axis: SupportsIndex = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def linspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[False] = ..., dtype: None = ..., axis: SupportsIndex = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def linspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[False] = ..., dtype: _DTypeLike[_SCT] = ..., axis: SupportsIndex = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def linspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[False] = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-@overload
-def linspace(start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[True] = ..., dtype: None = ..., axis: SupportsIndex = ...) -> tuple[NDArray[floating[Any]], floating[Any]]:
- ...
-
-@overload
-def linspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[True] = ..., dtype: None = ..., axis: SupportsIndex = ...) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]:
- ...
-
-@overload
-def linspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[True] = ..., dtype: _DTypeLike[_SCT] = ..., axis: SupportsIndex = ...) -> tuple[NDArray[_SCT], _SCT]:
- ...
-
-@overload
-def linspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[True] = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ...) -> tuple[NDArray[Any], Any]:
- ...
-
-@overload
-def logspace(start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., base: _ArrayLikeFloat_co = ..., dtype: None = ..., axis: SupportsIndex = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def logspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., base: _ArrayLikeComplex_co = ..., dtype: None = ..., axis: SupportsIndex = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def logspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., base: _ArrayLikeComplex_co = ..., dtype: _DTypeLike[_SCT] = ..., axis: SupportsIndex = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def logspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., base: _ArrayLikeComplex_co = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-@overload
-def geomspace(start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., dtype: None = ..., axis: SupportsIndex = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def geomspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., dtype: None = ..., axis: SupportsIndex = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def geomspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., dtype: _DTypeLike[_SCT] = ..., axis: SupportsIndex = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def geomspace(start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-def add_newdoc(place: str, obj: str, doc: str | tuple[str, str] | list[tuple[str, str]], warn_on_python: bool = ...) -> None:
- ...
-
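The split between the retstep=False and retstep=True overloads of linspace above mirrors the runtime difference in what the function returns; a small sketch (assuming NumPy is installed):

import numpy as np

# retstep=False (the default): linspace returns just the samples.
xs = np.linspace(0.0, 1.0, num=5)
# retstep=True: a (samples, step) pair, hence the tuple-returning overloads.
xs2, step = np.linspace(0.0, 1.0, num=5, retstep=True)
print(step)  # 0.25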
diff --git a/typings/numpy/core/multiarray.pyi b/typings/numpy/core/multiarray.pyi
deleted file mode 100644
index 4062936..0000000
--- a/typings/numpy/core/multiarray.pyi
+++ /dev/null
@@ -1,521 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-import datetime as dt
-from collections.abc import Callable, Iterable, Sequence
-from typing import Any, ClassVar, Final, Literal as L, Protocol, SupportsIndex, TypeVar, final, overload
-from numpy import _CastingKind, _CopyMode, _IOProtocol, _ModeKind, _NDIterFlagsKind, _NDIterOpFlagsKind, _OrderCF, _OrderKACF, _SupportsBuffer, bool_, busdaycalendar as busdaycalendar, complexfloating, datetime64, dtype as dtype, float64, floating, generic, int_, intp, nditer as nditer, signedinteger, str_, timedelta64, ufunc, uint8, unsignedinteger
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeBytes_co, _ArrayLikeComplex_co, _ArrayLikeDT64_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeStr_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, _DTypeLike, _FloatLike_co, _IntLike_co, _NestedSequence, _ScalarLike_co, _ShapeLike, _SupportsArrayFunc, _TD64Like_co
-
-_T_co = TypeVar("_T_co", covariant=True)
-_T_contra = TypeVar("_T_contra", contravariant=True)
-_SCT = TypeVar("_SCT", bound=generic)
-_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
-_UnitKind = L["Y", "M", "D", "h", "m", "s", "ms", "us", "μs", "ns", "ps", "fs", "as",]
-_RollKind = L["nat", "forward", "following", "backward", "preceding", "modifiedfollowing", "modifiedpreceding",]
-class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]):
- def __len__(self) -> int:
- ...
-
- def __getitem__(self, key: _T_contra, /) -> _T_co:
- ...
-
-
-
-__all__: list[str]
-ALLOW_THREADS: Final[int]
-BUFSIZE: L[8192]
-CLIP: L[0]
-WRAP: L[1]
-RAISE: L[2]
-MAXDIMS: L[32]
-MAY_SHARE_BOUNDS: L[0]
-MAY_SHARE_EXACT: L[-1]
-tracemalloc_domain: L[389047]
-@overload
-def empty_like(prototype: _ArrayType, dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> _ArrayType:
- ...
-
-@overload
-def empty_like(prototype: _ArrayLike[_SCT], dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def empty_like(prototype: object, dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def empty_like(prototype: Any, dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def empty_like(prototype: Any, dtype: DTypeLike, order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def array(object: _ArrayType, dtype: None = ..., *, copy: bool | _CopyMode = ..., order: _OrderKACF = ..., subok: L[True], ndmin: int = ..., like: None | _SupportsArrayFunc = ...) -> _ArrayType:
- ...
-
-@overload
-def array(object: _ArrayLike[_SCT], dtype: None = ..., *, copy: bool | _CopyMode = ..., order: _OrderKACF = ..., subok: bool = ..., ndmin: int = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def array(object: object, dtype: None = ..., *, copy: bool | _CopyMode = ..., order: _OrderKACF = ..., subok: bool = ..., ndmin: int = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def array(object: Any, dtype: _DTypeLike[_SCT], *, copy: bool | _CopyMode = ..., order: _OrderKACF = ..., subok: bool = ..., ndmin: int = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def array(object: Any, dtype: DTypeLike, *, copy: bool | _CopyMode = ..., order: _OrderKACF = ..., subok: bool = ..., ndmin: int = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def zeros(shape: _ShapeLike, dtype: None = ..., order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def zeros(shape: _ShapeLike, dtype: _DTypeLike[_SCT], order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def zeros(shape: _ShapeLike, dtype: DTypeLike, order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def empty(shape: _ShapeLike, dtype: None = ..., order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def empty(shape: _ShapeLike, dtype: _DTypeLike[_SCT], order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def empty(shape: _ShapeLike, dtype: DTypeLike, order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = ...) -> tuple[intp, ...]:
- ...
-
-@overload
-def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = ...) -> tuple[NDArray[intp], ...]:
- ...
-
-@overload
-def ravel_multi_index(multi_index: Sequence[_IntLike_co], dims: Sequence[SupportsIndex], mode: _ModeKind | tuple[_ModeKind, ...] = ..., order: _OrderCF = ...) -> intp:
- ...
-
-@overload
-def ravel_multi_index(multi_index: Sequence[_ArrayLikeInt_co], dims: Sequence[SupportsIndex], mode: _ModeKind | tuple[_ModeKind, ...] = ..., order: _OrderCF = ...) -> NDArray[intp]:
- ...
-
-@overload
-def concatenate(arrays: _ArrayLike[_SCT], /, axis: None | SupportsIndex = ..., out: None = ..., *, dtype: None = ..., casting: None | _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def concatenate(arrays: _SupportsLenAndGetItem[int, ArrayLike], /, axis: None | SupportsIndex = ..., out: None = ..., *, dtype: None = ..., casting: None | _CastingKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def concatenate(arrays: _SupportsLenAndGetItem[int, ArrayLike], /, axis: None | SupportsIndex = ..., out: None = ..., *, dtype: _DTypeLike[_SCT], casting: None | _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def concatenate(arrays: _SupportsLenAndGetItem[int, ArrayLike], /, axis: None | SupportsIndex = ..., out: None = ..., *, dtype: DTypeLike, casting: None | _CastingKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def concatenate(arrays: _SupportsLenAndGetItem[int, ArrayLike], /, axis: None | SupportsIndex = ..., out: _ArrayType = ..., *, dtype: DTypeLike = ..., casting: None | _CastingKind = ...) -> _ArrayType:
- ...
-
-def inner(a: ArrayLike, b: ArrayLike, /) -> Any:
- ...
-
-@overload
-def where(condition: ArrayLike, /) -> tuple[NDArray[intp], ...]:
- ...
-
-@overload
-def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Any]:
- ...
-
-def lexsort(keys: ArrayLike, axis: None | SupportsIndex = ...) -> Any:
- ...
-
-def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: None | _CastingKind = ...) -> bool:
- ...
-
-def min_scalar_type(a: ArrayLike, /) -> dtype[Any]:
- ...
-
-def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike) -> dtype[Any]:
- ...
-
-@overload
-def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any:
- ...
-
-@overload
-def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType:
- ...
-
-@overload
-def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> bool_:
- ...
-
-@overload
-def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]:
- ...
-
-@overload
-def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]:
- ...
-
-@overload
-def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]:
- ...
-
-@overload
-def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64:
- ...
-
-@overload
-def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any:
- ...
-
-@overload
-def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any:
- ...
-
-def bincount(x: ArrayLike, /, weights: None | ArrayLike = ..., minlength: SupportsIndex = ...) -> NDArray[intp]:
- ...
-
-def copyto(dst: NDArray[Any], src: ArrayLike, casting: None | _CastingKind = ..., where: None | _ArrayLikeBool_co = ...) -> None:
- ...
-
-def putmask(a: NDArray[Any], /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None:
- ...
-
-def packbits(a: _ArrayLikeInt_co, /, axis: None | SupportsIndex = ..., bitorder: L["big", "little"] = ...) -> NDArray[uint8]:
- ...
-
-def unpackbits(a: _ArrayLike[uint8], /, axis: None | SupportsIndex = ..., count: None | SupportsIndex = ..., bitorder: L["big", "little"] = ...) -> NDArray[uint8]:
- ...
-
-def shares_memory(a: object, b: object, /, max_work: None | int = ...) -> bool:
- ...
-
-def may_share_memory(a: object, b: object, /, max_work: None | int = ...) -> bool:
- ...
-
-@overload
-def asarray(a: _ArrayLike[_SCT], dtype: None = ..., order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asarray(a: object, dtype: None = ..., order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def asarray(a: Any, dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asarray(a: Any, dtype: DTypeLike, order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def asanyarray(a: _ArrayType, dtype: None = ..., order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> _ArrayType:
- ...
-
-@overload
-def asanyarray(a: _ArrayLike[_SCT], dtype: None = ..., order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asanyarray(a: object, dtype: None = ..., order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def asanyarray(a: Any, dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asanyarray(a: Any, dtype: DTypeLike, order: _OrderKACF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def ascontiguousarray(a: _ArrayLike[_SCT], dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def ascontiguousarray(a: object, dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def ascontiguousarray(a: Any, dtype: _DTypeLike[_SCT], *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def ascontiguousarray(a: Any, dtype: DTypeLike, *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def asfortranarray(a: _ArrayLike[_SCT], dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asfortranarray(a: object, dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def asfortranarray(a: Any, dtype: _DTypeLike[_SCT], *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asfortranarray(a: Any, dtype: DTypeLike, *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-def geterrobj() -> list[Any]:
- ...
-
-def seterrobj(errobj: list[Any], /) -> None:
- ...
-
-def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]:
- ...
-
-@overload
-def fromstring(string: str | bytes, dtype: None = ..., count: SupportsIndex = ..., *, sep: str, like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def fromstring(string: str | bytes, dtype: _DTypeLike[_SCT], count: SupportsIndex = ..., *, sep: str, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def fromstring(string: str | bytes, dtype: DTypeLike, count: SupportsIndex = ..., *, sep: str, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-def frompyfunc(func: Callable[..., Any], /, nin: SupportsIndex, nout: SupportsIndex, *, identity: Any = ...) -> ufunc:
- ...
-
-@overload
-def fromfile(file: str | bytes | os.PathLike[Any] | _IOProtocol, dtype: None = ..., count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def fromfile(file: str | bytes | os.PathLike[Any] | _IOProtocol, dtype: _DTypeLike[_SCT], count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def fromfile(file: str | bytes | os.PathLike[Any] | _IOProtocol, dtype: DTypeLike, count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def fromiter(iter: Iterable[Any], dtype: _DTypeLike[_SCT], count: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def fromiter(iter: Iterable[Any], dtype: DTypeLike, count: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def frombuffer(buffer: _SupportsBuffer, dtype: None = ..., count: SupportsIndex = ..., offset: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def frombuffer(buffer: _SupportsBuffer, dtype: _DTypeLike[_SCT], count: SupportsIndex = ..., offset: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def frombuffer(buffer: _SupportsBuffer, dtype: DTypeLike, count: SupportsIndex = ..., offset: SupportsIndex = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def arange(stop: _IntLike_co, /, *, dtype: None = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def arange(start: _IntLike_co, stop: _IntLike_co, step: _IntLike_co = ..., dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def arange(stop: _FloatLike_co, /, *, dtype: None = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def arange(start: _FloatLike_co, stop: _FloatLike_co, step: _FloatLike_co = ..., dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def arange(stop: _TD64Like_co, /, *, dtype: None = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def arange(start: _TD64Like_co, stop: _TD64Like_co, step: _TD64Like_co = ..., dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def arange(start: datetime64, stop: datetime64, step: datetime64 = ..., dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[datetime64]:
- ...
-
-@overload
-def arange(stop: Any, /, *, dtype: _DTypeLike[_SCT], like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def arange(start: Any, stop: Any, step: Any = ..., dtype: _DTypeLike[_SCT] = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def arange(stop: Any, /, *, dtype: DTypeLike, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def arange(start: Any, stop: Any, step: Any = ..., dtype: DTypeLike = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-def datetime_data(dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /) -> tuple[str, int]:
- ...
-
-@overload
-def busday_count(begindates: _ScalarLike_co | dt.date, enddates: _ScalarLike_co | dt.date, weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> int_:
- ...
-
-@overload
-def busday_count(begindates: ArrayLike | dt.date | _NestedSequence[dt.date], enddates: ArrayLike | dt.date | _NestedSequence[dt.date], weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> NDArray[int_]:
- ...
-
-@overload
-def busday_count(begindates: ArrayLike | dt.date | _NestedSequence[dt.date], enddates: ArrayLike | dt.date | _NestedSequence[dt.date], weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def busday_offset(dates: datetime64 | dt.date, offsets: _TD64Like_co | dt.timedelta, roll: L["raise"] = ..., weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> datetime64:
- ...
-
-@overload
-def busday_offset(dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], roll: L["raise"] = ..., weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> NDArray[datetime64]:
- ...
-
-@overload
-def busday_offset(dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], roll: L["raise"] = ..., weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def busday_offset(dates: _ScalarLike_co | dt.date, offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> datetime64:
- ...
-
-@overload
-def busday_offset(dates: ArrayLike | dt.date | _NestedSequence[dt.date], offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], roll: _RollKind, weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> NDArray[datetime64]:
- ...
-
-@overload
-def busday_offset(dates: ArrayLike | dt.date | _NestedSequence[dt.date], offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], roll: _RollKind, weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def is_busday(dates: _ScalarLike_co | dt.date, weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> bool_:
- ...
-
-@overload
-def is_busday(dates: ArrayLike | _NestedSequence[dt.date], weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def is_busday(dates: ArrayLike | _NestedSequence[dt.date], weekmask: ArrayLike = ..., holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
-@overload
-def datetime_as_string(arr: datetime64 | dt.date, unit: None | L["auto"] | _UnitKind = ..., timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., casting: _CastingKind = ...) -> str_:
- ...
-
-@overload
-def datetime_as_string(arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], unit: None | L["auto"] | _UnitKind = ..., timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., casting: _CastingKind = ...) -> NDArray[str_]:
- ...
-
-@overload
-def compare_chararrays(a1: _ArrayLikeStr_co, a2: _ArrayLikeStr_co, cmp: L["<", "<=", "==", ">=", ">", "!="], rstrip: bool) -> NDArray[bool_]:
- ...
-
-@overload
-def compare_chararrays(a1: _ArrayLikeBytes_co, a2: _ArrayLikeBytes_co, cmp: L["<", "<=", "==", ">=", ">", "!="], rstrip: bool) -> NDArray[bool_]:
- ...
-
-def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None:
- ...
-
-_GetItemKeys = L["C", "CONTIGUOUS", "C_CONTIGUOUS", "F", "FORTRAN", "F_CONTIGUOUS", "W", "WRITEABLE", "B", "BEHAVED", "O", "OWNDATA", "A", "ALIGNED", "X", "WRITEBACKIFCOPY", "CA", "CARRAY", "FA", "FARRAY", "FNC", "FORC",]
-_SetItemKeys = L["A", "ALIGNED", "W", "WRITEABLE", "X", "WRITEBACKIFCOPY",]
-@final
-class flagsobj:
- __hash__: ClassVar[None]
- aligned: bool
- writeable: bool
- writebackifcopy: bool
- @property
- def behaved(self) -> bool:
- ...
-
- @property
- def c_contiguous(self) -> bool:
- ...
-
- @property
- def carray(self) -> bool:
- ...
-
- @property
- def contiguous(self) -> bool:
- ...
-
- @property
- def f_contiguous(self) -> bool:
- ...
-
- @property
- def farray(self) -> bool:
- ...
-
- @property
- def fnc(self) -> bool:
- ...
-
- @property
- def forc(self) -> bool:
- ...
-
- @property
- def fortran(self) -> bool:
- ...
-
- @property
- def num(self) -> int:
- ...
-
- @property
- def owndata(self) -> bool:
- ...
-
- def __getitem__(self, key: _GetItemKeys) -> bool:
- ...
-
- def __setitem__(self, key: _SetItemKeys, value: bool) -> None:
- ...
-
-
-
-def nested_iters(op: ArrayLike | Sequence[ArrayLike], axes: Sequence[Sequence[SupportsIndex]], flags: None | Sequence[_NDIterFlagsKind] = ..., op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., buffersize: SupportsIndex = ...) -> tuple[nditer, ...]:
- ...
-
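The arange overloads above pick the result dtype from the argument types when no dtype is given; a quick sketch of the three common cases (assuming NumPy is installed):

import numpy as np

# All-integer arguments match the signedinteger overload.
i = np.arange(5)
# Any float argument falls through to the floating overload.
f = np.arange(0.0, 1.0, 0.25)
# An explicit dtype selects the _DTypeLike[_SCT] overload and wins outright.
u = np.arange(5, dtype=np.uint8)
print(i.dtype, f.dtype, u.dtype)  # e.g. int64 float64 uint8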
diff --git a/typings/numpy/core/numeric.pyi b/typings/numpy/core/numeric.pyi
deleted file mode 100644
index 236c93d..0000000
--- a/typings/numpy/core/numeric.pyi
+++ /dev/null
@@ -1,359 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import sys
-from collections.abc import Callable, Sequence
-from typing import Any, Literal, NoReturn, SupportsAbs, SupportsIndex, TypeVar, overload
-from typing_extensions import TypeGuard
-from numpy import _OrderCF, _OrderKACF, bool_, complexfloating, float64, floating, generic, int_, intp, object_, signedinteger, timedelta64, unsignedinteger
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, _ArrayLikeUnknown, _DTypeLike, _ScalarLike_co, _ShapeLike, _SupportsArrayFunc
-
-if sys.version_info >= (3, 10):
- ...
-else:
- ...
-_T = TypeVar("_T")
-_SCT = TypeVar("_SCT", bound=generic)
-_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
-_CorrelateMode = Literal["valid", "same", "full"]
-__all__: list[str]
-@overload
-def zeros_like(a: _ArrayType, dtype: None = ..., order: _OrderKACF = ..., subok: Literal[True] = ..., shape: None = ...) -> _ArrayType:
- ...
-
-@overload
-def zeros_like(a: _ArrayLike[_SCT], dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def zeros_like(a: object, dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def zeros_like(a: Any, dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def zeros_like(a: Any, dtype: DTypeLike, order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def ones(shape: _ShapeLike, dtype: None = ..., order: _OrderCF = ..., *, like: _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def ones(shape: _ShapeLike, dtype: _DTypeLike[_SCT], order: _OrderCF = ..., *, like: _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def ones(shape: _ShapeLike, dtype: DTypeLike, order: _OrderCF = ..., *, like: _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def ones_like(a: _ArrayType, dtype: None = ..., order: _OrderKACF = ..., subok: Literal[True] = ..., shape: None = ...) -> _ArrayType:
- ...
-
-@overload
-def ones_like(a: _ArrayLike[_SCT], dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def ones_like(a: object, dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def ones_like(a: Any, dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def ones_like(a: Any, dtype: DTypeLike, order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def full(shape: _ShapeLike, fill_value: Any, dtype: None = ..., order: _OrderCF = ..., *, like: _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def full(shape: _ShapeLike, fill_value: Any, dtype: _DTypeLike[_SCT], order: _OrderCF = ..., *, like: _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def full(shape: _ShapeLike, fill_value: Any, dtype: DTypeLike, order: _OrderCF = ..., *, like: _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def full_like(a: _ArrayType, fill_value: Any, dtype: None = ..., order: _OrderKACF = ..., subok: Literal[True] = ..., shape: None = ...) -> _ArrayType:
- ...
-
-@overload
-def full_like(a: _ArrayLike[_SCT], fill_value: Any, dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def full_like(a: object, fill_value: Any, dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def full_like(a: Any, fill_value: Any, dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def full_like(a: Any, fill_value: Any, dtype: DTypeLike, order: _OrderKACF = ..., subok: bool = ..., shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def count_nonzero(a: ArrayLike, axis: None = ..., *, keepdims: Literal[False] = ...) -> int:
- ...
-
-@overload
-def count_nonzero(a: ArrayLike, axis: _ShapeLike = ..., *, keepdims: bool = ...) -> Any:
- ...
-
-def isfortran(a: NDArray[Any] | generic) -> bool:
- ...
-
-def argwhere(a: ArrayLike) -> NDArray[intp]:
- ...
-
-def flatnonzero(a: ArrayLike) -> NDArray[intp]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeUnknown, v: _ArrayLikeUnknown, mode: _CorrelateMode = ...) -> NDArray[Any]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, mode: _CorrelateMode = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeUInt_co, v: _ArrayLikeUInt_co, mode: _CorrelateMode = ...) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, mode: _CorrelateMode = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, mode: _CorrelateMode = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co, mode: _CorrelateMode = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, mode: _CorrelateMode = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def correlate(a: _ArrayLikeObject_co, v: _ArrayLikeObject_co, mode: _CorrelateMode = ...) -> NDArray[object_]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeUnknown, v: _ArrayLikeUnknown, mode: _CorrelateMode = ...) -> NDArray[Any]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, mode: _CorrelateMode = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeUInt_co, v: _ArrayLikeUInt_co, mode: _CorrelateMode = ...) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, mode: _CorrelateMode = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, mode: _CorrelateMode = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co, mode: _CorrelateMode = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, mode: _CorrelateMode = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def convolve(a: _ArrayLikeObject_co, v: _ArrayLikeObject_co, mode: _CorrelateMode = ...) -> NDArray[object_]:
- ...
-
-@overload
-def outer(a: _ArrayLikeUnknown, b: _ArrayLikeUnknown, out: None = ...) -> NDArray[Any]:
- ...
-
-@overload
-def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, out: None = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def outer(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, out: None = ...) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, out: None = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, out: None = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def outer(a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, out: None = ...) -> NDArray[object_]:
- ...
-
-@overload
-def outer(a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, out: _ArrayType) -> _ArrayType:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeUnknown, b: _ArrayLikeUnknown, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[Any]:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def tensordot(a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, axes: int | tuple[_ShapeLike, _ShapeLike] = ...) -> NDArray[object_]:
- ...
-
-@overload
-def roll(a: _ArrayLike[_SCT], shift: _ShapeLike, axis: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def roll(a: ArrayLike, shift: _ShapeLike, axis: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-def rollaxis(a: NDArray[_SCT], axis: int, start: int = ...) -> NDArray[_SCT]:
- ...
-
-def moveaxis(a: NDArray[_SCT], source: _ShapeLike, destination: _ShapeLike) -> NDArray[_SCT]:
- ...
-
-@overload
-def cross(a: _ArrayLikeUnknown, b: _ArrayLikeUnknown, axisa: int = ..., axisb: int = ..., axisc: int = ..., axis: None | int = ...) -> NDArray[Any]:
- ...
-
-@overload
-def cross(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., axis: None | int = ...) -> NoReturn:
- ...
-
-@overload
-def cross(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., axis: None | int = ...) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def cross(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., axis: None | int = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def cross(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., axis: None | int = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def cross(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., axis: None | int = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def cross(a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., axis: None | int = ...) -> NDArray[object_]:
- ...
-
-@overload
-def indices(dimensions: Sequence[int], dtype: type[int] = ..., sparse: Literal[False] = ...) -> NDArray[int_]:
- ...
-
-@overload
-def indices(dimensions: Sequence[int], dtype: type[int] = ..., sparse: Literal[True] = ...) -> tuple[NDArray[int_], ...]:
- ...
-
-@overload
-def indices(dimensions: Sequence[int], dtype: _DTypeLike[_SCT], sparse: Literal[False] = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def indices(dimensions: Sequence[int], dtype: _DTypeLike[_SCT], sparse: Literal[True]) -> tuple[NDArray[_SCT], ...]:
- ...
-
-@overload
-def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: Literal[False] = ...) -> NDArray[Any]:
- ...
-
-@overload
-def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: Literal[True]) -> tuple[NDArray[Any], ...]:
- ...
-
-def fromfunction(function: Callable[..., _T], shape: Sequence[int], *, dtype: DTypeLike = ..., like: _SupportsArrayFunc = ..., **kwargs: Any) -> _T:
- ...
-
-def isscalar(element: object) -> TypeGuard[generic | bool | int | float | complex | str | bytes | memoryview]:
- ...
-
-def binary_repr(num: SupportsIndex, width: None | int = ...) -> str:
- ...
-
-def base_repr(number: SupportsAbs[float], base: float = ..., padding: SupportsIndex = ...) -> str:
- ...
-
-@overload
-def identity(n: int, dtype: None = ..., *, like: _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def identity(n: int, dtype: _DTypeLike[_SCT], *, like: _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def identity(n: int, dtype: DTypeLike, *, like: _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-def allclose(a: ArrayLike, b: ArrayLike, rtol: float = ..., atol: float = ..., equal_nan: bool = ...) -> bool:
- ...
-
-@overload
-def isclose(a: _ScalarLike_co, b: _ScalarLike_co, rtol: float = ..., atol: float = ..., equal_nan: bool = ...) -> bool_:
- ...
-
-@overload
-def isclose(a: ArrayLike, b: ArrayLike, rtol: float = ..., atol: float = ..., equal_nan: bool = ...) -> NDArray[bool_]:
- ...
-
-def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool:
- ...
-
-def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool:
- ...
-
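The isclose stubs above distinguish scalar inputs (returning a single bool_) from array-like inputs (returning a boolean array), while allclose always collapses to a Python bool; a short sketch (assuming NumPy is installed):

import numpy as np

# Scalar operands: single np.bool_ via the _ScalarLike_co overload.
flag = np.isclose(0.1 + 0.2, 0.3)
# Array-like operands: boolean array via the NDArray[bool_] overload.
mask = np.isclose([0.1 + 0.2, 1.0], [0.3, 1.1])
# allclose reduces everything to one plain bool.
ok = np.allclose([0.1 + 0.2, 1.0], [0.3, 1.0])
print(bool(flag), mask, ok)  # True [ True False] True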
diff --git a/typings/numpy/core/numerictypes.pyi b/typings/numpy/core/numerictypes.pyi
deleted file mode 100644
index 08a9157..0000000
--- a/typings/numpy/core/numerictypes.pyi
+++ /dev/null
@@ -1,103 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import sys
-import types
-from typing import Any, Literal as L, Protocol, TypeVar, TypedDict, Union, overload
-from numpy import bool_, byte, bytes_, cdouble, clongdouble, csingle, datetime64, double, dtype, generic, half, int_, intc, longdouble, longlong, ndarray, object_, short, single, str_, timedelta64, ubyte, uint, uintc, ulonglong, ushort, void
-from numpy._typing import ArrayLike, DTypeLike, _DTypeLike
-
-_T = TypeVar("_T")
-_SCT = TypeVar("_SCT", bound=generic)
-class _CastFunc(Protocol):
- def __call__(self, x: ArrayLike, k: DTypeLike = ...) -> ndarray[Any, dtype[Any]]:
- ...
-
-
-
-class _TypeCodes(TypedDict):
- Character: L['c']
- Integer: L['bhilqp']
- UnsignedInteger: L['BHILQP']
- Float: L['efdg']
- Complex: L['FDG']
- AllInteger: L['bBhHiIlLqQpP']
- AllFloat: L['efdgFDG']
- Datetime: L['Mm']
- All: L['?bhilqpBHILQPefdgFDGSUVOMm']
- ...
-
-
-class _typedict(dict[type[generic], _T]):
- def __getitem__(self, key: DTypeLike) -> _T:
- ...
-
-
-
-if sys.version_info >= (3, 10):
- _TypeTuple = Union[type[Any], types.UnionType, tuple[Union[type[Any], types.UnionType, tuple[Any, ...]], ...],]
-else:
- ...
-__all__: list[str]
-@overload
-def maximum_sctype(t: _DTypeLike[_SCT]) -> type[_SCT]:
- ...
-
-@overload
-def maximum_sctype(t: DTypeLike) -> type[Any]:
- ...
-
-@overload
-def issctype(rep: dtype[Any] | type[Any]) -> bool:
- ...
-
-@overload
-def issctype(rep: object) -> L[False]:
- ...
-
-@overload
-def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | type[_SCT]:
- ...
-
-@overload
-def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | type[_SCT]:
- ...
-
-@overload
-def obj2sctype(rep: DTypeLike, default: None = ...) -> None | type[Any]:
- ...
-
-@overload
-def obj2sctype(rep: DTypeLike, default: _T) -> _T | type[Any]:
- ...
-
-@overload
-def obj2sctype(rep: object, default: None = ...) -> None:
- ...
-
-@overload
-def obj2sctype(rep: object, default: _T) -> _T:
- ...
-
-@overload
-def issubclass_(arg1: type[Any], arg2: _TypeTuple) -> bool:
- ...
-
-@overload
-def issubclass_(arg1: object, arg2: object) -> L[False]:
- ...
-
-def issubsctype(arg1: DTypeLike, arg2: DTypeLike) -> bool:
- ...
-
-def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool:
- ...
-
-def sctype2char(sctype: DTypeLike) -> str:
- ...
-
-cast: _typedict[_CastFunc]
-nbytes: _typedict[int]
-typecodes: _TypeCodes
-ScalarType: tuple[type[int], type[float], type[complex], type[bool], type[bytes], type[str], type[memoryview], type[bool_], type[csingle], type[cdouble], type[clongdouble], type[half], type[single], type[double], type[longdouble], type[byte], type[short], type[intc], type[int_], type[longlong], type[timedelta64], type[datetime64], type[object_], type[bytes_], type[str_], type[ubyte], type[ushort], type[uintc], type[uint], type[ulonglong], type[void],]
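For orientation only (not part of the diff): a short example of the `numerictypes` helpers whose stubs are deleted above, under the NumPy 1.x API they describe (`sctype2char` and friends are removed in NumPy 2.0):

```python
import numpy as np

print(np.issubdtype(np.float32, np.floating))  # True: abstract scalar hierarchy
print(np.issubdtype(np.int64, np.floating))    # False
print(np.sctype2char(np.float64))              # 'd'
print(np.typecodes["AllInteger"])              # 'bBhHiIlLqQpP', as in _TypeCodes
```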
diff --git a/typings/numpy/core/records.pyi b/typings/numpy/core/records.pyi
deleted file mode 100644
index 64cd7ff..0000000
--- a/typings/numpy/core/records.pyi
+++ /dev/null
@@ -1,85 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-from collections.abc import Iterable, Sequence
-from typing import Any, Protocol, TypeVar, overload
-from numpy import _ByteOrder, _SupportsBuffer, dtype, generic, recarray as recarray, record as record
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLikeVoid_co, _NestedSequence, _ShapeLike
-
-_SCT = TypeVar("_SCT", bound=generic)
-_RecArray = recarray[Any, dtype[_SCT]]
-class _SupportsReadInto(Protocol):
- def seek(self, offset: int, whence: int, /) -> object:
- ...
-
- def tell(self, /) -> int:
- ...
-
- def readinto(self, buffer: memoryview, /) -> int:
- ...
-
-
-
-__all__: list[str]
-@overload
-def fromarrays(arrayList: Iterable[ArrayLike], dtype: DTypeLike = ..., shape: None | _ShapeLike = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ...) -> _RecArray[Any]:
- ...
-
-@overload
-def fromarrays(arrayList: Iterable[ArrayLike], dtype: None = ..., shape: None | _ShapeLike = ..., *, formats: DTypeLike, names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., byteorder: None | _ByteOrder = ...) -> _RecArray[record]:
- ...
-
-@overload
-def fromrecords(recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], dtype: DTypeLike = ..., shape: None | _ShapeLike = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ...) -> _RecArray[record]:
- ...
-
-@overload
-def fromrecords(recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], dtype: None = ..., shape: None | _ShapeLike = ..., *, formats: DTypeLike, names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., byteorder: None | _ByteOrder = ...) -> _RecArray[record]:
- ...
-
-@overload
-def fromstring(datastring: _SupportsBuffer, dtype: DTypeLike, shape: None | _ShapeLike = ..., offset: int = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ...) -> _RecArray[record]:
- ...
-
-@overload
-def fromstring(datastring: _SupportsBuffer, dtype: None = ..., shape: None | _ShapeLike = ..., offset: int = ..., *, formats: DTypeLike, names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., byteorder: None | _ByteOrder = ...) -> _RecArray[record]:
- ...
-
-@overload
-def fromfile(fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, dtype: DTypeLike, shape: None | _ShapeLike = ..., offset: int = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ...) -> _RecArray[Any]:
- ...
-
-@overload
-def fromfile(fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, dtype: None = ..., shape: None | _ShapeLike = ..., offset: int = ..., *, formats: DTypeLike, names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., byteorder: None | _ByteOrder = ...) -> _RecArray[record]:
- ...
-
-@overload
-def array(obj: _SCT | NDArray[_SCT], dtype: None = ..., shape: None | _ShapeLike = ..., offset: int = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ..., copy: bool = ...) -> _RecArray[_SCT]:
- ...
-
-@overload
-def array(obj: ArrayLike, dtype: DTypeLike, shape: None | _ShapeLike = ..., offset: int = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ..., copy: bool = ...) -> _RecArray[Any]:
- ...
-
-@overload
-def array(obj: ArrayLike, dtype: None = ..., shape: None | _ShapeLike = ..., offset: int = ..., *, formats: DTypeLike, names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., byteorder: None | _ByteOrder = ..., copy: bool = ...) -> _RecArray[record]:
- ...
-
-@overload
-def array(obj: None, dtype: DTypeLike, shape: _ShapeLike, offset: int = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ..., copy: bool = ...) -> _RecArray[Any]:
- ...
-
-@overload
-def array(obj: None, dtype: None = ..., *, shape: _ShapeLike, offset: int = ..., formats: DTypeLike, names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., byteorder: None | _ByteOrder = ..., copy: bool = ...) -> _RecArray[record]:
- ...
-
-@overload
-def array(obj: _SupportsReadInto, dtype: DTypeLike, shape: None | _ShapeLike = ..., offset: int = ..., formats: None = ..., names: None = ..., titles: None = ..., aligned: bool = ..., byteorder: None = ..., copy: bool = ...) -> _RecArray[Any]:
- ...
-
-@overload
-def array(obj: _SupportsReadInto, dtype: None = ..., shape: None | _ShapeLike = ..., offset: int = ..., *, formats: DTypeLike, names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., byteorder: None | _ByteOrder = ..., copy: bool = ...) -> _RecArray[record]:
- ...
-
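A hedged usage sketch of the `fromarrays` overloads deleted above (NumPy 1.x `numpy.core.records`; the field names are illustrative):

```python
import numpy as np
from numpy.core import records  # np.rec is the public alias for this module

names = np.array(["a", "b"])
values = np.array([1.5, 2.5])

# Formats are inferred from the input arrays; the result is a recarray.
rec = records.fromarrays([names, values], names="name,value")
print(rec.name, rec.value)  # ['a' 'b'] [1.5 2.5]
print(rec[0])               # ('a', 1.5)
```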
diff --git a/typings/numpy/core/shape_base.pyi b/typings/numpy/core/shape_base.pyi
deleted file mode 100644
index ae975fd..0000000
--- a/typings/numpy/core/shape_base.pyi
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Sequence
-from typing import Any, SupportsIndex, TypeVar, overload
-from numpy import _CastingKind, generic
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike
-
-_SCT = TypeVar("_SCT", bound=generic)
-_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
-__all__: list[str]
-@overload
-def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]:
- ...
-
-@overload
-def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]:
- ...
-
-@overload
-def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]:
- ...
-
-@overload
-def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]:
- ...
-
-@overload
-def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]:
- ...
-
-@overload
-def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]:
- ...
-
-@overload
-def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]:
- ...
-
-@overload
-def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]:
- ...
-
-@overload
-def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]:
- ...
-
-@overload
-def vstack(tup: Sequence[_ArrayLike[_SCT]], *, dtype: None = ..., casting: _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def vstack(tup: Sequence[ArrayLike], *, dtype: _DTypeLike[_SCT], casting: _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def vstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = ..., casting: _CastingKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def hstack(tup: Sequence[_ArrayLike[_SCT]], *, dtype: None = ..., casting: _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def hstack(tup: Sequence[ArrayLike], *, dtype: _DTypeLike[_SCT], casting: _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def hstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = ..., casting: _CastingKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def stack(arrays: Sequence[_ArrayLike[_SCT]], axis: SupportsIndex = ..., out: None = ..., *, dtype: None = ..., casting: _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def stack(arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: None = ..., *, dtype: _DTypeLike[_SCT], casting: _CastingKind = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def stack(arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: None = ..., *, dtype: DTypeLike = ..., casting: _CastingKind = ...) -> NDArray[Any]:
- ...
-
-@overload
-def stack(arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: _ArrayType = ..., *, dtype: DTypeLike = ..., casting: _CastingKind = ...) -> _ArrayType:
- ...
-
-@overload
-def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]:
- ...
-
-@overload
-def block(arrays: ArrayLike) -> NDArray[Any]:
- ...
-
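Sketch of the stacking overloads deleted above. The `dtype`/`casting` keywords assume NumPy >= 1.24, where they were introduced:

```python
import numpy as np

a = np.array([1, 2, 3], dtype=np.int32)
b = np.array([4, 5, 6], dtype=np.int32)

s = np.stack([a, b], axis=0, dtype=np.float64)  # matches the _DTypeLike overload
v = np.vstack([a, b])                           # dtype preserved without a dtype=
print(s.dtype, s.shape)  # float64 (2, 3)
print(v.dtype, v.shape)  # int32 (2, 3)
```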
diff --git a/typings/numpy/core/umath.pyi b/typings/numpy/core/umath.pyi
deleted file mode 100644
index 986489b..0000000
--- a/typings/numpy/core/umath.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._multiarray_umath import *
-
-"""
-Create the numpy.core.umath namespace for backward compatibility. In v1.16
-the multiarray and umath c-extension modules were merged into a single
-_multiarray_umath extension module. So we replicate the old namespace
-by importing from the extension module.
-
-"""
-__all__ = ['_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG', 'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT', 'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN', 'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID', 'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT', 'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide', 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs', 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp', 'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc']
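The stub above only re-exports names; a quick, hedged check of the backward-compatibility aliasing its docstring describes (NumPy 1.x layout, where `numpy.core` is still importable):

```python
import numpy as np
from numpy.core import umath

# The top-level ufuncs and constants are the very objects re-exported here.
print(umath.add is np.add)  # True: same ufunc object
print(umath.pi == np.pi)    # True: constants such as pi/e are re-exported too
```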
diff --git a/typings/numpy/ctypeslib.pyi b/typings/numpy/ctypeslib.pyi
deleted file mode 100644
index 9408707..0000000
--- a/typings/numpy/ctypeslib.pyi
+++ /dev/null
@@ -1,265 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-import ctypes
-from ctypes import c_int64 as _c_intp
-from collections.abc import Iterable, Sequence
-from typing import Any, ClassVar, Generic, Literal as L, TypeVar, overload
-from numpy import bool_, byte, double, dtype, generic, int_, intc, longdouble, longlong, ndarray, short, single, ubyte, uint, uintc, ulonglong, ushort, void
-from numpy.core._internal import _ctypes
-from numpy.core.multiarray import flagsobj
-from numpy._typing import DTypeLike, NDArray, _ArrayLike, _BoolCodes, _ByteCodes, _DTypeLike, _DoubleCodes, _IntCCodes, _IntCodes, _LongDoubleCodes, _LongLongCodes, _ShapeLike, _ShortCodes, _SingleCodes, _UByteCodes, _UIntCCodes, _UIntCodes, _ULongLongCodes, _UShortCodes, _VoidDTypeLike
-
-_DType = TypeVar("_DType", bound=dtype[Any])
-_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any])
-_SCT = TypeVar("_SCT", bound=generic)
-_FlagsKind = L['C_CONTIGUOUS', 'CONTIGUOUS', 'C', 'F_CONTIGUOUS', 'FORTRAN', 'F', 'ALIGNED', 'A', 'WRITEABLE', 'W', 'OWNDATA', 'O', 'WRITEBACKIFCOPY', 'X',]
-class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]):
- _dtype_: ClassVar[_DTypeOptional]
- _shape_: ClassVar[None]
- _ndim_: ClassVar[None | int]
- _flags_: ClassVar[None | list[_FlagsKind]]
- @overload
- @classmethod
- def from_param(cls: type[_ndptr[None]], obj: ndarray[Any, Any]) -> _ctypes[Any]:
- ...
-
- @overload
- @classmethod
- def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes[Any]:
- ...
-
-
-
-class _concrete_ndptr(_ndptr[_DType]):
- _dtype_: ClassVar[_DType]
- _shape_: ClassVar[tuple[int, ...]]
- @property
- def contents(self) -> ndarray[Any, _DType]:
- ...
-
-
-
-def load_library(libname: str | bytes | os.PathLike[str] | os.PathLike[bytes], loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes]) -> ctypes.CDLL:
- ...
-
-__all__: list[str]
-c_intp = _c_intp
-@overload
-def ndpointer(dtype: None = ..., ndim: int = ..., shape: None | _ShapeLike = ..., flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...) -> type[_ndptr[None]]:
- ...
-
-@overload
-def ndpointer(dtype: _DTypeLike[_SCT], ndim: int = ..., *, shape: _ShapeLike, flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...) -> type[_concrete_ndptr[dtype[_SCT]]]:
- ...
-
-@overload
-def ndpointer(dtype: DTypeLike, ndim: int = ..., *, shape: _ShapeLike, flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...) -> type[_concrete_ndptr[dtype[Any]]]:
- ...
-
-@overload
-def ndpointer(dtype: _DTypeLike[_SCT], ndim: int = ..., shape: None = ..., flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...) -> type[_ndptr[dtype[_SCT]]]:
- ...
-
-@overload
-def ndpointer(dtype: DTypeLike, ndim: int = ..., shape: None = ..., flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...) -> type[_ndptr[dtype[Any]]]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[bool_] | type[ctypes.c_bool]) -> type[ctypes.c_bool]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _IntCodes | _DTypeLike[int_] | type[int | ctypes.c_long]) -> type[ctypes.c_long]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _UIntCodes | _DTypeLike[uint] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]:
- ...
-
-@overload
-def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]:
- ...
-
-@overload
-def as_ctypes_type(dtype: str) -> type[Any]:
- ...
-
-@overload
-def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]:
- ...
-
-@overload
-def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def as_ctypes(obj: bool_) -> ctypes.c_bool:
- ...
-
-@overload
-def as_ctypes(obj: byte) -> ctypes.c_byte:
- ...
-
-@overload
-def as_ctypes(obj: short) -> ctypes.c_short:
- ...
-
-@overload
-def as_ctypes(obj: intc) -> ctypes.c_int:
- ...
-
-@overload
-def as_ctypes(obj: int_) -> ctypes.c_long:
- ...
-
-@overload
-def as_ctypes(obj: longlong) -> ctypes.c_longlong:
- ...
-
-@overload
-def as_ctypes(obj: ubyte) -> ctypes.c_ubyte:
- ...
-
-@overload
-def as_ctypes(obj: ushort) -> ctypes.c_ushort:
- ...
-
-@overload
-def as_ctypes(obj: uintc) -> ctypes.c_uint:
- ...
-
-@overload
-def as_ctypes(obj: uint) -> ctypes.c_ulong:
- ...
-
-@overload
-def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong:
- ...
-
-@overload
-def as_ctypes(obj: single) -> ctypes.c_float:
- ...
-
-@overload
-def as_ctypes(obj: double) -> ctypes.c_double:
- ...
-
-@overload
-def as_ctypes(obj: longdouble) -> ctypes.c_longdouble:
- ...
-
-@overload
-def as_ctypes(obj: void) -> Any:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[bool_]) -> ctypes.Array[ctypes.c_bool]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[int_]) -> ctypes.Array[ctypes.c_long]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[uint]) -> ctypes.Array[ctypes.c_ulong]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]:
- ...
-
-@overload
-def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]:
- ...
-
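A minimal sketch of the `ctypeslib` API stubbed above: `as_ctypes` shares memory with the source array, and `ndpointer` builds an argument type usable in `ctypes.CDLL` prototypes (variable names are illustrative):

```python
import numpy as np

arr = np.arange(4, dtype=np.float64)

# Argument type restricted to 1-D contiguous float64 arrays, for use in argtypes.
DoubleArray1D = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags="C_CONTIGUOUS")

c_arr = np.ctypeslib.as_ctypes(arr)  # ctypes.Array[ctypes.c_double], shares the buffer
c_arr[0] = 10.0
print(arr[0])                        # 10.0: same underlying memory

back = np.ctypeslib.as_array(c_arr)  # back to an NDArray[float64] view
print(back.dtype, back.shape)        # float64 (4,)
```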
diff --git a/typings/numpy/doc/__init__.pyi b/typings/numpy/doc/__init__.pyi
deleted file mode 100644
index d2821e6..0000000
--- a/typings/numpy/doc/__init__.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-
-ref_dir = ...
-__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and not f.startswith('__'))
-__doc__ = ...
diff --git a/typings/numpy/dtypes.pyi b/typings/numpy/dtypes.pyi
deleted file mode 100644
index 15156ae..0000000
--- a/typings/numpy/dtypes.pyi
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import numpy as np
-
-__all__: list[str]
-BoolDType = np.dtype[np.bool_]
-Int8DType = np.dtype[np.int8]
-UInt8DType = np.dtype[np.uint8]
-Int16DType = np.dtype[np.int16]
-UInt16DType = np.dtype[np.uint16]
-Int32DType = np.dtype[np.int32]
-UInt32DType = np.dtype[np.uint32]
-Int64DType = np.dtype[np.int64]
-UInt64DType = np.dtype[np.uint64]
-ByteDType = np.dtype[np.byte]
-UByteDType = np.dtype[np.ubyte]
-ShortDType = np.dtype[np.short]
-UShortDType = np.dtype[np.ushort]
-IntDType = np.dtype[np.intc]
-UIntDType = np.dtype[np.uintc]
-LongDType = np.dtype[np.int_]
-ULongDType = np.dtype[np.uint]
-LongLongDType = np.dtype[np.longlong]
-ULongLongDType = np.dtype[np.ulonglong]
-Float16DType = np.dtype[np.float16]
-Float32DType = np.dtype[np.float32]
-Float64DType = np.dtype[np.float64]
-LongDoubleDType = np.dtype[np.longdouble]
-Complex64DType = np.dtype[np.complex64]
-Complex128DType = np.dtype[np.complex128]
-CLongDoubleDType = np.dtype[np.clongdouble]
-ObjectDType = np.dtype[np.object_]
-BytesDType = np.dtype[np.bytes_]
-StrDType = np.dtype[np.str_]
-VoidDType = np.dtype[np.void]
-DateTime64DType = np.dtype[np.datetime64]
-TimeDelta64DType = np.dtype[np.timedelta64]
diff --git a/typings/numpy/exceptions.pyi b/typings/numpy/exceptions.pyi
deleted file mode 100644
index 5c58bb3..0000000
--- a/typings/numpy/exceptions.pyi
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import overload
-
-__all__: list[str]
-class ComplexWarning(RuntimeWarning):
- ...
-
-
-class ModuleDeprecationWarning(DeprecationWarning):
- ...
-
-
-class VisibleDeprecationWarning(UserWarning):
- ...
-
-
-class TooHardError(RuntimeError):
- ...
-
-
-class DTypePromotionError(TypeError):
- ...
-
-
-class AxisError(ValueError, IndexError):
- axis: None | int
- ndim: None | int
- @overload
- def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None:
- ...
-
- @overload
- def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None:
- ...
-
- def __str__(self) -> str:
- ...
-
-
-
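The two `AxisError` overloads above, exercised directly (assumes NumPy >= 1.25, where `numpy.exceptions` is public; older releases expose `numpy.AxisError`):

```python
from numpy.exceptions import AxisError

err = AxisError(axis=2, ndim=2)
print(err.axis, err.ndim)  # 2 2
print(err)                 # axis 2 is out of bounds for array of dimension 2

# The single-argument form carries only a message; axis and ndim are then None.
msg_only = AxisError("custom axis failure")
print(msg_only.axis, msg_only.ndim)  # None None
```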
diff --git a/typings/numpy/f2py/__init__.pyi b/typings/numpy/f2py/__init__.pyi
deleted file mode 100644
index ffae845..0000000
--- a/typings/numpy/f2py/__init__.pyi
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-import subprocess
-from collections.abc import Iterable
-from typing import Any, Literal as L, TypedDict, overload
-from numpy._pytesttester import PytestTester
-
-class _F2PyDictBase(TypedDict):
- csrc: list[str]
- h: list[str]
- ...
-
-
-class _F2PyDict(_F2PyDictBase, total=False):
- fsrc: list[str]
- ltx: list[str]
- ...
-
-
-__all__: list[str]
-test: PytestTester
-def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]:
- ...
-
-@overload
-def compile(source: str | bytes, modulename: str = ..., extra_args: str | list[str] = ..., verbose: bool = ..., source_fn: None | str | bytes | os.PathLike[Any] = ..., extension: L[".f", ".f90"] = ..., full_output: L[False] = ...) -> int:
- ...
-
-@overload
-def compile(source: str | bytes, modulename: str = ..., extra_args: str | list[str] = ..., verbose: bool = ..., source_fn: None | str | bytes | os.PathLike[Any] = ..., extension: L[".f", ".f90"] = ..., full_output: L[True] = ...) -> subprocess.CompletedProcess[bytes]:
- ...
-
-def get_include() -> str:
- ...
-
diff --git a/typings/numpy/fft/__init__.pyi b/typings/numpy/fft/__init__.pyi
deleted file mode 100644
index 365c5da..0000000
--- a/typings/numpy/fft/__init__.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy._pytesttester import PytestTester
-from numpy.fft._pocketfft import fft as fft, fft2 as fft2, fftn as fftn, hfft as hfft, ifft as ifft, ifft2 as ifft2, ifftn as ifftn, ihfft as ihfft, irfft as irfft, irfft2 as irfft2, irfftn as irfftn, rfft as rfft, rfft2 as rfft2, rfftn as rfftn
-from numpy.fft.helper import fftfreq as fftfreq, fftshift as fftshift, ifftshift as ifftshift, rfftfreq as rfftfreq
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
diff --git a/typings/numpy/fft/_pocketfft.pyi b/typings/numpy/fft/_pocketfft.pyi
deleted file mode 100644
index 52add9e..0000000
--- a/typings/numpy/fft/_pocketfft.pyi
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Sequence
-from typing import Literal as L
-from numpy import complex128, float64
-from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co
-
-_NormKind = L[None, "backward", "ortho", "forward"]
-__all__: list[str]
-def fft(a: ArrayLike, n: None | int = ..., axis: int = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def ifft(a: ArrayLike, n: None | int = ..., axis: int = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def rfft(a: ArrayLike, n: None | int = ..., axis: int = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def irfft(a: ArrayLike, n: None | int = ..., axis: int = ..., norm: _NormKind = ...) -> NDArray[float64]:
- ...
-
-def hfft(a: _ArrayLikeNumber_co, n: None | int = ..., axis: int = ..., norm: _NormKind = ...) -> NDArray[float64]:
- ...
-
-def ihfft(a: ArrayLike, n: None | int = ..., axis: int = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def fftn(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def ifftn(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def rfftn(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def irfftn(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[float64]:
- ...
-
-def fft2(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def ifft2(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def rfft2(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[complex128]:
- ...
-
-def irfft2(a: ArrayLike, s: None | Sequence[int] = ..., axes: None | Sequence[int] = ..., norm: _NormKind = ...) -> NDArray[float64]:
- ...
-
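The return dtypes promised by the `_pocketfft` stubs above, checked on a toy signal (not part of the patch):

```python
import numpy as np

x = np.array([0.0, 1.0, 0.0, -1.0])

X = np.fft.fft(x)                                # complex128 output
x_back = np.fft.irfft(np.fft.rfft(x), n=len(x))  # float64 round trip

print(X.dtype, x_back.dtype)   # complex128 float64
print(np.allclose(x, x_back))  # True
```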
diff --git a/typings/numpy/fft/helper.pyi b/typings/numpy/fft/helper.pyi
deleted file mode 100644
index b36ac6c..0000000
--- a/typings/numpy/fft/helper.pyi
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, TypeVar, overload
-from numpy import complexfloating, floating, generic, integer
-from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ShapeLike
-
-_SCT = TypeVar("_SCT", bound=generic)
-__all__: list[str]
-@overload
-def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def fftfreq(n: int | integer[Any], d: _ArrayLikeFloat_co = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def fftfreq(n: int | integer[Any], d: _ArrayLikeComplex_co = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def rfftfreq(n: int | integer[Any], d: _ArrayLikeFloat_co = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def rfftfreq(n: int | integer[Any], d: _ArrayLikeComplex_co = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
diff --git a/typings/numpy/lib/__init__.pyi b/typings/numpy/lib/__init__.pyi
deleted file mode 100644
index eb25e41..0000000
--- a/typings/numpy/lib/__init__.pyi
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import math as math
-from typing import Any
-from numpy._pytesttester import PytestTester
-from numpy import ndenumerate as ndenumerate, ndindex as ndindex
-from numpy.version import version
-from numpy.lib import format as format, mixins as mixins, scimath as scimath, stride_tricks as stride_tricks
-from numpy.lib._version import NumpyVersion as NumpyVersion
-from numpy.lib.arraypad import pad as pad
-from numpy.lib.arraysetops import ediff1d as ediff1d, in1d as in1d, intersect1d as intersect1d, isin as isin, setdiff1d as setdiff1d, setxor1d as setxor1d, union1d as union1d, unique as unique
-from numpy.lib.arrayterator import Arrayterator as Arrayterator
-from numpy.lib.function_base import add_docstring as add_docstring, add_newdoc as add_newdoc, add_newdoc_ufunc as add_newdoc_ufunc, angle as angle, append as append, asarray_chkfinite as asarray_chkfinite, average as average, bartlett as bartlett, bincount as bincount, blackman as blackman, copy as copy, corrcoef as corrcoef, cov as cov, delete as delete, diff as diff, digitize as digitize, disp as disp, extract as extract, flip as flip, gradient as gradient, hamming as hamming, hanning as hanning, i0 as i0, insert as insert, interp as interp, iterable as iterable, kaiser as kaiser, median as median, meshgrid as meshgrid, percentile as percentile, piecewise as piecewise, place as place, quantile as quantile, rot90 as rot90, select as select, sinc as sinc, sort_complex as sort_complex, trapz as trapz, trim_zeros as trim_zeros, unwrap as unwrap, vectorize as vectorize
-from numpy.lib.histograms import histogram as histogram, histogram_bin_edges as histogram_bin_edges, histogramdd as histogramdd
-from numpy.lib.index_tricks import c_ as c_, diag_indices as diag_indices, diag_indices_from as diag_indices_from, fill_diagonal as fill_diagonal, index_exp as index_exp, ix_ as ix_, mgrid as mgrid, ogrid as ogrid, r_ as r_, ravel_multi_index as ravel_multi_index, s_ as s_, unravel_index as unravel_index
-from numpy.lib.nanfunctions import nanargmax as nanargmax, nanargmin as nanargmin, nancumprod as nancumprod, nancumsum as nancumsum, nanmax as nanmax, nanmean as nanmean, nanmedian as nanmedian, nanmin as nanmin, nanpercentile as nanpercentile, nanprod as nanprod, nanquantile as nanquantile, nanstd as nanstd, nansum as nansum, nanvar as nanvar
-from numpy.lib.npyio import DataSource as DataSource, fromregex as fromregex, genfromtxt as genfromtxt, load as load, loadtxt as loadtxt, packbits as packbits, recfromcsv as recfromcsv, recfromtxt as recfromtxt, save as save, savetxt as savetxt, savez as savez, savez_compressed as savez_compressed, unpackbits as unpackbits
-from numpy.lib.polynomial import RankWarning as RankWarning, poly as poly, poly1d as poly1d, polyadd as polyadd, polyder as polyder, polydiv as polydiv, polyfit as polyfit, polyint as polyint, polymul as polymul, polysub as polysub, polyval as polyval, roots as roots
-from numpy.lib.shape_base import apply_along_axis as apply_along_axis, apply_over_axes as apply_over_axes, array_split as array_split, column_stack as column_stack, dsplit as dsplit, dstack as dstack, expand_dims as expand_dims, get_array_wrap as get_array_wrap, hsplit as hsplit, kron as kron, put_along_axis as put_along_axis, row_stack as row_stack, split as split, take_along_axis as take_along_axis, tile as tile, vsplit as vsplit
-from numpy.lib.stride_tricks import broadcast_arrays as broadcast_arrays, broadcast_shapes as broadcast_shapes, broadcast_to as broadcast_to
-from numpy.lib.twodim_base import diag as diag, diagflat as diagflat, eye as eye, fliplr as fliplr, flipud as flipud, histogram2d as histogram2d, mask_indices as mask_indices, tri as tri, tril as tril, tril_indices as tril_indices, tril_indices_from as tril_indices_from, triu as triu, triu_indices as triu_indices, triu_indices_from as triu_indices_from, vander as vander
-from numpy.lib.type_check import asfarray as asfarray, common_type as common_type, imag as imag, iscomplex as iscomplex, iscomplexobj as iscomplexobj, isreal as isreal, isrealobj as isrealobj, mintypecode as mintypecode, nan_to_num as nan_to_num, real as real, real_if_close as real_if_close, typename as typename
-from numpy.lib.ufunclike import fix as fix, isneginf as isneginf, isposinf as isposinf
-from numpy.lib.utils import byte_bounds as byte_bounds, deprecate as deprecate, deprecate_with_doc as deprecate_with_doc, get_include as get_include, info as info, issubclass_ as issubclass_, issubdtype as issubdtype, issubsctype as issubsctype, lookfor as lookfor, safe_eval as safe_eval, show_runtime as show_runtime, source as source, who as who
-from numpy.core.multiarray import tracemalloc_domain as tracemalloc_domain
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
-__version__ = ...
-emath = scimath
diff --git a/typings/numpy/lib/_version.pyi b/typings/numpy/lib/_version.pyi
deleted file mode 100644
index 6bb2225..0000000
--- a/typings/numpy/lib/_version.pyi
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-__all__: list[str]
-class NumpyVersion:
- vstring: str
- version: str
- major: int
- minor: int
- bugfix: int
- pre_release: str
- is_devversion: bool
- def __init__(self, vstring: str) -> None:
- ...
-
- def __lt__(self, other: str | NumpyVersion) -> bool:
- ...
-
- def __le__(self, other: str | NumpyVersion) -> bool:
- ...
-
- def __eq__(self, other: str | NumpyVersion) -> bool:
- ...
-
- def __ne__(self, other: str | NumpyVersion) -> bool:
- ...
-
- def __gt__(self, other: str | NumpyVersion) -> bool:
- ...
-
- def __ge__(self, other: str | NumpyVersion) -> bool:
- ...
-
-
-
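Sketch of the comparison protocol `NumpyVersion` implements above; comparisons accept plain version strings, and pre-releases sort before the final release:

```python
import numpy as np
from numpy.lib import NumpyVersion

v = NumpyVersion(np.__version__)
print(v.major, v.minor, v.bugfix)

print(v >= "1.20.0")                         # bool, per __ge__ above
print(NumpyVersion("1.25.0rc1") < "1.25.0")  # True: rc1 precedes the release
```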
diff --git a/typings/numpy/lib/arraypad.pyi b/typings/numpy/lib/arraypad.pyi
deleted file mode 100644
index 76886e1..0000000
--- a/typings/numpy/lib/arraypad.pyi
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, Literal as L, Protocol, TypeVar, overload
-from numpy import generic
-from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt
-
-_SCT = TypeVar("_SCT", bound=generic)
-class _ModeFunc(Protocol):
- def __call__(self, vector: NDArray[Any], iaxis_pad_width: tuple[int, int], iaxis: int, kwargs: dict[str, Any], /) -> None:
- ...
-
-
-
-_ModeKind = L["constant", "edge", "linear_ramp", "maximum", "mean", "median", "minimum", "reflect", "symmetric", "wrap", "empty",]
-__all__: list[str]
-@overload
-def pad(array: _ArrayLike[_SCT], pad_width: _ArrayLikeInt, mode: _ModeKind = ..., *, stat_length: None | _ArrayLikeInt = ..., constant_values: ArrayLike = ..., end_values: ArrayLike = ..., reflect_type: L["odd", "even"] = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def pad(array: ArrayLike, pad_width: _ArrayLikeInt, mode: _ModeKind = ..., *, stat_length: None | _ArrayLikeInt = ..., constant_values: ArrayLike = ..., end_values: ArrayLike = ..., reflect_type: L["odd", "even"] = ...) -> NDArray[Any]:
- ...
-
-@overload
-def pad(array: _ArrayLike[_SCT], pad_width: _ArrayLikeInt, mode: _ModeFunc, **kwargs: Any) -> NDArray[_SCT]:
- ...
-
-@overload
-def pad(array: ArrayLike, pad_width: _ArrayLikeInt, mode: _ModeFunc, **kwargs: Any) -> NDArray[Any]:
- ...
-
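The two `pad` call styles typed above: a named mode and a callable mode (the callable mutates the padded vector in place and returns None, matching `_ModeFunc`):

```python
import numpy as np

a = np.array([1, 2, 3])

print(np.pad(a, (1, 2), mode="constant", constant_values=0))
# [0 1 2 3 0 0]

def nines(vector, iaxis_pad_width, iaxis, kwargs):
    # Fill the leading/trailing pad regions of this axis with 9.
    vector[:iaxis_pad_width[0]] = 9
    vector[len(vector) - iaxis_pad_width[1]:] = 9

print(np.pad(a, (1, 2), nines))
# [9 1 2 3 9 9]
```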
diff --git a/typings/numpy/lib/arraysetops.pyi b/typings/numpy/lib/arraysetops.pyi
deleted file mode 100644
index 2acd534..0000000
--- a/typings/numpy/lib/arraysetops.pyi
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, Literal as L, SupportsIndex, TypeVar, overload
-from numpy import bool_, byte, bytes_, cdouble, clongdouble, csingle, datetime64, double, generic, half, int8, int_, intc, intp, longdouble, longlong, number, object_, short, single, str_, timedelta64, ubyte, uint, uintc, ulonglong, ushort, void
-from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeDT64_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co
-
-_SCT = TypeVar("_SCT", bound=generic)
-_NumberType = TypeVar("_NumberType", bound=number[Any])
-_SCTNoCast = TypeVar("_SCTNoCast", bool_, ushort, ubyte, uintc, uint, ulonglong, short, byte, intc, int_, longlong, half, single, double, longdouble, csingle, cdouble, clongdouble, timedelta64, datetime64, object_, str_, bytes_, void)
-__all__: list[str]
-@overload
-def ediff1d(ary: _ArrayLikeBool_co, to_end: None | ArrayLike = ..., to_begin: None | ArrayLike = ...) -> NDArray[int8]:
- ...
-
-@overload
-def ediff1d(ary: _ArrayLike[_NumberType], to_end: None | ArrayLike = ..., to_begin: None | ArrayLike = ...) -> NDArray[_NumberType]:
- ...
-
-@overload
-def ediff1d(ary: _ArrayLikeNumber_co, to_end: None | ArrayLike = ..., to_begin: None | ArrayLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def ediff1d(ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co, to_end: None | ArrayLike = ..., to_begin: None | ArrayLike = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def ediff1d(ary: _ArrayLikeObject_co, to_end: None | ArrayLike = ..., to_begin: None | ArrayLike = ...) -> NDArray[object_]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[False] = ..., return_inverse: L[False] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[False] = ..., return_inverse: L[False] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> NDArray[Any]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[True] = ..., return_inverse: L[False] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[_SCT], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[True] = ..., return_inverse: L[False] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[Any], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[False] = ..., return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[_SCT], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[False] = ..., return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[Any], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[False] = ..., return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[_SCT], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[False] = ..., return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[Any], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[True] = ..., return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[True] = ..., return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[True] = ..., return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[True] = ..., return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[False] = ..., return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[False] = ..., return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: _ArrayLike[_SCT], return_index: L[True] = ..., return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def unique(ar: ArrayLike, return_index: L[True] = ..., return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., *, equal_nan: bool = ...) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def intersect1d(ar1: _ArrayLike[_SCTNoCast], ar2: _ArrayLike[_SCTNoCast], assume_unique: bool = ..., return_indices: L[False] = ...) -> NDArray[_SCTNoCast]:
- ...
-
-@overload
-def intersect1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = ..., return_indices: L[False] = ...) -> NDArray[Any]:
- ...
-
-@overload
-def intersect1d(ar1: _ArrayLike[_SCTNoCast], ar2: _ArrayLike[_SCTNoCast], assume_unique: bool = ..., return_indices: L[True] = ...) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def intersect1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = ..., return_indices: L[True] = ...) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def setxor1d(ar1: _ArrayLike[_SCTNoCast], ar2: _ArrayLike[_SCTNoCast], assume_unique: bool = ...) -> NDArray[_SCTNoCast]:
- ...
-
-@overload
-def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = ...) -> NDArray[Any]:
- ...
-
-def in1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = ..., invert: bool = ...) -> NDArray[bool_]:
- ...
-
-def isin(element: ArrayLike, test_elements: ArrayLike, assume_unique: bool = ..., invert: bool = ..., *, kind: None | str = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def union1d(ar1: _ArrayLike[_SCTNoCast], ar2: _ArrayLike[_SCTNoCast]) -> NDArray[_SCTNoCast]:
- ...
-
-@overload
-def union1d(ar1: ArrayLike, ar2: ArrayLike) -> NDArray[Any]:
- ...
-
-@overload
-def setdiff1d(ar1: _ArrayLike[_SCTNoCast], ar2: _ArrayLike[_SCTNoCast], assume_unique: bool = ...) -> NDArray[_SCTNoCast]:
- ...
-
-@overload
-def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = ...) -> NDArray[Any]:
- ...
-
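The `unique` and `intersect1d` overloads above, exercised once (illustrative values):

```python
import numpy as np

a = np.array([1, 1, 2, 3, 3, 3])

values, counts = np.unique(a, return_counts=True)
print(values, counts)  # [1 2 3] [1 2 3]

common, ia, ib = np.intersect1d(a, [3, 4, 1], return_indices=True)
print(common, ia, ib)  # [1 3] [0 3] [2 0]
```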
diff --git a/typings/numpy/lib/arrayterator.pyi b/typings/numpy/lib/arrayterator.pyi
deleted file mode 100644
index ba8ad4c..0000000
--- a/typings/numpy/lib/arrayterator.pyi
+++ /dev/null
@@ -1,47 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Generator
-from typing import Any, TypeVar, Union, overload
-from numpy import dtype, generic, ndarray
-from numpy._typing import DTypeLike
-
-_Shape = TypeVar("_Shape", bound=Any)
-_DType = TypeVar("_DType", bound=dtype[Any])
-_ScalarType = TypeVar("_ScalarType", bound=generic)
-_Index = Union[Union[ellipsis, int, slice], tuple[Union[ellipsis, int, slice], ...],]
-__all__: list[str]
-class Arrayterator(ndarray[_Shape, _DType]):
- var: ndarray[_Shape, _DType]
- buf_size: None | int
- start: list[int]
- stop: list[int]
- step: list[int]
- @property
- def shape(self) -> tuple[int, ...]:
- ...
-
- @property
- def flat(self: ndarray[Any, dtype[_ScalarType]]) -> Generator[_ScalarType, None, None]:
- ...
-
- def __init__(self, var: ndarray[_Shape, _DType], buf_size: None | int = ...) -> None:
- ...
-
- @overload
- def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]:
- ...
-
- @overload
- def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]:
- ...
-
- def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]:
- ...
-
- def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]:
- ...
-
-
-
diff --git a/typings/numpy/lib/format.pyi b/typings/numpy/lib/format.pyi
deleted file mode 100644
index 34ae354..0000000
--- a/typings/numpy/lib/format.pyi
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Final, Literal
-
-__all__: list[str]
-EXPECTED_KEYS: Final[set[str]]
-MAGIC_PREFIX: Final[bytes]
-MAGIC_LEN: Literal[8]
-ARRAY_ALIGN: Literal[64]
-BUFFER_SIZE: Literal[262144]
-def magic(major, minor):
- ...
-
-def read_magic(fp):
- ...
-
-def dtype_to_descr(dtype):
- ...
-
-def descr_to_dtype(descr):
- ...
-
-def header_data_from_array_1_0(array):
- ...
-
-def write_array_header_1_0(fp, d):
- ...
-
-def write_array_header_2_0(fp, d):
- ...
-
-def read_array_header_1_0(fp):
- ...
-
-def read_array_header_2_0(fp):
- ...
-
-def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...):
- ...
-
-def read_array(fp, allow_pickle=..., pickle_kwargs=...):
- ...
-
-def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...):
- ...
-
diff --git a/typings/numpy/lib/function_base.pyi b/typings/numpy/lib/function_base.pyi
deleted file mode 100644
index 672b602..0000000
--- a/typings/numpy/lib/function_base.pyi
+++ /dev/null
@@ -1,382 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import sys
-from collections.abc import Callable, Iterable, Iterator, Sequence
-from typing import Any, Literal as L, Protocol, SupportsIndex, SupportsInt, TypeGuard, TypeVar, overload
-from numpy import _OrderKACF, complex128, complexfloating, datetime64, float64, floating, generic, intp, object_, timedelta64, ufunc
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _ArrayLikeComplex_co, _ArrayLikeDT64_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, _ComplexLike_co, _DTypeLike, _FloatLike_co, _ScalarLike_co, _ShapeLike
-
-if sys.version_info >= (3, 10):
- ...
-else:
- ...
-_T = TypeVar("_T")
-_T_co = TypeVar("_T_co", covariant=True)
-_SCT = TypeVar("_SCT", bound=generic)
-_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
-_2Tuple = tuple[_T, _T]
-class _TrimZerosSequence(Protocol[_T_co]):
- def __len__(self) -> int:
- ...
-
- def __getitem__(self, key: slice, /) -> _T_co:
- ...
-
- def __iter__(self) -> Iterator[Any]:
- ...
-
-
-
-class _SupportsWriteFlush(Protocol):
- def write(self, s: str, /) -> object:
- ...
-
- def flush(self) -> object:
- ...
-
-
-
-__all__: list[str]
-def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None:
- ...
-
-@overload
-def rot90(m: _ArrayLike[_SCT], k: int = ..., axes: tuple[int, int] = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def rot90(m: ArrayLike, k: int = ..., axes: tuple[int, int] = ...) -> NDArray[Any]:
- ...
-
-@overload
-def flip(m: _SCT, axis: None = ...) -> _SCT:
- ...
-
-@overload
-def flip(m: _ScalarLike_co, axis: None = ...) -> Any:
- ...
-
-@overload
-def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]:
- ...
-
-def iterable(y: object) -> TypeGuard[Iterable[Any]]:
- ...
-
-@overload
-def average(a: _ArrayLikeFloat_co, axis: None = ..., weights: None | _ArrayLikeFloat_co = ..., returned: L[False] = ..., keepdims: L[False] = ...) -> floating[Any]:
- ...
-
-@overload
-def average(a: _ArrayLikeComplex_co, axis: None = ..., weights: None | _ArrayLikeComplex_co = ..., returned: L[False] = ..., keepdims: L[False] = ...) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def average(a: _ArrayLikeObject_co, axis: None = ..., weights: None | Any = ..., returned: L[False] = ..., keepdims: L[False] = ...) -> Any:
- ...
-
-@overload
-def average(a: _ArrayLikeFloat_co, axis: None = ..., weights: None | _ArrayLikeFloat_co = ..., returned: L[True] = ..., keepdims: L[False] = ...) -> _2Tuple[floating[Any]]:
- ...
-
-@overload
-def average(a: _ArrayLikeComplex_co, axis: None = ..., weights: None | _ArrayLikeComplex_co = ..., returned: L[True] = ..., keepdims: L[False] = ...) -> _2Tuple[complexfloating[Any, Any]]:
- ...
-
-@overload
-def average(a: _ArrayLikeObject_co, axis: None = ..., weights: None | Any = ..., returned: L[True] = ..., keepdims: L[False] = ...) -> _2Tuple[Any]:
- ...
-
-@overload
-def average(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., weights: None | Any = ..., returned: L[False] = ..., keepdims: bool = ...) -> Any:
- ...
-
-@overload
-def average(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., weights: None | Any = ..., returned: L[True] = ..., keepdims: bool = ...) -> _2Tuple[Any]:
- ...
-
-@overload
-def asarray_chkfinite(a: _ArrayLike[_SCT], dtype: None = ..., order: _OrderKACF = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asarray_chkfinite(a: object, dtype: None = ..., order: _OrderKACF = ...) -> NDArray[Any]:
- ...
-
-@overload
-def asarray_chkfinite(a: Any, dtype: _DTypeLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def asarray_chkfinite(a: Any, dtype: DTypeLike, order: _OrderKACF = ...) -> NDArray[Any]:
- ...
-
-@overload
-def piecewise(x: _ArrayLike[_SCT], condlist: ArrayLike, funclist: Sequence[Any | Callable[..., Any]], *args: Any, **kw: Any) -> NDArray[_SCT]:
- ...
-
-@overload
-def piecewise(x: ArrayLike, condlist: ArrayLike, funclist: Sequence[Any | Callable[..., Any]], *args: Any, **kw: Any) -> NDArray[Any]:
- ...
-
-def select(condlist: Sequence[ArrayLike], choicelist: Sequence[ArrayLike], default: ArrayLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def copy(a: _ArrayType, order: _OrderKACF, subok: L[True]) -> _ArrayType:
- ...
-
-@overload
-def copy(a: _ArrayType, order: _OrderKACF = ..., *, subok: L[True]) -> _ArrayType:
- ...
-
-@overload
-def copy(a: _ArrayLike[_SCT], order: _OrderKACF = ..., subok: L[False] = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def copy(a: ArrayLike, order: _OrderKACF = ..., subok: L[False] = ...) -> NDArray[Any]:
- ...
-
-def gradient(f: ArrayLike, *varargs: ArrayLike, axis: None | _ShapeLike = ..., edge_order: L[1, 2] = ...) -> Any:
- ...
-
-@overload
-def diff(a: _T, n: L[0], axis: SupportsIndex = ..., prepend: ArrayLike = ..., append: ArrayLike = ...) -> _T:
- ...
-
-@overload
-def diff(a: ArrayLike, n: int = ..., axis: SupportsIndex = ..., prepend: ArrayLike = ..., append: ArrayLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def interp(x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: None | _FloatLike_co = ..., right: None | _FloatLike_co = ..., period: None | _FloatLike_co = ...) -> NDArray[float64]:
- ...
-
-@overload
-def interp(x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, fp: _ArrayLikeComplex_co, left: None | _ComplexLike_co = ..., right: None | _ComplexLike_co = ..., period: None | _FloatLike_co = ...) -> NDArray[complex128]:
- ...
-
-@overload
-def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]:
- ...
-
-@overload
-def angle(z: object_, deg: bool = ...) -> Any:
- ...
-
-@overload
-def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]:
- ...
-
-@overload
-def unwrap(p: _ArrayLikeFloat_co, discont: None | float = ..., axis: int = ..., *, period: float = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def unwrap(p: _ArrayLikeObject_co, discont: None | float = ..., axis: int = ..., *, period: float = ...) -> NDArray[object_]:
- ...
-
-def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-def trim_zeros(filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = ...) -> _T:
- ...
-
-@overload
-def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]:
- ...
-
-@overload
-def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]:
- ...
-
-def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None:
- ...
-
-def disp(mesg: object, device: None | _SupportsWriteFlush = ..., linefeed: bool = ...) -> None:
- ...
-
-@overload
-def cov(m: _ArrayLikeFloat_co, y: None | _ArrayLikeFloat_co = ..., rowvar: bool = ..., bias: bool = ..., ddof: None | SupportsIndex | SupportsInt = ..., fweights: None | ArrayLike = ..., aweights: None | ArrayLike = ..., *, dtype: None = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def cov(m: _ArrayLikeComplex_co, y: None | _ArrayLikeComplex_co = ..., rowvar: bool = ..., bias: bool = ..., ddof: None | SupportsIndex | SupportsInt = ..., fweights: None | ArrayLike = ..., aweights: None | ArrayLike = ..., *, dtype: None = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def cov(m: _ArrayLikeComplex_co, y: None | _ArrayLikeComplex_co = ..., rowvar: bool = ..., bias: bool = ..., ddof: None | SupportsIndex | SupportsInt = ..., fweights: None | ArrayLike = ..., aweights: None | ArrayLike = ..., *, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]:
- ...
-
-@overload
-def cov(m: _ArrayLikeComplex_co, y: None | _ArrayLikeComplex_co = ..., rowvar: bool = ..., bias: bool = ..., ddof: None | SupportsIndex | SupportsInt = ..., fweights: None | ArrayLike = ..., aweights: None | ArrayLike = ..., *, dtype: DTypeLike) -> NDArray[Any]:
- ...
-
-@overload
-def corrcoef(m: _ArrayLikeFloat_co, y: None | _ArrayLikeFloat_co = ..., rowvar: bool = ..., *, dtype: None = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def corrcoef(m: _ArrayLikeComplex_co, y: None | _ArrayLikeComplex_co = ..., rowvar: bool = ..., *, dtype: None = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def corrcoef(m: _ArrayLikeComplex_co, y: None | _ArrayLikeComplex_co = ..., rowvar: bool = ..., *, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]:
- ...
-
-@overload
-def corrcoef(m: _ArrayLikeComplex_co, y: None | _ArrayLikeComplex_co = ..., rowvar: bool = ..., *, dtype: DTypeLike) -> NDArray[Any]:
- ...
-
-def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]:
- ...
-
-def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]:
- ...
-
-def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]:
- ...
-
-def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]:
- ...
-
-def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def sinc(x: _FloatLike_co) -> floating[Any]:
- ...
-
-@overload
-def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def median(a: _ArrayLikeFloat_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., keepdims: L[False] = ...) -> floating[Any]:
- ...
-
-@overload
-def median(a: _ArrayLikeComplex_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., keepdims: L[False] = ...) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def median(a: _ArrayLikeTD64_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., keepdims: L[False] = ...) -> timedelta64:
- ...
-
-@overload
-def median(a: _ArrayLikeObject_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., keepdims: L[False] = ...) -> Any:
- ...
-
-@overload
-def median(a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., out: None = ..., overwrite_input: bool = ..., keepdims: bool = ...) -> Any:
- ...
-
-@overload
-def median(a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., out: _ArrayType = ..., overwrite_input: bool = ..., keepdims: bool = ...) -> _ArrayType:
- ...
-
-_MethodKind = L["inverted_cdf", "averaged_inverted_cdf", "closest_observation", "interpolated_inverted_cdf", "hazen", "weibull", "linear", "median_unbiased", "normal_unbiased", "lower", "higher", "midpoint", "nearest",]
-@overload
-def percentile(a: _ArrayLikeFloat_co, q: _FloatLike_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> floating[Any]:
- ...
-
-@overload
-def percentile(a: _ArrayLikeComplex_co, q: _FloatLike_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def percentile(a: _ArrayLikeTD64_co, q: _FloatLike_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> timedelta64:
- ...
-
-@overload
-def percentile(a: _ArrayLikeDT64_co, q: _FloatLike_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> datetime64:
- ...
-
-@overload
-def percentile(a: _ArrayLikeObject_co, q: _FloatLike_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> Any:
- ...
-
-@overload
-def percentile(a: _ArrayLikeFloat_co, q: _ArrayLikeFloat_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def percentile(a: _ArrayLikeComplex_co, q: _ArrayLikeFloat_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def percentile(a: _ArrayLikeTD64_co, q: _ArrayLikeFloat_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> NDArray[timedelta64]:
- ...
-
-@overload
-def percentile(a: _ArrayLikeDT64_co, q: _ArrayLikeFloat_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> NDArray[datetime64]:
- ...
-
-@overload
-def percentile(a: _ArrayLikeObject_co, q: _ArrayLikeFloat_co, axis: None = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: L[False] = ...) -> NDArray[object_]:
- ...
-
-@overload
-def percentile(a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, axis: None | _ShapeLike = ..., out: None = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: bool = ...) -> Any:
- ...
-
-@overload
-def percentile(a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, axis: None | _ShapeLike = ..., out: _ArrayType = ..., overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: bool = ...) -> _ArrayType:
- ...
-
-quantile = ...
-def trapz(y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ..., dx: float = ..., axis: SupportsIndex = ...) -> Any:
- ...
-
-def meshgrid(*xi: ArrayLike, copy: bool = ..., sparse: bool = ..., indexing: L["xy", "ij"] = ...) -> list[NDArray[Any]]:
- ...
-
-@overload
-def delete(arr: _ArrayLike[_SCT], obj: slice | _ArrayLikeInt_co, axis: None | SupportsIndex = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def delete(arr: ArrayLike, obj: slice | _ArrayLikeInt_co, axis: None | SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-@overload
-def insert(arr: _ArrayLike[_SCT], obj: slice | _ArrayLikeInt_co, values: ArrayLike, axis: None | SupportsIndex = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def insert(arr: ArrayLike, obj: slice | _ArrayLikeInt_co, values: ArrayLike, axis: None | SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-def append(arr: ArrayLike, values: ArrayLike, axis: None | SupportsIndex = ...) -> NDArray[Any]:
- ...
-
-@overload
-def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = ...) -> intp:
- ...
-
-@overload
-def digitize(x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, right: bool = ...) -> NDArray[intp]:
- ...
-
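The percentile and median overloads above encode a simple rule: a scalar q with the default axis resolves to a NumPy scalar, an array-like q resolves to an ndarray, and supplying out returns the out array itself. A brief usage sketch, assuming standard NumPy runtime behaviour:

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])

# Scalar q matches the _FloatLike_co overload and yields a NumPy scalar.
med = np.median(a)                       # 2.5
p50 = np.percentile(a, 50.0)             # 2.5

# Array-like q matches the NDArray-returning overload.
quartiles = np.percentile(a, [25, 75])   # array([1.75, 3.25])

# digitize follows the same scalar/array split: scalar in -> intp, array in -> NDArray[intp].
bins = np.array([0.0, 2.0, 4.0])
print(np.digitize(2.5, bins), np.digitize(a, bins))
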
diff --git a/typings/numpy/lib/histograms.pyi b/typings/numpy/lib/histograms.pyi
deleted file mode 100644
index cd7dc4b..0000000
--- a/typings/numpy/lib/histograms.pyi
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Sequence
-from typing import Any, Literal as L, SupportsIndex
-from numpy._typing import ArrayLike, NDArray
-
-_BinKind = L["stone", "auto", "doane", "fd", "rice", "scott", "sqrt", "sturges",]
-__all__: list[str]
-def histogram_bin_edges(a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = ..., range: None | tuple[float, float] = ..., weights: None | ArrayLike = ...) -> NDArray[Any]:
- ...
-
-def histogram(a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = ..., range: None | tuple[float, float] = ..., density: bool = ..., weights: None | ArrayLike = ...) -> tuple[NDArray[Any], NDArray[Any]]:
- ...
-
-def histogramdd(sample: ArrayLike, bins: SupportsIndex | ArrayLike = ..., range: Sequence[tuple[float, float]] = ..., density: None | bool = ..., weights: None | ArrayLike = ...) -> tuple[NDArray[Any], list[NDArray[Any]]]:
- ...
-
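A brief usage sketch of the histogram signatures above: bins accepts a bin-selection rule from _BinKind, an integer count, or explicit edges, which is why it is typed as _BinKind | SupportsIndex | ArrayLike. Assuming standard NumPy behaviour:

import numpy as np

data = np.random.default_rng(0).normal(size=1_000)

edges = np.histogram_bin_edges(data, bins="auto")             # rule name from _BinKind
counts, edges10 = np.histogram(data, bins=10, range=(-3, 3))  # integer bin count
hist3, edge_list = np.histogramdd(
    np.random.default_rng(1).uniform(size=(100, 3)), bins=(4, 4, 4)
)
print(counts.shape, edges10.shape, hist3.shape, len(edge_list))  # (10,) (11,) (4, 4, 4) 3
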
diff --git a/typings/numpy/lib/index_tricks.pyi b/typings/numpy/lib/index_tricks.pyi
deleted file mode 100644
index 16da188..0000000
--- a/typings/numpy/lib/index_tricks.pyi
+++ /dev/null
@@ -1,151 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Sequence
-from typing import Any, Generic, Literal, SupportsIndex, TypeVar, overload
-from numpy import bool_, bytes_, complex_, dtype, float_, int_, matrix as _Matrix, ndarray, str_
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _FiniteNestedSequence, _NestedSequence, _SupportsDType
-
-_T = TypeVar("_T")
-_DType = TypeVar("_DType", bound=dtype[Any])
-_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
-_TupType = TypeVar("_TupType", bound=tuple[Any, ...])
-_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
-__all__: list[str]
-@overload
-def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]:
- ...
-
-@overload
-def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]:
- ...
-
-@overload
-def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]:
- ...
-
-@overload
-def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]:
- ...
-
-@overload
-def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]:
- ...
-
-@overload
-def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]:
- ...
-
-@overload
-def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]:
- ...
-
-class nd_grid(Generic[_BoolType]):
- sparse: _BoolType
- def __init__(self, sparse: _BoolType = ...) -> None:
- ...
-
- @overload
- def __getitem__(self: nd_grid[Literal[False]], key: slice | Sequence[slice]) -> NDArray[Any]:
- ...
-
- @overload
- def __getitem__(self: nd_grid[Literal[True]], key: slice | Sequence[slice]) -> list[NDArray[Any]]:
- ...
-
-
-
-class MGridClass(nd_grid[Literal[False]]):
- def __init__(self) -> None:
- ...
-
-
-
-mgrid: MGridClass
-class OGridClass(nd_grid[Literal[True]]):
- def __init__(self) -> None:
- ...
-
-
-
-ogrid: OGridClass
-class AxisConcatenator:
- axis: int
- matrix: bool
- ndmin: int
- trans1d: int
- def __init__(self, axis: int = ..., matrix: bool = ..., ndmin: int = ..., trans1d: int = ...) -> None:
- ...
-
- @staticmethod
- @overload
- def concatenate(*a: ArrayLike, axis: SupportsIndex = ..., out: None = ...) -> NDArray[Any]:
- ...
-
- @staticmethod
- @overload
- def concatenate(*a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...) -> _ArrayType:
- ...
-
- @staticmethod
- def makemat(data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...) -> _Matrix[Any, Any]:
- ...
-
- def __getitem__(self, key: Any) -> Any:
- ...
-
-
-
-class RClass(AxisConcatenator):
- axis: Literal[0]
- matrix: Literal[False]
- ndmin: Literal[1]
- trans1d: Literal[-1]
- def __init__(self) -> None:
- ...
-
-
-
-r_: RClass
-class CClass(AxisConcatenator):
- axis: Literal[-1]
- matrix: Literal[False]
- ndmin: Literal[2]
- trans1d: Literal[0]
- def __init__(self) -> None:
- ...
-
-
-
-c_: CClass
-class IndexExpression(Generic[_BoolType]):
- maketuple: _BoolType
- def __init__(self, maketuple: _BoolType) -> None:
- ...
-
- @overload
- def __getitem__(self, item: _TupType) -> _TupType:
- ...
-
- @overload
- def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]:
- ...
-
- @overload
- def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T:
- ...
-
-
-
-index_exp: IndexExpression[Literal[True]]
-s_: IndexExpression[Literal[False]]
-def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None:
- ...
-
-def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]:
- ...
-
-def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]:
- ...
-
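A short sketch of what these index helpers do at runtime: r_ and c_ are AxisConcatenator instances, mgrid and ogrid are the dense and sparse nd_grid variants, and s_ / index_exp are the two IndexExpression singletons. Assuming standard NumPy behaviour:

import numpy as np

row = np.r_[1:4, 0, 4]                             # array([1, 2, 3, 0, 4])
cols = np.c_[np.array([1, 2]), np.array([3, 4])]   # shape (2, 2), per ndmin=2 / trans1d=0

dense = np.mgrid[0:2, 0:3]                         # nd_grid[False]: shape (2, 2, 3)
open_x, open_y = np.ogrid[0:2, 0:3]                # nd_grid[True]: shapes (2, 1) and (1, 3)

idx = np.s_[1:3]                                   # slice(1, 3, None); index_exp wraps it in a tuple
print(row, dense.shape, open_x.shape, open_y.shape, idx)
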
diff --git a/typings/numpy/lib/mixins.pyi b/typings/numpy/lib/mixins.pyi
deleted file mode 100644
index 576250d..0000000
--- a/typings/numpy/lib/mixins.pyi
+++ /dev/null
@@ -1,169 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from abc import ABCMeta, abstractmethod
-from typing import Any, Literal as L
-from numpy import ufunc
-
-__all__: list[str]
-class NDArrayOperatorsMixin(metaclass=ABCMeta):
- @abstractmethod
- def __array_ufunc__(self, ufunc: ufunc, method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"], *inputs: Any, **kwargs: Any) -> Any:
- ...
-
- def __lt__(self, other: Any) -> Any:
- ...
-
- def __le__(self, other: Any) -> Any:
- ...
-
- def __eq__(self, other: Any) -> Any:
- ...
-
- def __ne__(self, other: Any) -> Any:
- ...
-
- def __gt__(self, other: Any) -> Any:
- ...
-
- def __ge__(self, other: Any) -> Any:
- ...
-
- def __add__(self, other: Any) -> Any:
- ...
-
- def __radd__(self, other: Any) -> Any:
- ...
-
- def __iadd__(self, other: Any) -> Any:
- ...
-
- def __sub__(self, other: Any) -> Any:
- ...
-
- def __rsub__(self, other: Any) -> Any:
- ...
-
- def __isub__(self, other: Any) -> Any:
- ...
-
- def __mul__(self, other: Any) -> Any:
- ...
-
- def __rmul__(self, other: Any) -> Any:
- ...
-
- def __imul__(self, other: Any) -> Any:
- ...
-
- def __matmul__(self, other: Any) -> Any:
- ...
-
- def __rmatmul__(self, other: Any) -> Any:
- ...
-
- def __imatmul__(self, other: Any) -> Any:
- ...
-
- def __truediv__(self, other: Any) -> Any:
- ...
-
- def __rtruediv__(self, other: Any) -> Any:
- ...
-
- def __itruediv__(self, other: Any) -> Any:
- ...
-
- def __floordiv__(self, other: Any) -> Any:
- ...
-
- def __rfloordiv__(self, other: Any) -> Any:
- ...
-
- def __ifloordiv__(self, other: Any) -> Any:
- ...
-
- def __mod__(self, other: Any) -> Any:
- ...
-
- def __rmod__(self, other: Any) -> Any:
- ...
-
- def __imod__(self, other: Any) -> Any:
- ...
-
- def __divmod__(self, other: Any) -> Any:
- ...
-
- def __rdivmod__(self, other: Any) -> Any:
- ...
-
- def __pow__(self, other: Any) -> Any:
- ...
-
- def __rpow__(self, other: Any) -> Any:
- ...
-
- def __ipow__(self, other: Any) -> Any:
- ...
-
- def __lshift__(self, other: Any) -> Any:
- ...
-
- def __rlshift__(self, other: Any) -> Any:
- ...
-
- def __ilshift__(self, other: Any) -> Any:
- ...
-
- def __rshift__(self, other: Any) -> Any:
- ...
-
- def __rrshift__(self, other: Any) -> Any:
- ...
-
- def __irshift__(self, other: Any) -> Any:
- ...
-
- def __and__(self, other: Any) -> Any:
- ...
-
- def __rand__(self, other: Any) -> Any:
- ...
-
- def __iand__(self, other: Any) -> Any:
- ...
-
- def __xor__(self, other: Any) -> Any:
- ...
-
- def __rxor__(self, other: Any) -> Any:
- ...
-
- def __ixor__(self, other: Any) -> Any:
- ...
-
- def __or__(self, other: Any) -> Any:
- ...
-
- def __ror__(self, other: Any) -> Any:
- ...
-
- def __ior__(self, other: Any) -> Any:
- ...
-
- def __neg__(self) -> Any:
- ...
-
- def __pos__(self) -> Any:
- ...
-
- def __abs__(self) -> Any:
- ...
-
- def __invert__(self) -> Any:
- ...
-
-
-
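NDArrayOperatorsMixin supplies every operator method above in terms of a single __array_ufunc__ hook. A minimal subclassing sketch; the Wrapped class is a hypothetical example, not part of NumPy:

import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin

class Wrapped(NDArrayOperatorsMixin):
    """Toy container that defers all arithmetic to a wrapped ndarray."""
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap Wrapped inputs, apply the ufunc, and re-wrap the result.
        unwrapped = [x.value if isinstance(x, Wrapped) else x for x in inputs]
        return Wrapped(getattr(ufunc, method)(*unwrapped, **kwargs))

a = Wrapped([1.0, 2.0, 3.0])
b = a + 1                  # __add__ comes from the mixin and routes through __array_ufunc__
print(b.value)             # [2. 3. 4.]
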
diff --git a/typings/numpy/lib/nanfunctions.pyi b/typings/numpy/lib/nanfunctions.pyi
deleted file mode 100644
index 4a1b63c..0000000
--- a/typings/numpy/lib/nanfunctions.pyi
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-__all__: list[str]
-nanmin = ...
-nanmax = ...
-nanargmin = ...
-nanargmax = ...
-nansum = ...
-nanprod = ...
-nancumsum = ...
-nancumprod = ...
-nanmean = ...
-nanvar = ...
-nanstd = ...
-nanmedian = ...
-nanpercentile = ...
-nanquantile = ...
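These names are left as untyped re-exports in the generated stub (nanmin = ... and so on); at runtime each mirrors its plain counterpart while ignoring NaN entries. A quick sketch:

import numpy as np

x = np.array([1.0, np.nan, 3.0])
print(np.nanmean(x), np.nansum(x), np.nanmax(x))   # 2.0 4.0 3.0
print(np.nanpercentile(x, 50), np.nanargmax(x))    # 2.0 2
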
diff --git a/typings/numpy/lib/npyio.pyi b/typings/numpy/lib/npyio.pyi
deleted file mode 100644
index 7298d32..0000000
--- a/typings/numpy/lib/npyio.pyi
+++ /dev/null
@@ -1,170 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-import zipfile
-import types
-from re import Pattern
-from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence
-from typing import Any, Generic, IO, Literal as L, Protocol, TypeVar, overload
-from numpy import dtype, float64, generic, recarray, record, void
-from numpy.ma.mrecords import MaskedRecords
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc
-
-_T = TypeVar("_T")
-_T_contra = TypeVar("_T_contra", contravariant=True)
-_T_co = TypeVar("_T_co", covariant=True)
-_SCT = TypeVar("_SCT", bound=generic)
-_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
-_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
-class _SupportsGetItem(Protocol[_T_contra, _T_co]):
- def __getitem__(self, key: _T_contra, /) -> _T_co:
- ...
-
-
-
-class _SupportsRead(Protocol[_CharType_co]):
- def read(self) -> _CharType_co:
- ...
-
-
-
-class _SupportsReadSeek(Protocol[_CharType_co]):
- def read(self, n: int, /) -> _CharType_co:
- ...
-
- def seek(self, offset: int, whence: int, /) -> object:
- ...
-
-
-
-class _SupportsWrite(Protocol[_CharType_contra]):
- def write(self, s: _CharType_contra, /) -> object:
- ...
-
-
-
-__all__: list[str]
-class BagObj(Generic[_T_co]):
- def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None:
- ...
-
- def __getattribute__(self, key: str) -> _T_co:
- ...
-
- def __dir__(self) -> list[str]:
- ...
-
-
-
-class NpzFile(Mapping[str, NDArray[Any]]):
- zip: zipfile.ZipFile
- fid: None | IO[str]
- files: list[str]
- allow_pickle: bool
- pickle_kwargs: None | Mapping[str, Any]
- _MAX_REPR_ARRAY_COUNT: int
- @property
- def f(self: _T) -> BagObj[_T]:
- ...
-
- @f.setter
- def f(self: _T, value: BagObj[_T]) -> None:
- ...
-
- def __init__(self, fid: IO[str], own_fid: bool = ..., allow_pickle: bool = ..., pickle_kwargs: None | Mapping[str, Any] = ...) -> None:
- ...
-
- def __enter__(self: _T) -> _T:
- ...
-
- def __exit__(self, exc_type: None | type[BaseException], exc_value: None | BaseException, traceback: None | types.TracebackType, /) -> None:
- ...
-
- def close(self) -> None:
- ...
-
- def __del__(self) -> None:
- ...
-
- def __iter__(self) -> Iterator[str]:
- ...
-
- def __len__(self) -> int:
- ...
-
- def __getitem__(self, key: str) -> NDArray[Any]:
- ...
-
- def __contains__(self, key: str) -> bool:
- ...
-
- def __repr__(self) -> str:
- ...
-
-
-
-def load(file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes], mmap_mode: L[None, "r+", "r", "w+", "c"] = ..., allow_pickle: bool = ..., fix_imports: bool = ..., encoding: L["ASCII", "latin1", "bytes"] = ...) -> Any:
- ...
-
-def save(file: str | os.PathLike[str] | _SupportsWrite[bytes], arr: ArrayLike, allow_pickle: bool = ..., fix_imports: bool = ...) -> None:
- ...
-
-def savez(file: str | os.PathLike[str] | _SupportsWrite[bytes], *args: ArrayLike, **kwds: ArrayLike) -> None:
- ...
-
-def savez_compressed(file: str | os.PathLike[str] | _SupportsWrite[bytes], *args: ArrayLike, **kwds: ArrayLike) -> None:
- ...
-
-@overload
-def loadtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: None = ..., comments: None | str | Sequence[str] = ..., delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., usecols: int | Sequence[int] = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., max_rows: None | int = ..., *, quotechar: None | str = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def loadtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: _DTypeLike[_SCT], comments: None | str | Sequence[str] = ..., delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., usecols: int | Sequence[int] = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., max_rows: None | int = ..., *, quotechar: None | str = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def loadtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: DTypeLike, comments: None | str | Sequence[str] = ..., delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., usecols: int | Sequence[int] = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., max_rows: None | int = ..., *, quotechar: None | str = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-def savetxt(fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes], X: ArrayLike, fmt: str | Sequence[str] = ..., delimiter: str = ..., newline: str = ..., header: str = ..., footer: str = ..., comments: str = ..., encoding: None | str = ...) -> None:
- ...
-
-@overload
-def fromregex(file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], regexp: str | bytes | Pattern[Any], dtype: _DTypeLike[_SCT], encoding: None | str = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def fromregex(file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], regexp: str | bytes | Pattern[Any], dtype: DTypeLike, encoding: None | str = ...) -> NDArray[Any]:
- ...
-
-@overload
-def genfromtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: None = ..., comments: str = ..., delimiter: None | str | int | Iterable[int] = ..., skip_header: int = ..., skip_footer: int = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., missing_values: Any = ..., filling_values: Any = ..., usecols: None | Sequence[int] = ..., names: L[None, True] | str | Collection[str] = ..., excludelist: None | Sequence[str] = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., case_sensitive: bool | L['upper', 'lower'] = ..., defaultfmt: str = ..., unpack: None | bool = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., max_rows: None | int = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def genfromtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: _DTypeLike[_SCT], comments: str = ..., delimiter: None | str | int | Iterable[int] = ..., skip_header: int = ..., skip_footer: int = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., missing_values: Any = ..., filling_values: Any = ..., usecols: None | Sequence[int] = ..., names: L[None, True] | str | Collection[str] = ..., excludelist: None | Sequence[str] = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., case_sensitive: bool | L['upper', 'lower'] = ..., defaultfmt: str = ..., unpack: None | bool = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., max_rows: None | int = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def genfromtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: DTypeLike, comments: str = ..., delimiter: None | str | int | Iterable[int] = ..., skip_header: int = ..., skip_footer: int = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., missing_values: Any = ..., filling_values: Any = ..., usecols: None | Sequence[int] = ..., names: L[None, True] | str | Collection[str] = ..., excludelist: None | Sequence[str] = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., case_sensitive: bool | L['upper', 'lower'] = ..., defaultfmt: str = ..., unpack: None | bool = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., max_rows: None | int = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def recfromtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], *, usemask: L[False] = ..., **kwargs: Any) -> recarray[Any, dtype[record]]:
- ...
-
-@overload
-def recfromtxt(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], *, usemask: L[True], **kwargs: Any) -> MaskedRecords[Any, dtype[void]]:
- ...
-
-@overload
-def recfromcsv(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], *, usemask: L[False] = ..., **kwargs: Any) -> recarray[Any, dtype[record]]:
- ...
-
-@overload
-def recfromcsv(fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], *, usemask: L[True], **kwargs: Any) -> MaskedRecords[Any, dtype[void]]:
- ...
-
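A small round-trip sketch for the I/O signatures above: savetxt accepts anything exposing write(str), the default dtype=None overload of loadtxt returns NDArray[float64], and save/load handle the binary .npy format. The in-memory buffers are only for illustration:

import io
import numpy as np

buf = io.StringIO()
np.savetxt(buf, np.array([[1.0, 2.0], [3.0, 4.0]]), delimiter=",")
buf.seek(0)
arr = np.loadtxt(buf, delimiter=",")     # default dtype -> float64
print(arr.dtype, arr.shape)              # float64 (2, 2)

raw = io.BytesIO()
np.save(raw, np.arange(5))
raw.seek(0)
print(np.load(raw))                      # [0 1 2 3 4]
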
diff --git a/typings/numpy/lib/polynomial.pyi b/typings/numpy/lib/polynomial.pyi
deleted file mode 100644
index d3bbd92..0000000
--- a/typings/numpy/lib/polynomial.pyi
+++ /dev/null
@@ -1,183 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, Literal as L, NoReturn, SupportsIndex, SupportsInt, TypeVar, overload
-from numpy import bool_, complex128, complexfloating, float64, floating, int32, int64, object_, poly1d as poly1d, signedinteger, unsignedinteger
-from numpy._typing import ArrayLike, NDArray, _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co
-
-_T = TypeVar("_T")
-_2Tup = tuple[_T, _T]
-_5Tup = tuple[_T, NDArray[float64], NDArray[int32], NDArray[float64], NDArray[float64],]
-__all__: list[str]
-def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]:
- ...
-
-def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]:
- ...
-
-@overload
-def polyint(p: poly1d, m: SupportsInt | SupportsIndex = ..., k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...) -> poly1d:
- ...
-
-@overload
-def polyint(p: _ArrayLikeFloat_co, m: SupportsInt | SupportsIndex = ..., k: None | _ArrayLikeFloat_co = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def polyint(p: _ArrayLikeComplex_co, m: SupportsInt | SupportsIndex = ..., k: None | _ArrayLikeComplex_co = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def polyint(p: _ArrayLikeObject_co, m: SupportsInt | SupportsIndex = ..., k: None | _ArrayLikeObject_co = ...) -> NDArray[object_]:
- ...
-
-@overload
-def polyder(p: poly1d, m: SupportsInt | SupportsIndex = ...) -> poly1d:
- ...
-
-@overload
-def polyder(p: _ArrayLikeFloat_co, m: SupportsInt | SupportsIndex = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def polyder(p: _ArrayLikeComplex_co, m: SupportsInt | SupportsIndex = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def polyder(p: _ArrayLikeObject_co, m: SupportsInt | SupportsIndex = ...) -> NDArray[object_]:
- ...
-
-@overload
-def polyfit(x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, rcond: None | float = ..., full: L[False] = ..., w: None | _ArrayLikeFloat_co = ..., cov: L[False] = ...) -> NDArray[float64]:
- ...
-
-@overload
-def polyfit(x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, rcond: None | float = ..., full: L[False] = ..., w: None | _ArrayLikeFloat_co = ..., cov: L[False] = ...) -> NDArray[complex128]:
- ...
-
-@overload
-def polyfit(x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, rcond: None | float = ..., full: L[False] = ..., w: None | _ArrayLikeFloat_co = ..., cov: L[True, "unscaled"] = ...) -> _2Tup[NDArray[float64]]:
- ...
-
-@overload
-def polyfit(x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, rcond: None | float = ..., full: L[False] = ..., w: None | _ArrayLikeFloat_co = ..., cov: L[True, "unscaled"] = ...) -> _2Tup[NDArray[complex128]]:
- ...
-
-@overload
-def polyfit(x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, rcond: None | float = ..., full: L[True] = ..., w: None | _ArrayLikeFloat_co = ..., cov: bool | L["unscaled"] = ...) -> _5Tup[NDArray[float64]]:
- ...
-
-@overload
-def polyfit(x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, rcond: None | float = ..., full: L[True] = ..., w: None | _ArrayLikeFloat_co = ..., cov: bool | L["unscaled"] = ...) -> _5Tup[NDArray[complex128]]:
- ...
-
-@overload
-def polyval(p: _ArrayLikeBool_co, x: _ArrayLikeBool_co) -> NDArray[int64]:
- ...
-
-@overload
-def polyval(p: _ArrayLikeUInt_co, x: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def polyval(p: _ArrayLikeInt_co, x: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def polyval(p: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def polyval(p: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def polyval(p: _ArrayLikeObject_co, x: _ArrayLikeObject_co) -> NDArray[object_]:
- ...
-
-@overload
-def polyadd(a1: poly1d, a2: _ArrayLikeComplex_co | _ArrayLikeObject_co) -> poly1d:
- ...
-
-@overload
-def polyadd(a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, a2: poly1d) -> poly1d:
- ...
-
-@overload
-def polyadd(a1: _ArrayLikeBool_co, a2: _ArrayLikeBool_co) -> NDArray[bool_]:
- ...
-
-@overload
-def polyadd(a1: _ArrayLikeUInt_co, a2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def polyadd(a1: _ArrayLikeInt_co, a2: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def polyadd(a1: _ArrayLikeFloat_co, a2: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def polyadd(a1: _ArrayLikeComplex_co, a2: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def polyadd(a1: _ArrayLikeObject_co, a2: _ArrayLikeObject_co) -> NDArray[object_]:
- ...
-
-@overload
-def polysub(a1: poly1d, a2: _ArrayLikeComplex_co | _ArrayLikeObject_co) -> poly1d:
- ...
-
-@overload
-def polysub(a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, a2: poly1d) -> poly1d:
- ...
-
-@overload
-def polysub(a1: _ArrayLikeBool_co, a2: _ArrayLikeBool_co) -> NoReturn:
- ...
-
-@overload
-def polysub(a1: _ArrayLikeUInt_co, a2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def polysub(a1: _ArrayLikeInt_co, a2: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def polysub(a1: _ArrayLikeFloat_co, a2: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def polysub(a1: _ArrayLikeComplex_co, a2: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def polysub(a1: _ArrayLikeObject_co, a2: _ArrayLikeObject_co) -> NDArray[object_]:
- ...
-
-polymul = ...
-@overload
-def polydiv(u: poly1d, v: _ArrayLikeComplex_co | _ArrayLikeObject_co) -> _2Tup[poly1d]:
- ...
-
-@overload
-def polydiv(u: _ArrayLikeComplex_co | _ArrayLikeObject_co, v: poly1d) -> _2Tup[poly1d]:
- ...
-
-@overload
-def polydiv(u: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co) -> _2Tup[NDArray[floating[Any]]]:
- ...
-
-@overload
-def polydiv(u: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co) -> _2Tup[NDArray[complexfloating[Any, Any]]]:
- ...
-
-@overload
-def polydiv(u: _ArrayLikeObject_co, v: _ArrayLikeObject_co) -> _2Tup[NDArray[Any]]:
- ...
-
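A short sketch of how the polyfit/polyval overloads resolve for plain floating input: with full=False and cov=False only the coefficient array comes back, while full=True selects the 5-tuple (_5Tup) overload:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = 2.0 * x + 1.0

coeffs = np.polyfit(x, y, 1)                    # NDArray[float64]
fitted = np.polyval(coeffs, x)                  # floating in -> NDArray[floating]
full_result = np.polyfit(x, y, 1, full=True)    # (coeffs, residuals, rank, sv, rcond)
print(np.round(coeffs, 6), np.allclose(fitted, y), len(full_result))  # [2. 1.] True 5
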
diff --git a/typings/numpy/lib/scimath.pyi b/typings/numpy/lib/scimath.pyi
deleted file mode 100644
index f12055a..0000000
--- a/typings/numpy/lib/scimath.pyi
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, overload
-from numpy import complexfloating
-from numpy._typing import NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ComplexLike_co, _FloatLike_co
-
-__all__: list[str]
-@overload
-def sqrt(x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def log(x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def log(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def log(x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def log10(x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def log2(x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def power(x: _FloatLike_co, p: _FloatLike_co) -> Any:
- ...
-
-@overload
-def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def arccos(x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def arcsin(x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def arctanh(x: _FloatLike_co) -> Any:
- ...
-
-@overload
-def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]:
- ...
-
-@overload
-def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]:
- ...
-
-@overload
-def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
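The scalar overloads above return Any for real input because the np.lib.scimath (np.emath) functions promote to complex whenever the real-valued result would be undefined. A quick sketch:

import numpy as np
from numpy.lib import scimath

print(scimath.sqrt(-1))          # 1j, where np.sqrt(-1) would give nan with a warning
print(scimath.log(-np.e))        # 1 + pi*1j
print(scimath.power(-8, 1/3))    # principal complex cube root, roughly 1 + 1.732j
print(scimath.arccos(2.0))       # purely imaginary result for |x| > 1
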
diff --git a/typings/numpy/lib/shape_base.pyi b/typings/numpy/lib/shape_base.pyi
deleted file mode 100644
index 69d16c4..0000000
--- a/typings/numpy/lib/shape_base.pyi
+++ /dev/null
@@ -1,177 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import sys
-from collections.abc import Callable, Sequence
-from typing import Any, Concatenate, ParamSpec, Protocol, SupportsIndex, TypeVar, overload
-from numpy import bool_, complexfloating, floating, generic, integer, object_, signedinteger, ufunc, unsignedinteger
-from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, _ShapeLike
-
-if sys.version_info >= (3, 10):
- ...
-else:
- ...
-_P = ParamSpec("_P")
-_SCT = TypeVar("_SCT", bound=generic)
-class _ArrayWrap(Protocol):
- def __call__(self, array: NDArray[Any], context: None | tuple[ufunc, tuple[Any, ...], int] = ..., /) -> Any:
- ...
-
-
-
-class _ArrayPrepare(Protocol):
- def __call__(self, array: NDArray[Any], context: None | tuple[ufunc, tuple[Any, ...], int] = ..., /) -> Any:
- ...
-
-
-
-class _SupportsArrayWrap(Protocol):
- @property
- def __array_wrap__(self) -> _ArrayWrap:
- ...
-
-
-
-class _SupportsArrayPrepare(Protocol):
- @property
- def __array_prepare__(self) -> _ArrayPrepare:
- ...
-
-
-
-__all__: list[str]
-row_stack = ...
-def take_along_axis(arr: _SCT | NDArray[_SCT], indices: NDArray[integer[Any]], axis: None | int) -> NDArray[_SCT]:
- ...
-
-def put_along_axis(arr: NDArray[_SCT], indices: NDArray[integer[Any]], values: ArrayLike, axis: None | int) -> None:
- ...
-
-@overload
-def apply_along_axis(func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]], axis: SupportsIndex, arr: ArrayLike, *args: _P.args, **kwargs: _P.kwargs) -> NDArray[_SCT]:
- ...
-
-@overload
-def apply_along_axis(func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike], axis: SupportsIndex, arr: ArrayLike, *args: _P.args, **kwargs: _P.kwargs) -> NDArray[Any]:
- ...
-
-def apply_over_axes(func: Callable[[NDArray[Any], int], NDArray[_SCT]], a: ArrayLike, axes: int | Sequence[int]) -> NDArray[_SCT]:
- ...
-
-@overload
-def expand_dims(a: _ArrayLike[_SCT], axis: _ShapeLike) -> NDArray[_SCT]:
- ...
-
-@overload
-def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Any]:
- ...
-
-@overload
-def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]:
- ...
-
-@overload
-def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]:
- ...
-
-@overload
-def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]:
- ...
-
-@overload
-def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]:
- ...
-
-@overload
-def array_split(ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike, axis: SupportsIndex = ...) -> list[NDArray[_SCT]]:
- ...
-
-@overload
-def array_split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = ...) -> list[NDArray[Any]]:
- ...
-
-@overload
-def split(ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike, axis: SupportsIndex = ...) -> list[NDArray[_SCT]]:
- ...
-
-@overload
-def split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = ...) -> list[NDArray[Any]]:
- ...
-
-@overload
-def hsplit(ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike) -> list[NDArray[_SCT]]:
- ...
-
-@overload
-def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Any]]:
- ...
-
-@overload
-def vsplit(ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike) -> list[NDArray[_SCT]]:
- ...
-
-@overload
-def vsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Any]]:
- ...
-
-@overload
-def dsplit(ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike) -> list[NDArray[_SCT]]:
- ...
-
-@overload
-def dsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Any]]:
- ...
-
-@overload
-def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare:
- ...
-
-@overload
-def get_array_prepare(*args: object) -> None | _ArrayPrepare:
- ...
-
-@overload
-def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap:
- ...
-
-@overload
-def get_array_wrap(*args: object) -> None | _ArrayWrap:
- ...
-
-@overload
-def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]:
- ...
-
-@overload
-def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]:
- ...
-
-@overload
-def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]:
- ...
-
-@overload
-def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]:
- ...
-
-@overload
-def tile(A: _ArrayLike[_SCT], reps: int | Sequence[int]) -> NDArray[_SCT]:
- ...
-
-@overload
-def tile(A: ArrayLike, reps: int | Sequence[int]) -> NDArray[Any]:
- ...
-
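A usage sketch for a few of the helpers above: apply_along_axis threads a 1-D function over one axis, the split family returns lists of arrays (matching the list[NDArray[...]] returns), and kron/tile follow the usual block/repeat semantics:

import numpy as np

m = np.arange(12).reshape(3, 4)

row_range = np.apply_along_axis(lambda v: v.max() - v.min(), 1, m)    # array([3, 3, 3])
halves = np.hsplit(m, 2)                                              # list of two (3, 2) arrays
tiled = np.tile(np.array([1, 2]), (2, 3))                             # shape (2, 6)
blocks = np.kron(np.eye(2, dtype=int), np.ones((2, 2), dtype=int))    # 4x4 block matrix
print(row_range, halves[0].shape, tiled.shape, blocks.shape)
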
diff --git a/typings/numpy/lib/stride_tricks.pyi b/typings/numpy/lib/stride_tricks.pyi
deleted file mode 100644
index 7d3fb1e..0000000
--- a/typings/numpy/lib/stride_tricks.pyi
+++ /dev/null
@@ -1,49 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Iterable
-from typing import Any, SupportsIndex, TypeVar, overload
-from numpy import generic
-from numpy._typing import ArrayLike, NDArray, _ArrayLike, _Shape, _ShapeLike
-
-_SCT = TypeVar("_SCT", bound=generic)
-__all__: list[str]
-class DummyArray:
- __array_interface__: dict[str, Any]
- base: None | NDArray[Any]
- def __init__(self, interface: dict[str, Any], base: None | NDArray[Any] = ...) -> None:
- ...
-
-
-
-@overload
-def as_strided(x: _ArrayLike[_SCT], shape: None | Iterable[int] = ..., strides: None | Iterable[int] = ..., subok: bool = ..., writeable: bool = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def as_strided(x: ArrayLike, shape: None | Iterable[int] = ..., strides: None | Iterable[int] = ..., subok: bool = ..., writeable: bool = ...) -> NDArray[Any]:
- ...
-
-@overload
-def sliding_window_view(x: _ArrayLike[_SCT], window_shape: int | Iterable[int], axis: None | SupportsIndex = ..., *, subok: bool = ..., writeable: bool = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def sliding_window_view(x: ArrayLike, window_shape: int | Iterable[int], axis: None | SupportsIndex = ..., *, subok: bool = ..., writeable: bool = ...) -> NDArray[Any]:
- ...
-
-@overload
-def broadcast_to(array: _ArrayLike[_SCT], shape: int | Iterable[int], subok: bool = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def broadcast_to(array: ArrayLike, shape: int | Iterable[int], subok: bool = ...) -> NDArray[Any]:
- ...
-
-def broadcast_shapes(*args: _ShapeLike) -> _Shape:
- ...
-
-def broadcast_arrays(*args: ArrayLike, subok: bool = ...) -> list[NDArray[Any]]:
- ...
-
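A sketch of the view-producing helpers above: sliding_window_view returns a read-only strided view (no copy), and broadcast_shapes/broadcast_to implement the standard broadcasting rules:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(6)
windows = sliding_window_view(x, window_shape=3)
print(windows.shape, windows[0], windows.flags.writeable)   # (4, 3) [0 1 2] False

print(np.broadcast_shapes((4, 1), (1, 3)))                  # (4, 3)
print(np.broadcast_to(np.arange(3), (2, 3)))                # two identical rows, also a view
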
diff --git a/typings/numpy/lib/twodim_base.pyi b/typings/numpy/lib/twodim_base.pyi
deleted file mode 100644
index 2a05513..0000000
--- a/typings/numpy/lib/twodim_base.pyi
+++ /dev/null
@@ -1,133 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Callable, Sequence
-from typing import Any, TypeVar, Union, overload
-from numpy import _OrderCF, bool_, complexfloating, datetime64, float64, floating, generic, int_, intp, number, object_, signedinteger, timedelta64
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _DTypeLike, _SupportsArrayFunc
-
-_T = TypeVar("_T")
-_SCT = TypeVar("_SCT", bound=generic)
-_MaskFunc = Callable[[NDArray[int_], _T], NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]],]
-__all__: list[str]
-@overload
-def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]:
- ...
-
-@overload
-def fliplr(m: ArrayLike) -> NDArray[Any]:
- ...
-
-@overload
-def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]:
- ...
-
-@overload
-def flipud(m: ArrayLike) -> NDArray[Any]:
- ...
-
-@overload
-def eye(N: int, M: None | int = ..., k: int = ..., dtype: None = ..., order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def eye(N: int, M: None | int = ..., k: int = ..., dtype: _DTypeLike[_SCT] = ..., order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def eye(N: int, M: None | int = ..., k: int = ..., dtype: DTypeLike = ..., order: _OrderCF = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]:
- ...
-
-@overload
-def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]:
- ...
-
-@overload
-def tri(N: int, M: None | int = ..., k: int = ..., dtype: None = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[float64]:
- ...
-
-@overload
-def tri(N: int, M: None | int = ..., k: int = ..., dtype: _DTypeLike[_SCT] = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def tri(N: int, M: None | int = ..., k: int = ..., dtype: DTypeLike = ..., *, like: None | _SupportsArrayFunc = ...) -> NDArray[Any]:
- ...
-
-@overload
-def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]:
- ...
-
-@overload
-def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]:
- ...
-
-@overload
-def vander(x: _ArrayLikeInt_co, N: None | int = ..., increasing: bool = ...) -> NDArray[signedinteger[Any]]:
- ...
-
-@overload
-def vander(x: _ArrayLikeFloat_co, N: None | int = ..., increasing: bool = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def vander(x: _ArrayLikeComplex_co, N: None | int = ..., increasing: bool = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def vander(x: _ArrayLikeObject_co, N: None | int = ..., increasing: bool = ...) -> NDArray[object_]:
- ...
-
-@overload
-def histogram2d(x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, bins: int | Sequence[int] = ..., range: None | _ArrayLikeFloat_co = ..., density: None | bool = ..., weights: None | _ArrayLikeFloat_co = ...) -> tuple[NDArray[float64], NDArray[floating[Any]], NDArray[floating[Any]],]:
- ...
-
-@overload
-def histogram2d(x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, bins: int | Sequence[int] = ..., range: None | _ArrayLikeFloat_co = ..., density: None | bool = ..., weights: None | _ArrayLikeFloat_co = ...) -> tuple[NDArray[float64], NDArray[complexfloating[Any, Any]], NDArray[complexfloating[Any, Any]],]:
- ...
-
-@overload
-def histogram2d(x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, bins: Sequence[_ArrayLikeInt_co], range: None | _ArrayLikeFloat_co = ..., density: None | bool = ..., weights: None | _ArrayLikeFloat_co = ...) -> tuple[NDArray[float64], NDArray[Any], NDArray[Any],]:
- ...
-
-@overload
-def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = ...) -> tuple[NDArray[intp], NDArray[intp]]:
- ...
-
-@overload
-def mask_indices(n: int, mask_func: _MaskFunc[_T], k: _T) -> tuple[NDArray[intp], NDArray[intp]]:
- ...
-
-def tril_indices(n: int, k: int = ..., m: None | int = ...) -> tuple[NDArray[int_], NDArray[int_]]:
- ...
-
-def tril_indices_from(arr: NDArray[Any], k: int = ...) -> tuple[NDArray[int_], NDArray[int_]]:
- ...
-
-def triu_indices(n: int, k: int = ..., m: None | int = ...) -> tuple[NDArray[int_], NDArray[int_]]:
- ...
-
-def triu_indices_from(arr: NDArray[Any], k: int = ...) -> tuple[NDArray[int_], NDArray[int_]]:
- ...
-
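A short sketch of the dtype-driven overloads above: omitting dtype selects the float64 returns of eye/tri, an explicit dtype switches overloads, and vander picks its return type from the input kind:

import numpy as np

i3 = np.eye(3)                         # dtype=None overload -> NDArray[float64]
lower = np.tri(3, dtype=int)           # explicit dtype -> integer array
rows, cols = np.triu_indices(3, k=1)   # indices of the strict upper triangle
v = np.vander([1, 2, 3], N=3)          # integer input -> NDArray[signedinteger]
print(i3.dtype, int(lower.sum()), rows, cols, v.shape)   # float64 6 [0 0 1] [1 2 2] (3, 3)
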
diff --git a/typings/numpy/lib/type_check.pyi b/typings/numpy/lib/type_check.pyi
deleted file mode 100644
index 8d378e5..0000000
--- a/typings/numpy/lib/type_check.pyi
+++ /dev/null
@@ -1,218 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Container, Iterable
-from typing import Any, Literal as L, Protocol, TypeVar, overload
-from numpy import bool_, complexfloating, dtype, float64, floating, generic, integer
-from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray, _64Bit, _ArrayLike, _DTypeLikeComplex, _ScalarLike_co, _SupportsDType
-
-_T = TypeVar("_T")
-_T_co = TypeVar("_T_co", covariant=True)
-_SCT = TypeVar("_SCT", bound=generic)
-_NBit1 = TypeVar("_NBit1", bound=NBitBase)
-_NBit2 = TypeVar("_NBit2", bound=NBitBase)
-class _SupportsReal(Protocol[_T_co]):
- @property
- def real(self) -> _T_co:
- ...
-
-
-
-class _SupportsImag(Protocol[_T_co]):
- @property
- def imag(self) -> _T_co:
- ...
-
-
-
-__all__: list[str]
-def mintypecode(typechars: Iterable[str | ArrayLike], typeset: Container[str] = ..., default: str = ...) -> str:
- ...
-
-@overload
-def asfarray(a: object, dtype: None | type[float] = ...) -> NDArray[float64]:
- ...
-
-@overload
-def asfarray(a: Any, dtype: _DTypeLikeComplex) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def asfarray(a: Any, dtype: DTypeLike) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def real(val: _SupportsReal[_T]) -> _T:
- ...
-
-@overload
-def real(val: ArrayLike) -> NDArray[Any]:
- ...
-
-@overload
-def imag(val: _SupportsImag[_T]) -> _T:
- ...
-
-@overload
-def imag(val: ArrayLike) -> NDArray[Any]:
- ...
-
-@overload
-def iscomplex(x: _ScalarLike_co) -> bool_:
- ...
-
-@overload
-def iscomplex(x: ArrayLike) -> NDArray[bool_]:
- ...
-
-@overload
-def isreal(x: _ScalarLike_co) -> bool_:
- ...
-
-@overload
-def isreal(x: ArrayLike) -> NDArray[bool_]:
- ...
-
-def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool:
- ...
-
-def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool:
- ...
-
-@overload
-def nan_to_num(x: _SCT, copy: bool = ..., nan: float = ..., posinf: None | float = ..., neginf: None | float = ...) -> _SCT:
- ...
-
-@overload
-def nan_to_num(x: _ScalarLike_co, copy: bool = ..., nan: float = ..., posinf: None | float = ..., neginf: None | float = ...) -> Any:
- ...
-
-@overload
-def nan_to_num(x: _ArrayLike[_SCT], copy: bool = ..., nan: float = ..., posinf: None | float = ..., neginf: None | float = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def nan_to_num(x: ArrayLike, copy: bool = ..., nan: float = ..., posinf: None | float = ..., neginf: None | float = ...) -> NDArray[Any]:
- ...
-
-@overload
-def real_if_close(a: _ArrayLike[complexfloating[_NBit1, _NBit1]], tol: float = ...) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]:
- ...
-
-@overload
-def real_if_close(a: _ArrayLike[_SCT], tol: float = ...) -> NDArray[_SCT]:
- ...
-
-@overload
-def real_if_close(a: ArrayLike, tol: float = ...) -> NDArray[Any]:
- ...
-
-@overload
-def typename(char: L['S1']) -> L['character']:
- ...
-
-@overload
-def typename(char: L['?']) -> L['bool']:
- ...
-
-@overload
-def typename(char: L['b']) -> L['signed char']:
- ...
-
-@overload
-def typename(char: L['B']) -> L['unsigned char']:
- ...
-
-@overload
-def typename(char: L['h']) -> L['short']:
- ...
-
-@overload
-def typename(char: L['H']) -> L['unsigned short']:
- ...
-
-@overload
-def typename(char: L['i']) -> L['integer']:
- ...
-
-@overload
-def typename(char: L['I']) -> L['unsigned integer']:
- ...
-
-@overload
-def typename(char: L['l']) -> L['long integer']:
- ...
-
-@overload
-def typename(char: L['L']) -> L['unsigned long integer']:
- ...
-
-@overload
-def typename(char: L['q']) -> L['long long integer']:
- ...
-
-@overload
-def typename(char: L['Q']) -> L['unsigned long long integer']:
- ...
-
-@overload
-def typename(char: L['f']) -> L['single precision']:
- ...
-
-@overload
-def typename(char: L['d']) -> L['double precision']:
- ...
-
-@overload
-def typename(char: L['g']) -> L['long precision']:
- ...
-
-@overload
-def typename(char: L['F']) -> L['complex single precision']:
- ...
-
-@overload
-def typename(char: L['D']) -> L['complex double precision']:
- ...
-
-@overload
-def typename(char: L['G']) -> L['complex long double precision']:
- ...
-
-@overload
-def typename(char: L['S']) -> L['string']:
- ...
-
-@overload
-def typename(char: L['U']) -> L['unicode']:
- ...
-
-@overload
-def typename(char: L['V']) -> L['void']:
- ...
-
-@overload
-def typename(char: L['O']) -> L['object']:
- ...
-
-@overload
-def common_type(*arrays: _SupportsDType[dtype[integer[Any]]]) -> type[floating[_64Bit]]:
- ...
-
-@overload
-def common_type(*arrays: _SupportsDType[dtype[floating[_NBit1]]]) -> type[floating[_NBit1]]:
- ...
-
-@overload
-def common_type(*arrays: _SupportsDType[dtype[integer[Any] | floating[_NBit1]]]) -> type[floating[_NBit1 | _64Bit]]:
- ...
-
-@overload
-def common_type(*arrays: _SupportsDType[dtype[floating[_NBit1] | complexfloating[_NBit2, _NBit2]]]) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]:
- ...
-
-@overload
-def common_type(*arrays: _SupportsDType[dtype[integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2]]]) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]:
- ...
-
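A sketch of the runtime behaviour behind these signatures: real/imag pass through any object exposing .real/.imag, nan_to_num substitutes finite values, and common_type treats integer arrays as double precision when promoting:

import numpy as np

z = np.array([1 + 2j, 3 - 4j])
print(np.real(z), np.imag(z))                     # [1. 3.] [ 2. -4.]
print(np.iscomplexobj(z), np.isrealobj(z))        # True False

x = np.array([np.nan, np.inf, -np.inf, 1.0])
print(np.nan_to_num(x, nan=0.0, posinf=1e9, neginf=-1e9))   # [ 0.e+00  1.e+09 -1.e+09  1.e+00]

print(np.common_type(np.arange(3), np.array([1.0], dtype=np.float32)))   # <class 'numpy.float64'>
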
diff --git a/typings/numpy/lib/ufunclike.pyi b/typings/numpy/lib/ufunclike.pyi
deleted file mode 100644
index 2a54fa1..0000000
--- a/typings/numpy/lib/ufunclike.pyi
+++ /dev/null
@@ -1,50 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, TypeVar, overload
-from numpy import bool_, floating, ndarray, object_
-from numpy._typing import NDArray, _ArrayLikeFloat_co, _ArrayLikeObject_co, _FloatLike_co
-
-_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
-__all__: list[str]
-@overload
-def fix(x: _FloatLike_co, out: None = ...) -> floating[Any]:
- ...
-
-@overload
-def fix(x: _ArrayLikeFloat_co, out: None = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def fix(x: _ArrayLikeObject_co, out: None = ...) -> NDArray[object_]:
- ...
-
-@overload
-def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayType) -> _ArrayType:
- ...
-
-@overload
-def isposinf(x: _FloatLike_co, out: None = ...) -> bool_:
- ...
-
-@overload
-def isposinf(x: _ArrayLikeFloat_co, out: None = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def isposinf(x: _ArrayLikeFloat_co, out: _ArrayType) -> _ArrayType:
- ...
-
-@overload
-def isneginf(x: _FloatLike_co, out: None = ...) -> bool_:
- ...
-
-@overload
-def isneginf(x: _ArrayLikeFloat_co, out: None = ...) -> NDArray[bool_]:
- ...
-
-@overload
-def isneginf(x: _ArrayLikeFloat_co, out: _ArrayType) -> _ArrayType:
- ...
-
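A quick sketch: fix truncates toward zero, isposinf/isneginf test the two infinities, and the out-parameter overloads return the very array passed as out:

import numpy as np

x = np.array([-1.7, -0.2, 0.2, 1.7, np.inf, -np.inf])
print(np.fix(x))                 # [-1. -0.  0.  1. inf -inf]
print(np.isposinf(x))            # True only at +inf
print(np.isneginf(x))            # True only at -inf

out = np.empty(x.shape, dtype=bool)
result = np.isposinf(x, out)     # _ArrayType overload
print(result is out)             # True
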
diff --git a/typings/numpy/lib/utils.pyi b/typings/numpy/lib/utils.pyi
deleted file mode 100644
index b558b86..0000000
--- a/typings/numpy/lib/utils.pyi
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ast import AST
-from collections.abc import Callable, Mapping, Sequence
-from typing import Any, Protocol, TypeVar, overload
-from numpy import generic, ndarray
-
-_T_contra = TypeVar("_T_contra", contravariant=True)
-_FuncType = TypeVar("_FuncType", bound=Callable[..., Any])
-class _SupportsWrite(Protocol[_T_contra]):
- def write(self, s: _T_contra, /) -> Any:
- ...
-
-
-
-__all__: list[str]
-class _Deprecate:
- old_name: None | str
- new_name: None | str
- message: None | str
- def __init__(self, old_name: None | str = ..., new_name: None | str = ..., message: None | str = ...) -> None:
- ...
-
- def __call__(self, func: _FuncType) -> _FuncType:
- ...
-
-
-
-def get_include() -> str:
- ...
-
-@overload
-def deprecate(*, old_name: None | str = ..., new_name: None | str = ..., message: None | str = ...) -> _Deprecate:
- ...
-
-@overload
-def deprecate(func: _FuncType, /, old_name: None | str = ..., new_name: None | str = ..., message: None | str = ...) -> _FuncType:
- ...
-
-def deprecate_with_doc(msg: None | str) -> _Deprecate:
- ...
-
-def byte_bounds(a: generic | ndarray[Any, Any]) -> tuple[int, int]:
- ...
-
-def who(vardict: None | Mapping[str, ndarray[Any, Any]] = ...) -> None:
- ...
-
-def info(object: object = ..., maxwidth: int = ..., output: None | _SupportsWrite[str] = ..., toplevel: str = ...) -> None:
- ...
-
-def source(object: object, output: None | _SupportsWrite[str] = ...) -> None:
- ...
-
-def lookfor(what: str, module: None | str | Sequence[str] = ..., import_modules: bool = ..., regenerate: bool = ..., output: None | _SupportsWrite[str] = ...) -> None:
- ...
-
-def safe_eval(source: str | AST) -> Any:
- ...
-
-def show_runtime() -> None:
- ...
-
diff --git a/typings/numpy/linalg/__init__.pyi b/typings/numpy/linalg/__init__.pyi
deleted file mode 100644
index 50b1446..0000000
--- a/typings/numpy/linalg/__init__.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy.linalg.linalg import cholesky as cholesky, cond as cond, det as det, eig as eig, eigh as eigh, eigvals as eigvals, eigvalsh as eigvalsh, inv as inv, lstsq as lstsq, matrix_power as matrix_power, matrix_rank as matrix_rank, multi_dot as multi_dot, norm as norm, pinv as pinv, qr as qr, slogdet as slogdet, solve as solve, svd as svd, tensorinv as tensorinv, tensorsolve as tensorsolve
-from numpy._pytesttester import PytestTester
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
-class LinAlgError(Exception):
- ...
-
-
diff --git a/typings/numpy/linalg/linalg.pyi b/typings/numpy/linalg/linalg.pyi
deleted file mode 100644
index c786400..0000000
--- a/typings/numpy/linalg/linalg.pyi
+++ /dev/null
@@ -1,233 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Iterable
-from typing import Any, Literal as L, NamedTuple, SupportsIndex, SupportsInt, TypeVar, overload
-from numpy import complex128, complexfloating, float64, floating, generic, int32
-from numpy._typing import ArrayLike, NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeTD64_co
-
-_T = TypeVar("_T")
-_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
-_SCT = TypeVar("_SCT", bound=generic, covariant=True)
-_SCT2 = TypeVar("_SCT2", bound=generic, covariant=True)
-_2Tuple = tuple[_T, _T]
-_ModeKind = L["reduced", "complete", "r", "raw"]
-__all__: list[str]
-class EigResult(NamedTuple):
- eigenvalues: NDArray[Any]
- eigenvectors: NDArray[Any]
- ...
-
-
-class EighResult(NamedTuple):
- eigenvalues: NDArray[Any]
- eigenvectors: NDArray[Any]
- ...
-
-
-class QRResult(NamedTuple):
- Q: NDArray[Any]
- R: NDArray[Any]
- ...
-
-
-class SlogdetResult(NamedTuple):
- sign: Any
- logabsdet: Any
- ...
-
-
-class SVDResult(NamedTuple):
- U: NDArray[Any]
- S: NDArray[Any]
- Vh: NDArray[Any]
- ...
-
-
-@overload
-def tensorsolve(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: None | Iterable[int] = ...) -> NDArray[float64]:
- ...
-
-@overload
-def tensorsolve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: None | Iterable[int] = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def tensorsolve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: None | Iterable[int] = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def solve(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[float64]:
- ...
-
-@overload
-def solve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def solve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def tensorinv(a: _ArrayLikeInt_co, ind: int = ...) -> NDArray[float64]:
- ...
-
-@overload
-def tensorinv(a: _ArrayLikeFloat_co, ind: int = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def tensorinv(a: _ArrayLikeComplex_co, ind: int = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def inv(a: _ArrayLikeInt_co) -> NDArray[float64]:
- ...
-
-@overload
-def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> NDArray[Any]:
- ...
-
-@overload
-def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]:
- ...
-
-@overload
-def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult:
- ...
-
-@overload
-def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult:
- ...
-
-@overload
-def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult:
- ...
-
-@overload
-def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]:
- ...
-
-@overload
-def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-@overload
-def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]:
- ...
-
-@overload
-def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def eig(a: _ArrayLikeInt_co) -> EigResult:
- ...
-
-@overload
-def eig(a: _ArrayLikeFloat_co) -> EigResult:
- ...
-
-@overload
-def eig(a: _ArrayLikeComplex_co) -> EigResult:
- ...
-
-@overload
-def eigh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> EighResult:
- ...
-
-@overload
-def eigh(a: _ArrayLikeFloat_co, UPLO: L["L", "U", "l", "u"] = ...) -> EighResult:
- ...
-
-@overload
-def eigh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> EighResult:
- ...
-
-@overload
-def svd(a: _ArrayLikeInt_co, full_matrices: bool = ..., compute_uv: L[True] = ..., hermitian: bool = ...) -> SVDResult:
- ...
-
-@overload
-def svd(a: _ArrayLikeFloat_co, full_matrices: bool = ..., compute_uv: L[True] = ..., hermitian: bool = ...) -> SVDResult:
- ...
-
-@overload
-def svd(a: _ArrayLikeComplex_co, full_matrices: bool = ..., compute_uv: L[True] = ..., hermitian: bool = ...) -> SVDResult:
- ...
-
-@overload
-def svd(a: _ArrayLikeInt_co, full_matrices: bool = ..., compute_uv: L[False] = ..., hermitian: bool = ...) -> NDArray[float64]:
- ...
-
-@overload
-def svd(a: _ArrayLikeComplex_co, full_matrices: bool = ..., compute_uv: L[False] = ..., hermitian: bool = ...) -> NDArray[floating[Any]]:
- ...
-
-def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any:
- ...
-
-def matrix_rank(A: _ArrayLikeComplex_co, tol: None | _ArrayLikeFloat_co = ..., hermitian: bool = ...) -> Any:
- ...
-
-@overload
-def pinv(a: _ArrayLikeInt_co, rcond: _ArrayLikeFloat_co = ..., hermitian: bool = ...) -> NDArray[float64]:
- ...
-
-@overload
-def pinv(a: _ArrayLikeFloat_co, rcond: _ArrayLikeFloat_co = ..., hermitian: bool = ...) -> NDArray[floating[Any]]:
- ...
-
-@overload
-def pinv(a: _ArrayLikeComplex_co, rcond: _ArrayLikeFloat_co = ..., hermitian: bool = ...) -> NDArray[complexfloating[Any, Any]]:
- ...
-
-def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult:
- ...
-
-def det(a: _ArrayLikeComplex_co) -> Any:
- ...
-
-@overload
-def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[NDArray[float64], NDArray[float64], int32, NDArray[float64],]:
- ...
-
-@overload
-def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[NDArray[floating[Any]], NDArray[floating[Any]], int32, NDArray[floating[Any]],]:
- ...
-
-@overload
-def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> tuple[NDArray[complexfloating[Any, Any]], NDArray[floating[Any]], int32, NDArray[floating[Any]],]:
- ...
-
-@overload
-def norm(x: ArrayLike, ord: None | float | L["fro", "nuc"] = ..., axis: None = ..., keepdims: bool = ...) -> floating[Any]:
- ...
-
-@overload
-def norm(x: ArrayLike, ord: None | float | L["fro", "nuc"] = ..., axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., keepdims: bool = ...) -> Any:
- ...
-
-def multi_dot(arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, out: None | NDArray[Any] = ...) -> Any:
- ...
-
diff --git a/typings/numpy/ma/__init__.pyi b/typings/numpy/ma/__init__.pyi
deleted file mode 100644
index 52c2a4d..0000000
--- a/typings/numpy/ma/__init__.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy._pytesttester import PytestTester
-from numpy.ma import extras as extras
-from numpy.ma.core import MAError as MAError, MaskError as MaskError, MaskType as MaskType, MaskedArray as MaskedArray, abs as abs, absolute as absolute, add as add, all as all, allclose as allclose, allequal as allequal, alltrue as alltrue, amax as amax, amin as amin, angle as angle, anom as anom, anomalies as anomalies, any as any, append as append, arange as arange, arccos as arccos, arccosh as arccosh, arcsin as arcsin, arcsinh as arcsinh, arctan as arctan, arctan2 as arctan2, arctanh as arctanh, argmax as argmax, argmin as argmin, argsort as argsort, around as around, array as array, asanyarray as asanyarray, asarray as asarray, bitwise_and as bitwise_and, bitwise_or as bitwise_or, bitwise_xor as bitwise_xor, bool_ as bool_, ceil as ceil, choose as choose, clip as clip, common_fill_value as common_fill_value, compress as compress, compressed as compressed, concatenate as concatenate, conjugate as conjugate, convolve as convolve, copy as copy, correlate as correlate, cos as cos, cosh as cosh, count as count, cumprod as cumprod, cumsum as cumsum, default_fill_value as default_fill_value, diag as diag, diagonal as diagonal, diff as diff, divide as divide, empty as empty, empty_like as empty_like, equal as equal, exp as exp, expand_dims as expand_dims, fabs as fabs, filled as filled, fix_invalid as fix_invalid, flatten_mask as flatten_mask, flatten_structured_array as flatten_structured_array, floor as floor, floor_divide as floor_divide, fmod as fmod, frombuffer as frombuffer, fromflex as fromflex, fromfunction as fromfunction, getdata as getdata, getmask as getmask, getmaskarray as getmaskarray, greater as greater, greater_equal as greater_equal, harden_mask as harden_mask, hypot as hypot, identity as identity, ids as ids, indices as indices, inner as inner, innerproduct as innerproduct, isMA as isMA, isMaskedArray as isMaskedArray, is_mask as is_mask, is_masked as is_masked, isarray as isarray, left_shift as left_shift, less as less, less_equal as less_equal, log as log, log10 as log10, log2 as log2, logical_and as logical_and, logical_not as logical_not, logical_or as logical_or, logical_xor as logical_xor, make_mask as make_mask, make_mask_descr as make_mask_descr, make_mask_none as make_mask_none, mask_or as mask_or, masked as masked, masked_array as masked_array, masked_equal as masked_equal, masked_greater as masked_greater, masked_greater_equal as masked_greater_equal, masked_inside as masked_inside, masked_invalid as masked_invalid, masked_less as masked_less, masked_less_equal as masked_less_equal, masked_not_equal as masked_not_equal, masked_object as masked_object, masked_outside as masked_outside, masked_print_option as masked_print_option, masked_singleton as masked_singleton, masked_values as masked_values, masked_where as masked_where, max as max, maximum as maximum, maximum_fill_value as maximum_fill_value, mean as mean, min as min, minimum as minimum, minimum_fill_value as minimum_fill_value, mod as mod, multiply as multiply, mvoid as mvoid, ndim as ndim, negative as negative, nomask as nomask, nonzero as nonzero, not_equal as not_equal, ones as ones, outer as outer, outerproduct as outerproduct, power as power, prod as prod, product as product, ptp as ptp, put as put, putmask as putmask, ravel as ravel, remainder as remainder, repeat as repeat, reshape as reshape, resize as resize, right_shift as right_shift, round as round, set_fill_value as set_fill_value, shape as shape, sin as sin, sinh as sinh, size as size, soften_mask as soften_mask, sometrue as sometrue, sort as sort, sqrt as sqrt, squeeze as squeeze, std as std, subtract as subtract, sum as sum, swapaxes as swapaxes, take as take, tan as tan, tanh as tanh, trace as trace, transpose as transpose, true_divide as true_divide, var as var, where as where, zeros as zeros
-from numpy.ma.extras import apply_along_axis as apply_along_axis, apply_over_axes as apply_over_axes, atleast_1d as atleast_1d, atleast_2d as atleast_2d, atleast_3d as atleast_3d, average as average, clump_masked as clump_masked, clump_unmasked as clump_unmasked, column_stack as column_stack, compress_cols as compress_cols, compress_nd as compress_nd, compress_rowcols as compress_rowcols, compress_rows as compress_rows, corrcoef as corrcoef, count_masked as count_masked, cov as cov, diagflat as diagflat, dot as dot, dstack as dstack, ediff1d as ediff1d, flatnotmasked_contiguous as flatnotmasked_contiguous, flatnotmasked_edges as flatnotmasked_edges, hsplit as hsplit, hstack as hstack, in1d as in1d, intersect1d as intersect1d, isin as isin, mask_cols as mask_cols, mask_rowcols as mask_rowcols, mask_rows as mask_rows, masked_all as masked_all, masked_all_like as masked_all_like, median as median, mr_ as mr_, ndenumerate as ndenumerate, notmasked_contiguous as notmasked_contiguous, notmasked_edges as notmasked_edges, polyfit as polyfit, row_stack as row_stack, setdiff1d as setdiff1d, setxor1d as setxor1d, stack as stack, union1d as union1d, unique as unique, vander as vander, vstack as vstack
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
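
For context on the hunks above and below: they only remove the pyright-generated stubs for `numpy.ma`; the runtime package is unchanged. A minimal doctest-style sketch of the public masked-array interface those stubs described (illustrative only, not part of the patch):

    >>> import numpy.ma as ma
    >>> a = ma.masked_array([1.0, 2.0, 3.0, 4.0], mask=[0, 0, 1, 0])
    >>> float(a.mean())               # masked entries are ignored in reductions
    2.3333333333333335
    >>> ma.filled(a, fill_value=0.0)  # replace masked entries with a fill value
    array([1., 2., 0., 4.])
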
diff --git a/typings/numpy/ma/core.pyi b/typings/numpy/ma/core.pyi
deleted file mode 100644
index a12e554..0000000
--- a/typings/numpy/ma/core.pyi
+++ /dev/null
@@ -1,853 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Callable
-from typing import Any, TypeVar
-from numpy import bool_ as bool_, dtype, float64, ndarray, squeeze as squeeze
-
-_ShapeType = TypeVar("_ShapeType", bound=Any)
-_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)
-__all__: list[str]
-MaskType = bool_
-nomask: bool_
-class MaskedArrayFutureWarning(FutureWarning):
- ...
-
-
-class MAError(Exception):
- ...
-
-
-class MaskError(MAError):
- ...
-
-
-def default_fill_value(obj):
- ...
-
-def minimum_fill_value(obj):
- ...
-
-def maximum_fill_value(obj):
- ...
-
-def set_fill_value(a, fill_value):
- ...
-
-def common_fill_value(a, b):
- ...
-
-def filled(a, fill_value=...):
- ...
-
-def getdata(a, subok=...):
- ...
-
-get_data = ...
-def fix_invalid(a, mask=..., copy=..., fill_value=...):
- ...
-
-class _MaskedUFunc:
- f: Any
- __doc__: Any
- __name__: Any
- def __init__(self, ufunc) -> None:
- ...
-
-
-
-class _MaskedUnaryOperation(_MaskedUFunc):
- fill: Any
- domain: Any
- def __init__(self, mufunc, fill=..., domain=...) -> None:
- ...
-
- def __call__(self, a, *args, **kwargs):
- ...
-
-
-
-class _MaskedBinaryOperation(_MaskedUFunc):
- fillx: Any
- filly: Any
- def __init__(self, mbfunc, fillx=..., filly=...) -> None:
- ...
-
- def __call__(self, a, b, *args, **kwargs):
- ...
-
- def reduce(self, target, axis=..., dtype=...):
- ...
-
- def outer(self, a, b):
- ...
-
- def accumulate(self, target, axis=...):
- ...
-
-
-
-class _DomainedBinaryOperation(_MaskedUFunc):
- domain: Any
- fillx: Any
- filly: Any
- def __init__(self, dbfunc, domain, fillx=..., filly=...) -> None:
- ...
-
- def __call__(self, a, b, *args, **kwargs):
- ...
-
-
-
-exp: _MaskedUnaryOperation
-conjugate: _MaskedUnaryOperation
-sin: _MaskedUnaryOperation
-cos: _MaskedUnaryOperation
-arctan: _MaskedUnaryOperation
-arcsinh: _MaskedUnaryOperation
-sinh: _MaskedUnaryOperation
-cosh: _MaskedUnaryOperation
-tanh: _MaskedUnaryOperation
-abs: _MaskedUnaryOperation
-absolute: _MaskedUnaryOperation
-fabs: _MaskedUnaryOperation
-negative: _MaskedUnaryOperation
-floor: _MaskedUnaryOperation
-ceil: _MaskedUnaryOperation
-around: _MaskedUnaryOperation
-logical_not: _MaskedUnaryOperation
-sqrt: _MaskedUnaryOperation
-log: _MaskedUnaryOperation
-log2: _MaskedUnaryOperation
-log10: _MaskedUnaryOperation
-tan: _MaskedUnaryOperation
-arcsin: _MaskedUnaryOperation
-arccos: _MaskedUnaryOperation
-arccosh: _MaskedUnaryOperation
-arctanh: _MaskedUnaryOperation
-add: _MaskedBinaryOperation
-subtract: _MaskedBinaryOperation
-multiply: _MaskedBinaryOperation
-arctan2: _MaskedBinaryOperation
-equal: _MaskedBinaryOperation
-not_equal: _MaskedBinaryOperation
-less_equal: _MaskedBinaryOperation
-greater_equal: _MaskedBinaryOperation
-less: _MaskedBinaryOperation
-greater: _MaskedBinaryOperation
-logical_and: _MaskedBinaryOperation
-alltrue: _MaskedBinaryOperation
-logical_or: _MaskedBinaryOperation
-sometrue: Callable[..., Any]
-logical_xor: _MaskedBinaryOperation
-bitwise_and: _MaskedBinaryOperation
-bitwise_or: _MaskedBinaryOperation
-bitwise_xor: _MaskedBinaryOperation
-hypot: _MaskedBinaryOperation
-divide: _MaskedBinaryOperation
-true_divide: _MaskedBinaryOperation
-floor_divide: _MaskedBinaryOperation
-remainder: _MaskedBinaryOperation
-fmod: _MaskedBinaryOperation
-mod: _MaskedBinaryOperation
-def make_mask_descr(ndtype):
- ...
-
-def getmask(a):
- ...
-
-get_mask = ...
-def getmaskarray(arr):
- ...
-
-def is_mask(m):
- ...
-
-def make_mask(m, copy=..., shrink=..., dtype=...):
- ...
-
-def make_mask_none(newshape, dtype=...):
- ...
-
-def mask_or(m1, m2, copy=..., shrink=...):
- ...
-
-def flatten_mask(mask):
- ...
-
-def masked_where(condition, a, copy=...):
- ...
-
-def masked_greater(x, value, copy=...):
- ...
-
-def masked_greater_equal(x, value, copy=...):
- ...
-
-def masked_less(x, value, copy=...):
- ...
-
-def masked_less_equal(x, value, copy=...):
- ...
-
-def masked_not_equal(x, value, copy=...):
- ...
-
-def masked_equal(x, value, copy=...):
- ...
-
-def masked_inside(x, v1, v2, copy=...):
- ...
-
-def masked_outside(x, v1, v2, copy=...):
- ...
-
-def masked_object(x, value, copy=..., shrink=...):
- ...
-
-def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...):
- ...
-
-def masked_invalid(a, copy=...):
- ...
-
-class _MaskedPrintOption:
- def __init__(self, display) -> None:
- ...
-
- def display(self):
- ...
-
- def set_display(self, s):
- ...
-
- def enabled(self):
- ...
-
- def enable(self, shrink=...):
- ...
-
-
-
-masked_print_option: _MaskedPrintOption
-def flatten_structured_array(a):
- ...
-
-class MaskedIterator:
- ma: Any
- dataiter: Any
- maskiter: Any
- def __init__(self, ma) -> None:
- ...
-
- def __iter__(self):
- ...
-
- def __getitem__(self, indx):
- ...
-
- def __setitem__(self, index, value):
- ...
-
- def __next__(self):
- ...
-
-
-
-class MaskedArray(ndarray[_ShapeType, _DType_co]):
- __array_priority__: Any
- def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...):
- ...
-
- def __array_finalize__(self, obj):
- ...
-
- def __array_wrap__(self, obj, context=...):
- ...
-
- def view(self, dtype=..., type=..., fill_value=...):
- ...
-
- def __getitem__(self, indx):
- ...
-
- def __setitem__(self, indx, value):
- ...
-
- @property
- def dtype(self):
- ...
-
- @dtype.setter
- def dtype(self, dtype):
- ...
-
- @property
- def shape(self):
- ...
-
- @shape.setter
- def shape(self, shape):
- ...
-
- def __setmask__(self, mask, copy=...):
- ...
-
- @property
- def mask(self):
- ...
-
- @mask.setter
- def mask(self, value):
- ...
-
- @property
- def recordmask(self):
- ...
-
- @recordmask.setter
- def recordmask(self, mask):
- ...
-
- def harden_mask(self):
- ...
-
- def soften_mask(self):
- ...
-
- @property
- def hardmask(self):
- ...
-
- def unshare_mask(self):
- ...
-
- @property
- def sharedmask(self):
- ...
-
- def shrink_mask(self):
- ...
-
- @property
- def baseclass(self):
- ...
-
- data: Any
- @property
- def flat(self):
- ...
-
- @flat.setter
- def flat(self, value):
- ...
-
- @property
- def fill_value(self):
- ...
-
- @fill_value.setter
- def fill_value(self, value=...):
- ...
-
- get_fill_value: Any
- set_fill_value: Any
- def filled(self, fill_value=...):
- ...
-
- def compressed(self):
- ...
-
- def compress(self, condition, axis=..., out=...):
- ...
-
- def __eq__(self, other) -> bool:
- ...
-
- def __ne__(self, other) -> bool:
- ...
-
- def __ge__(self, other) -> bool:
- ...
-
- def __gt__(self, other) -> bool:
- ...
-
- def __le__(self, other) -> bool:
- ...
-
- def __lt__(self, other) -> bool:
- ...
-
- def __add__(self, other):
- ...
-
- def __radd__(self, other):
- ...
-
- def __sub__(self, other):
- ...
-
- def __rsub__(self, other):
- ...
-
- def __mul__(self, other):
- ...
-
- def __rmul__(self, other):
- ...
-
- def __div__(self, other):
- ...
-
- def __truediv__(self, other):
- ...
-
- def __rtruediv__(self, other):
- ...
-
- def __floordiv__(self, other):
- ...
-
- def __rfloordiv__(self, other):
- ...
-
- def __pow__(self, other):
- ...
-
- def __rpow__(self, other):
- ...
-
- def __iadd__(self, other):
- ...
-
- def __isub__(self, other):
- ...
-
- def __imul__(self, other):
- ...
-
- def __idiv__(self, other):
- ...
-
- def __ifloordiv__(self, other):
- ...
-
- def __itruediv__(self, other):
- ...
-
- def __ipow__(self, other):
- ...
-
- def __float__(self):
- ...
-
- def __int__(self) -> int:
- ...
-
- @property
- def imag(self):
- ...
-
- get_imag: Any
- @property
- def real(self):
- ...
-
- get_real: Any
- def count(self, axis=..., keepdims=...):
- ...
-
- def ravel(self, order=...):
- ...
-
- def reshape(self, *s, **kwargs):
- ...
-
- def resize(self, newshape, refcheck=..., order=...):
- ...
-
- def put(self, indices, values, mode=...):
- ...
-
- def ids(self):
- ...
-
- def iscontiguous(self):
- ...
-
- def all(self, axis=..., out=..., keepdims=...):
- ...
-
- def any(self, axis=..., out=..., keepdims=...):
- ...
-
- def nonzero(self):
- ...
-
- def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...):
- ...
-
- def dot(self, b, out=..., strict=...):
- ...
-
- def sum(self, axis=..., dtype=..., out=..., keepdims=...):
- ...
-
- def cumsum(self, axis=..., dtype=..., out=...):
- ...
-
- def prod(self, axis=..., dtype=..., out=..., keepdims=...):
- ...
-
- product: Any
- def cumprod(self, axis=..., dtype=..., out=...):
- ...
-
- def mean(self, axis=..., dtype=..., out=..., keepdims=...):
- ...
-
- def anom(self, axis=..., dtype=...):
- ...
-
- def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...):
- ...
-
- def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...):
- ...
-
- def round(self, decimals=..., out=...):
- ...
-
- def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...):
- ...
-
- def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...):
- ...
-
- def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...):
- ...
-
- def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...):
- ...
-
- def min(self, axis=..., out=..., fill_value=..., keepdims=...):
- ...
-
- def max(self, axis=..., out=..., fill_value=..., keepdims=...):
- ...
-
- def ptp(self, axis=..., out=..., fill_value=..., keepdims=...):
- ...
-
- def partition(self, *args, **kwargs):
- ...
-
- def argpartition(self, *args, **kwargs):
- ...
-
- def take(self, indices, axis=..., out=..., mode=...):
- ...
-
- copy: Any
- diagonal: Any
- flatten: Any
- repeat: Any
- squeeze: Any
- swapaxes: Any
- T: Any
- transpose: Any
- def tolist(self, fill_value=...):
- ...
-
- def tobytes(self, fill_value=..., order=...):
- ...
-
- def tofile(self, fid, sep=..., format=...):
- ...
-
- def toflex(self):
- ...
-
- torecords: Any
- def __reduce__(self):
- ...
-
- def __deepcopy__(self, memo=...):
- ...
-
-
-
-class mvoid(MaskedArray[_ShapeType, _DType_co]):
- def __new__(self, data, mask=..., dtype=..., fill_value=..., hardmask=..., copy=..., subok=...):
- ...
-
- def __getitem__(self, indx):
- ...
-
- def __setitem__(self, indx, value):
- ...
-
- def __iter__(self):
- ...
-
- def __len__(self):
- ...
-
- def filled(self, fill_value=...):
- ...
-
- def tolist(self):
- ...
-
-
-
-def isMaskedArray(x):
- ...
-
-isarray = ...
-isMA = ...
-class MaskedConstant(MaskedArray[Any, dtype[float64]]):
- def __new__(cls):
- ...
-
- __class__: Any
- def __array_finalize__(self, obj):
- ...
-
- def __array_prepare__(self, obj, context=...):
- ...
-
- def __array_wrap__(self, obj, context=...):
- ...
-
- def __format__(self, format_spec):
- ...
-
- def __reduce__(self):
- ...
-
- def __iop__(self, other):
- ...
-
- __iadd__: Any
- __isub__: Any
- __imul__: Any
- __ifloordiv__: Any
- __itruediv__: Any
- __ipow__: Any
- def copy(self, *args, **kwargs):
- ...
-
- def __copy__(self):
- ...
-
- def __deepcopy__(self, memo):
- ...
-
- def __setattr__(self, attr, value):
- ...
-
-
-
-masked: MaskedConstant
-masked_singleton: MaskedConstant
-masked_array = MaskedArray
-def array(data, dtype=..., copy=..., order=..., mask=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., subok=..., ndmin=...):
- ...
-
-def is_masked(x):
- ...
-
-class _extrema_operation(_MaskedUFunc):
- compare: Any
- fill_value_func: Any
- def __init__(self, ufunc, compare, fill_value) -> None:
- ...
-
- def __call__(self, a, b):
- ...
-
- def reduce(self, target, axis=...):
- ...
-
- def outer(self, a, b):
- ...
-
-
-
-def min(obj, axis=..., out=..., fill_value=..., keepdims=...):
- ...
-
-def max(obj, axis=..., out=..., fill_value=..., keepdims=...):
- ...
-
-def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...):
- ...
-
-class _frommethod:
- __name__: Any
- __doc__: Any
- reversed: Any
- def __init__(self, methodname, reversed=...) -> None:
- ...
-
- def getdoc(self):
- ...
-
- def __call__(self, a, *args, **params):
- ...
-
-
-
-all: _frommethod
-anomalies: _frommethod
-anom: _frommethod
-any: _frommethod
-compress: _frommethod
-cumprod: _frommethod
-cumsum: _frommethod
-copy: _frommethod
-diagonal: _frommethod
-harden_mask: _frommethod
-ids: _frommethod
-mean: _frommethod
-nonzero: _frommethod
-prod: _frommethod
-product: _frommethod
-ravel: _frommethod
-repeat: _frommethod
-soften_mask: _frommethod
-std: _frommethod
-sum: _frommethod
-swapaxes: _frommethod
-trace: _frommethod
-var: _frommethod
-count: _frommethod
-argmin: _frommethod
-argmax: _frommethod
-minimum: _extrema_operation
-maximum: _extrema_operation
-def take(a, indices, axis=..., out=..., mode=...):
- ...
-
-def power(a, b, third=...):
- ...
-
-def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...):
- ...
-
-def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...):
- ...
-
-def compressed(x):
- ...
-
-def concatenate(arrays, axis=...):
- ...
-
-def diag(v, k=...):
- ...
-
-def left_shift(a, n):
- ...
-
-def right_shift(a, n):
- ...
-
-def put(a, indices, values, mode=...):
- ...
-
-def putmask(a, mask, values):
- ...
-
-def transpose(a, axes=...):
- ...
-
-def reshape(a, new_shape, order=...):
- ...
-
-def resize(x, new_shape):
- ...
-
-def ndim(obj):
- ...
-
-def shape(obj):
- ...
-
-def size(obj, axis=...):
- ...
-
-def diff(a, /, n=..., axis=..., prepend=..., append=...):
- ...
-
-def where(condition, x=..., y=...):
- ...
-
-def choose(indices, choices, out=..., mode=...):
- ...
-
-def round(a, decimals=..., out=...):
- ...
-
-def inner(a, b):
- ...
-
-innerproduct = ...
-def outer(a, b):
- ...
-
-outerproduct = ...
-def correlate(a, v, mode=..., propagate_mask=...):
- ...
-
-def convolve(a, v, mode=..., propagate_mask=...):
- ...
-
-def allequal(a, b, fill_value=...):
- ...
-
-def allclose(a, b, masked_equal=..., rtol=..., atol=...):
- ...
-
-def asarray(a, dtype=..., order=...):
- ...
-
-def asanyarray(a, dtype=...):
- ...
-
-def fromflex(fxarray):
- ...
-
-class _convert2ma:
- __doc__: Any
- def __init__(self, funcname, params=...) -> None:
- ...
-
- def getdoc(self):
- ...
-
- def __call__(self, *args, **params):
- ...
-
-
-
-arange: _convert2ma
-empty: _convert2ma
-empty_like: _convert2ma
-frombuffer: _convert2ma
-fromfunction: _convert2ma
-identity: _convert2ma
-ones: _convert2ma
-zeros: _convert2ma
-def append(a, b, axis=...):
- ...
-
-def dot(a, b, strict=..., out=...):
- ...
-
-def mask_rowcols(a, axis=...):
- ...
-
diff --git a/typings/numpy/ma/extras.pyi b/typings/numpy/ma/extras.pyi
deleted file mode 100644
index 86931ff..0000000
--- a/typings/numpy/ma/extras.pyi
+++ /dev/null
@@ -1,165 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-from numpy.lib.index_tricks import AxisConcatenator
-
-__all__: list[str]
-def count_masked(arr, axis=...):
- ...
-
-def masked_all(shape, dtype=...):
- ...
-
-def masked_all_like(arr):
- ...
-
-class _fromnxfunction:
- __name__: Any
- __doc__: Any
- def __init__(self, funcname) -> None:
- ...
-
- def getdoc(self):
- ...
-
- def __call__(self, *args, **params):
- ...
-
-
-
-class _fromnxfunction_single(_fromnxfunction):
- def __call__(self, x, *args, **params):
- ...
-
-
-
-class _fromnxfunction_seq(_fromnxfunction):
- def __call__(self, x, *args, **params):
- ...
-
-
-
-class _fromnxfunction_allargs(_fromnxfunction):
- def __call__(self, *args, **params):
- ...
-
-
-
-atleast_1d: _fromnxfunction_allargs
-atleast_2d: _fromnxfunction_allargs
-atleast_3d: _fromnxfunction_allargs
-vstack: _fromnxfunction_seq
-row_stack: _fromnxfunction_seq
-hstack: _fromnxfunction_seq
-column_stack: _fromnxfunction_seq
-dstack: _fromnxfunction_seq
-stack: _fromnxfunction_seq
-hsplit: _fromnxfunction_single
-diagflat: _fromnxfunction_single
-def apply_along_axis(func1d, axis, arr, *args, **kwargs):
- ...
-
-def apply_over_axes(func, a, axes):
- ...
-
-def average(a, axis=..., weights=..., returned=..., keepdims=...):
- ...
-
-def median(a, axis=..., out=..., overwrite_input=..., keepdims=...):
- ...
-
-def compress_nd(x, axis=...):
- ...
-
-def compress_rowcols(x, axis=...):
- ...
-
-def compress_rows(a):
- ...
-
-def compress_cols(a):
- ...
-
-def mask_rows(a, axis=...):
- ...
-
-def mask_cols(a, axis=...):
- ...
-
-def ediff1d(arr, to_end=..., to_begin=...):
- ...
-
-def unique(ar1, return_index=..., return_inverse=...):
- ...
-
-def intersect1d(ar1, ar2, assume_unique=...):
- ...
-
-def setxor1d(ar1, ar2, assume_unique=...):
- ...
-
-def in1d(ar1, ar2, assume_unique=..., invert=...):
- ...
-
-def isin(element, test_elements, assume_unique=..., invert=...):
- ...
-
-def union1d(ar1, ar2):
- ...
-
-def setdiff1d(ar1, ar2, assume_unique=...):
- ...
-
-def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...):
- ...
-
-def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...):
- ...
-
-class MAxisConcatenator(AxisConcatenator):
- concatenate: Any
- @classmethod
- def makemat(cls, arr):
- ...
-
- def __getitem__(self, key):
- ...
-
-
-
-class mr_class(MAxisConcatenator):
- def __init__(self) -> None:
- ...
-
-
-
-mr_: mr_class
-def ndenumerate(a, compressed=...):
- ...
-
-def flatnotmasked_edges(a):
- ...
-
-def notmasked_edges(a, axis=...):
- ...
-
-def flatnotmasked_contiguous(a):
- ...
-
-def notmasked_contiguous(a, axis=...):
- ...
-
-def clump_unmasked(a):
- ...
-
-def clump_masked(a):
- ...
-
-def vander(x, n=...):
- ...
-
-def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...):
- ...
-
diff --git a/typings/numpy/ma/mrecords.pyi b/typings/numpy/ma/mrecords.pyi
deleted file mode 100644
index 5b41e25..0000000
--- a/typings/numpy/ma/mrecords.pyi
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, TypeVar
-from numpy import dtype
-from numpy.ma import MaskedArray
-
-__all__: list[str]
-_ShapeType = TypeVar("_ShapeType", bound=Any)
-_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)
-class MaskedRecords(MaskedArray[_ShapeType, _DType_co]):
- def __new__(cls, shape, dtype=..., buf=..., offset=..., strides=..., formats=..., names=..., titles=..., byteorder=..., aligned=..., mask=..., hard_mask=..., fill_value=..., keep_mask=..., copy=..., **options):
- ...
-
- _mask: Any
- _fill_value: Any
- def __array_finalize__(self, obj):
- ...
-
- def __len__(self):
- ...
-
- def __getattribute__(self, attr):
- ...
-
- def __setattr__(self, attr, val):
- ...
-
- def __getitem__(self, indx):
- ...
-
- def __setitem__(self, indx, value):
- ...
-
- def view(self, dtype=..., type=...):
- ...
-
- def harden_mask(self):
- ...
-
- def soften_mask(self):
- ...
-
- def copy(self):
- ...
-
- def tolist(self, fill_value=...):
- ...
-
- def __reduce__(self):
- ...
-
-
-
-mrecarray = MaskedRecords
-def fromarrays(arraylist, dtype=..., shape=..., formats=..., names=..., titles=..., aligned=..., byteorder=..., fill_value=...):
- ...
-
-def fromrecords(reclist, dtype=..., shape=..., formats=..., names=..., titles=..., aligned=..., byteorder=..., fill_value=..., mask=...):
- ...
-
-def fromtextfile(fname, delimiter=..., commentchar=..., missingchar=..., varnames=..., vartypes=...):
- ...
-
-def addfield(mrecord, newfield, newfieldname=...):
- ...
-
diff --git a/typings/numpy/matlib.pyi b/typings/numpy/matlib.pyi
deleted file mode 100644
index 5916be5..0000000
--- a/typings/numpy/matlib.pyi
+++ /dev/null
@@ -1,344 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import numpy as np
-from numpy import *
-
-__version__ = ...
-__all__ = np.__all__[:]
-__all__ += ['rand', 'randn', 'repmat']
-def empty(shape, dtype=..., order=...): # -> matrix[Unknown, Unknown]:
- """Return a new matrix of given shape and type, without initializing entries.
-
- Parameters
- ----------
- shape : int or tuple of int
- Shape of the empty matrix.
- dtype : data-type, optional
- Desired output data-type.
- order : {'C', 'F'}, optional
- Whether to store multi-dimensional data in row-major
- (C-style) or column-major (Fortran-style) order in
- memory.
-
- See Also
- --------
- empty_like, zeros
-
- Notes
- -----
- `empty`, unlike `zeros`, does not set the matrix values to zero,
- and may therefore be marginally faster. On the other hand, it requires
- the user to manually set all the values in the array, and should be
- used with caution.
-
- Examples
- --------
- >>> import numpy.matlib
- >>> np.matlib.empty((2, 2)) # filled with random data
- matrix([[ 6.76425276e-320, 9.79033856e-307], # random
- [ 7.39337286e-309, 3.22135945e-309]])
- >>> np.matlib.empty((2, 2), dtype=int)
- matrix([[ 6600475, 0], # random
- [ 6586976, 22740995]])
-
- """
- ...
-
-def ones(shape, dtype=..., order=...): # -> matrix[Unknown, Unknown]:
- """
- Matrix of ones.
-
- Return a matrix of given shape and type, filled with ones.
-
- Parameters
- ----------
- shape : {sequence of ints, int}
- Shape of the matrix
- dtype : data-type, optional
- The desired data-type for the matrix, default is np.float64.
- order : {'C', 'F'}, optional
- Whether to store matrix in C- or Fortran-contiguous order,
- default is 'C'.
-
- Returns
- -------
- out : matrix
- Matrix of ones of given shape, dtype, and order.
-
- See Also
- --------
- ones : Array of ones.
- matlib.zeros : Zero matrix.
-
- Notes
- -----
- If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
- `out` becomes a single row matrix of shape ``(1,N)``.
-
- Examples
- --------
- >>> np.matlib.ones((2,3))
- matrix([[1., 1., 1.],
- [1., 1., 1.]])
-
- >>> np.matlib.ones(2)
- matrix([[1., 1.]])
-
- """
- ...
-
-def zeros(shape, dtype=..., order=...): # -> matrix[Unknown, Unknown]:
- """
- Return a matrix of given shape and type, filled with zeros.
-
- Parameters
- ----------
- shape : int or sequence of ints
- Shape of the matrix
- dtype : data-type, optional
- The desired data-type for the matrix, default is float.
- order : {'C', 'F'}, optional
- Whether to store the result in C- or Fortran-contiguous order,
- default is 'C'.
-
- Returns
- -------
- out : matrix
- Zero matrix of given shape, dtype, and order.
-
- See Also
- --------
- numpy.zeros : Equivalent array function.
- matlib.ones : Return a matrix of ones.
-
- Notes
- -----
- If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
- `out` becomes a single row matrix of shape ``(1,N)``.
-
- Examples
- --------
- >>> import numpy.matlib
- >>> np.matlib.zeros((2, 3))
- matrix([[0., 0., 0.],
- [0., 0., 0.]])
-
- >>> np.matlib.zeros(2)
- matrix([[0., 0.]])
-
- """
- ...
-
-def identity(n, dtype=...): # -> matrix[Unknown, Unknown]:
- """
- Returns the square identity matrix of given size.
-
- Parameters
- ----------
- n : int
- Size of the returned identity matrix.
- dtype : data-type, optional
- Data-type of the output. Defaults to ``float``.
-
- Returns
- -------
- out : matrix
- `n` x `n` matrix with its main diagonal set to one,
- and all other elements zero.
-
- See Also
- --------
- numpy.identity : Equivalent array function.
- matlib.eye : More general matrix identity function.
-
- Examples
- --------
- >>> import numpy.matlib
- >>> np.matlib.identity(3, dtype=int)
- matrix([[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]])
-
- """
- ...
-
-def eye(n, M=..., k=..., dtype=..., order=...): # -> matrix[Any, Any]:
- """
- Return a matrix with ones on the diagonal and zeros elsewhere.
-
- Parameters
- ----------
- n : int
- Number of rows in the output.
- M : int, optional
- Number of columns in the output, defaults to `n`.
- k : int, optional
- Index of the diagonal: 0 refers to the main diagonal,
- a positive value refers to an upper diagonal,
- and a negative value to a lower diagonal.
- dtype : dtype, optional
- Data-type of the returned matrix.
- order : {'C', 'F'}, optional
- Whether the output should be stored in row-major (C-style) or
- column-major (Fortran-style) order in memory.
-
- .. versionadded:: 1.14.0
-
- Returns
- -------
- I : matrix
- A `n` x `M` matrix where all elements are equal to zero,
- except for the `k`-th diagonal, whose values are equal to one.
-
- See Also
- --------
- numpy.eye : Equivalent array function.
- identity : Square identity matrix.
-
- Examples
- --------
- >>> import numpy.matlib
- >>> np.matlib.eye(3, k=1, dtype=float)
- matrix([[0., 1., 0.],
- [0., 0., 1.],
- [0., 0., 0.]])
-
- """
- ...
-
-def rand(*args): # -> matrix[Any, Any]:
- """
- Return a matrix of random values with given shape.
-
- Create a matrix of the given shape and propagate it with
- random samples from a uniform distribution over ``[0, 1)``.
-
- Parameters
- ----------
- \\*args : Arguments
- Shape of the output.
- If given as N integers, each integer specifies the size of one
- dimension.
- If given as a tuple, this tuple gives the complete shape.
-
- Returns
- -------
- out : ndarray
- The matrix of random values with shape given by `\\*args`.
-
- See Also
- --------
- randn, numpy.random.RandomState.rand
-
- Examples
- --------
- >>> np.random.seed(123)
- >>> import numpy.matlib
- >>> np.matlib.rand(2, 3)
- matrix([[0.69646919, 0.28613933, 0.22685145],
- [0.55131477, 0.71946897, 0.42310646]])
- >>> np.matlib.rand((2, 3))
- matrix([[0.9807642 , 0.68482974, 0.4809319 ],
- [0.39211752, 0.34317802, 0.72904971]])
-
- If the first argument is a tuple, other arguments are ignored:
-
- >>> np.matlib.rand((2, 3), 4)
- matrix([[0.43857224, 0.0596779 , 0.39804426],
- [0.73799541, 0.18249173, 0.17545176]])
-
- """
- ...
-
-def randn(*args): # -> matrix[Any, Any]:
- """
- Return a random matrix with data from the "standard normal" distribution.
-
- `randn` generates a matrix filled with random floats sampled from a
- univariate "normal" (Gaussian) distribution of mean 0 and variance 1.
-
- Parameters
- ----------
- \\*args : Arguments
- Shape of the output.
- If given as N integers, each integer specifies the size of one
- dimension. If given as a tuple, this tuple gives the complete shape.
-
- Returns
- -------
- Z : matrix of floats
- A matrix of floating-point samples drawn from the standard normal
- distribution.
-
- See Also
- --------
- rand, numpy.random.RandomState.randn
-
- Notes
- -----
- For random samples from the normal distribution with mean ``mu`` and
- standard deviation ``sigma``, use::
-
- sigma * np.matlib.randn(...) + mu
-
- Examples
- --------
- >>> np.random.seed(123)
- >>> import numpy.matlib
- >>> np.matlib.randn(1)
- matrix([[-1.0856306]])
- >>> np.matlib.randn(1, 2, 3)
- matrix([[ 0.99734545, 0.2829785 , -1.50629471],
- [-0.57860025, 1.65143654, -2.42667924]])
-
- Two-by-four matrix of samples from the normal distribution with
- mean 3 and standard deviation 2.5:
-
- >>> 2.5 * np.matlib.randn((2, 4)) + 3
- matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462],
- [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]])
-
- """
- ...
-
-def repmat(a, m, n):
- """
- Repeat a 0-D to 2-D array or matrix MxN times.
-
- Parameters
- ----------
- a : array_like
- The array or matrix to be repeated.
- m, n : int
- The number of times `a` is repeated along the first and second axes.
-
- Returns
- -------
- out : ndarray
- The result of repeating `a`.
-
- Examples
- --------
- >>> import numpy.matlib
- >>> a0 = np.array(1)
- >>> np.matlib.repmat(a0, 2, 3)
- array([[1, 1, 1],
- [1, 1, 1]])
-
- >>> a1 = np.arange(4)
- >>> np.matlib.repmat(a1, 2, 2)
- array([[0, 1, 2, 3, 0, 1, 2, 3],
- [0, 1, 2, 3, 0, 1, 2, 3]])
-
- >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3))
- >>> np.matlib.repmat(a2, 2, 3)
- matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2],
- [3, 4, 5, 3, 4, 5, 3, 4, 5],
- [0, 1, 2, 0, 1, 2, 0, 1, 2],
- [3, 4, 5, 3, 4, 5, 3, 4, 5]])
-
- """
- ...
-
diff --git a/typings/numpy/matrixlib/__init__.pyi b/typings/numpy/matrixlib/__init__.pyi
deleted file mode 100644
index b0a23bf..0000000
--- a/typings/numpy/matrixlib/__init__.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy._pytesttester import PytestTester
-from numpy import matrix as matrix
-from numpy.matrixlib.defmatrix import asmatrix as asmatrix, bmat as bmat, mat as mat
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
diff --git a/typings/numpy/matrixlib/defmatrix.pyi b/typings/numpy/matrixlib/defmatrix.pyi
deleted file mode 100644
index da0cb4e..0000000
--- a/typings/numpy/matrixlib/defmatrix.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Mapping, Sequence
-from typing import Any
-from numpy import matrix as matrix
-from numpy._typing import ArrayLike, DTypeLike, NDArray
-
-__all__: list[str]
-def bmat(obj: str | Sequence[ArrayLike] | NDArray[Any], ldict: None | Mapping[str, Any] = ..., gdict: None | Mapping[str, Any] = ...) -> matrix[Any, Any]:
- ...
-
-def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]:
- ...
-
-mat = ...
diff --git a/typings/numpy/polynomial/__init__.pyi b/typings/numpy/polynomial/__init__.pyi
deleted file mode 100644
index f996860..0000000
--- a/typings/numpy/polynomial/__init__.pyi
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy._pytesttester import PytestTester
-from numpy.polynomial import chebyshev as chebyshev, hermite as hermite, hermite_e as hermite_e, laguerre as laguerre, legendre as legendre, polynomial as polynomial
-from numpy.polynomial.chebyshev import Chebyshev as Chebyshev
-from numpy.polynomial.hermite import Hermite as Hermite
-from numpy.polynomial.hermite_e import HermiteE as HermiteE
-from numpy.polynomial.laguerre import Laguerre as Laguerre
-from numpy.polynomial.legendre import Legendre as Legendre
-from numpy.polynomial.polynomial import Polynomial as Polynomial
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
-def set_default_printstyle(style):
- ...
-
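
The polynomial stubs removed above and below cover the convenience classes re-exported from `numpy.polynomial`. A short doctest-style sketch of how that public API is used (illustrative only, not part of the patch):

    >>> import numpy as np
    >>> from numpy.polynomial import Polynomial
    >>> x = np.linspace(-1.0, 1.0, 11)
    >>> y = 2.0 * x**2 - 1.0
    >>> p = Polynomial.fit(x, y, deg=2)        # least-squares fit of degree 2
    >>> bool(np.allclose(p(x), y))
    True
    >>> bool(np.allclose(p.convert().coef, [-1.0, 0.0, 2.0]))   # power-basis coefficients
    True
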
diff --git a/typings/numpy/polynomial/_polybase.pyi b/typings/numpy/polynomial/_polybase.pyi
deleted file mode 100644
index 8be9d3e..0000000
--- a/typings/numpy/polynomial/_polybase.pyi
+++ /dev/null
@@ -1,174 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import abc
-from typing import Any, ClassVar
-
-__all__: list[str]
-class ABCPolyBase(abc.ABC):
- __hash__: ClassVar[None]
- __array_ufunc__: ClassVar[None]
- maxpower: ClassVar[int]
- coef: Any
- @property
- def symbol(self) -> str:
- ...
-
- @property
- @abc.abstractmethod
- def domain(self):
- ...
-
- @property
- @abc.abstractmethod
- def window(self):
- ...
-
- @property
- @abc.abstractmethod
- def basis_name(self):
- ...
-
- def has_samecoef(self, other):
- ...
-
- def has_samedomain(self, other):
- ...
-
- def has_samewindow(self, other):
- ...
-
- def has_sametype(self, other):
- ...
-
- def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None:
- ...
-
- def __format__(self, fmt_str):
- ...
-
- def __call__(self, arg):
- ...
-
- def __iter__(self):
- ...
-
- def __len__(self):
- ...
-
- def __neg__(self):
- ...
-
- def __pos__(self):
- ...
-
- def __add__(self, other):
- ...
-
- def __sub__(self, other):
- ...
-
- def __mul__(self, other):
- ...
-
- def __truediv__(self, other):
- ...
-
- def __floordiv__(self, other):
- ...
-
- def __mod__(self, other):
- ...
-
- def __divmod__(self, other):
- ...
-
- def __pow__(self, other):
- ...
-
- def __radd__(self, other):
- ...
-
- def __rsub__(self, other):
- ...
-
- def __rmul__(self, other):
- ...
-
- def __rdiv__(self, other):
- ...
-
- def __rtruediv__(self, other):
- ...
-
- def __rfloordiv__(self, other):
- ...
-
- def __rmod__(self, other):
- ...
-
- def __rdivmod__(self, other):
- ...
-
- def __eq__(self, other) -> bool:
- ...
-
- def __ne__(self, other) -> bool:
- ...
-
- def copy(self):
- ...
-
- def degree(self):
- ...
-
- def cutdeg(self, deg):
- ...
-
- def trim(self, tol=...):
- ...
-
- def truncate(self, size):
- ...
-
- def convert(self, domain=..., kind=..., window=...):
- ...
-
- def mapparms(self):
- ...
-
- def integ(self, m=..., k=..., lbnd=...):
- ...
-
- def deriv(self, m=...):
- ...
-
- def roots(self):
- ...
-
- def linspace(self, n=..., domain=...):
- ...
-
- @classmethod
- def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...):
- ...
-
- @classmethod
- def fromroots(cls, roots, domain=..., window=...):
- ...
-
- @classmethod
- def identity(cls, domain=..., window=...):
- ...
-
- @classmethod
- def basis(cls, deg, domain=..., window=...):
- ...
-
- @classmethod
- def cast(cls, series, domain=..., window=...):
- ...
-
-
-
diff --git a/typings/numpy/polynomial/chebyshev.pyi b/typings/numpy/polynomial/chebyshev.pyi
deleted file mode 100644
index 926cd15..0000000
--- a/typings/numpy/polynomial/chebyshev.pyi
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-from numpy import dtype, int_, ndarray
-from numpy.polynomial._polybase import ABCPolyBase
-
-__all__: list[str]
-chebtrim = ...
-def poly2cheb(pol):
- ...
-
-def cheb2poly(c):
- ...
-
-chebdomain: ndarray[Any, dtype[int_]]
-chebzero: ndarray[Any, dtype[int_]]
-chebone: ndarray[Any, dtype[int_]]
-chebx: ndarray[Any, dtype[int_]]
-def chebline(off, scl):
- ...
-
-def chebfromroots(roots):
- ...
-
-def chebadd(c1, c2):
- ...
-
-def chebsub(c1, c2):
- ...
-
-def chebmulx(c):
- ...
-
-def chebmul(c1, c2):
- ...
-
-def chebdiv(c1, c2):
- ...
-
-def chebpow(c, pow, maxpower=...):
- ...
-
-def chebder(c, m=..., scl=..., axis=...):
- ...
-
-def chebint(c, m=..., k=..., lbnd=..., scl=..., axis=...):
- ...
-
-def chebval(x, c, tensor=...):
- ...
-
-def chebval2d(x, y, c):
- ...
-
-def chebgrid2d(x, y, c):
- ...
-
-def chebval3d(x, y, z, c):
- ...
-
-def chebgrid3d(x, y, z, c):
- ...
-
-def chebvander(x, deg):
- ...
-
-def chebvander2d(x, y, deg):
- ...
-
-def chebvander3d(x, y, z, deg):
- ...
-
-def chebfit(x, y, deg, rcond=..., full=..., w=...):
- ...
-
-def chebcompanion(c):
- ...
-
-def chebroots(c):
- ...
-
-def chebinterpolate(func, deg, args=...):
- ...
-
-def chebgauss(deg):
- ...
-
-def chebweight(x):
- ...
-
-def chebpts1(npts):
- ...
-
-def chebpts2(npts):
- ...
-
-class Chebyshev(ABCPolyBase):
- @classmethod
- def interpolate(cls, func, deg, domain=..., args=...):
- ...
-
- domain: Any
- window: Any
- basis_name: Any
-
-
diff --git a/typings/numpy/polynomial/hermite.pyi b/typings/numpy/polynomial/hermite.pyi
deleted file mode 100644
index 41ad22e..0000000
--- a/typings/numpy/polynomial/hermite.pyi
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-from numpy import dtype, float_, int_, ndarray
-from numpy.polynomial._polybase import ABCPolyBase
-
-__all__: list[str]
-hermtrim = ...
-def poly2herm(pol):
- ...
-
-def herm2poly(c):
- ...
-
-hermdomain: ndarray[Any, dtype[int_]]
-hermzero: ndarray[Any, dtype[int_]]
-hermone: ndarray[Any, dtype[int_]]
-hermx: ndarray[Any, dtype[float_]]
-def hermline(off, scl):
- ...
-
-def hermfromroots(roots):
- ...
-
-def hermadd(c1, c2):
- ...
-
-def hermsub(c1, c2):
- ...
-
-def hermmulx(c):
- ...
-
-def hermmul(c1, c2):
- ...
-
-def hermdiv(c1, c2):
- ...
-
-def hermpow(c, pow, maxpower=...):
- ...
-
-def hermder(c, m=..., scl=..., axis=...):
- ...
-
-def hermint(c, m=..., k=..., lbnd=..., scl=..., axis=...):
- ...
-
-def hermval(x, c, tensor=...):
- ...
-
-def hermval2d(x, y, c):
- ...
-
-def hermgrid2d(x, y, c):
- ...
-
-def hermval3d(x, y, z, c):
- ...
-
-def hermgrid3d(x, y, z, c):
- ...
-
-def hermvander(x, deg):
- ...
-
-def hermvander2d(x, y, deg):
- ...
-
-def hermvander3d(x, y, z, deg):
- ...
-
-def hermfit(x, y, deg, rcond=..., full=..., w=...):
- ...
-
-def hermcompanion(c):
- ...
-
-def hermroots(c):
- ...
-
-def hermgauss(deg):
- ...
-
-def hermweight(x):
- ...
-
-class Hermite(ABCPolyBase):
- domain: Any
- window: Any
- basis_name: Any
- ...
-
-
diff --git a/typings/numpy/polynomial/hermite_e.pyi b/typings/numpy/polynomial/hermite_e.pyi
deleted file mode 100644
index 4127138..0000000
--- a/typings/numpy/polynomial/hermite_e.pyi
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-from numpy import dtype, int_, ndarray
-from numpy.polynomial._polybase import ABCPolyBase
-
-__all__: list[str]
-hermetrim = ...
-def poly2herme(pol):
- ...
-
-def herme2poly(c):
- ...
-
-hermedomain: ndarray[Any, dtype[int_]]
-hermezero: ndarray[Any, dtype[int_]]
-hermeone: ndarray[Any, dtype[int_]]
-hermex: ndarray[Any, dtype[int_]]
-def hermeline(off, scl):
- ...
-
-def hermefromroots(roots):
- ...
-
-def hermeadd(c1, c2):
- ...
-
-def hermesub(c1, c2):
- ...
-
-def hermemulx(c):
- ...
-
-def hermemul(c1, c2):
- ...
-
-def hermediv(c1, c2):
- ...
-
-def hermepow(c, pow, maxpower=...):
- ...
-
-def hermeder(c, m=..., scl=..., axis=...):
- ...
-
-def hermeint(c, m=..., k=..., lbnd=..., scl=..., axis=...):
- ...
-
-def hermeval(x, c, tensor=...):
- ...
-
-def hermeval2d(x, y, c):
- ...
-
-def hermegrid2d(x, y, c):
- ...
-
-def hermeval3d(x, y, z, c):
- ...
-
-def hermegrid3d(x, y, z, c):
- ...
-
-def hermevander(x, deg):
- ...
-
-def hermevander2d(x, y, deg):
- ...
-
-def hermevander3d(x, y, z, deg):
- ...
-
-def hermefit(x, y, deg, rcond=..., full=..., w=...):
- ...
-
-def hermecompanion(c):
- ...
-
-def hermeroots(c):
- ...
-
-def hermegauss(deg):
- ...
-
-def hermeweight(x):
- ...
-
-class HermiteE(ABCPolyBase):
- domain: Any
- window: Any
- basis_name: Any
- ...
-
-
diff --git a/typings/numpy/polynomial/laguerre.pyi b/typings/numpy/polynomial/laguerre.pyi
deleted file mode 100644
index 483902c..0000000
--- a/typings/numpy/polynomial/laguerre.pyi
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-from numpy import dtype, int_, ndarray
-from numpy.polynomial._polybase import ABCPolyBase
-
-__all__: list[str]
-lagtrim = ...
-def poly2lag(pol):
- ...
-
-def lag2poly(c):
- ...
-
-lagdomain: ndarray[Any, dtype[int_]]
-lagzero: ndarray[Any, dtype[int_]]
-lagone: ndarray[Any, dtype[int_]]
-lagx: ndarray[Any, dtype[int_]]
-def lagline(off, scl):
- ...
-
-def lagfromroots(roots):
- ...
-
-def lagadd(c1, c2):
- ...
-
-def lagsub(c1, c2):
- ...
-
-def lagmulx(c):
- ...
-
-def lagmul(c1, c2):
- ...
-
-def lagdiv(c1, c2):
- ...
-
-def lagpow(c, pow, maxpower=...):
- ...
-
-def lagder(c, m=..., scl=..., axis=...):
- ...
-
-def lagint(c, m=..., k=..., lbnd=..., scl=..., axis=...):
- ...
-
-def lagval(x, c, tensor=...):
- ...
-
-def lagval2d(x, y, c):
- ...
-
-def laggrid2d(x, y, c):
- ...
-
-def lagval3d(x, y, z, c):
- ...
-
-def laggrid3d(x, y, z, c):
- ...
-
-def lagvander(x, deg):
- ...
-
-def lagvander2d(x, y, deg):
- ...
-
-def lagvander3d(x, y, z, deg):
- ...
-
-def lagfit(x, y, deg, rcond=..., full=..., w=...):
- ...
-
-def lagcompanion(c):
- ...
-
-def lagroots(c):
- ...
-
-def laggauss(deg):
- ...
-
-def lagweight(x):
- ...
-
-class Laguerre(ABCPolyBase):
- domain: Any
- window: Any
- basis_name: Any
- ...
-
-
diff --git a/typings/numpy/polynomial/legendre.pyi b/typings/numpy/polynomial/legendre.pyi
deleted file mode 100644
index 288aa26..0000000
--- a/typings/numpy/polynomial/legendre.pyi
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-from numpy import dtype, int_, ndarray
-from numpy.polynomial._polybase import ABCPolyBase
-
-__all__: list[str]
-legtrim = ...
-def poly2leg(pol):
- ...
-
-def leg2poly(c):
- ...
-
-legdomain: ndarray[Any, dtype[int_]]
-legzero: ndarray[Any, dtype[int_]]
-legone: ndarray[Any, dtype[int_]]
-legx: ndarray[Any, dtype[int_]]
-def legline(off, scl):
- ...
-
-def legfromroots(roots):
- ...
-
-def legadd(c1, c2):
- ...
-
-def legsub(c1, c2):
- ...
-
-def legmulx(c):
- ...
-
-def legmul(c1, c2):
- ...
-
-def legdiv(c1, c2):
- ...
-
-def legpow(c, pow, maxpower=...):
- ...
-
-def legder(c, m=..., scl=..., axis=...):
- ...
-
-def legint(c, m=..., k=..., lbnd=..., scl=..., axis=...):
- ...
-
-def legval(x, c, tensor=...):
- ...
-
-def legval2d(x, y, c):
- ...
-
-def leggrid2d(x, y, c):
- ...
-
-def legval3d(x, y, z, c):
- ...
-
-def leggrid3d(x, y, z, c):
- ...
-
-def legvander(x, deg):
- ...
-
-def legvander2d(x, y, deg):
- ...
-
-def legvander3d(x, y, z, deg):
- ...
-
-def legfit(x, y, deg, rcond=..., full=..., w=...):
- ...
-
-def legcompanion(c):
- ...
-
-def legroots(c):
- ...
-
-def leggauss(deg):
- ...
-
-def legweight(x):
- ...
-
-class Legendre(ABCPolyBase):
- domain: Any
- window: Any
- basis_name: Any
- ...
-
-
diff --git a/typings/numpy/polynomial/polynomial.pyi b/typings/numpy/polynomial/polynomial.pyi
deleted file mode 100644
index ad1d457..0000000
--- a/typings/numpy/polynomial/polynomial.pyi
+++ /dev/null
@@ -1,84 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any
-from numpy import dtype, int_, ndarray
-from numpy.polynomial._polybase import ABCPolyBase
-
-__all__: list[str]
-polytrim = ...
-polydomain: ndarray[Any, dtype[int_]]
-polyzero: ndarray[Any, dtype[int_]]
-polyone: ndarray[Any, dtype[int_]]
-polyx: ndarray[Any, dtype[int_]]
-def polyline(off, scl):
- ...
-
-def polyfromroots(roots):
- ...
-
-def polyadd(c1, c2):
- ...
-
-def polysub(c1, c2):
- ...
-
-def polymulx(c):
- ...
-
-def polymul(c1, c2):
- ...
-
-def polydiv(c1, c2):
- ...
-
-def polypow(c, pow, maxpower=...):
- ...
-
-def polyder(c, m=..., scl=..., axis=...):
- ...
-
-def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...):
- ...
-
-def polyval(x, c, tensor=...):
- ...
-
-def polyvalfromroots(x, r, tensor=...):
- ...
-
-def polyval2d(x, y, c):
- ...
-
-def polygrid2d(x, y, c):
- ...
-
-def polyval3d(x, y, z, c):
- ...
-
-def polygrid3d(x, y, z, c):
- ...
-
-def polyvander(x, deg):
- ...
-
-def polyvander2d(x, y, deg):
- ...
-
-def polyvander3d(x, y, z, deg):
- ...
-
-def polyfit(x, y, deg, rcond=..., full=..., w=...):
- ...
-
-def polyroots(c):
- ...
-
-class Polynomial(ABCPolyBase):
- domain: Any
- window: Any
- basis_name: Any
- ...
-
-
diff --git a/typings/numpy/polynomial/polyutils.pyi b/typings/numpy/polynomial/polyutils.pyi
deleted file mode 100644
index 359de71..0000000
--- a/typings/numpy/polynomial/polyutils.pyi
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-__all__: list[str]
-class RankWarning(UserWarning):
- ...
-
-
-def trimseq(seq):
- ...
-
-def as_series(alist, trim=...):
- ...
-
-def trimcoef(c, tol=...):
- ...
-
-def getdomain(x):
- ...
-
-def mapparms(old, new):
- ...
-
-def mapdomain(x, old, new):
- ...
-
-def format_float(x, parens=...):
- ...
-
diff --git a/typings/numpy/random/__init__.pyi b/typings/numpy/random/__init__.pyi
deleted file mode 100644
index 8bc5463..0000000
--- a/typings/numpy/random/__init__.pyi
+++ /dev/null
@@ -1,16 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy._pytesttester import PytestTester
-from numpy.random._generator import Generator as Generator, default_rng as default_rng
-from numpy.random._mt19937 import MT19937 as MT19937
-from numpy.random._pcg64 import PCG64 as PCG64, PCG64DXSM as PCG64DXSM
-from numpy.random._philox import Philox as Philox
-from numpy.random._sfc64 import SFC64 as SFC64
-from numpy.random.bit_generator import BitGenerator as BitGenerator, SeedSequence as SeedSequence
-from numpy.random.mtrand import RandomState as RandomState, beta as beta, binomial as binomial, bytes as bytes, chisquare as chisquare, choice as choice, dirichlet as dirichlet, exponential as exponential, f as f, gamma as gamma, geometric as geometric, get_bit_generator as get_bit_generator, get_state as get_state, gumbel as gumbel, hypergeometric as hypergeometric, laplace as laplace, logistic as logistic, lognormal as lognormal, logseries as logseries, multinomial as multinomial, multivariate_normal as multivariate_normal, negative_binomial as negative_binomial, noncentral_chisquare as noncentral_chisquare, noncentral_f as noncentral_f, normal as normal, pareto as pareto, permutation as permutation, poisson as poisson, power as power, rand as rand, randint as randint, randn as randn, random as random, random_integers as random_integers, random_sample as random_sample, ranf as ranf, rayleigh as rayleigh, sample as sample, seed as seed, set_bit_generator as set_bit_generator, set_state as set_state, shuffle as shuffle, standard_cauchy as standard_cauchy, standard_exponential as standard_exponential, standard_gamma as standard_gamma, standard_normal as standard_normal, standard_t as standard_t, triangular as triangular, uniform as uniform, vonmises as vonmises, wald as wald, weibull as weibull, zipf as zipf
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
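
The `numpy.random` stubs dropped here and in the following hunk mostly encode size- and dtype-dependent overloads on `Generator` methods (a scalar result for `size=None`, an array otherwise). A small doctest-style sketch of that behaviour (illustrative only, not part of the patch):

    >>> import numpy as np
    >>> rng = np.random.default_rng(seed=12345)   # Generator over a PCG64 bit generator
    >>> x = rng.integers(0, 10, size=(2, 3))      # array draw; default integer dtype is int64
    >>> x.shape, x.dtype
    ((2, 3), dtype('int64'))
    >>> u = rng.random()                          # size=None yields a Python float
    >>> isinstance(u, float), 0.0 <= u < 1.0
    (True, True)
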
diff --git a/typings/numpy/random/_generator.pyi b/typings/numpy/random/_generator.pyi
deleted file mode 100644
index 35bd52e..0000000
--- a/typings/numpy/random/_generator.pyi
+++ /dev/null
@@ -1,469 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Callable
-from typing import Any, Literal, TypeVar, Union, overload
-from numpy import bool_, dtype, float32, float64, int16, int32, int64, int8, int_, ndarray, uint, uint16, uint32, uint64, uint8
-from numpy.random import BitGenerator, SeedSequence
-from numpy._typing import ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, _DTypeLikeBool, _DTypeLikeInt, _DTypeLikeUInt, _DoubleCodes, _Float32Codes, _Float64Codes, _FloatLike_co, _Int16Codes, _Int32Codes, _Int64Codes, _Int8Codes, _IntCodes, _ShapeLike, _SingleCodes, _SupportsDType, _UInt16Codes, _UInt32Codes, _UInt64Codes, _UInt8Codes, _UIntCodes
-
-_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
-_DTypeLikeFloat32 = Union[dtype[float32], _SupportsDType[dtype[float32]], type[float32], _Float32Codes, _SingleCodes,]
-_DTypeLikeFloat64 = Union[dtype[float64], _SupportsDType[dtype[float64]], type[float], type[float64], _Float64Codes, _DoubleCodes,]
-class Generator:
- def __init__(self, bit_generator: BitGenerator) -> None:
- ...
-
- def __repr__(self) -> str:
- ...
-
- def __str__(self) -> str:
- ...
-
- def __getstate__(self) -> dict[str, Any]:
- ...
-
- def __setstate__(self, state: dict[str, Any]) -> None:
- ...
-
- def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]:
- ...
-
- @property
- def bit_generator(self) -> BitGenerator:
- ...
-
- def spawn(self, n_children: int) -> list[Generator]:
- ...
-
- def bytes(self, length: int) -> bytes:
- ...
-
- @overload
- def standard_normal(self, size: None = ..., dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = ...) -> float:
- ...
-
- @overload
- def standard_normal(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_normal(self, *, out: ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_normal(self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., out: None | ndarray[Any, dtype[float32]] = ...) -> ndarray[Any, dtype[float32]]:
- ...
-
- @overload
- def standard_normal(self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., out: None | ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]:
- ...
-
- @overload
- def standard_exponential(self, size: None = ..., dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., method: Literal["zig", "inv"] = ..., out: None = ...) -> float:
- ...
-
- @overload
- def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_exponential(self, *, out: ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_exponential(self, size: _ShapeLike = ..., *, method: Literal["zig", "inv"] = ..., out: None | ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_exponential(self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., method: Literal["zig", "inv"] = ..., out: None | ndarray[Any, dtype[float32]] = ...) -> ndarray[Any, dtype[float32]]:
- ...
-
- @overload
- def standard_exponential(self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., method: Literal["zig", "inv"] = ..., out: None | ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def random(self, size: None = ..., dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = ...) -> float:
- ...
-
- @overload
- def random(self, *, out: ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def random(self, size: _ShapeLike = ..., *, out: None | ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def random(self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., out: None | ndarray[Any, dtype[float32]] = ...) -> ndarray[Any, dtype[float32]]:
- ...
-
- @overload
- def random(self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., out: None | ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def beta(self, a: _FloatLike_co, b: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def exponential(self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def integers(self, low: int, high: None | int = ...) -> int:
- ...
-
- @overload
- def integers(self, low: int, high: None | int = ..., size: None = ..., dtype: _DTypeLikeBool = ..., endpoint: bool = ...) -> bool:
- ...
-
- @overload
- def integers(self, low: int, high: None | int = ..., size: None = ..., dtype: _DTypeLikeInt | _DTypeLikeUInt = ..., endpoint: bool = ...) -> int:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: _DTypeLikeBool = ..., endpoint: bool = ...) -> ndarray[Any, dtype[bool_]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[int8]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[int16]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[int32]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[uint8]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[uint16]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[uint32]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[uint64]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., endpoint: bool = ...) -> ndarray[Any, dtype[uint]]:
- ...
-
- @overload
- def choice(self, a: int, size: None = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ...) -> int:
- ...
-
- @overload
- def choice(self, a: int, size: _ShapeLike = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def choice(self, a: ArrayLike, size: None = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ...) -> Any:
- ...
-
- @overload
- def choice(self, a: ArrayLike, size: _ShapeLike = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ...) -> ndarray[Any, Any]:
- ...
-
- @overload
- def uniform(self, low: _FloatLike_co = ..., high: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def uniform(self, low: _ArrayLikeFloat_co = ..., high: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def normal(self, loc: _FloatLike_co = ..., scale: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def normal(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_gamma(self, shape: _FloatLike_co, size: None = ..., dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = ...) -> float:
- ...
-
- @overload
- def standard_gamma(self, shape: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_gamma(self, shape: _ArrayLikeFloat_co, *, out: ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_gamma(self, shape: _ArrayLikeFloat_co, size: None | _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., out: None | ndarray[Any, dtype[float32]] = ...) -> ndarray[Any, dtype[float32]]:
- ...
-
- @overload
- def standard_gamma(self, shape: _ArrayLikeFloat_co, size: None | _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., out: None | ndarray[Any, dtype[float64]] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def gamma(self, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def noncentral_f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def chisquare(self, df: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def chisquare(self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def noncentral_chisquare(self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_t(self, df: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def standard_t(self, df: _ArrayLikeFloat_co, size: None = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def pareto(self, a: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def pareto(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def weibull(self, a: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def weibull(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def power(self, a: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def power(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_cauchy(self, size: None = ...) -> float:
- ...
-
- @overload
- def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def laplace(self, loc: _FloatLike_co = ..., scale: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def laplace(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def gumbel(self, loc: _FloatLike_co = ..., scale: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def gumbel(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def logistic(self, loc: _FloatLike_co = ..., scale: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def logistic(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def lognormal(self, mean: _FloatLike_co = ..., sigma: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def lognormal(self, mean: _ArrayLikeFloat_co = ..., sigma: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float:
- ...
-
- @overload
- def rayleigh(self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def triangular(self, left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, size: None = ...) -> float:
- ...
-
- @overload
- def triangular(self, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int:
- ...
-
- @overload
- def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int:
- ...
-
- @overload
- def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int:
- ...
-
- @overload
- def poisson(self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def zipf(self, a: _FloatLike_co, size: None = ...) -> int:
- ...
-
- @overload
- def zipf(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def geometric(self, p: _FloatLike_co, size: None = ...) -> int:
- ...
-
- @overload
- def geometric(self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int:
- ...
-
- @overload
- def hypergeometric(self, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def logseries(self, p: _FloatLike_co, size: None = ...) -> int:
- ...
-
- @overload
- def logseries(self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- def multivariate_normal(self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, size: None | _ShapeLike = ..., check_valid: Literal["warn", "raise", "ignore"] = ..., tol: float = ..., *, method: Literal["svd", "eigh", "cholesky"] = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- def multinomial(self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- def multivariate_hypergeometric(self, colors: _ArrayLikeInt_co, nsample: int, size: None | _ShapeLike = ..., method: Literal["marginals", "count"] = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- def dirichlet(self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- def permuted(self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ...) -> ndarray[Any, Any]:
- ...
-
- def shuffle(self, x: ArrayLike, axis: int = ...) -> None:
- ...
-
-
-
-def default_rng(seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ...) -> Generator:
- ...
-
diff --git a/typings/numpy/random/_mt19937.pyi b/typings/numpy/random/_mt19937.pyi
deleted file mode 100644
index d47d0e1..0000000
--- a/typings/numpy/random/_mt19937.pyi
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, TypedDict
-from numpy import dtype, ndarray, uint32
-from numpy.random.bit_generator import BitGenerator, SeedSequence
-from numpy._typing import _ArrayLikeInt_co
-
-class _MT19937Internal(TypedDict):
- key: ndarray[Any, dtype[uint32]]
- pos: int
- ...
-
-
-class _MT19937State(TypedDict):
- bit_generator: str
- state: _MT19937Internal
- ...
-
-
-class MT19937(BitGenerator):
- def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None:
- ...
-
- def jumped(self, jumps: int = ...) -> MT19937:
- ...
-
- @property
- def state(self) -> _MT19937State:
- ...
-
- @state.setter
- def state(self, value: _MT19937State) -> None:
- ...
-
-
-
diff --git a/typings/numpy/random/_pcg64.pyi b/typings/numpy/random/_pcg64.pyi
deleted file mode 100644
index cef90b7..0000000
--- a/typings/numpy/random/_pcg64.pyi
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import TypedDict
-from numpy.random.bit_generator import BitGenerator, SeedSequence
-from numpy._typing import _ArrayLikeInt_co
-
-class _PCG64Internal(TypedDict):
- state: int
- inc: int
- ...
-
-
-class _PCG64State(TypedDict):
- bit_generator: str
- state: _PCG64Internal
- has_uint32: int
- uinteger: int
- ...
-
-
-class PCG64(BitGenerator):
- def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None:
- ...
-
- def jumped(self, jumps: int = ...) -> PCG64:
- ...
-
- @property
- def state(self) -> _PCG64State:
- ...
-
- @state.setter
- def state(self, value: _PCG64State) -> None:
- ...
-
- def advance(self, delta: int) -> PCG64:
- ...
-
-
-
-class PCG64DXSM(BitGenerator):
- def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None:
- ...
-
- def jumped(self, jumps: int = ...) -> PCG64DXSM:
- ...
-
- @property
- def state(self) -> _PCG64State:
- ...
-
- @state.setter
- def state(self, value: _PCG64State) -> None:
- ...
-
- def advance(self, delta: int) -> PCG64DXSM:
- ...
-
-
-
diff --git a/typings/numpy/random/_philox.pyi b/typings/numpy/random/_philox.pyi
deleted file mode 100644
index be50983..0000000
--- a/typings/numpy/random/_philox.pyi
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, TypedDict
-from numpy import dtype, ndarray, uint64
-from numpy.random.bit_generator import BitGenerator, SeedSequence
-from numpy._typing import _ArrayLikeInt_co
-
-class _PhiloxInternal(TypedDict):
- counter: ndarray[Any, dtype[uint64]]
- key: ndarray[Any, dtype[uint64]]
- ...
-
-
-class _PhiloxState(TypedDict):
- bit_generator: str
- state: _PhiloxInternal
- buffer: ndarray[Any, dtype[uint64]]
- buffer_pos: int
- has_uint32: int
- uinteger: int
- ...
-
-
-class Philox(BitGenerator):
- def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ..., counter: None | _ArrayLikeInt_co = ..., key: None | _ArrayLikeInt_co = ...) -> None:
- ...
-
- @property
- def state(self) -> _PhiloxState:
- ...
-
- @state.setter
- def state(self, value: _PhiloxState) -> None:
- ...
-
- def jumped(self, jumps: int = ...) -> Philox:
- ...
-
- def advance(self, delta: int) -> Philox:
- ...
-
-
-
diff --git a/typings/numpy/random/_sfc64.pyi b/typings/numpy/random/_sfc64.pyi
deleted file mode 100644
index 80907dc..0000000
--- a/typings/numpy/random/_sfc64.pyi
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from typing import Any, TypedDict
-from numpy import dtype as dtype, ndarray as ndarray, uint64
-from numpy.random.bit_generator import BitGenerator, SeedSequence
-from numpy._typing import _ArrayLikeInt_co
-
-class _SFC64Internal(TypedDict):
- state: ndarray[Any, dtype[uint64]]
- ...
-
-
-class _SFC64State(TypedDict):
- bit_generator: str
- state: _SFC64Internal
- has_uint32: int
- uinteger: int
- ...
-
-
-class SFC64(BitGenerator):
- def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None:
- ...
-
- @property
- def state(self) -> _SFC64State:
- ...
-
- @state.setter
- def state(self, value: _SFC64State) -> None:
- ...
-
-
-
diff --git a/typings/numpy/random/bit_generator.pyi b/typings/numpy/random/bit_generator.pyi
deleted file mode 100644
index 30ab035..0000000
--- a/typings/numpy/random/bit_generator.pyi
+++ /dev/null
@@ -1,131 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import abc
-from threading import Lock
-from collections.abc import Callable, Mapping, Sequence
-from typing import Any, Literal, NamedTuple, TypeVar, TypedDict, Union, overload
-from numpy import dtype, ndarray, uint32, uint64
-from numpy._typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes
-
-_T = TypeVar("_T")
-_DTypeLikeUint32 = Union[dtype[uint32], _SupportsDType[dtype[uint32]], type[uint32], _UInt32Codes,]
-_DTypeLikeUint64 = Union[dtype[uint64], _SupportsDType[dtype[uint64]], type[uint64], _UInt64Codes,]
-class _SeedSeqState(TypedDict):
- entropy: None | int | Sequence[int]
- spawn_key: tuple[int, ...]
- pool_size: int
- n_children_spawned: int
- ...
-
-
-class _Interface(NamedTuple):
- state_address: Any
- state: Any
- next_uint64: Any
- next_uint32: Any
- next_double: Any
- bit_generator: Any
- ...
-
-
-class ISeedSequence(abc.ABC):
- @abc.abstractmethod
- def generate_state(self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...) -> ndarray[Any, dtype[uint32 | uint64]]:
- ...
-
-
-
-class ISpawnableSeedSequence(ISeedSequence):
- @abc.abstractmethod
- def spawn(self: _T, n_children: int) -> list[_T]:
- ...
-
-
-
-class SeedlessSeedSequence(ISpawnableSeedSequence):
- def generate_state(self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...) -> ndarray[Any, dtype[uint32 | uint64]]:
- ...
-
- def spawn(self: _T, n_children: int) -> list[_T]:
- ...
-
-
-
-class SeedSequence(ISpawnableSeedSequence):
- entropy: None | int | Sequence[int]
- spawn_key: tuple[int, ...]
- pool_size: int
- n_children_spawned: int
- pool: ndarray[Any, dtype[uint32]]
- def __init__(self, entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ..., *, spawn_key: Sequence[int] = ..., pool_size: int = ..., n_children_spawned: int = ...) -> None:
- ...
-
- def __repr__(self) -> str:
- ...
-
- @property
- def state(self) -> _SeedSeqState:
- ...
-
- def generate_state(self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...) -> ndarray[Any, dtype[uint32 | uint64]]:
- ...
-
- def spawn(self, n_children: int) -> list[SeedSequence]:
- ...
-
-
-
-class BitGenerator(abc.ABC):
- lock: Lock
- def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None:
- ...
-
- def __getstate__(self) -> dict[str, Any]:
- ...
-
- def __setstate__(self, state: dict[str, Any]) -> None:
- ...
-
- def __reduce__(self) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]:
- ...
-
- @property
- @abc.abstractmethod
- def state(self) -> Mapping[str, Any]:
- ...
-
- @state.setter
- def state(self, value: Mapping[str, Any]) -> None:
- ...
-
- @property
- def seed_seq(self) -> ISeedSequence:
- ...
-
- def spawn(self, n_children: int) -> list[BitGenerator]:
- ...
-
- @overload
- def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int:
- ...
-
- @overload
- def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]:
- ...
-
- @overload
- def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None:
- ...
-
- @property
- def ctypes(self) -> _Interface:
- ...
-
- @property
- def cffi(self) -> _Interface:
- ...
-
-
-
diff --git a/typings/numpy/random/mtrand.pyi b/typings/numpy/random/mtrand.pyi
deleted file mode 100644
index 1c03db6..0000000
--- a/typings/numpy/random/mtrand.pyi
+++ /dev/null
@@ -1,513 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import builtins
-from collections.abc import Callable
-from typing import Any, Literal, Union, overload
-from numpy import bool_, dtype, float32, float64, int16, int32, int64, int8, int_, ndarray, uint, uint16, uint32, uint64, uint8
-from numpy.random.bit_generator import BitGenerator
-from numpy._typing import ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, _DTypeLikeBool, _DTypeLikeInt, _DTypeLikeUInt, _DoubleCodes, _Float32Codes, _Float64Codes, _Int16Codes, _Int32Codes, _Int64Codes, _Int8Codes, _IntCodes, _ShapeLike, _SingleCodes, _SupportsDType, _UInt16Codes, _UInt32Codes, _UInt64Codes, _UInt8Codes, _UIntCodes
-
-_DTypeLikeFloat32 = Union[dtype[float32], _SupportsDType[dtype[float32]], type[float32], _Float32Codes, _SingleCodes,]
-_DTypeLikeFloat64 = Union[dtype[float64], _SupportsDType[dtype[float64]], type[float], type[float64], _Float64Codes, _DoubleCodes,]
-class RandomState:
- _bit_generator: BitGenerator
- def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None:
- ...
-
- def __repr__(self) -> str:
- ...
-
- def __str__(self) -> str:
- ...
-
- def __getstate__(self) -> dict[str, Any]:
- ...
-
- def __setstate__(self, state: dict[str, Any]) -> None:
- ...
-
- def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]:
- ...
-
- def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None:
- ...
-
- @overload
- def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]:
- ...
-
- @overload
- def get_state(self, legacy: Literal[True] = ...) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]:
- ...
-
- def set_state(self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]) -> None:
- ...
-
- @overload
- def random_sample(self, size: None = ...) -> float:
- ...
-
- @overload
- def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def random(self, size: None = ...) -> float:
- ...
-
- @overload
- def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def beta(self, a: float, b: float, size: None = ...) -> float:
- ...
-
- @overload
- def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def exponential(self, scale: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def exponential(self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_exponential(self, size: None = ...) -> float:
- ...
-
- @overload
- def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def tomaxint(self, size: None = ...) -> int:
- ...
-
- @overload
- def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def randint(self, low: int, high: None | int = ...) -> int:
- ...
-
- @overload
- def randint(self, low: int, high: None | int = ..., size: None = ..., dtype: _DTypeLikeBool = ...) -> bool:
- ...
-
- @overload
- def randint(self, low: int, high: None | int = ..., size: None = ..., dtype: _DTypeLikeInt | _DTypeLikeUInt = ...) -> int:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: _DTypeLikeBool = ...) -> ndarray[Any, dtype[bool_]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...) -> ndarray[Any, dtype[int8]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...) -> ndarray[Any, dtype[int16]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...) -> ndarray[Any, dtype[int32]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...) -> ndarray[Any, dtype[int64]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...) -> ndarray[Any, dtype[uint8]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...) -> ndarray[Any, dtype[uint16]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...) -> ndarray[Any, dtype[uint32]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...) -> ndarray[Any, dtype[uint64]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def randint(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...) -> ndarray[Any, dtype[uint]]:
- ...
-
- def bytes(self, length: int) -> builtins.bytes:
- ...
-
- @overload
- def choice(self, a: int, size: None = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ...) -> int:
- ...
-
- @overload
- def choice(self, a: int, size: _ShapeLike = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def choice(self, a: ArrayLike, size: None = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ...) -> Any:
- ...
-
- @overload
- def choice(self, a: ArrayLike, size: _ShapeLike = ..., replace: bool = ..., p: None | _ArrayLikeFloat_co = ...) -> ndarray[Any, Any]:
- ...
-
- @overload
- def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def uniform(self, low: _ArrayLikeFloat_co = ..., high: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def rand(self) -> float:
- ...
-
- @overload
- def rand(self, *args: int) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def randn(self) -> float:
- ...
-
- @overload
- def randn(self, *args: int) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int:
- ...
-
- @overload
- def random_integers(self, low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def standard_normal(self, size: None = ...) -> float:
- ...
-
- @overload
- def standard_normal(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def normal(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_gamma(self, shape: float, size: None = ...) -> float:
- ...
-
- @overload
- def standard_gamma(self, shape: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def gamma(self, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def f(self, dfnum: float, dfden: float, size: None = ...) -> float:
- ...
-
- @overload
- def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float:
- ...
-
- @overload
- def noncentral_f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def chisquare(self, df: float, size: None = ...) -> float:
- ...
-
- @overload
- def chisquare(self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float:
- ...
-
- @overload
- def noncentral_chisquare(self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_t(self, df: float, size: None = ...) -> float:
- ...
-
- @overload
- def standard_t(self, df: _ArrayLikeFloat_co, size: None = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def vonmises(self, mu: float, kappa: float, size: None = ...) -> float:
- ...
-
- @overload
- def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def pareto(self, a: float, size: None = ...) -> float:
- ...
-
- @overload
- def pareto(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def weibull(self, a: float, size: None = ...) -> float:
- ...
-
- @overload
- def weibull(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def power(self, a: float, size: None = ...) -> float:
- ...
-
- @overload
- def power(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def standard_cauchy(self, size: None = ...) -> float:
- ...
-
- @overload
- def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def laplace(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def gumbel(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def logistic(self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def lognormal(self, mean: _ArrayLikeFloat_co = ..., sigma: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def rayleigh(self, scale: float = ..., size: None = ...) -> float:
- ...
-
- @overload
- def rayleigh(self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def wald(self, mean: float, scale: float, size: None = ...) -> float:
- ...
-
- @overload
- def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float:
- ...
-
- @overload
- def triangular(self, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- @overload
- def binomial(self, n: int, p: float, size: None = ...) -> int:
- ...
-
- @overload
- def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def negative_binomial(self, n: float, p: float, size: None = ...) -> int:
- ...
-
- @overload
- def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def poisson(self, lam: float = ..., size: None = ...) -> int:
- ...
-
- @overload
- def poisson(self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def zipf(self, a: float, size: None = ...) -> int:
- ...
-
- @overload
- def zipf(self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def geometric(self, p: float, size: None = ...) -> int:
- ...
-
- @overload
- def geometric(self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int:
- ...
-
- @overload
- def hypergeometric(self, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def logseries(self, p: float, size: None = ...) -> int:
- ...
-
- @overload
- def logseries(self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- def multivariate_normal(self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, size: None | _ShapeLike = ..., check_valid: Literal["warn", "raise", "ignore"] = ..., tol: float = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- def multinomial(self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[int_]]:
- ...
-
- def dirichlet(self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...) -> ndarray[Any, dtype[float64]]:
- ...
-
- def shuffle(self, x: ArrayLike) -> None:
- ...
-
- @overload
- def permutation(self, x: int) -> ndarray[Any, dtype[int_]]:
- ...
-
- @overload
- def permutation(self, x: ArrayLike) -> ndarray[Any, Any]:
- ...
-
-
-
-_rand: RandomState
-beta = ...
-binomial = ...
-bytes = ...
-chisquare = ...
-choice = ...
-dirichlet = ...
-exponential = ...
-f = ...
-gamma = ...
-get_state = ...
-geometric = ...
-gumbel = ...
-hypergeometric = ...
-laplace = ...
-logistic = ...
-lognormal = ...
-logseries = ...
-multinomial = ...
-multivariate_normal = ...
-negative_binomial = ...
-noncentral_chisquare = ...
-noncentral_f = ...
-normal = ...
-pareto = ...
-permutation = ...
-poisson = ...
-power = ...
-rand = ...
-randint = ...
-randn = ...
-random = ...
-random_integers = ...
-random_sample = ...
-rayleigh = ...
-seed = ...
-set_state = ...
-shuffle = ...
-standard_cauchy = ...
-standard_exponential = ...
-standard_gamma = ...
-standard_normal = ...
-standard_t = ...
-triangular = ...
-uniform = ...
-vonmises = ...
-wald = ...
-weibull = ...
-zipf = ...
-sample = ...
-ranf = ...
-def set_bit_generator(bitgen: BitGenerator) -> None:
- ...
-
-def get_bit_generator() -> BitGenerator:
- ...
-
diff --git a/typings/numpy/testing/__init__.pyi b/typings/numpy/testing/__init__.pyi
deleted file mode 100644
index 7426a5f..0000000
--- a/typings/numpy/testing/__init__.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy._pytesttester import PytestTester
-from unittest import TestCase as TestCase
-from numpy.testing._private.utils import HAS_LAPACK64 as HAS_LAPACK64, HAS_REFCOUNT as HAS_REFCOUNT, IS_PYPY as IS_PYPY, IS_PYSTON as IS_PYSTON, IgnoreException as IgnoreException, KnownFailureException as KnownFailureException, SkipTest as SkipTest, assert_ as assert_, assert_allclose as assert_allclose, assert_almost_equal as assert_almost_equal, assert_approx_equal as assert_approx_equal, assert_array_almost_equal as assert_array_almost_equal, assert_array_almost_equal_nulp as assert_array_almost_equal_nulp, assert_array_compare as assert_array_compare, assert_array_equal as assert_array_equal, assert_array_less as assert_array_less, assert_array_max_ulp as assert_array_max_ulp, assert_equal as assert_equal, assert_no_gc_cycles as assert_no_gc_cycles, assert_no_warnings as assert_no_warnings, assert_raises as assert_raises, assert_raises_regex as assert_raises_regex, assert_string_equal as assert_string_equal, assert_warns as assert_warns, break_cycles as break_cycles, build_err_msg as build_err_msg, clear_and_catch_warnings as clear_and_catch_warnings, decorate_methods as decorate_methods, jiffies as jiffies, measure as measure, memusage as memusage, print_assert_equal as print_assert_equal, rundocs as rundocs, runstring as runstring, suppress_warnings as suppress_warnings, tempdir as tempdir, temppath as temppath, verbose as verbose
-
-__all__: list[str]
-__path__: list[str]
-test: PytestTester
diff --git a/typings/numpy/testing/_private/utils.pyi b/typings/numpy/testing/_private/utils.pyi
deleted file mode 100644
index 4c1b100..0000000
--- a/typings/numpy/testing/_private/utils.pyi
+++ /dev/null
@@ -1,241 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import os
-import sys
-import ast
-import types
-import warnings
-import unittest
-import contextlib
-from re import Pattern
-from collections.abc import Callable, Iterable, Sequence
-from typing import Any, AnyStr, ClassVar, Final, Literal as L, ParamSpec, SupportsIndex, TypeVar, Union, overload, type_check_only
-from numpy import _FloatValue, bool_, number, object_
-from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLikeDT64_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co
-
-if sys.version_info >= (3, 10):
- ...
-else:
- ...
-_P = ParamSpec("_P")
-_T = TypeVar("_T")
-_ET = TypeVar("_ET", bound=BaseException)
-_FT = TypeVar("_FT", bound=Callable[..., Any])
-_ComparisonFunc = Callable[[NDArray[Any], NDArray[Any]], Union[bool, bool_, number[Any], NDArray[Union[bool_, number[Any], object_]],],]
-__all__: list[str]
-class KnownFailureException(Exception):
- ...
-
-
-class IgnoreException(Exception):
- ...
-
-
-class clear_and_catch_warnings(warnings.catch_warnings):
- class_modules: ClassVar[tuple[types.ModuleType, ...]]
- modules: set[types.ModuleType]
- @overload
- def __new__(cls, record: L[False] = ..., modules: Iterable[types.ModuleType] = ...) -> _clear_and_catch_warnings_without_records:
- ...
-
- @overload
- def __new__(cls, record: L[True], modules: Iterable[types.ModuleType] = ...) -> _clear_and_catch_warnings_with_records:
- ...
-
- @overload
- def __new__(cls, record: bool, modules: Iterable[types.ModuleType] = ...) -> clear_and_catch_warnings:
- ...
-
- def __enter__(self) -> None | list[warnings.WarningMessage]:
- ...
-
- def __exit__(self, __exc_type: None | type[BaseException] = ..., __exc_val: None | BaseException = ..., __exc_tb: None | types.TracebackType = ...) -> None:
- ...
-
-
-
-@type_check_only
-class _clear_and_catch_warnings_with_records(clear_and_catch_warnings):
- def __enter__(self) -> list[warnings.WarningMessage]:
- ...
-
-
-
-@type_check_only
-class _clear_and_catch_warnings_without_records(clear_and_catch_warnings):
- def __enter__(self) -> None:
- ...
-
-
-
-class suppress_warnings:
- log: list[warnings.WarningMessage]
- def __init__(self, forwarding_rule: L["always", "module", "once", "location"] = ...) -> None:
- ...
-
- def filter(self, category: type[Warning] = ..., message: str = ..., module: None | types.ModuleType = ...) -> None:
- ...
-
- def record(self, category: type[Warning] = ..., message: str = ..., module: None | types.ModuleType = ...) -> list[warnings.WarningMessage]:
- ...
-
- def __enter__(self: _T) -> _T:
- ...
-
- def __exit__(self, __exc_type: None | type[BaseException] = ..., __exc_val: None | BaseException = ..., __exc_tb: None | types.TracebackType = ...) -> None:
- ...
-
- def __call__(self, func: _FT) -> _FT:
- ...
-
-
-
-verbose: int
-IS_PYPY: Final[bool]
-IS_PYSTON: Final[bool]
-HAS_REFCOUNT: Final[bool]
-HAS_LAPACK64: Final[bool]
-def assert_(val: object, msg: str | Callable[[], str] = ...) -> None:
- ...
-
-if sys.platform == "win32" or sys.platform == "cygwin":
- ...
-else:
- def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int:
- ...
-
-if sys.platform == "linux":
- def jiffies(_proc_pid_stat: str | bytes | os.PathLike[Any] = ..., _load_time: list[float] = ...) -> int:
- ...
-
-else:
- ...
-def build_err_msg(arrays: Iterable[object], err_msg: str, header: str = ..., verbose: bool = ..., names: Sequence[str] = ..., precision: None | SupportsIndex = ...) -> str:
- ...
-
-def assert_equal(actual: object, desired: object, err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-def print_assert_equal(test_string: str, actual: object, desired: object) -> None:
- ...
-
-def assert_almost_equal(actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, decimal: int = ..., err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-def assert_approx_equal(actual: _FloatValue, desired: _FloatValue, significant: int = ..., err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-def assert_array_compare(comparison: _ComparisonFunc, x: ArrayLike, y: ArrayLike, err_msg: str = ..., verbose: bool = ..., header: str = ..., precision: SupportsIndex = ..., equal_nan: bool = ..., equal_inf: bool = ..., *, strict: bool = ...) -> None:
- ...
-
-def assert_array_equal(x: ArrayLike, y: ArrayLike, err_msg: str = ..., verbose: bool = ..., *, strict: bool = ...) -> None:
- ...
-
-def assert_array_almost_equal(x: _ArrayLikeNumber_co | _ArrayLikeObject_co, y: _ArrayLikeNumber_co | _ArrayLikeObject_co, decimal: float = ..., err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-@overload
-def assert_array_less(x: _ArrayLikeNumber_co | _ArrayLikeObject_co, y: _ArrayLikeNumber_co | _ArrayLikeObject_co, err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-@overload
-def assert_array_less(x: _ArrayLikeTD64_co, y: _ArrayLikeTD64_co, err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-@overload
-def assert_array_less(x: _ArrayLikeDT64_co, y: _ArrayLikeDT64_co, err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-def runstring(astr: str | bytes | types.CodeType, dict: None | dict[str, Any]) -> Any:
- ...
-
-def assert_string_equal(actual: str, desired: str) -> None:
- ...
-
-def rundocs(filename: None | str | os.PathLike[str] = ..., raise_on_error: bool = ...) -> None:
- ...
-
-def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]:
- ...
-
-@overload
-def assert_raises(expected_exception: type[BaseException] | tuple[type[BaseException], ...], callable: Callable[_P, Any], /, *args: _P.args, **kwargs: _P.kwargs) -> None:
- ...
-
-@overload
-def assert_raises(expected_exception: type[_ET] | tuple[type[_ET], ...], *, msg: None | str = ...) -> unittest.case._AssertRaisesContext[_ET]:
- ...
-
-@overload
-def assert_raises_regex(expected_exception: type[BaseException] | tuple[type[BaseException], ...], expected_regex: str | bytes | Pattern[Any], callable: Callable[_P, Any], /, *args: _P.args, **kwargs: _P.kwargs) -> None:
- ...
-
-@overload
-def assert_raises_regex(expected_exception: type[_ET] | tuple[type[_ET], ...], expected_regex: str | bytes | Pattern[Any], *, msg: None | str = ...) -> unittest.case._AssertRaisesContext[_ET]:
- ...
-
-def decorate_methods(cls: type[Any], decorator: Callable[[Callable[..., Any]], Any], testmatch: None | str | bytes | Pattern[Any] = ...) -> None:
- ...
-
-def measure(code_str: str | bytes | ast.mod | ast.AST, times: int = ..., label: None | str = ...) -> float:
- ...
-
-@overload
-def assert_allclose(actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, rtol: float = ..., atol: float = ..., equal_nan: bool = ..., err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-@overload
-def assert_allclose(actual: _ArrayLikeTD64_co, desired: _ArrayLikeTD64_co, rtol: float = ..., atol: float = ..., equal_nan: bool = ..., err_msg: str = ..., verbose: bool = ...) -> None:
- ...
-
-def assert_array_almost_equal_nulp(x: _ArrayLikeNumber_co, y: _ArrayLikeNumber_co, nulp: float = ...) -> None:
- ...
-
-def assert_array_max_ulp(a: _ArrayLikeNumber_co, b: _ArrayLikeNumber_co, maxulp: float = ..., dtype: DTypeLike = ...) -> NDArray[Any]:
- ...
-
-@overload
-def assert_warns(warning_class: type[Warning]) -> contextlib._GeneratorContextManager[None]:
- ...
-
-@overload
-def assert_warns(warning_class: type[Warning], func: Callable[_P, _T], /, *args: _P.args, **kwargs: _P.kwargs) -> _T:
- ...
-
-@overload
-def assert_no_warnings() -> contextlib._GeneratorContextManager[None]:
- ...
-
-@overload
-def assert_no_warnings(func: Callable[_P, _T], /, *args: _P.args, **kwargs: _P.kwargs) -> _T:
- ...
-
-@overload
-def tempdir(suffix: None = ..., prefix: None = ..., dir: None = ...) -> contextlib._GeneratorContextManager[str]:
- ...
-
-@overload
-def tempdir(suffix: None | AnyStr = ..., prefix: None | AnyStr = ..., dir: None | AnyStr | os.PathLike[AnyStr] = ...) -> contextlib._GeneratorContextManager[AnyStr]:
- ...
-
-@overload
-def temppath(suffix: None = ..., prefix: None = ..., dir: None = ..., text: bool = ...) -> contextlib._GeneratorContextManager[str]:
- ...
-
-@overload
-def temppath(suffix: None | AnyStr = ..., prefix: None | AnyStr = ..., dir: None | AnyStr | os.PathLike[AnyStr] = ..., text: bool = ...) -> contextlib._GeneratorContextManager[AnyStr]:
- ...
-
-@overload
-def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]:
- ...
-
-@overload
-def assert_no_gc_cycles(func: Callable[_P, Any], /, *args: _P.args, **kwargs: _P.kwargs) -> None:
- ...
-
-def break_cycles() -> None:
- ...
-
diff --git a/typings/numpy/tests/__init__.pyi b/typings/numpy/tests/__init__.pyi
deleted file mode 100644
index 006bc27..0000000
--- a/typings/numpy/tests/__init__.pyi
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
diff --git a/typings/numpy/typing/__init__.pyi b/typings/numpy/typing/__init__.pyi
deleted file mode 100644
index 12f659f..0000000
--- a/typings/numpy/typing/__init__.pyi
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray
-from numpy._typing._add_docstring import _docstrings
-from numpy._pytesttester import PytestTester
-
-"""
-============================
-Typing (:mod:`numpy.typing`)
-============================
-
-.. versionadded:: 1.20
-
-Large parts of the NumPy API have :pep:`484`-style type annotations. In
-addition a number of type aliases are available to users, most prominently
-the two below:
-
-- `ArrayLike`: objects that can be converted to arrays
-- `DTypeLike`: objects that can be converted to dtypes
-
-.. _typing-extensions: https://pypi.org/project/typing-extensions/
-
-Mypy plugin
------------
-
-.. versionadded:: 1.21
-
-.. automodule:: numpy.typing.mypy_plugin
-
-.. currentmodule:: numpy.typing
-
-Differences from the runtime NumPy API
---------------------------------------
-
-NumPy is very flexible. Trying to describe the full range of
-possibilities statically would result in types that are not very
-helpful. For that reason, the typed NumPy API is often stricter than
-the runtime NumPy API. This section describes some notable
-differences.
-
-ArrayLike
-~~~~~~~~~
-
-The `ArrayLike` type tries to avoid creating object arrays. For
-example,
-
-.. code-block:: python
-
- >>> np.array(x**2 for x in range(10))
- array(<generator object <genexpr> at ...>, dtype=object)
-
-is valid NumPy code which will create a 0-dimensional object
-array. Type checkers will complain about the above example when using
-the NumPy types however. If you really intended to do the above, then
-you can either use a ``# type: ignore`` comment:
-
-.. code-block:: python
-
- >>> np.array(x**2 for x in range(10)) # type: ignore
-
-or explicitly type the array like object as `~typing.Any`:
-
-.. code-block:: python
-
- >>> from typing import Any
- >>> array_like: Any = (x**2 for x in range(10))
- >>> np.array(array_like)
- array(<generator object <genexpr> at ...>, dtype=object)
-
-ndarray
-~~~~~~~
-
-It's possible to mutate the dtype of an array at runtime. For example,
-the following code is valid:
-
-.. code-block:: python
-
- >>> x = np.array([1, 2])
- >>> x.dtype = np.bool_
-
-This sort of mutation is not allowed by the types. Users who want to
-write statically typed code should instead use the `numpy.ndarray.view`
-method to create a view of the array with a different dtype.
-
-DTypeLike
-~~~~~~~~~
-
-The `DTypeLike` type tries to avoid creation of dtype objects using a
-dictionary of fields like the one below:
-
-.. code-block:: python
-
- >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)})
-
-Although this is valid NumPy code, the type checker will complain about it,
-since its usage is discouraged.
-Please see :ref:`Data type objects <arrays.dtypes>`
-
-Number precision
-~~~~~~~~~~~~~~~~
-
-The precision of `numpy.number` subclasses is treated as a covariant generic
-parameter (see :class:`~NBitBase`), simplifying the annotating of processes
-involving precision-based casting.
-
-.. code-block:: python
-
- >>> from typing import TypeVar
- >>> import numpy as np
- >>> import numpy.typing as npt
-
- >>> T = TypeVar("T", bound=npt.NBitBase)
- >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
- ... ...
-
-Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
-`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
-runtime, they're not necessarily considered as sub-classes.
-
-Timedelta64
-~~~~~~~~~~~
-
-The `~numpy.timedelta64` class is not considered a subclass of
-`~numpy.signedinteger`, the former only inheriting from `~numpy.generic`
-during static type checking.
-
-0D arrays
-~~~~~~~~~
-
-During runtime numpy aggressively casts any passed 0D arrays into their
-corresponding `~numpy.generic` instance. Until the introduction of shape
-typing (see :pep:`646`) it is unfortunately not possible to make the
-necessary distinction between 0D and >0D arrays. While thus not strictly
-correct, all operations that can potentially perform a 0D-array -> scalar
-cast are currently annotated as exclusively returning an `ndarray`.
-
-If it is known in advance that an operation _will_ perform a
-0D-array -> scalar cast, then one can consider manually remedying the
-situation with either `typing.cast` or a ``# type: ignore`` comment.
-
-Record array dtypes
-~~~~~~~~~~~~~~~~~~~
-
-The dtype of `numpy.recarray`, and the `numpy.rec` functions in general,
-can be specified in one of two ways:
-
-* Directly via the ``dtype`` argument.
-* With up to five helper arguments that operate via `numpy.format_parser`:
- ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``.
-
-These two approaches are currently typed as being mutually exclusive,
-*i.e.* if ``dtype`` is specified then one may not specify ``formats``.
-While this mutual exclusivity is not (strictly) enforced during runtime,
-combining both dtype specifiers can lead to unexpected or even downright
-buggy behavior.
-
-API
----
-
-"""
-__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
-if __doc__ is not None:
- ...
-test = ...
diff --git a/typings/numpy/version.pyi b/typings/numpy/version.pyi
deleted file mode 100644
index 4d3d54f..0000000
--- a/typings/numpy/version.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-version = ...
-__version__ = ...
-full_version = ...
-git_revision = ...
-release = ...
-short_version = ...
diff --git a/typings/seaborn/__init__.pyi b/typings/seaborn/__init__.pyi
deleted file mode 100644
index 646fb7c..0000000
--- a/typings/seaborn/__init__.pyi
+++ /dev/null
@@ -1,21 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import matplotlib as mpl
-from .rcmod import *
-from .utils import *
-from .palettes import *
-from .relational import *
-from .regression import *
-from .categorical import *
-from .distributions import *
-from .matrix import *
-from .miscplot import *
-from .axisgrid import *
-from .widgets import *
-from .colors import crayons, xkcd_rgb
-from . import cm
-
-_orig_rc_params = ...
-__version__ = ...
diff --git a/typings/seaborn/_core.pyi b/typings/seaborn/_core.pyi
deleted file mode 100644
index ca4b046..0000000
--- a/typings/seaborn/_core.pyi
+++ /dev/null
@@ -1,275 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._decorators import share_init_params_with_map
-
-class SemanticMapping:
- """Base class for mapping data values to plot attributes."""
- map_type = ...
- levels = ...
- lookup_table = ...
- def __init__(self, plotter) -> None:
- ...
-
- def map(cls, plotter, *args, **kwargs):
- ...
-
- def __call__(self, key, *args, **kwargs): # -> list[Unknown]:
- """Get the attribute(s) values for the data key."""
- ...
-
-
-
-@share_init_params_with_map
-class HueMapping(SemanticMapping):
- """Mapping that sets artist colors according to data values."""
- palette = ...
- norm = ...
- cmap = ...
- def __init__(self, plotter, palette=..., order=..., norm=...) -> None:
- """Map the levels of the `hue` variable to distinct colors.
-
- Parameters
- ----------
- # TODO add generic parameters
-
- """
- ...
-
- def infer_map_type(self, palette, norm, input_format, var_type): # -> Literal['categorical', 'numeric']:
- """Determine how to implement the mapping."""
- ...
-
- def categorical_mapping(self, data, palette, order): # -> tuple[list[Any], dict[Unknown, Unknown] | dict[Any, str | tuple[float, float, float]]]:
- """Determine colors when the hue mapping is categorical."""
- ...
-
- def numeric_mapping(self, data, palette, norm): # -> tuple[list[Unknown] | list[Any], dict[Unknown, Unknown], Unknown, Unknown | _ColorPalette | list[tuple[float, float, float]] | Any | list[str] | Literal['ch:']]:
- """Determine colors when the hue variable is quantitative."""
- ...
-
-
-
-@share_init_params_with_map
-class SizeMapping(SemanticMapping):
- """Mapping that sets artist sizes according to data values."""
- norm = ...
- def __init__(self, plotter, sizes=..., order=..., norm=...) -> None:
- """Map the levels of the `size` variable to distinct values.
-
- Parameters
- ----------
- # TODO add generic parameters
-
- """
- ...
-
- def infer_map_type(self, norm, sizes, var_type): # -> Literal['numeric', 'categorical']:
- ...
-
- def categorical_mapping(self, data, sizes, order): # -> tuple[list[Any], dict[Unknown, Unknown] | dict[Any, Unknown]]:
- ...
-
- def numeric_mapping(self, data, sizes, norm): # -> tuple[list[Any], dict[Unknown, Unknown] | dict[Any, Unknown], Unknown]:
- ...
-
-
-
-@share_init_params_with_map
-class StyleMapping(SemanticMapping):
- """Mapping that sets artist style according to data values."""
- map_type = ...
- def __init__(self, plotter, markers=..., dashes=..., order=...) -> None:
- """Map the levels of the `style` variable to distinct values.
-
- Parameters
- ----------
- # TODO add generic parameters
-
- """
- ...
-
-
-
-class VectorPlotter:
- """Base class for objects underlying *plot functions."""
- _semantic_mappings = ...
- semantics = ...
- wide_structure = ...
- flat_structure = ...
- _default_size_range = ...
- def __init__(self, data=..., variables=...) -> None:
- ...
-
- @classmethod
- def get_semantics(cls, kwargs, semantics=...): # -> dict[Unknown, Unknown]:
- """Subset a dictionary` arguments with known semantic variables."""
- ...
-
- @property
- def has_xy_data(self): # -> bool:
- """Return True at least one of x or y is defined."""
- ...
-
- @property
- def var_levels(self): # -> dict[Unknown, Unknown]:
- """Property interface to ordered list of variables levels.
-
- Each time it's accessed, it updates the var_levels dictionary with the
- list of levels in the current semantic mappers. But it also allows the
- dictionary to persist, so it can be used to set levels by a key. This is
- used to track the list of col/row levels using an attached FacetGrid
- object, but it's kind of messy and ideally fixed by improving the
- faceting logic so it interfaces better with the modern approach to
- tracking plot variables.
-
- """
- ...
-
- def assign_variables(self, data=..., variables=...): # -> Self@VectorPlotter:
- """Define plot variables, optionally using lookup from `data`."""
- ...
-
- def iter_data(self, grouping_vars=..., reverse=..., from_comp_data=...): # -> Generator[tuple[dict[str | Unknown, Any], DataFrame | Series | Unknown] | tuple[dict[Unknown, Unknown], DataFrame | Unknown], Any, None]:
- """Generator for getting subsets of data defined by semantic variables.
-
- Also injects "col" and "row" into grouping semantics.
-
- Parameters
- ----------
- grouping_vars : string or list of strings
- Semantic variables that define the subsets of data.
- reverse : bool, optional
- If True, reverse the order of iteration.
- from_comp_data : bool, optional
- If True, use self.comp_data rather than self.plot_data
-
- Yields
- ------
- sub_vars : dict
- Keys are semantic names, values are the level of that semantic.
- sub_data : :class:`pandas.DataFrame`
- Subset of ``plot_data`` for this combination of semantic values.
-
- """
- ...
-
- @property
- def comp_data(self): # -> DataFrame:
- """Dataframe with numeric x and y, after unit conversion and log scaling."""
- ...
-
-
-
-def variable_type(vector, boolean_type=...): # -> str:
- """
- Determine whether a vector contains numeric, categorical, or datetime data.
-
- This function differs from the pandas typing API in two ways:
-
- - Python sequences or object-typed PyData objects are considered numeric if
- all of their entries are numeric.
- - String or mixed-type data are considered categorical even if not
- explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
-
- Parameters
- ----------
- vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
- Input data to test.
- boolean_type : 'numeric' or 'categorical'
- Type to use for vectors containing only 0s and 1s (and NAs).
-
- Returns
- -------
- var_type : 'numeric', 'categorical', or 'datetime'
- Name identifying the type of data in the vector.
- """
- ...
-
-def infer_orient(x=..., y=..., orient=..., require_numeric=...): # -> Literal['v', 'h']:
- """Determine how the plot should be oriented based on the data.
-
- For historical reasons, the convention is to call a plot "horizontally"
- or "vertically" oriented based on the axis representing its dependent
- variable. Practically, this is used when determining the axis for
- numerical aggregation.
-
-    Parameters
-    ----------
- x, y : Vector data or None
- Positional data vectors for the plot.
- orient : string or None
- Specified orientation, which must start with "v" or "h" if not None.
- require_numeric : bool
- If set, raise when the implied dependent variable is not numeric.
-
- Returns
- -------
- orient : "v" or "h"
-
- Raises
- ------
- ValueError: When `orient` is not None and does not start with "h" or "v"
-    TypeError: When the dependent variable is not numeric, with `require_numeric`
-
- """
- ...
-
-def unique_dashes(n): # -> list[Unknown]:
- """Build an arbitrarily long list of unique dash styles for lines.
-
- Parameters
- ----------
- n : int
- Number of unique dash specs to generate.
-
- Returns
- -------
- dashes : list of strings or tuples
- Valid arguments for the ``dashes`` parameter on
- :class:`matplotlib.lines.Line2D`. The first spec is a solid
- line (``""``), the remainder are sequences of long and short
- dashes.
-
- """
- ...
-
-def unique_markers(n): # -> list[Unknown]:
- """Build an arbitrarily long list of unique marker styles for points.
-
- Parameters
- ----------
- n : int
- Number of unique marker specs to generate.
-
- Returns
- -------
-    markers : list of strings or tuples
- Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
- All markers will be filled.
-
- """
- ...
-
-def categorical_order(vector, order=...): # -> list[Any]:
- """Return a list of unique data values.
-
- Determine an ordered list of levels in ``values``.
-
- Parameters
- ----------
- vector : list, array, Categorical, or Series
- Vector of "categorical" values
- order : list-like, optional
- Desired order of category levels to override the order determined
- from the ``values`` object.
-
- Returns
- -------
- order : list
- Ordered list of category levels not including null values.
-
- """
- ...
-
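
For orientation, a minimal usage sketch of the helpers typed in this stub (illustrative only; the sample values are arbitrary, and variable_type, infer_orient, and categorical_order are private seaborn APIs):

    import pandas as pd
    from seaborn._core import variable_type, infer_orient, categorical_order

    numbers = pd.Series([1.0, 2.0, 3.0])
    labels = pd.Series(["a", "b", "a"])

    variable_type(numbers)              # "numeric"
    variable_type(labels)               # "categorical"
    infer_orient(x=labels, y=numbers)   # "v": the numeric (dependent) variable is on y
    categorical_order(labels)           # ["a", "b"], null values excluded
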
diff --git a/typings/seaborn/_decorators.pyi b/typings/seaborn/_decorators.pyi
deleted file mode 100644
index f411dc4..0000000
--- a/typings/seaborn/_decorators.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-def share_init_params_with_map(cls):
- """Make cls.map a classmethod with same signature as cls.__init__."""
- ...
-
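
The stub only records the decorator's purpose; a rough, hypothetical sketch of what a decorator with this contract could look like (not seaborn's actual implementation):

    def share_init_params_with_map(cls):
        """Make cls.map a classmethod with same signature as cls.__init__."""
        def _map(klass, plotter, *args, **kwargs):
            # Forward everything to __init__ so map() mirrors its signature.
            return klass(plotter, *args, **kwargs)
        _map.__doc__ = cls.__init__.__doc__
        cls.map = classmethod(_map)
        return cls
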
diff --git a/typings/seaborn/_docstrings.pyi b/typings/seaborn/_docstrings.pyi
deleted file mode 100644
index cbeee67..0000000
--- a/typings/seaborn/_docstrings.pyi
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-class DocstringComponents:
- regexp = ...
- def __init__(self, comp_dict, strip_whitespace=...) -> None:
- """Read entries from a dict, optionally stripping outer whitespace."""
- ...
-
- def __getattr__(self, attr): # -> Any:
-        """Provide dot access to entries."""
- ...
-
- @classmethod
- def from_nested_components(cls, **kwargs): # -> Self@DocstringComponents:
- """Add multiple sub-sets of components."""
- ...
-
- @classmethod
- def from_function_params(cls, func): # -> Self@DocstringComponents:
- """Use the numpydoc parser to extract components from existing func."""
- ...
-
-
-
-_core_params = ...
-_core_returns = ...
-_seealso_blurbs = ...
-_core_docs = ...
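
Based on the method names above, usage looks roughly like the following (illustrative; the core/data entry names below are made up, only the class API comes from the stub):

    from seaborn._docstrings import DocstringComponents

    core = DocstringComponents({"data": "data : DataFrame\n    Input data structure."})
    docs = DocstringComponents.from_nested_components(core=core)

    template = """Plot something.

    Parameters
    ----------
    {params.core.data}
    """
    rendered = template.format(params=docs)   # dot access resolves the entry text
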
diff --git a/typings/seaborn/_statistics.pyi b/typings/seaborn/_statistics.pyi
deleted file mode 100644
index 5471d4b..0000000
--- a/typings/seaborn/_statistics.pyi
+++ /dev/null
@@ -1,132 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-"""Statistical transformations for visualization.
-
-This module is currently private, but is being written to eventually form part
-of the public API.
-
-The classes should behave roughly in the style of scikit-learn.
-
-- All data-independent parameters should be passed to the class constructor.
-- Each class should implement a default transformation that is exposed through
-  __call__. These are currently written for vector arguments, but I think
- consuming a whole `plot_data` DataFrame and returning it with transformed
- variables would make more sense.
-- Some classes have data-dependent preprocessing that should be cached and used
- multiple times (think defining histogram bins off all data and then counting
- observations within each bin multiple times per data subset). These currently
- have unique names, but it would be good to have a common name. Not quite
- `fit`, but something similar.
-- Alternatively, the transform interface could take some information about grouping
- variables and do a groupby internally.
-- Some classes should define alternate transforms that might make the most sense
- with a different function. For example, KDE usually evaluates the distribution
- on a regular grid, but it would be useful for it to transform at the actual
- datapoints. Then again, this could be controlled by a parameter at the time of
- class instantiation.
-
-"""
-class KDE:
- """Univariate and bivariate kernel density estimator."""
- def __init__(self, *, bw_method=..., bw_adjust=..., gridsize=..., cut=..., clip=..., cumulative=...) -> None:
- """Initialize the estimator with its parameters.
-
- Parameters
- ----------
- bw_method : string, scalar, or callable, optional
- Method for determining the smoothing bandwidth to use; passed to
- :class:`scipy.stats.gaussian_kde`.
- bw_adjust : number, optional
- Factor that multiplicatively scales the value chosen using
- ``bw_method``. Increasing will make the curve smoother. See Notes.
- gridsize : int, optional
- Number of points on each dimension of the evaluation grid.
- cut : number, optional
- Factor, multiplied by the smoothing bandwidth, that determines how
- far the evaluation grid extends past the extreme datapoints. When
- set to 0, truncate the curve at the data limits.
-        clip : pair of numbers, None, or a pair of such pairs
- Do not evaluate the density outside of these limits.
- cumulative : bool, optional
- If True, estimate a cumulative distribution function.
-
- """
- ...
-
- def define_support(self, x1, x2=..., weights=..., cache=...): # -> NDArray[floating[Any]] | tuple[NDArray[floating[Any]], NDArray[floating[Any]]]:
- """Create the evaluation grid for a given data set."""
- ...
-
- def __call__(self, x1, x2=..., weights=...): # -> tuple[NDArray[Unknown] | Unknown, NDArray[floating[Any]] | tuple[NDArray[floating[Any]], NDArray[floating[Any]]]] | tuple[NDArray[float64] | Unknown, NDArray[floating[Any]] | tuple[NDArray[floating[Any]], NDArray[floating[Any]]]]:
- """Fit and evaluate on univariate or bivariate data."""
- ...
-
-
-
-class Histogram:
- """Univariate and bivariate histogram estimator."""
- def __init__(self, stat=..., bins=..., binwidth=..., binrange=..., discrete=..., cumulative=...) -> None:
- """Initialize the estimator with its parameters.
-
- Parameters
- ----------
- stat : {"count", "frequency", "density", "probability"}
- Aggregate statistic to compute in each bin.
-
- - ``count`` shows the number of observations
- - ``frequency`` shows the number of observations divided by the bin width
- - ``density`` normalizes counts so that the area of the histogram is 1
- - ``probability`` normalizes counts so that the sum of the bar heights is 1
-
- bins : str, number, vector, or a pair of such values
- Generic bin parameter that can be the name of a reference rule,
- the number of bins, or the breaks of the bins.
- Passed to :func:`numpy.histogram_bin_edges`.
- binwidth : number or pair of numbers
- Width of each bin, overrides ``bins`` but can be used with
- ``binrange``.
- binrange : pair of numbers or a pair of pairs
- Lowest and highest value for bin edges; can be used either
- with ``bins`` or ``binwidth``. Defaults to data extremes.
- discrete : bool or pair of bools
- If True, set ``binwidth`` and ``binrange`` such that bin
- edges cover integer values in the dataset.
- cumulative : bool
- If True, return the cumulative statistic.
-
- """
- ...
-
- def define_bin_edges(self, x1, x2=..., weights=..., cache=...): # -> NDArray[Any] | tuple[Unknown, ...]:
- """Given data, return the edges of the histogram bins."""
- ...
-
- def __call__(self, x1, x2=..., weights=...): # -> tuple[Any | ndarray[Any, Any] | NDArray[float64] | NDArray[Any], Unknown | NDArray[Any] | tuple[Unknown, ...]] | tuple[Unknown, Unknown | NDArray[Any] | tuple[Unknown, ...]]:
-        """Count the occurrences in each bin, maybe normalize."""
- ...
-
-
-
-class ECDF:
- """Univariate empirical cumulative distribution estimator."""
- def __init__(self, stat=..., complementary=...) -> None:
-        """Initialize the class with its parameters.
-
- Parameters
- ----------
- stat : {{"proportion", "count"}}
- Distribution statistic to compute.
- complementary : bool
- If True, use the complementary CDF (1 - CDF)
-
- """
- ...
-
- def __call__(self, x1, x2=..., weights=...): # -> tuple[Any, Any]:
- """Return proportion or count of observations below each sorted datapoint."""
- ...
-
-
-
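
A short sketch of the constructor-then-__call__ pattern described in the module docstring above (illustrative; the parameter values are arbitrary):

    import numpy as np
    from seaborn._statistics import KDE, Histogram, ECDF

    x = np.random.default_rng(0).normal(size=200)

    density, support = KDE(bw_adjust=0.5)(x)                 # density evaluated on a grid
    heights, edges = Histogram(stat="density", bins=20)(x)   # per-bin statistic and bin edges
    proportion, points = ECDF(stat="proportion")(x)          # cumulative proportion at each point
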
diff --git a/typings/seaborn/_testing.pyi b/typings/seaborn/_testing.pyi
deleted file mode 100644
index 71299a2..0000000
--- a/typings/seaborn/_testing.pyi
+++ /dev/null
@@ -1,16 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-LINE_PROPS = ...
-COLLECTION_PROPS = ...
-BAR_PROPS = ...
-def assert_artists_equal(list1, list2, properties): # -> None:
- ...
-
-def assert_legends_equal(leg1, leg2): # -> None:
- ...
-
-def assert_plots_equal(ax1, ax2, labels=...): # -> None:
- ...
-
diff --git a/typings/seaborn/algorithms.pyi b/typings/seaborn/algorithms.pyi
deleted file mode 100644
index fee7dc3..0000000
--- a/typings/seaborn/algorithms.pyi
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-"""Algorithms to support fitting routines in seaborn plotting functions."""
-def bootstrap(*args, **kwargs): # -> NDArray[Unknown]:
- """Resample one or more arrays with replacement and store aggregate values.
-
- Positional arguments are a sequence of arrays to bootstrap along the first
- axis and pass to a summary function.
-
- Keyword arguments:
- n_boot : int, default 10000
- Number of iterations
- axis : int, default None
- Will pass axis to ``func`` as a keyword argument.
- units : array, default None
- Array of sampling unit IDs. When used the bootstrap resamples units
- and then observations within units instead of individual
- datapoints.
- func : string or callable, default np.mean
- Function to call on the args that are passed in. If string, tries
- to use as named method on numpy array.
- seed : Generator | SeedSequence | RandomState | int | None
- Seed for the random number generator; useful if you want
- reproducible resamples.
-
- Returns
- -------
- boot_dist: array
- array of bootstrapped statistic values
-
- """
- ...
-
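
For example, the keyword arguments documented above can be used like this (illustrative; the 95% percentile interval is just one common use of the bootstrap distribution):

    import numpy as np
    from seaborn.algorithms import bootstrap

    data = np.random.default_rng(0).normal(loc=1.0, size=100)
    boot_means = bootstrap(data, func=np.mean, n_boot=1000, seed=0)
    ci_low, ci_high = np.percentile(boot_means, [2.5, 97.5])
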
diff --git a/typings/seaborn/axisgrid.pyi b/typings/seaborn/axisgrid.pyi
deleted file mode 100644
index 87880c3..0000000
--- a/typings/seaborn/axisgrid.pyi
+++ /dev/null
@@ -1,548 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._decorators import _deprecate_positional_args
-
-__all__ = ["FacetGrid", "PairGrid", "JointGrid", "pairplot", "jointplot"]
-_param_docs = ...
-class Grid:
- """Base class for grids of subplots."""
- _margin_titles = ...
- _legend_out = ...
- def __init__(self) -> None:
- ...
-
- def set(self, **kwargs): # -> Self@Grid:
- """Set attributes on each subplot Axes."""
- ...
-
- def savefig(self, *args, **kwargs): # -> None:
- """Save the figure."""
- ...
-
- def tight_layout(self, *args, **kwargs): # -> None:
-        """Call fig.tight_layout within a rect that excludes the legend."""
- ...
-
- def add_legend(self, legend_data=..., title=..., label_order=..., adjust_subtitles=..., **kwargs): # -> Self@Grid:
- """Draw a legend, maybe placing it outside axes and resizing the figure.
-
- Parameters
- ----------
- legend_data : dict
- Dictionary mapping label names (or two-element tuples where the
- second element is a label name) to matplotlib artist handles. The
- default reads from ``self._legend_data``.
- title : string
- Title for the legend. The default reads from ``self._hue_var``.
- label_order : list of labels
- The order that the legend entries should appear in. The default
- reads from ``self.hue_names``.
- adjust_subtitles : bool
- If True, modify entries with invisible artists to left-align
- the labels and set the font size to that of a title.
- kwargs : key, value pairings
- Other keyword arguments are passed to the underlying legend methods
- on the Figure or Axes object.
-
- Returns
- -------
- self : Grid instance
- Returns self for easy chaining.
-
- """
- ...
-
- @property
- def legend(self): # -> None:
- """The :class:`matplotlib.legend.Legend` object, if present."""
- ...
-
-
-
-_facet_docs = ...
-class FacetGrid(Grid):
- """Multi-plot grid for plotting conditional relationships."""
- @_deprecate_positional_args
- def __init__(self, data, *, row=..., col=..., hue=..., col_wrap=..., sharex=..., sharey=..., height=..., aspect=..., palette=..., row_order=..., col_order=..., hue_order=..., hue_kws=..., dropna=..., legend_out=..., despine=..., margin_titles=..., xlim=..., ylim=..., subplot_kws=..., gridspec_kws=..., size=...) -> None:
- ...
-
- def facet_data(self): # -> Generator[tuple[tuple[int, int, int], Unknown], Any, None]:
- """Generator for name indices and data subsets for each facet.
-
- Yields
- ------
- (i, j, k), data_ijk : tuple of ints, DataFrame
- The ints provide an index into the {row, col, hue}_names attribute,
- and the dataframe contains a subset of the full data corresponding
- to each facet. The generator yields subsets that correspond with
- the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
- is None.
-
- """
- ...
-
- def map(self, func, *args, **kwargs): # -> Self@FacetGrid:
- """Apply a plotting function to each facet's subset of the data.
-
- Parameters
- ----------
- func : callable
- A plotting function that takes data and keyword arguments. It
- must plot to the currently active matplotlib Axes and take a
- `color` keyword argument. If faceting on the `hue` dimension,
- it must also take a `label` keyword argument.
- args : strings
- Column names in self.data that identify variables with data to
- plot. The data for each variable is passed to `func` in the
- order the variables are specified in the call.
- kwargs : keyword arguments
- All keyword arguments are passed to the plotting function.
-
- Returns
- -------
- self : object
- Returns self.
-
- """
- ...
-
- def map_dataframe(self, func, *args, **kwargs): # -> Self@FacetGrid:
- """Like ``.map`` but passes args as strings and inserts data in kwargs.
-
- This method is suitable for plotting with functions that accept a
- long-form DataFrame as a `data` keyword argument and access the
- data in that DataFrame using string variable names.
-
- Parameters
- ----------
- func : callable
- A plotting function that takes data and keyword arguments. Unlike
- the `map` method, a function used here must "understand" Pandas
- objects. It also must plot to the currently active matplotlib Axes
- and take a `color` keyword argument. If faceting on the `hue`
- dimension, it must also take a `label` keyword argument.
- args : strings
- Column names in self.data that identify variables with data to
- plot. The data for each variable is passed to `func` in the
- order the variables are specified in the call.
- kwargs : keyword arguments
- All keyword arguments are passed to the plotting function.
-
- Returns
- -------
- self : object
- Returns self.
-
- """
- ...
-
- def facet_axis(self, row_i, col_j, modify_state=...): # -> Any | ndarray[Any, dtype[Any]]:
- """Make the axis identified by these indices active and return it."""
- ...
-
- def despine(self, **kwargs): # -> Self@FacetGrid:
- """Remove axis spines from the facets."""
- ...
-
- def set_axis_labels(self, x_var=..., y_var=..., clear_inner=..., **kwargs): # -> Self@FacetGrid:
- """Set axis labels on the left column and bottom row of the grid."""
- ...
-
- def set_xlabels(self, label=..., clear_inner=..., **kwargs): # -> Self@FacetGrid:
- """Label the x axis on the bottom row of the grid."""
- ...
-
- def set_ylabels(self, label=..., clear_inner=..., **kwargs): # -> Self@FacetGrid:
- """Label the y axis on the left column of the grid."""
- ...
-
- def set_xticklabels(self, labels=..., step=..., **kwargs): # -> Self@FacetGrid:
- """Set x axis tick labels of the grid."""
- ...
-
- def set_yticklabels(self, labels=..., **kwargs): # -> Self@FacetGrid:
- """Set y axis tick labels on the left column of the grid."""
- ...
-
- def set_titles(self, template=..., row_template=..., col_template=..., **kwargs): # -> Self@FacetGrid:
- """Draw titles either above each facet or on the grid margins.
-
- Parameters
- ----------
- template : string
- Template for all titles with the formatting keys {col_var} and
- {col_name} (if using a `col` faceting variable) and/or {row_var}
- and {row_name} (if using a `row` faceting variable).
- row_template:
- Template for the row variable when titles are drawn on the grid
- margins. Must have {row_var} and {row_name} formatting keys.
- col_template:
-            Template for the column variable when titles are drawn on the grid
- margins. Must have {col_var} and {col_name} formatting keys.
-
- Returns
- -------
- self: object
- Returns self.
-
- """
- ...
-
- @property
- def fig(self): # -> Figure:
- """The :class:`matplotlib.figure.Figure` with the plot."""
- ...
-
- @property
- def axes(self): # -> Any | NDArray[Any]:
- """An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
- ...
-
- @property
- def ax(self): # -> Any:
- """The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
- ...
-
- @property
- def axes_dict(self): # -> dict[Any, Any] | dict[tuple[Any, Any], Any]:
- """A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.
-
- If only one of ``row`` or ``col`` is assigned, each key is a string
- representing a level of that variable. If both facet dimensions are
- assigned, each key is a ``({row_level}, {col_level})`` tuple.
-
- """
- ...
-
-
-
-class PairGrid(Grid):
- """Subplot grid for plotting pairwise relationships in a dataset.
-
- This object maps each variable in a dataset onto a column and row in a
- grid of multiple axes. Different axes-level plotting functions can be
-    used to draw bivariate plots in the upper and lower triangles, and the
-    marginal distribution of each variable can be shown on the diagonal.
-
- Several different common plots can be generated in a single line using
- :func:`pairplot`. Use :class:`PairGrid` when you need more flexibility.
-
- See the :ref:`tutorial ` for more information.
-
- """
- @_deprecate_positional_args
- def __init__(self, data, *, hue=..., hue_order=..., palette=..., hue_kws=..., vars=..., x_vars=..., y_vars=..., corner=..., diag_sharey=..., height=..., aspect=..., layout_pad=..., despine=..., dropna=..., size=...) -> None:
- """Initialize the plot figure and PairGrid object.
-
- Parameters
- ----------
- data : DataFrame
- Tidy (long-form) dataframe where each column is a variable and
- each row is an observation.
- hue : string (variable name)
- Variable in ``data`` to map plot aspects to different colors. This
- variable will be excluded from the default x and y variables.
- hue_order : list of strings
- Order for the levels of the hue variable in the palette
- palette : dict or seaborn color palette
- Set of colors for mapping the ``hue`` variable. If a dict, keys
- should be values in the ``hue`` variable.
- hue_kws : dictionary of param -> list of values mapping
- Other keyword arguments to insert into the plotting call to let
- other plot attributes vary across levels of the hue variable (e.g.
- the markers in a scatterplot).
- vars : list of variable names
- Variables within ``data`` to use, otherwise use every column with
- a numeric datatype.
- {x, y}_vars : lists of variable names
- Variables within ``data`` to use separately for the rows and
- columns of the figure; i.e. to make a non-square plot.
- corner : bool
- If True, don't add axes to the upper (off-diagonal) triangle of the
- grid, making this a "corner" plot.
- height : scalar
- Height (in inches) of each facet.
- aspect : scalar
- Aspect * height gives the width (in inches) of each facet.
- layout_pad : scalar
- Padding between axes; passed to ``fig.tight_layout``.
- despine : boolean
- Remove the top and right spines from the plots.
- dropna : boolean
- Drop missing values from the data before plotting.
-
- See Also
- --------
- pairplot : Easily drawing common uses of :class:`PairGrid`.
- FacetGrid : Subplot grid for plotting conditional relationships.
-
- Examples
- --------
-
- .. include:: ../docstrings/PairGrid.rst
-
- """
- ...
-
- def map(self, func, **kwargs): # -> Self@PairGrid:
- """Plot with the same function in every subplot.
-
- Parameters
- ----------
- func : callable plotting function
- Must take x, y arrays as positional arguments and draw onto the
- "currently active" matplotlib Axes. Also needs to accept kwargs
- called ``color`` and ``label``.
-
- """
- ...
-
- def map_lower(self, func, **kwargs): # -> Self@PairGrid:
- """Plot with a bivariate function on the lower diagonal subplots.
-
- Parameters
- ----------
- func : callable plotting function
- Must take x, y arrays as positional arguments and draw onto the
- "currently active" matplotlib Axes. Also needs to accept kwargs
- called ``color`` and ``label``.
-
- """
- ...
-
- def map_upper(self, func, **kwargs): # -> Self@PairGrid:
- """Plot with a bivariate function on the upper diagonal subplots.
-
- Parameters
- ----------
- func : callable plotting function
- Must take x, y arrays as positional arguments and draw onto the
- "currently active" matplotlib Axes. Also needs to accept kwargs
- called ``color`` and ``label``.
-
- """
- ...
-
- def map_offdiag(self, func, **kwargs): # -> Self@PairGrid:
- """Plot with a bivariate function on the off-diagonal subplots.
-
- Parameters
- ----------
- func : callable plotting function
- Must take x, y arrays as positional arguments and draw onto the
- "currently active" matplotlib Axes. Also needs to accept kwargs
- called ``color`` and ``label``.
-
- """
- ...
-
- def map_diag(self, func, **kwargs): # -> Self@PairGrid:
- """Plot with a univariate function on each diagonal subplot.
-
- Parameters
- ----------
- func : callable plotting function
- Must take an x array as a positional argument and draw onto the
- "currently active" matplotlib Axes. Also needs to accept kwargs
- called ``color`` and ``label``.
-
- """
- ...
-
-
-
-class JointGrid:
- """Grid for drawing a bivariate plot with marginal univariate plots.
-
- Many plots can be drawn by using the figure-level interface :func:`jointplot`.
- Use this class directly when you need more flexibility.
-
- """
- @_deprecate_positional_args
- def __init__(self, *, x=..., y=..., data=..., height=..., ratio=..., space=..., dropna=..., xlim=..., ylim=..., size=..., marginal_ticks=..., hue=..., palette=..., hue_order=..., hue_norm=...) -> None:
- ...
-
- def plot(self, joint_func, marginal_func, **kwargs): # -> Self@JointGrid:
- """Draw the plot by passing functions for joint and marginal axes.
-
- This method passes the ``kwargs`` dictionary to both functions. If you
- need more control, call :meth:`JointGrid.plot_joint` and
- :meth:`JointGrid.plot_marginals` directly with specific parameters.
-
- Parameters
- ----------
- joint_func, marginal_func: callables
- Functions to draw the bivariate and univariate plots. See methods
- referenced above for information about the required characteristics
- of these functions.
- kwargs
- Additional keyword arguments are passed to both functions.
-
- Returns
- -------
- :class:`JointGrid` instance
- Returns ``self`` for easy method chaining.
-
- """
- ...
-
- def plot_joint(self, func, **kwargs): # -> Self@JointGrid:
- """Draw a bivariate plot on the joint axes of the grid.
-
- Parameters
- ----------
- func : plotting callable
- If a seaborn function, it should accept ``x`` and ``y``. Otherwise,
- it must accept ``x`` and ``y`` vectors of data as the first two
- positional arguments, and it must plot on the "current" axes.
- If ``hue`` was defined in the class constructor, the function must
- accept ``hue`` as a parameter.
- kwargs
-            Keyword arguments are passed to the plotting function.
-
- Returns
- -------
- :class:`JointGrid` instance
- Returns ``self`` for easy method chaining.
-
- """
- ...
-
- def plot_marginals(self, func, **kwargs): # -> Self@JointGrid:
- """Draw univariate plots on each marginal axes.
-
- Parameters
- ----------
- func : plotting callable
- If a seaborn function, it should accept ``x`` and ``y`` and plot
- when only one of them is defined. Otherwise, it must accept a vector
- of data as the first positional argument and determine its orientation
- using the ``vertical`` parameter, and it must plot on the "current" axes.
- If ``hue`` was defined in the class constructor, it must accept ``hue``
- as a parameter.
- kwargs
-            Keyword arguments are passed to the plotting function.
-
- Returns
- -------
- :class:`JointGrid` instance
- Returns ``self`` for easy method chaining.
-
- """
- ...
-
- def set_axis_labels(self, xlabel=..., ylabel=..., **kwargs): # -> Self@JointGrid:
- """Set axis labels on the bivariate axes.
-
- Parameters
- ----------
- xlabel, ylabel : strings
- Label names for the x and y variables.
- kwargs : key, value mappings
- Other keyword arguments are passed to the following functions:
-
- - :meth:`matplotlib.axes.Axes.set_xlabel`
- - :meth:`matplotlib.axes.Axes.set_ylabel`
-
- Returns
- -------
- :class:`JointGrid` instance
- Returns ``self`` for easy method chaining.
-
- """
- ...
-
- def savefig(self, *args, **kwargs): # -> None:
- """Save the figure using a "tight" bounding box by default.
-
- Wraps :meth:`matplotlib.figure.Figure.savefig`.
-
- """
- ...
-
-
-
-@_deprecate_positional_args
-def pairplot(data, *, hue=..., hue_order=..., palette=..., vars=..., x_vars=..., y_vars=..., kind=..., diag_kind=..., markers=..., height=..., aspect=..., corner=..., dropna=..., plot_kws=..., diag_kws=..., grid_kws=..., size=...):
- """Plot pairwise relationships in a dataset.
-
- By default, this function will create a grid of Axes such that each numeric
-    variable in ``data`` will be shared across the y-axes across a single row and
- the x-axes across a single column. The diagonal plots are treated
- differently: a univariate distribution plot is drawn to show the marginal
- distribution of the data in each column.
-
- It is also possible to show a subset of variables or plot different
- variables on the rows and columns.
-
- This is a high-level interface for :class:`PairGrid` that is intended to
- make it easy to draw a few common styles. You should use :class:`PairGrid`
- directly if you need more flexibility.
-
- Parameters
- ----------
- data : `pandas.DataFrame`
- Tidy (long-form) dataframe where each column is a variable and
- each row is an observation.
- hue : name of variable in ``data``
- Variable in ``data`` to map plot aspects to different colors.
- hue_order : list of strings
- Order for the levels of the hue variable in the palette
- palette : dict or seaborn color palette
- Set of colors for mapping the ``hue`` variable. If a dict, keys
- should be values in the ``hue`` variable.
- vars : list of variable names
- Variables within ``data`` to use, otherwise use every column with
- a numeric datatype.
- {x, y}_vars : lists of variable names
- Variables within ``data`` to use separately for the rows and
- columns of the figure; i.e. to make a non-square plot.
- kind : {'scatter', 'kde', 'hist', 'reg'}
- Kind of plot to make.
- diag_kind : {'auto', 'hist', 'kde', None}
- Kind of plot for the diagonal subplots. If 'auto', choose based on
- whether or not ``hue`` is used.
- markers : single matplotlib marker code or list
- Either the marker to use for all scatterplot points or a list of markers
- with a length the same as the number of levels in the hue variable so that
- differently colored points will also have different scatterplot
- markers.
- height : scalar
- Height (in inches) of each facet.
- aspect : scalar
- Aspect * height gives the width (in inches) of each facet.
- corner : bool
- If True, don't add axes to the upper (off-diagonal) triangle of the
- grid, making this a "corner" plot.
- dropna : boolean
- Drop missing values from the data before plotting.
- {plot, diag, grid}_kws : dicts
- Dictionaries of keyword arguments. ``plot_kws`` are passed to the
- bivariate plotting function, ``diag_kws`` are passed to the univariate
- plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
- constructor.
-
- Returns
- -------
- grid : :class:`PairGrid`
- Returns the underlying :class:`PairGrid` instance for further tweaking.
-
- See Also
- --------
- PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
- JointGrid : Grid for plotting joint and marginal distributions of two variables.
-
- Examples
- --------
-
- .. include:: ../docstrings/pairplot.rst
-
- """
- ...
-
-@_deprecate_positional_args
-def jointplot(*, x=..., y=..., data=..., kind=..., color=..., height=..., ratio=..., space=..., dropna=..., xlim=..., ylim=..., marginal_ticks=..., joint_kws=..., marginal_kws=..., hue=..., palette=..., hue_order=..., hue_norm=..., **kwargs):
- ...
-
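
A quick usage sketch for the grid classes and figure-level functions typed above (uses the standard tips example dataset, which load_dataset fetches over the network):

    import matplotlib.pyplot as plt
    import seaborn as sns

    tips = sns.load_dataset("tips")

    g = sns.FacetGrid(tips, col="time", hue="smoker")
    g.map(plt.scatter, "total_bill", "tip").add_legend()

    sns.pairplot(tips, hue="smoker", corner=True)
    sns.jointplot(x="total_bill", y="tip", data=tips, kind="hex")
    plt.show()
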
diff --git a/typings/seaborn/categorical.pyi b/typings/seaborn/categorical.pyi
deleted file mode 100644
index c4fbe74..0000000
--- a/typings/seaborn/categorical.pyi
+++ /dev/null
@@ -1,312 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._decorators import _deprecate_positional_args
-
-__all__ = ["catplot", "factorplot", "stripplot", "swarmplot", "boxplot", "violinplot", "boxenplot", "pointplot", "barplot", "countplot"]
-class _CategoricalPlotter:
- width = ...
- default_palette = ...
- require_numeric = ...
- def establish_variables(self, x=..., y=..., hue=..., data=..., orient=..., order=..., hue_order=..., units=...):
- """Convert input specification into a common representation."""
- ...
-
- def establish_colors(self, color, palette, saturation): # -> None:
- """Get a list of colors for the main component of the plots."""
- ...
-
- @property
- def hue_offsets(self): # -> Any | NDArray[float64]:
- """A list of center positions for plots when hue nesting is used."""
- ...
-
- @property
- def nested_width(self): # -> float:
- """A float with the width of plot elements when hue nesting is used."""
- ...
-
- def annotate_axes(self, ax): # -> None:
- """Add descriptive labels to an Axes object."""
- ...
-
- def add_legend_data(self, ax, color, label): # -> None:
- """Add a dummy patch object so we can get legend data."""
- ...
-
-
-
-class _BoxPlotter(_CategoricalPlotter):
- def __init__(self, x, y, hue, data, order, hue_order, orient, color, palette, saturation, width, dodge, fliersize, linewidth) -> None:
- ...
-
- def draw_boxplot(self, ax, kws): # -> None:
- """Use matplotlib to draw a boxplot on an Axes."""
- ...
-
- def restyle_boxplot(self, artist_dict, color, props): # -> None:
- """Take a drawn matplotlib boxplot and make it look nice."""
- ...
-
- def plot(self, ax, boxplot_kws): # -> None:
- """Make the plot."""
- ...
-
-
-
-class _ViolinPlotter(_CategoricalPlotter):
- def __init__(self, x, y, hue, data, order, hue_order, bw, cut, scale, scale_hue, gridsize, width, inner, split, dodge, orient, linewidth, color, palette, saturation) -> None:
- ...
-
- def estimate_densities(self, bw, cut, scale, scale_hue, gridsize): # -> None:
- """Find the support and density for all of the data."""
- ...
-
- def fit_kde(self, x, bw): # -> tuple[gaussian_kde, Unknown]:
- """Estimate a KDE for a vector of data with flexible bandwidth."""
- ...
-
- def kde_support(self, x, bw, cut, gridsize):
- """Define a grid of support for the violin."""
- ...
-
- def scale_area(self, density, max_density, scale_hue): # -> None:
- """Scale the relative area under the KDE curve.
-
- This essentially preserves the "standard" KDE scaling, but the
- resulting maximum density will be 1 so that the curve can be
- properly multiplied by the violin width.
-
- """
- ...
-
- def scale_width(self, density): # -> None:
- """Scale each density curve to the same height."""
- ...
-
- def scale_count(self, density, counts, scale_hue): # -> None:
- """Scale each density curve by the number of observations."""
- ...
-
- @property
- def dwidth(self):
- ...
-
- def draw_violins(self, ax): # -> None:
- """Draw the violins onto `ax`."""
- ...
-
- def draw_single_observation(self, ax, at_group, at_quant, density): # -> None:
- """Draw a line to mark a single observation."""
- ...
-
- def draw_box_lines(self, ax, data, support, density, center): # -> None:
- """Draw boxplot information at center of the density."""
- ...
-
- def draw_quartiles(self, ax, data, support, density, center, split=...): # -> None:
- """Draw the quartiles as lines at width of density."""
- ...
-
- def draw_points(self, ax, data, center): # -> None:
- """Draw individual observations as points at middle of the violin."""
- ...
-
- def draw_stick_lines(self, ax, data, support, density, center, split=...): # -> None:
- """Draw individual observations as sticks at width of density."""
- ...
-
- def draw_to_density(self, ax, center, val, support, density, split, **kws): # -> None:
- """Draw a line orthogonal to the value axis at width of density."""
- ...
-
- def plot(self, ax): # -> None:
- """Make the violin plot."""
- ...
-
-
-
-class _CategoricalScatterPlotter(_CategoricalPlotter):
- default_palette = ...
- require_numeric = ...
- @property
- def point_colors(self): # -> list[Unknown]:
- """Return an index into the palette for each scatter point."""
- ...
-
- def add_legend_data(self, ax): # -> None:
- """Add empty scatterplot artists with labels for the legend."""
- ...
-
-
-
-class _StripPlotter(_CategoricalScatterPlotter):
- """1-d scatterplot with categorical organization."""
- def __init__(self, x, y, hue, data, order, hue_order, jitter, dodge, orient, color, palette) -> None:
- """Initialize the plotter."""
- ...
-
- def draw_stripplot(self, ax, kws): # -> None:
- """Draw the points onto `ax`."""
- ...
-
- def plot(self, ax, kws): # -> None:
- """Make the plot."""
- ...
-
-
-
-class _SwarmPlotter(_CategoricalScatterPlotter):
- def __init__(self, x, y, hue, data, order, hue_order, dodge, orient, color, palette) -> None:
- """Initialize the plotter."""
- ...
-
- def could_overlap(self, xy_i, swarm, d): # -> NDArray[Unknown]:
- """Return a list of all swarm points that could overlap with target.
-
- Assumes that swarm is a sorted list of all points below xy_i.
- """
- ...
-
- def position_candidates(self, xy_i, neighbors, d): # -> NDArray[Unknown]:
- """Return a list of (x, y) coordinates that might be valid."""
- ...
-
- def first_non_overlapping_candidate(self, candidates, neighbors, d):
- """Remove candidates from the list if they overlap with the swarm."""
- ...
-
- def beeswarm(self, orig_xy, d): # -> NDArray[Unknown]:
- """Adjust x position of points to avoid overlaps."""
- ...
-
- def add_gutters(self, points, center, width):
- """Stop points from extending beyond their territory."""
- ...
-
- def swarm_points(self, ax, points, center, width, s, **kws): # -> None:
- """Find new positions on the categorical axis for each point."""
- ...
-
- def draw_swarmplot(self, ax, kws): # -> None:
- """Plot the data."""
- ...
-
- def plot(self, ax, kws): # -> None:
- """Make the full plot."""
- ...
-
-
-
-class _CategoricalStatPlotter(_CategoricalPlotter):
- require_numeric = ...
- @property
- def nested_width(self): # -> float:
- """A float with the width of plot elements when hue nesting is used."""
- ...
-
- def estimate_statistic(self, estimator, ci, n_boot, seed): # -> None:
- ...
-
- def draw_confints(self, ax, at_group, confint, colors, errwidth=..., capsize=..., **kws): # -> None:
- ...
-
-
-
-class _BarPlotter(_CategoricalStatPlotter):
- """Show point estimates and confidence intervals with bars."""
- def __init__(self, x, y, hue, data, order, hue_order, estimator, ci, n_boot, units, seed, orient, color, palette, saturation, errcolor, errwidth, capsize, dodge) -> None:
- """Initialize the plotter."""
- ...
-
- def draw_bars(self, ax, kws): # -> None:
- """Draw the bars onto `ax`."""
- ...
-
- def plot(self, ax, bar_kws): # -> None:
- """Make the plot."""
- ...
-
-
-
-class _PointPlotter(_CategoricalStatPlotter):
- default_palette = ...
- def __init__(self, x, y, hue, data, order, hue_order, estimator, ci, n_boot, units, seed, markers, linestyles, dodge, join, scale, orient, color, palette, errwidth=..., capsize=...) -> None:
- """Initialize the plotter."""
- ...
-
- @property
- def hue_offsets(self): # -> NDArray[float64]:
- """Offsets relative to the center position for each hue level."""
- ...
-
- def draw_points(self, ax): # -> None:
- """Draw the main data components of the plot."""
- ...
-
- def plot(self, ax): # -> None:
- """Make the plot."""
- ...
-
-
-
-class _CountPlotter(_BarPlotter):
- require_numeric = ...
-
-
-class _LVPlotter(_CategoricalPlotter):
- def __init__(self, x, y, hue, data, order, hue_order, orient, color, palette, saturation, width, dodge, k_depth, linewidth, scale, outlier_prop, trust_alpha, showfliers=...) -> None:
- ...
-
- def draw_letter_value_plot(self, ax, kws): # -> None:
- """Use matplotlib to draw a letter value plot on an Axes."""
- ...
-
- def plot(self, ax, boxplot_kws): # -> None:
- """Make the plot."""
- ...
-
-
-
-_categorical_docs = ...
-@_deprecate_positional_args
-def boxplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., orient=..., color=..., palette=..., saturation=..., width=..., dodge=..., fliersize=..., linewidth=..., whis=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def violinplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., bw=..., cut=..., scale=..., scale_hue=..., gridsize=..., width=..., inner=..., split=..., dodge=..., orient=..., linewidth=..., color=..., palette=..., saturation=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def boxenplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., orient=..., color=..., palette=..., saturation=..., width=..., dodge=..., k_depth=..., linewidth=..., scale=..., outlier_prop=..., trust_alpha=..., showfliers=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def stripplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., jitter=..., dodge=..., orient=..., color=..., palette=..., size=..., edgecolor=..., linewidth=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def swarmplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., dodge=..., orient=..., color=..., palette=..., size=..., edgecolor=..., linewidth=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def barplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., estimator=..., ci=..., n_boot=..., units=..., seed=..., orient=..., color=..., palette=..., saturation=..., errcolor=..., errwidth=..., capsize=..., dodge=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def pointplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., estimator=..., ci=..., n_boot=..., units=..., seed=..., markers=..., linestyles=..., dodge=..., join=..., scale=..., orient=..., color=..., palette=..., errwidth=..., capsize=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def countplot(*, x=..., y=..., hue=..., data=..., order=..., hue_order=..., orient=..., color=..., palette=..., saturation=..., dodge=..., ax=..., **kwargs): # -> Axes:
- ...
-
-def factorplot(*args, **kwargs):
- """Deprecated; please use `catplot` instead."""
- ...
-
-@_deprecate_positional_args
-def catplot(*, x=..., y=..., hue=..., data=..., row=..., col=..., col_wrap=..., estimator=..., ci=..., n_boot=..., units=..., seed=..., order=..., hue_order=..., row_order=..., col_order=..., kind=..., height=..., aspect=..., orient=..., color=..., palette=..., legend=..., legend_out=..., sharex=..., sharey=..., margin_titles=..., facet_kws=..., **kwargs):
- ...
-
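
Usage sketch for the axes-level and figure-level categorical functions above (same tips dataset as before; factorplot is only a deprecated alias for catplot):

    import seaborn as sns

    tips = sns.load_dataset("tips")

    ax = sns.boxplot(x="day", y="total_bill", hue="smoker", data=tips)
    g = sns.catplot(x="day", y="total_bill", col="time", kind="violin", data=tips)
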
diff --git a/typings/seaborn/cm.pyi b/typings/seaborn/cm.pyi
deleted file mode 100644
index f9abe2c..0000000
--- a/typings/seaborn/cm.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-_rocket_lut = ...
-_mako_lut = ...
-_vlag_lut = ...
-_icefire_lut = ...
-_flare_lut = ...
-_crest_lut = ...
-_lut_dict = ...
diff --git a/typings/seaborn/colors/__init__.pyi b/typings/seaborn/colors/__init__.pyi
deleted file mode 100644
index 0233e53..0000000
--- a/typings/seaborn/colors/__init__.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from .xkcd_rgb import xkcd_rgb
-from .crayons import crayons
-
diff --git a/typings/seaborn/colors/crayons.pyi b/typings/seaborn/colors/crayons.pyi
deleted file mode 100644
index 5acb7a4..0000000
--- a/typings/seaborn/colors/crayons.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-crayons = ...
diff --git a/typings/seaborn/colors/xkcd_rgb.pyi b/typings/seaborn/colors/xkcd_rgb.pyi
deleted file mode 100644
index 5bc7ac5..0000000
--- a/typings/seaborn/colors/xkcd_rgb.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-xkcd_rgb = ...
diff --git a/typings/seaborn/conftest.pyi b/typings/seaborn/conftest.pyi
deleted file mode 100644
index 29cc1bd..0000000
--- a/typings/seaborn/conftest.pyi
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import pytest
-
-def has_verdana(): # -> bool:
- """Helper to verify if Verdana font is present"""
- ...
-
-@pytest.fixture(scope="session", autouse=True)
-def remove_pandas_unit_conversion(): # -> None:
- ...
-
-@pytest.fixture(autouse=True)
-def close_figs(): # -> Generator[None, Any, None]:
- ...
-
-@pytest.fixture(autouse=True)
-def random_seed(): # -> None:
- ...
-
-@pytest.fixture()
-def rng(): # -> RandomState:
- ...
-
-@pytest.fixture
-def wide_df(rng): # -> DataFrame:
- ...
-
-@pytest.fixture
-def wide_array(wide_df): # -> NDArray[Unknown]:
- ...
-
-@pytest.fixture
-def flat_series(rng): # -> Series:
- ...
-
-@pytest.fixture
-def flat_array(flat_series): # -> NDArray[Unknown]:
- ...
-
-@pytest.fixture
-def flat_list(flat_series):
- ...
-
-@pytest.fixture(params=["series", "array", "list"])
-def flat_data(rng, request): # -> Series | ndarray[Unknown, Unknown] | NDArray[Unknown] | list[Unknown] | Any:
- ...
-
-@pytest.fixture
-def wide_list_of_series(rng): # -> list[Series]:
- ...
-
-@pytest.fixture
-def wide_list_of_arrays(wide_list_of_series): # -> list[NDArray[Unknown]]:
- ...
-
-@pytest.fixture
-def wide_list_of_lists(wide_list_of_series): # -> list[Unknown]:
- ...
-
-@pytest.fixture
-def wide_dict_of_series(wide_list_of_series): # -> dict[Unknown, Unknown]:
- ...
-
-@pytest.fixture
-def wide_dict_of_arrays(wide_list_of_series): # -> dict[Unknown, NDArray[Unknown]]:
- ...
-
-@pytest.fixture
-def wide_dict_of_lists(wide_list_of_series): # -> dict[Unknown, Unknown]:
- ...
-
-@pytest.fixture
-def long_df(rng): # -> DataFrame:
- ...
-
-@pytest.fixture
-def long_dict(long_df):
- ...
-
-@pytest.fixture
-def repeated_df(rng): # -> DataFrame:
- ...
-
-@pytest.fixture
-def missing_df(rng, long_df):
- ...
-
-@pytest.fixture
-def object_df(rng, long_df):
- ...
-
-@pytest.fixture
-def null_series(flat_series): # -> Series:
- ...
-
diff --git a/typings/seaborn/distributions.pyi b/typings/seaborn/distributions.pyi
deleted file mode 100644
index 83099a3..0000000
--- a/typings/seaborn/distributions.pyi
+++ /dev/null
@@ -1,209 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._core import VectorPlotter
-from ._decorators import _deprecate_positional_args
-
-"""Plotting functions for visualizing distributions."""
-__all__ = ["displot", "histplot", "kdeplot", "ecdfplot", "rugplot", "distplot"]
-_dist_params = ...
-_param_docs = ...
-class _DistributionPlotter(VectorPlotter):
- semantics = ...
- wide_structure = ...
- flat_structure = ...
- def __init__(self, data=..., variables=...) -> None:
- ...
-
- @property
- def univariate(self): # -> bool:
- """Return True if only x or y are used."""
- ...
-
- @property
- def data_variable(self): # -> str:
- """Return the variable with data for univariate plots."""
- ...
-
- @property
- def has_xy_data(self): # -> bool:
-        """Return True if at least one of x or y is defined."""
- ...
-
- def plot_univariate_histogram(self, multiple, element, fill, common_norm, common_bins, shrink, kde, kde_kws, color, legend, line_kws, estimate_kws, **plot_kws):
- ...
-
- def plot_bivariate_histogram(self, common_bins, common_norm, thresh, pthresh, pmax, color, legend, cbar, cbar_ax, cbar_kws, estimate_kws, **plot_kws):
- ...
-
- def plot_univariate_density(self, multiple, common_norm, common_grid, fill, legend, estimate_kws, **plot_kws):
- ...
-
- def plot_bivariate_density(self, common_norm, fill, levels, thresh, color, legend, cbar, cbar_ax, cbar_kws, estimate_kws, **contour_kws):
- ...
-
- def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws): # -> None:
- ...
-
- def plot_rug(self, height, expand_margins, legend, **kws): # -> None:
- ...
-
-
-
-class _DistributionFacetPlotter(_DistributionPlotter):
- semantics = ...
-
-
-def histplot(data=..., *, x=..., y=..., hue=..., weights=..., stat=..., bins=..., binwidth=..., binrange=..., discrete=..., cumulative=..., common_bins=..., common_norm=..., multiple=..., element=..., fill=..., shrink=..., kde=..., kde_kws=..., line_kws=..., thresh=..., pthresh=..., pmax=..., cbar=..., cbar_ax=..., cbar_kws=..., palette=..., hue_order=..., hue_norm=..., color=..., log_scale=..., legend=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def kdeplot(x=..., *, y=..., shade=..., vertical=..., kernel=..., bw=..., gridsize=..., cut=..., clip=..., legend=..., cumulative=..., shade_lowest=..., cbar=..., cbar_ax=..., cbar_kws=..., ax=..., weights=..., hue=..., palette=..., hue_order=..., hue_norm=..., multiple=..., common_norm=..., common_grid=..., levels=..., thresh=..., bw_method=..., bw_adjust=..., log_scale=..., color=..., fill=..., data=..., data2=..., **kwargs): # -> Axes:
- ...
-
-def ecdfplot(data=..., *, x=..., y=..., hue=..., weights=..., stat=..., complementary=..., palette=..., hue_order=..., hue_norm=..., log_scale=..., legend=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def rugplot(x=..., *, height=..., axis=..., ax=..., data=..., y=..., hue=..., palette=..., hue_order=..., hue_norm=..., expand_margins=..., legend=..., a=..., **kwargs): # -> Axes:
- ...
-
-def displot(data=..., *, x=..., y=..., hue=..., row=..., col=..., weights=..., kind=..., rug=..., rug_kws=..., log_scale=..., legend=..., palette=..., hue_order=..., hue_norm=..., color=..., col_wrap=..., row_order=..., col_order=..., height=..., aspect=..., facet_kws=..., **kwargs): # -> FacetGrid:
- ...
-
-def distplot(a=..., bins=..., hist=..., kde=..., rug=..., fit=..., hist_kws=..., kde_kws=..., rug_kws=..., fit_kws=..., color=..., vertical=..., norm_hist=..., axlabel=..., label=..., ax=..., x=...):
- """DEPRECATED: Flexibly plot a univariate distribution of observations.
-
- .. warning::
- This function is deprecated and will be removed in a future version.
- Please adapt your code to use one of two new functions:
-
- - :func:`displot`, a figure-level function with a similar flexibility
- over the kind of plot to draw
- - :func:`histplot`, an axes-level function for plotting histograms,
- including with kernel density smoothing
-
- This function combines the matplotlib ``hist`` function (with automatic
- calculation of a good default bin size) with the seaborn :func:`kdeplot`
- and :func:`rugplot` functions. It can also fit ``scipy.stats``
- distributions and plot the estimated PDF over the data.
-
- Parameters
- ----------
- a : Series, 1d-array, or list.
- Observed data. If this is a Series object with a ``name`` attribute,
- the name will be used to label the data axis.
- bins : argument for matplotlib hist(), or None, optional
-        Specification of hist bins. If unspecified, a reference rule is used
- that tries to find a useful default.
- hist : bool, optional
- Whether to plot a (normed) histogram.
- kde : bool, optional
- Whether to plot a gaussian kernel density estimate.
- rug : bool, optional
- Whether to draw a rugplot on the support axis.
- fit : random variable object, optional
-        An object with a `fit` method, returning a tuple that can be passed to a
-        `pdf` method as positional arguments following a grid of values to
- evaluate the pdf on.
- hist_kws : dict, optional
- Keyword arguments for :meth:`matplotlib.axes.Axes.hist`.
- kde_kws : dict, optional
- Keyword arguments for :func:`kdeplot`.
- rug_kws : dict, optional
- Keyword arguments for :func:`rugplot`.
- color : matplotlib color, optional
- Color to plot everything but the fitted curve in.
- vertical : bool, optional
- If True, observed values are on y-axis.
- norm_hist : bool, optional
- If True, the histogram height shows a density rather than a count.
- This is implied if a KDE or fitted density is plotted.
- axlabel : string, False, or None, optional
- Name for the support axis label. If None, will try to get it
-        from a.name. If False, do not set a label.
- label : string, optional
- Legend label for the relevant component of the plot.
- ax : matplotlib axis, optional
- If provided, plot on this axis.
-
- Returns
- -------
- ax : matplotlib Axes
- Returns the Axes object with the plot for further tweaking.
-
- See Also
- --------
- kdeplot : Show a univariate or bivariate distribution with a kernel
- density estimate.
- rugplot : Draw small vertical lines to show each observation in a
- distribution.
-
- Examples
- --------
-
- Show a default plot with a kernel density estimate and histogram with bin
- size determined automatically with a reference rule:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns, numpy as np
- >>> sns.set_theme(); np.random.seed(0)
- >>> x = np.random.randn(100)
- >>> ax = sns.distplot(x)
-
- Use Pandas objects to get an informative axis label:
-
- .. plot::
- :context: close-figs
-
- >>> import pandas as pd
- >>> x = pd.Series(x, name="x variable")
- >>> ax = sns.distplot(x)
-
- Plot the distribution with a kernel density estimate and rug plot:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.distplot(x, rug=True, hist=False)
-
- Plot the distribution with a histogram and maximum likelihood gaussian
- distribution fit:
-
- .. plot::
- :context: close-figs
-
- >>> from scipy.stats import norm
- >>> ax = sns.distplot(x, fit=norm, kde=False)
-
- Plot the distribution on the vertical axis:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.distplot(x, vertical=True)
-
- Change the color of all the plot elements:
-
- .. plot::
- :context: close-figs
-
- >>> sns.set_color_codes()
- >>> ax = sns.distplot(x, color="y")
-
- Pass specific parameters to the underlying plot functions:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.distplot(x, rug=True, rug_kws={"color": "g"},
- ... kde_kws={"color": "k", "lw": 3, "label": "KDE"},
- ... hist_kws={"histtype": "step", "linewidth": 3,
- ... "alpha": 1, "color": "g"})
-
- """
- ...
-
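
Migration sketch for the deprecation warning in the distplot docstring above (the replacement calls below cover the common case of a histogram with a KDE overlay):

    import numpy as np
    import seaborn as sns

    x = np.random.default_rng(0).normal(size=100)

    # ax = sns.distplot(x)                     # deprecated
    ax = sns.histplot(x, kde=True)             # axes-level replacement
    g = sns.displot(x, kind="hist", kde=True)  # figure-level replacement
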
diff --git a/typings/seaborn/external/__init__.pyi b/typings/seaborn/external/__init__.pyi
deleted file mode 100644
index 006bc27..0000000
--- a/typings/seaborn/external/__init__.pyi
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
diff --git a/typings/seaborn/external/docscrape.pyi b/typings/seaborn/external/docscrape.pyi
deleted file mode 100644
index 68c0bb8..0000000
--- a/typings/seaborn/external/docscrape.pyi
+++ /dev/null
@@ -1,165 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from collections.abc import Mapping
-
-"""Extract reference documentation from the NumPy source tree.
-
-Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-"""
-def strip_blank_lines(l):
- "Remove leading and trailing blank lines from a list of lines"
- ...
-
-class Reader:
- """A line-based string reader.
-
- """
- def __init__(self, data) -> None:
- """
- Parameters
- ----------
- data : str
- String with lines separated by '\n'.
-
- """
- ...
-
- def __getitem__(self, n):
- ...
-
- def reset(self): # -> None:
- ...
-
- def read(self): # -> Literal['']:
- ...
-
- def seek_next_non_empty_line(self): # -> None:
- ...
-
- def eof(self): # -> bool:
- ...
-
- def read_to_condition(self, condition_func): # -> list[Unknown]:
- ...
-
- def read_to_next_empty_line(self): # -> list[Unknown]:
- ...
-
- def read_to_next_unindented_line(self): # -> list[Unknown]:
- ...
-
- def peek(self, n=...): # -> Literal['']:
- ...
-
- def is_empty(self): # -> bool:
- ...
-
-
-
-class ParseError(Exception):
- def __str__(self) -> str:
- ...
-
-
-
-Parameter = ...
-class NumpyDocString(Mapping):
- """Parses a numpydoc string to an abstract representation
-
- Instances define a mapping from section title to structured data.
-
- """
- sections = ...
- def __init__(self, docstring, config=...) -> None:
- ...
-
- def __getitem__(self, key):
- ...
-
- def __setitem__(self, key, val): # -> None:
- ...
-
- def __iter__(self): # -> Iterator[str]:
- ...
-
- def __len__(self): # -> int:
- ...
-
- _role = ...
- _funcbacktick = ...
- _funcplain = ...
- _funcname = ...
- _funcnamenext = ...
- _funcnamenext = ...
- _description = ...
- _func_rgx = ...
- _line_rgx = ...
- empty_description = ...
- def __str__(self, func_role=...) -> str:
- ...
-
-
-
-def indent(str, indent=...): # -> LiteralString:
- ...
-
-def dedent_lines(lines): # -> list[str]:
- """Deindent a list of lines maximally"""
- ...
-
-def header(text, style=...):
- ...
-
-class FunctionDoc(NumpyDocString):
- def __init__(self, func, role=..., doc=..., config=...) -> None:
- ...
-
- def get_func(self): # -> tuple[Any | Overload[(__o: object, /) -> None, (__name: str, __bases: tuple[type, ...], __dict: dict[str, Any], **kwds: Any) -> None] | Unknown, Any | str]:
- ...
-
- def __str__(self) -> str:
- ...
-
-
-
-class ClassDoc(NumpyDocString):
- extra_public_methods = ...
- def __init__(self, cls, doc=..., modulename=..., func_doc=..., config=...) -> None:
- ...
-
- @property
- def methods(self): # -> list[Unknown] | list[str]:
- ...
-
- @property
- def properties(self): # -> list[Unknown] | list[str]:
- ...
-
-
-
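For orientation, a hedged sketch of how the `NumpyDocString` mapping interface declared in this stub is typically exercised; the exact shape of the parsed entries is an implementation detail of the vendored numpydoc code:

from seaborn.external.docscrape import NumpyDocString

# Build a small numpydoc-style docstring without leading indentation.
docstring = "\n".join([
    "Example function.",
    "",
    "Parameters",
    "----------",
    "x : int",
    "    The input value.",
    "",
])

doc = NumpyDocString(docstring)

# NumpyDocString behaves like a Mapping from section title to parsed content.
print(doc["Summary"])
print(doc["Parameters"])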
diff --git a/typings/seaborn/external/husl.pyi b/typings/seaborn/external/husl.pyi
deleted file mode 100644
index abd7f73..0000000
--- a/typings/seaborn/external/husl.pyi
+++ /dev/null
@@ -1,104 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-__version__ = ...
-m = ...
-m_inv = ...
-refX = ...
-refY = ...
-refZ = ...
-refU = ...
-refV = ...
-lab_e = ...
-lab_k = ...
-def husl_to_rgb(h, s, l): # -> list[Unknown | float]:
- ...
-
-def husl_to_hex(h, s, l): # -> LiteralString:
- ...
-
-def rgb_to_husl(r, g, b): # -> list[Unknown]:
- ...
-
-def hex_to_husl(hex): # -> list[Unknown]:
- ...
-
-def huslp_to_rgb(h, s, l): # -> list[Unknown | float]:
- ...
-
-def huslp_to_hex(h, s, l): # -> LiteralString:
- ...
-
-def rgb_to_huslp(r, g, b): # -> list[Unknown]:
- ...
-
-def hex_to_huslp(hex): # -> list[Unknown]:
- ...
-
-def lch_to_rgb(l, c, h): # -> list[Unknown | float]:
- ...
-
-def rgb_to_lch(r, g, b): # -> list[Unknown]:
- ...
-
-def max_chroma(L, H): # -> float:
- ...
-
-def max_chroma_pastel(L): # -> float:
- ...
-
-def dot_product(a, b): # -> int:
- ...
-
-def f(t): # -> float:
- ...
-
-def f_inv(t): # -> float:
- ...
-
-def from_linear(c): # -> float:
- ...
-
-def to_linear(c): # -> float:
- ...
-
-def rgb_prepare(triple): # -> list[Unknown]:
- ...
-
-def hex_to_rgb(hex): # -> list[float]:
- ...
-
-def rgb_to_hex(triple): # -> LiteralString:
- ...
-
-def xyz_to_rgb(triple): # -> list[Unknown | float]:
- ...
-
-def rgb_to_xyz(triple): # -> list[int]:
- ...
-
-def xyz_to_luv(triple): # -> list[float] | list[Unknown]:
- ...
-
-def luv_to_xyz(triple): # -> list[float] | list[Unknown]:
- ...
-
-def luv_to_lch(triple): # -> list[Unknown]:
- ...
-
-def lch_to_luv(triple): # -> list[Unknown]:
- ...
-
-def husl_to_lch(triple): # -> list[Unknown]:
- ...
-
-def lch_to_husl(triple): # -> list[Unknown]:
- ...
-
-def huslp_to_lch(triple): # -> list[Unknown]:
- ...
-
-def lch_to_huslp(triple): # -> list[Unknown]:
- ...
-
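These are the vendored HUSL color-space helpers used internally by the palette functions. A brief sketch of round-tripping a color through them (importing the private module directly is done here only for illustration):

from seaborn.external import husl

# Convert an (H, S, L) triple in HUSL space to RGB and to a hex string.
rgb = husl.husl_to_rgb(260, 75, 60)
hex_color = husl.husl_to_hex(260, 75, 60)

# Round-trip from RGB back to HUSL coordinates.
h, s, l = husl.rgb_to_husl(*rgb)
print(hex_color, (round(h), round(s), round(l)))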
diff --git a/typings/seaborn/matrix.pyi b/typings/seaborn/matrix.pyi
deleted file mode 100644
index b93e086..0000000
--- a/typings/seaborn/matrix.pyi
+++ /dev/null
@@ -1,557 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from .axisgrid import Grid
-from ._decorators import _deprecate_positional_args
-
-"""Functions to visualize matrices of data."""
-__all__ = ["heatmap", "clustermap"]
-class _HeatMapper:
- """Draw a heatmap plot of a matrix with nice labels and colormaps."""
- def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt, annot_kws, cbar, cbar_kws, xticklabels=..., yticklabels=..., mask=...) -> None:
- """Initialize the plotting object."""
- ...
-
- def plot(self, ax, cax, kws): # -> None:
- """Draw the heatmap on the provided Axes."""
- ...
-
-
-
-@_deprecate_positional_args
-def heatmap(data, *, vmin=..., vmax=..., cmap=..., center=..., robust=..., annot=..., fmt=..., annot_kws=..., linewidths=..., linecolor=..., cbar=..., cbar_kws=..., cbar_ax=..., square=..., xticklabels=..., yticklabels=..., mask=..., ax=..., **kwargs): # -> Axes:
- """Plot rectangular data as a color-encoded matrix.
-
- This is an Axes-level function and will draw the heatmap into the
- currently-active Axes if none is provided to the ``ax`` argument. Part of
- this Axes space will be taken and used to plot a colormap, unless ``cbar``
- is False or a separate Axes is provided to ``cbar_ax``.
-
- Parameters
- ----------
- data : rectangular dataset
- 2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
- is provided, the index/column information will be used to label the
- columns and rows.
- vmin, vmax : floats, optional
- Values to anchor the colormap, otherwise they are inferred from the
- data and other keyword arguments.
- cmap : matplotlib colormap name or object, or list of colors, optional
- The mapping from data values to color space. If not provided, the
- default will depend on whether ``center`` is set.
- center : float, optional
- The value at which to center the colormap when plotting divergent data.
- Using this parameter will change the default ``cmap`` if none is
- specified.
- robust : bool, optional
- If True and ``vmin`` or ``vmax`` are absent, the colormap range is
- computed with robust quantiles instead of the extreme values.
- annot : bool or rectangular dataset, optional
- If True, write the data value in each cell. If an array-like with the
- same shape as ``data``, then use this to annotate the heatmap instead
- of the data. Note that DataFrames will match on position, not index.
- fmt : str, optional
- String formatting code to use when adding annotations.
- annot_kws : dict of key, value mappings, optional
- Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot``
- is True.
- linewidths : float, optional
- Width of the lines that will divide each cell.
- linecolor : color, optional
- Color of the lines that will divide each cell.
- cbar : bool, optional
- Whether to draw a colorbar.
- cbar_kws : dict of key, value mappings, optional
- Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`.
- cbar_ax : matplotlib Axes, optional
- Axes in which to draw the colorbar, otherwise take space from the
- main Axes.
- square : bool, optional
- If True, set the Axes aspect to "equal" so each cell will be
- square-shaped.
- xticklabels, yticklabels : "auto", bool, list-like, or int, optional
- If True, plot the column names of the dataframe. If False, don't plot
- the column names. If list-like, plot these alternate labels as the
- xticklabels. If an integer, use the column names but plot only every
- n label. If "auto", try to densely plot non-overlapping labels.
- mask : bool array or DataFrame, optional
- If passed, data will not be shown in cells where ``mask`` is True.
- Cells with missing values are automatically masked.
- ax : matplotlib Axes, optional
- Axes in which to draw the plot, otherwise use the currently-active
- Axes.
- kwargs : other keyword arguments
- All other keyword arguments are passed to
- :meth:`matplotlib.axes.Axes.pcolormesh`.
-
- Returns
- -------
- ax : matplotlib Axes
- Axes object with the heatmap.
-
- See Also
- --------
- clustermap : Plot a matrix using hierarchical clustering to arrange the
- rows and columns.
-
- Examples
- --------
-
- Plot a heatmap for a numpy array:
-
- .. plot::
- :context: close-figs
-
- >>> import numpy as np; np.random.seed(0)
- >>> import seaborn as sns; sns.set_theme()
- >>> uniform_data = np.random.rand(10, 12)
- >>> ax = sns.heatmap(uniform_data)
-
- Change the limits of the colormap:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
-
- Plot a heatmap for data centered on 0 with a diverging colormap:
-
- .. plot::
- :context: close-figs
-
- >>> normal_data = np.random.randn(10, 12)
- >>> ax = sns.heatmap(normal_data, center=0)
-
- Plot a dataframe with meaningful row and column labels:
-
- .. plot::
- :context: close-figs
-
- >>> flights = sns.load_dataset("flights")
- >>> flights = flights.pivot("month", "year", "passengers")
- >>> ax = sns.heatmap(flights)
-
- Annotate each cell with the numeric value using integer formatting:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.heatmap(flights, annot=True, fmt="d")
-
- Add lines between each cell:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.heatmap(flights, linewidths=.5)
-
- Use a different colormap:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.heatmap(flights, cmap="YlGnBu")
-
- Center the colormap at a specific value:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.heatmap(flights, center=flights.loc["Jan", 1955])
-
- Plot every other column label and don't plot row labels:
-
- .. plot::
- :context: close-figs
-
- >>> data = np.random.randn(50, 20)
- >>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
-
- Don't draw a colorbar:
-
- .. plot::
- :context: close-figs
-
- >>> ax = sns.heatmap(flights, cbar=False)
-
- Use different axes for the colorbar:
-
- .. plot::
- :context: close-figs
-
- >>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
- >>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
- >>> ax = sns.heatmap(flights, ax=ax,
- ... cbar_ax=cbar_ax,
- ... cbar_kws={"orientation": "horizontal"})
-
- Use a mask to plot only part of a matrix
-
- .. plot::
- :context: close-figs
-
- >>> corr = np.corrcoef(np.random.randn(10, 200))
- >>> mask = np.zeros_like(corr)
- >>> mask[np.triu_indices_from(mask)] = True
- >>> with sns.axes_style("white"):
- ... f, ax = plt.subplots(figsize=(7, 5))
- ... ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
- """
- ...
-
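A sketch of the masked-correlation example from the docstring above, with an explicit annotation matching the `Axes` return type declared in the stub:

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib.axes import Axes

rng = np.random.default_rng(0)
corr = np.corrcoef(rng.normal(size=(10, 200)))

# Hide the upper triangle so only the lower half of the matrix is drawn.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True

# Per the stub, heatmap() returns the matplotlib Axes it drew into.
ax: Axes = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
ax.set_title("Lower-triangle correlation heatmap")
plt.show()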
-class _DendrogramPlotter:
- """Object for drawing tree of similarities between data rows/columns"""
- def __init__(self, data, linkage, metric, method, axis, label, rotate) -> None:
- """Plot a dendrogram of the relationships between the columns of data
-
- Parameters
- ----------
- data : pandas.DataFrame
- Rectangular data
- """
- ...
-
- @property
- def calculated_linkage(self):
- ...
-
- def calculate_dendrogram(self): # -> dict[str, Unknown]:
- """Calculates a dendrogram based on the linkage matrix
-
- This is a separate function rather than a property so that the
- dendrogram is not recalculated every time it is accessed.
-
- Returns
- -------
- dendrogram : dict
- Dendrogram dictionary as returned by scipy.cluster.hierarchy
- .dendrogram. The important key-value pairing is
- "reordered_ind" which indicates the re-ordering of the matrix
- """
- ...
-
- @property
- def reordered_ind(self):
- """Indices of the matrix, reordered by the dendrogram"""
- ...
-
- def plot(self, ax, tree_kws): # -> Self@_DendrogramPlotter:
- """Plots a dendrogram of the similarities between data on the axes
-
- Parameters
- ----------
- ax : matplotlib.axes.Axes
- Axes object upon which the dendrogram is plotted
-
- """
- ...
-
-
-
-@_deprecate_positional_args
-def dendrogram(data, *, linkage=..., axis=..., label=..., metric=..., method=..., rotate=..., tree_kws=..., ax=...): # -> _DendrogramPlotter:
- """Draw a tree diagram of relationships within a matrix
-
- Parameters
- ----------
- data : pandas.DataFrame
- Rectangular data
- linkage : numpy.array, optional
- Linkage matrix
- axis : int, optional
- Which axis to use to calculate linkage. 0 is rows, 1 is columns.
- label : bool, optional
- If True, label the dendrogram at leaves with column or row names
- metric : str, optional
- Distance metric. Anything valid for scipy.spatial.distance.pdist
- method : str, optional
- Linkage method to use. Anything valid for
- scipy.cluster.hierarchy.linkage
- rotate : bool, optional
- When plotting the matrix, whether to rotate it 90 degrees
- counter-clockwise, so the leaves face right
- tree_kws : dict, optional
- Keyword arguments for the ``matplotlib.collections.LineCollection``
- that is used for plotting the lines of the dendrogram tree.
- ax : matplotlib axis, optional
- Axis to plot on, otherwise uses current axis
-
- Returns
- -------
- dendrogramplotter : _DendrogramPlotter
- A Dendrogram plotter object.
-
- Notes
- -----
- Access the reordered dendrogram indices with
- dendrogramplotter.reordered_ind
-
- """
- ...
-
-class ClusterGrid(Grid):
- def __init__(self, data, pivot_kws=..., z_score=..., standard_scale=..., figsize=..., row_colors=..., col_colors=..., mask=..., dendrogram_ratio=..., colors_ratio=..., cbar_pos=...) -> None:
- """Grid object for organizing clustered heatmap input on to axes"""
- ...
-
- def format_data(self, data, pivot_kws, z_score=..., standard_scale=...):
- """Extract variables from data or use directly."""
- ...
-
- @staticmethod
- def z_score(data2d, axis=...):
- """Standarize the mean and variance of the data axis
-
- Parameters
- ----------
- data2d : pandas.DataFrame
- Data to normalize
- axis : int
- Which axis to normalize across. If 0, normalize across rows, if 1,
- normalize across columns.
-
- Returns
- -------
- normalized : pandas.DataFrame
- Normalized data with a mean of 0 and variance of 1 across the
- specified axis.
- """
- ...
-
- @staticmethod
- def standard_scale(data2d, axis=...):
- """Divide the data by the difference between the max and min
-
- Parameters
- ----------
- data2d : pandas.DataFrame
- Data to normalize
- axis : int
- Which axis to normalize across. If 0, normalize across rows, if 1,
- normalize across columns.
- vmin : int
- If 0, then subtract the minimum of the data before dividing by
- the range.
-
- Returns
- -------
- standardized : pandas.DataFrame
- Data rescaled by the range (max - min) along the specified axis.
-
- """
- ...
-
- def dim_ratios(self, colors, dendrogram_ratio, colors_ratio): # -> list[Unknown]:
- """Get the proportions of the figure taken up by each axes."""
- ...
-
- @staticmethod
- def color_list_to_matrix_and_cmap(colors, ind, axis=...): # -> tuple[ndarray[Any, dtype[Any]], Unknown]:
- """Turns a list of colors into a numpy matrix and matplotlib colormap
-
- These arguments can now be plotted using heatmap(matrix, cmap)
- and the provided colors will be plotted.
-
- Parameters
- ----------
- colors : list of matplotlib colors
- Colors to label the rows or columns of a dataframe.
- ind : list of ints
- Ordering of the rows or columns, to reorder the original colors
- by the clustered dendrogram order
- axis : int
- Which axis this is labeling
-
- Returns
- -------
- matrix : numpy.array
- A numpy array of integer values, where each corresponds to a color
- from the originally provided list of colors
- cmap : matplotlib.colors.ListedColormap
-
- """
- ...
-
- def savefig(self, *args, **kwargs): # -> None:
- ...
-
- def plot_dendrograms(self, row_cluster, col_cluster, metric, method, row_linkage, col_linkage, tree_kws): # -> None:
- ...
-
- def plot_colors(self, xind, yind, **kws): # -> None:
- """Plots color labels between the dendrogram and the heatmap
-
- Parameters
- ----------
- heatmap_kws : dict
- Keyword arguments heatmap
-
- """
- ...
-
- def plot_matrix(self, colorbar_kws, xind, yind, **kws): # -> None:
- ...
-
- def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster, row_linkage, col_linkage, tree_kws, **kws): # -> Self@ClusterGrid:
- ...
-
-
-
-@_deprecate_positional_args
-def clustermap(data, *, pivot_kws=..., method=..., metric=..., z_score=..., standard_scale=..., figsize=..., cbar_kws=..., row_cluster=..., col_cluster=..., row_linkage=..., col_linkage=..., row_colors=..., col_colors=..., mask=..., dendrogram_ratio=..., colors_ratio=..., cbar_pos=..., tree_kws=..., **kwargs): # -> ClusterGrid:
- """
- Plot a matrix dataset as a hierarchically-clustered heatmap.
-
- Parameters
- ----------
- data : 2D array-like
- Rectangular data for clustering. Cannot contain NAs.
- pivot_kws : dict, optional
- If `data` is a tidy dataframe, can provide keyword arguments for
- pivot to create a rectangular dataframe.
- method : str, optional
- Linkage method to use for calculating clusters. See
- :func:`scipy.cluster.hierarchy.linkage` documentation for more
- information.
- metric : str, optional
- Distance metric to use for the data. See
- :func:`scipy.spatial.distance.pdist` documentation for more options.
- To use different metrics (or methods) for rows and columns, you may
- construct each linkage matrix yourself and provide them as
- `{row,col}_linkage`.
- z_score : int or None, optional
- Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores
- for the rows or the columns. Z scores are: z = (x - mean)/std, so
- values in each row (column) will get the mean of the row (column)
- subtracted, then divided by the standard deviation of the row (column).
- This ensures that each row (column) has mean of 0 and variance of 1.
- standard_scale : int or None, optional
- Either 0 (rows) or 1 (columns). Whether or not to standardize that
- dimension, meaning for each row or column, subtract the minimum and
- divide each by its maximum.
- figsize : tuple of (width, height), optional
- Overall size of the figure.
- cbar_kws : dict, optional
- Keyword arguments to pass to `cbar_kws` in :func:`heatmap`, e.g. to
- add a label to the colorbar.
- {row,col}_cluster : bool, optional
- If ``True``, cluster the {rows, columns}.
- {row,col}_linkage : :class:`numpy.ndarray`, optional
- Precomputed linkage matrix for the rows or columns. See
- :func:`scipy.cluster.hierarchy.linkage` for specific formats.
- {row,col}_colors : list-like or pandas DataFrame/Series, optional
- List of colors to label for either the rows or columns. Useful to evaluate
- whether samples within a group are clustered together. Can use nested lists or
- DataFrame for multiple color levels of labeling. If given as a
- :class:`pandas.DataFrame` or :class:`pandas.Series`, labels for the colors are
- extracted from the DataFrames column names or from the name of the Series.
- DataFrame/Series colors are also matched to the data by their index, ensuring
- colors are drawn in the correct order.
- mask : bool array or DataFrame, optional
- If passed, data will not be shown in cells where `mask` is True.
- Cells with missing values are automatically masked. Only used for
- visualizing, not for calculating.
- {dendrogram,colors}_ratio : float, or pair of floats, optional
- Proportion of the figure size devoted to the two marginal elements. If
- a pair is given, they correspond to (row, col) ratios.
- cbar_pos : tuple of (left, bottom, width, height), optional
- Position of the colorbar axes in the figure. Setting to ``None`` will
- disable the colorbar.
- tree_kws : dict, optional
- Parameters for the :class:`matplotlib.collections.LineCollection`
- that is used to plot the lines of the dendrogram tree.
- kwargs : other keyword arguments
- All other keyword arguments are passed to :func:`heatmap`.
-
- Returns
- -------
- :class:`ClusterGrid`
- A :class:`ClusterGrid` instance.
-
- See Also
- --------
- heatmap : Plot rectangular data as a color-encoded matrix.
-
- Notes
- -----
- The returned object has a ``savefig`` method that should be used if you
- want to save the figure object without clipping the dendrograms.
-
- To access the reordered row indices, use:
- ``clustergrid.dendrogram_row.reordered_ind``
-
- For the column indices, use:
- ``clustergrid.dendrogram_col.reordered_ind``
-
- Examples
- --------
-
- Plot a clustered heatmap:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme(color_codes=True)
- >>> iris = sns.load_dataset("iris")
- >>> species = iris.pop("species")
- >>> g = sns.clustermap(iris)
-
- Change the size and layout of the figure:
-
- .. plot::
- :context: close-figs
-
- >>> g = sns.clustermap(iris,
- ... figsize=(7, 5),
- ... row_cluster=False,
- ... dendrogram_ratio=(.1, .2),
- ... cbar_pos=(0, .2, .03, .4))
-
- Add colored labels to identify observations:
-
- .. plot::
- :context: close-figs
-
- >>> lut = dict(zip(species.unique(), "rbg"))
- >>> row_colors = species.map(lut)
- >>> g = sns.clustermap(iris, row_colors=row_colors)
-
- Use a different colormap and adjust the limits of the color range:
-
- .. plot::
- :context: close-figs
-
- >>> g = sns.clustermap(iris, cmap="mako", vmin=0, vmax=10)
-
- Use a different similarity metric:
-
- .. plot::
- :context: close-figs
-
- >>> g = sns.clustermap(iris, metric="correlation")
-
- Use a different clustering method:
-
- .. plot::
- :context: close-figs
-
- >>> g = sns.clustermap(iris, method="single")
-
- Standardize the data within the columns:
-
- .. plot::
- :context: close-figs
-
- >>> g = sns.clustermap(iris, standard_scale=1)
-
- Normalize the data within the rows:
-
- .. plot::
- :context: close-figs
-
- >>> g = sns.clustermap(iris, z_score=0, cmap="vlag")
- """
- ...
-
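A short sketch of the reordered-index and `savefig` patterns mentioned in the clustermap notes above; it assumes the bundled example datasets can be fetched by `load_dataset` (network access or a local cache):

import seaborn as sns

iris = sns.load_dataset("iris")
species = iris.pop("species")

g = sns.clustermap(iris)

# The ClusterGrid exposes the dendrogram leaf order for rows and columns.
row_order = g.dendrogram_row.reordered_ind
col_order = g.dendrogram_col.reordered_ind
print(row_order[:5], col_order)

# Use the grid's own savefig so the dendrograms are not clipped.
g.savefig("clustermap.png")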
diff --git a/typings/seaborn/miscplot.pyi b/typings/seaborn/miscplot.pyi
deleted file mode 100644
index e2b5504..0000000
--- a/typings/seaborn/miscplot.pyi
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-__all__ = ["palplot", "dogplot"]
-def palplot(pal, size=...): # -> None:
- """Plot the values in a color palette as a horizontal array.
-
- Parameters
- ----------
- pal : sequence of matplotlib colors
- colors, i.e. as returned by seaborn.color_palette()
- size :
- scaling factor for size of plot
-
- """
- ...
-
-def dogplot(*_, **__): # -> None:
- """Who's a good boy?"""
- ...
-
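A one-line sketch of `palplot`, which simply renders a palette as a horizontal strip of swatches:

import matplotlib.pyplot as plt
import seaborn as sns

# Show the six colors of the "deep" palette at the default swatch size.
sns.palplot(sns.color_palette("deep"), size=1)
plt.show()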
diff --git a/typings/seaborn/palettes.pyi b/typings/seaborn/palettes.pyi
deleted file mode 100644
index 2644183..0000000
--- a/typings/seaborn/palettes.pyi
+++ /dev/null
@@ -1,706 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-__all__ = ["color_palette", "hls_palette", "husl_palette", "mpl_palette", "dark_palette", "light_palette", "diverging_palette", "blend_palette", "xkcd_palette", "crayon_palette", "cubehelix_palette", "set_color_codes"]
-SEABORN_PALETTES = ...
-MPL_QUAL_PALS = ...
-QUAL_PALETTE_SIZES = ...
-QUAL_PALETTES = ...
-class _ColorPalette(list):
- """Set the color palette in a with statement, otherwise be a list."""
- def __enter__(self): # -> Self@_ColorPalette:
- """Open the context."""
- ...
-
- def __exit__(self, *args): # -> None:
- """Close the context."""
- ...
-
- def as_hex(self): # -> _ColorPalette:
- """Return a color palette with hex codes instead of RGB values."""
- ...
-
-
-
-def color_palette(palette=..., n_colors=..., desat=..., as_cmap=...): # -> _ColorPalette | list[tuple[float, float, float]] | Any | list[str]:
- """Return a list of colors or continuous colormap defining a palette.
-
- Possible ``palette`` values include:
- - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
- - Name of matplotlib colormap
- - 'husl' or 'hls'
- - 'ch:<cubehelix arguments>'
- - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
- - A sequence of colors in any format matplotlib accepts
-
- Calling this function with ``palette=None`` will return the current
- matplotlib color cycle.
-
- This function can also be used in a ``with`` statement to temporarily
- set the color cycle for a plot or set of plots.
-
- See the :ref:`tutorial <palette_tutorial>` for more information.
-
- Parameters
- ----------
- palette: None, string, or sequence, optional
- Name of palette or None to return current palette. If a sequence, input
- colors are used but possibly cycled and desaturated.
- n_colors : int, optional
- Number of colors in the palette. If ``None``, the default will depend
- on how ``palette`` is specified. Named palettes default to 6 colors,
- but grabbing the current palette or passing in a list of colors will
- not change the number of colors unless this is specified. Asking for
- more colors than exist in the palette will cause it to cycle. Ignored
- when ``as_cmap`` is True.
- desat : float, optional
- Proportion to desaturate each color by.
- as_cmap : bool
- If True, return a :class:`matplotlib.colors.Colormap`.
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- See Also
- --------
- set_palette : Set the default color cycle for all plots.
- set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
- colors from one of the seaborn palettes.
-
- Examples
- --------
-
- .. include:: ../docstrings/color_palette.rst
-
- """
- ...
-
-def hls_palette(n_colors=..., h=..., l=..., s=..., as_cmap=...): # -> _ColorPalette:
- """Get a set of evenly spaced colors in HLS hue space.
-
- h, l, and s should be between 0 and 1
-
- Parameters
- ----------
-
- n_colors : int
- number of colors in the palette
- h : float
- first hue
- l : float
- lightness
- s : float
- saturation
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- See Also
- --------
- husl_palette : Make a palette using evenly spaced hues in the HUSL system.
-
- Examples
- --------
-
- Create a palette of 10 colors with the default parameters:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.palplot(sns.hls_palette(10))
-
- Create a palette of 10 colors that begins at a different hue value:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.hls_palette(10, h=.5))
-
- Create a palette of 10 colors that are darker than the default:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.hls_palette(10, l=.4))
-
- Create a palette of 10 colors that are less saturated than the default:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.hls_palette(10, s=.4))
-
- """
- ...
-
-def husl_palette(n_colors=..., h=..., s=..., l=..., as_cmap=...): # -> _ColorPalette:
- """Get a set of evenly spaced colors in HUSL hue space.
-
- h, s, and l should be between 0 and 1
-
- Parameters
- ----------
-
- n_colors : int
- number of colors in the palette
- h : float
- first hue
- s : float
- saturation
- l : float
- lightness
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- See Also
- --------
- hls_palette : Make a palette using evenly spaced circular hues in the
- HSL system.
-
- Examples
- --------
-
- Create a palette of 10 colors with the default parameters:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.palplot(sns.husl_palette(10))
-
- Create a palette of 10 colors that begins at a different hue value:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.husl_palette(10, h=.5))
-
- Create a palette of 10 colors that are darker than the default:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.husl_palette(10, l=.4))
-
- Create a palette of 10 colors that are less saturated than the default:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.husl_palette(10, s=.4))
-
- """
- ...
-
-def mpl_palette(name, n_colors=..., as_cmap=...): # -> _ColorPalette:
- """Return discrete colors from a matplotlib palette.
-
- Note that this handles the qualitative colorbrewer palettes
- properly, although if you ask for more colors than a particular
- qualitative palette can provide you will get fewer than you are
- expecting. In contrast, asking for qualitative color brewer palettes
- using :func:`color_palette` will return the expected number of colors,
- but they will cycle.
-
- If you are using the IPython notebook, you can also use the function
- :func:`choose_colorbrewer_palette` to interactively select palettes.
-
- Parameters
- ----------
- name : string
- Name of the palette. This should be a named matplotlib colormap.
- n_colors : int
- Number of discrete colors in the palette.
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- Examples
- --------
-
- Create a qualitative colorbrewer palette with 8 colors:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.palplot(sns.mpl_palette("Set2", 8))
-
- Create a sequential colorbrewer palette:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.mpl_palette("Blues"))
-
- Create a diverging palette:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.mpl_palette("seismic", 8))
-
- Create a "dark" sequential palette:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.mpl_palette("GnBu_d"))
-
- """
- ...
-
-def dark_palette(color, n_colors=..., reverse=..., as_cmap=..., input=...): # -> _ColorPalette:
- """Make a sequential palette that blends from dark to ``color``.
-
- This kind of palette is good for data that range between relatively
- uninteresting low values and interesting high values.
-
- The ``color`` parameter can be specified in a number of ways, including
- all options for defining a color in matplotlib and several additional
- color spaces that are handled by seaborn. You can also use the database
- of named colors from the XKCD color survey.
-
- If you are using the IPython notebook, you can also choose this palette
- interactively with the :func:`choose_dark_palette` function.
-
- Parameters
- ----------
- color : base color for high values
- hex, rgb-tuple, or html color name
- n_colors : int, optional
- number of colors in the palette
- reverse : bool, optional
- if True, reverse the direction of the blend
- as_cmap : bool, optional
- If True, return a :class:`matplotlib.colors.Colormap`.
- input : {'rgb', 'hls', 'husl', 'xkcd'}
- Color space to interpret the input color. The first three options
- apply to tuple inputs and the latter applies to string inputs.
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- See Also
- --------
- light_palette : Create a sequential palette with bright low values.
- diverging_palette : Create a diverging palette with two colors.
-
- Examples
- --------
-
- Generate a palette from an HTML color:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.palplot(sns.dark_palette("purple"))
-
- Generate a palette that decreases in lightness:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.dark_palette("seagreen", reverse=True))
-
- Generate a palette from an HUSL-space seed:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.dark_palette((260, 75, 60), input="husl"))
-
- Generate a colormap object:
-
- .. plot::
- :context: close-figs
-
- >>> from numpy import arange
- >>> x = arange(25).reshape(5, 5)
- >>> cmap = sns.dark_palette("#2ecc71", as_cmap=True)
- >>> ax = sns.heatmap(x, cmap=cmap)
-
- """
- ...
-
-def light_palette(color, n_colors=..., reverse=..., as_cmap=..., input=...): # -> _ColorPalette:
- """Make a sequential palette that blends from light to ``color``.
-
- This kind of palette is good for data that range between relatively
- uninteresting low values and interesting high values.
-
- The ``color`` parameter can be specified in a number of ways, including
- all options for defining a color in matplotlib and several additional
- color spaces that are handled by seaborn. You can also use the database
- of named colors from the XKCD color survey.
-
- If you are using the IPython notebook, you can also choose this palette
- interactively with the :func:`choose_light_palette` function.
-
- Parameters
- ----------
- color : base color for high values
- hex code, html color name, or tuple in ``input`` space.
- n_colors : int, optional
- number of colors in the palette
- reverse : bool, optional
- if True, reverse the direction of the blend
- as_cmap : bool, optional
- If True, return a :class:`matplotlib.colors.Colormap`.
- input : {'rgb', 'hls', 'husl', 'xkcd'}
- Color space to interpret the input color. The first three options
- apply to tuple inputs and the latter applies to string inputs.
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- See Also
- --------
- dark_palette : Create a sequential palette with dark low values.
- diverging_palette : Create a diverging palette with two colors.
-
- Examples
- --------
-
- Generate a palette from an HTML color:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.palplot(sns.light_palette("purple"))
-
- Generate a palette that increases in lightness:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.light_palette("seagreen", reverse=True))
-
- Generate a palette from an HUSL-space seed:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.light_palette((260, 75, 60), input="husl"))
-
- Generate a colormap object:
-
- .. plot::
- :context: close-figs
-
- >>> from numpy import arange
- >>> x = arange(25).reshape(5, 5)
- >>> cmap = sns.light_palette("#2ecc71", as_cmap=True)
- >>> ax = sns.heatmap(x, cmap=cmap)
-
- """
- ...
-
-def diverging_palette(h_neg, h_pos, s=..., l=..., sep=..., n=..., center=..., as_cmap=...): # -> _ColorPalette:
- """Make a diverging palette between two HUSL colors.
-
- If you are using the IPython notebook, you can also choose this palette
- interactively with the :func:`choose_diverging_palette` function.
-
- Parameters
- ----------
- h_neg, h_pos : float in [0, 359]
- Anchor hues for negative and positive extents of the map.
- s : float in [0, 100], optional
- Anchor saturation for both extents of the map.
- l : float in [0, 100], optional
- Anchor lightness for both extents of the map.
- sep : int, optional
- Size of the intermediate region.
- n : int, optional
- Number of colors in the palette (if not returning a cmap)
- center : {"light", "dark"}, optional
- Whether the center of the palette is light or dark
- as_cmap : bool, optional
- If True, return a :class:`matplotlib.colors.Colormap`.
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- See Also
- --------
- dark_palette : Create a sequential palette with dark values.
- light_palette : Create a sequential palette with light values.
-
- Examples
- --------
-
- Generate a blue-white-red palette:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.palplot(sns.diverging_palette(240, 10, n=9))
-
- Generate a brighter green-white-purple palette:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.diverging_palette(150, 275, s=80, l=55, n=9))
-
- Generate a blue-black-red palette:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.diverging_palette(250, 15, s=75, l=40,
- ... n=9, center="dark"))
-
- Generate a colormap object:
-
- .. plot::
- :context: close-figs
-
- >>> from numpy import arange
- >>> x = arange(25).reshape(5, 5)
- >>> cmap = sns.diverging_palette(220, 20, as_cmap=True)
- >>> ax = sns.heatmap(x, cmap=cmap)
-
- """
- ...
-
-def blend_palette(colors, n_colors=..., as_cmap=..., input=...): # -> _ColorPalette:
- """Make a palette that blends between a list of colors.
-
- Parameters
- ----------
- colors : sequence of colors in various formats interpreted by ``input``
- hex code, html color name, or tuple in ``input`` space.
- n_colors : int, optional
- Number of colors in the palette.
- as_cmap : bool, optional
- If True, return a :class:`matplotlib.colors.Colormap`.
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- """
- ...
-
-def xkcd_palette(colors): # -> _ColorPalette | list[tuple[float, float, float]] | Any | list[str]:
- """Make a palette with color names from the xkcd color survey.
-
- See xkcd for the full list of colors: https://xkcd.com/color/rgb/
-
- This is just a simple wrapper around the ``seaborn.xkcd_rgb`` dictionary.
-
- Parameters
- ----------
- colors : list of strings
- List of keys in the ``seaborn.xkcd_rgb`` dictionary.
-
- Returns
- -------
- palette : seaborn color palette
- Returns the list of colors as RGB tuples in an object that behaves like
- other seaborn color palettes.
-
- See Also
- --------
- crayon_palette : Make a palette with Crayola crayon colors.
-
- """
- ...
-
-def crayon_palette(colors): # -> _ColorPalette | list[tuple[float, float, float]] | Any | list[str]:
- """Make a palette with color names from Crayola crayons.
-
- Colors are taken from here:
- https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors
-
- This is just a simple wrapper around the ``seaborn.crayons`` dictionary.
-
- Parameters
- ----------
- colors : list of strings
- List of keys in the ``seaborn.crayons`` dictionary.
-
- Returns
- -------
- palette : seaborn color palette
- Returns the list of colors as rgb tuples in an object that behaves like
- other seaborn color palettes.
-
- See Also
- --------
- xkcd_palette : Make a palette with named colors from the XKCD color survey.
-
- """
- ...
-
-def cubehelix_palette(n_colors=..., start=..., rot=..., gamma=..., hue=..., light=..., dark=..., reverse=..., as_cmap=...): # -> _ColorPalette:
- """Make a sequential palette from the cubehelix system.
-
- This produces a colormap with linearly-decreasing (or increasing)
- brightness. That means that information will be preserved if printed to
- black and white or viewed by someone who is colorblind. "cubehelix" is
- also available as a matplotlib-based palette, but this function gives the
- user more control over the look of the palette and has a different set of
- defaults.
-
- In addition to using this function, it is also possible to generate a
- cubehelix palette generally in seaborn using a string-shorthand; see the
- example below.
-
- Parameters
- ----------
- n_colors : int
- Number of colors in the palette.
- start : float, 0 <= start <= 3
- The hue at the start of the helix.
- rot : float
- Rotations around the hue wheel over the range of the palette.
- gamma : float 0 <= gamma
- Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)
- colors.
- hue : float, 0 <= hue <= 1
- Saturation of the colors.
- dark : float 0 <= dark <= 1
- Intensity of the darkest color in the palette.
- light : float 0 <= light <= 1
- Intensity of the lightest color in the palette.
- reverse : bool
- If True, the palette will go from dark to light.
- as_cmap : bool
- If True, return a :class:`matplotlib.colors.Colormap`.
-
- Returns
- -------
- list of RGB tuples or :class:`matplotlib.colors.Colormap`
-
- See Also
- --------
- choose_cubehelix_palette : Launch an interactive widget to select cubehelix
- palette parameters.
- dark_palette : Create a sequential palette with dark low values.
- light_palette : Create a sequential palette with bright low values.
-
- References
- ----------
- Green, D. A. (2011). "A colour scheme for the display of astronomical
- intensity images". Bulletin of the Astromical Society of India, Vol. 39,
- p. 289-295.
-
- Examples
- --------
-
- Generate the default palette:
-
- .. plot::
- :context: close-figs
-
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.palplot(sns.cubehelix_palette())
-
- Rotate backwards from the same starting location:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.cubehelix_palette(rot=-.4))
-
- Use a different starting point and shorter rotation:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.cubehelix_palette(start=2.8, rot=.1))
-
- Reverse the direction of the lightness ramp:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.cubehelix_palette(reverse=True))
-
- Generate a colormap object:
-
- .. plot::
- :context: close-figs
-
- >>> from numpy import arange
- >>> x = arange(25).reshape(5, 5)
- >>> cmap = sns.cubehelix_palette(as_cmap=True)
- >>> ax = sns.heatmap(x, cmap=cmap)
-
- Use the full lightness range:
-
- .. plot::
- :context: close-figs
-
- >>> cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True)
- >>> ax = sns.heatmap(x, cmap=cmap)
-
- Use through the :func:`color_palette` interface:
-
- .. plot::
- :context: close-figs
-
- >>> sns.palplot(sns.color_palette("ch:2,r=.2,l=.6"))
-
- """
- ...
-
-def set_color_codes(palette=...): # -> None:
- """Change how matplotlib color shorthands are interpreted.
-
- Calling this will change how shorthand codes like "b" or "g"
- are interpreted by matplotlib in subsequent plots.
-
- Parameters
- ----------
- palette : {deep, muted, pastel, dark, bright, colorblind}
- Named seaborn palette to use as the source of colors.
-
- See Also
- --------
- set : Color codes can be set through the high-level seaborn style
- manager.
- set_palette : Color codes can also be set through the function that
- sets the matplotlib color cycle.
-
- Examples
- --------
-
- Map matplotlib color codes to the default seaborn palette.
-
- .. plot::
- :context: close-figs
-
- >>> import matplotlib.pyplot as plt
- >>> import seaborn as sns; sns.set_theme()
- >>> sns.set_color_codes()
- >>> _ = plt.plot([0, 1], color="r")
-
- Use a different seaborn palette.
-
- .. plot::
- :context: close-figs
-
- >>> sns.set_color_codes("dark")
- >>> _ = plt.plot([0, 1], color="g")
- >>> _ = plt.plot([0, 2], color="m")
-
- """
- ...
-
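A brief sketch of the with-statement usage described in the `color_palette` docstring: with `as_cmap` left False, the returned `_ColorPalette` is a context manager, so the color cycle reverts when the block exits.

import matplotlib.pyplot as plt
import seaborn as sns

sns.set_theme()

# Temporarily switch the color cycle for a single figure.
with sns.color_palette("husl", 8):
    fig, ax = plt.subplots()
    for offset in range(8):
        ax.plot([0, 1], [offset, offset + 1])

# Outside the block the previous palette is active again.
print(sns.color_palette())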
diff --git a/typings/seaborn/rcmod.pyi b/typings/seaborn/rcmod.pyi
deleted file mode 100644
index 41193b9..0000000
--- a/typings/seaborn/rcmod.pyi
+++ /dev/null
@@ -1,265 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-import matplotlib as mpl
-from distutils.version import LooseVersion
-
-"""Control plot style and scaling using the matplotlib rcParams interface."""
-__all__ = ["set_theme", "set", "reset_defaults", "reset_orig", "axes_style", "set_style", "plotting_context", "set_context", "set_palette"]
-_style_keys = ...
-_context_keys = ...
-if LooseVersion(mpl.__version__) >= "3.0":
- ...
-def set_theme(context=..., style=..., palette=..., font=..., font_scale=..., color_codes=..., rc=...): # -> None:
- """Set multiple theme parameters in one step.
-
- Each set of parameters can be set directly or temporarily, see the
- referenced functions below for more information.
-
- Parameters
- ----------
- context : string or dict
- Plotting context parameters, see :func:`plotting_context`.
- style : string or dict
- Axes style parameters, see :func:`axes_style`.
- palette : string or sequence
- Color palette, see :func:`color_palette`.
- font : string
- Font family, see matplotlib font manager.
- font_scale : float, optional
- Separate scaling factor to independently scale the size of the
- font elements.
- color_codes : bool
- If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
- color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
- rc : dict or None
- Dictionary of rc parameter mappings to override the above.
-
- """
- ...
-
-def set(*args, **kwargs): # -> None:
- """Alias for :func:`set_theme`, which is the preferred interface."""
- ...
-
-def reset_defaults(): # -> None:
- """Restore all RC params to default settings."""
- ...
-
-def reset_orig(): # -> None:
- """Restore all RC params to original settings (respects custom rc)."""
- ...
-
-def axes_style(style=..., rc=...): # -> _AxesStyle:
- """Return a parameter dict for the aesthetic style of the plots.
-
- This affects things like the color of the axes, whether a grid is
- enabled by default, and other aesthetic elements.
-
- This function returns an object that can be used in a ``with`` statement
- to temporarily change the style parameters.
-
- Parameters
- ----------
- style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
- A dictionary of parameters or the name of a preconfigured set.
- rc : dict, optional
- Parameter mappings to override the values in the preset seaborn
- style dictionaries. This only updates parameters that are
- considered part of the style definition.
-
- Examples
- --------
- >>> st = axes_style("whitegrid")
-
- >>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
-
- >>> import matplotlib.pyplot as plt
- >>> with axes_style("white"):
- ... f, ax = plt.subplots()
- ... ax.plot(x, y) # doctest: +SKIP
-
- See Also
- --------
- set_style : set the matplotlib parameters for a seaborn theme
- plotting_context : return a parameter dict to scale plot elements
- color_palette : define the color palette for a plot
-
- """
- ...
-
-def set_style(style=..., rc=...): # -> None:
- """Set the aesthetic style of the plots.
-
- This affects things like the color of the axes, whether a grid is
- enabled by default, and other aesthetic elements.
-
- Parameters
- ----------
- style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
- A dictionary of parameters or the name of a preconfigured set.
- rc : dict, optional
- Parameter mappings to override the values in the preset seaborn
- style dictionaries. This only updates parameters that are
- considered part of the style definition.
-
- Examples
- --------
- >>> set_style("whitegrid")
-
- >>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
-
- See Also
- --------
- axes_style : return a dict of parameters or use in a ``with`` statement
- to temporarily set the style.
- set_context : set parameters to scale plot elements
- set_palette : set the default color palette for figures
-
- """
- ...
-
-def plotting_context(context=..., font_scale=..., rc=...): # -> _PlottingContext:
- """Return a parameter dict to scale elements of the figure.
-
- This affects things like the size of the labels, lines, and other
- elements of the plot, but not the overall style. The base context
- is "notebook", and the other contexts are "paper", "talk", and "poster",
- which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
- respectively.
-
- This function returns an object that can be used in a ``with`` statement
- to temporarily change the context parameters.
-
- Parameters
- ----------
- context : dict, None, or one of {paper, notebook, talk, poster}
- A dictionary of parameters or the name of a preconfigured set.
- font_scale : float, optional
- Separate scaling factor to independently scale the size of the
- font elements.
- rc : dict, optional
- Parameter mappings to override the values in the preset seaborn
- context dictionaries. This only updates parameters that are
- considered part of the context definition.
-
- Examples
- --------
- >>> c = plotting_context("poster")
-
- >>> c = plotting_context("notebook", font_scale=1.5)
-
- >>> c = plotting_context("talk", rc={"lines.linewidth": 2})
-
- >>> import matplotlib.pyplot as plt
- >>> with plotting_context("paper"):
- ... f, ax = plt.subplots()
- ... ax.plot(x, y) # doctest: +SKIP
-
- See Also
- --------
- set_context : set the matplotlib parameters to scale plot elements
- axes_style : return a dict of parameters defining a figure style
- color_palette : define the color palette for a plot
-
- """
- ...
-
-def set_context(context=..., font_scale=..., rc=...): # -> None:
- """Set the plotting context parameters.
-
- This affects things like the size of the labels, lines, and other
- elements of the plot, but not the overall style. The base context
- is "notebook", and the other contexts are "paper", "talk", and "poster",
- which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
- respectively.
-
- Parameters
- ----------
- context : dict, None, or one of {paper, notebook, talk, poster}
- A dictionary of parameters or the name of a preconfigured set.
- font_scale : float, optional
- Separate scaling factor to independently scale the size of the
- font elements.
- rc : dict, optional
- Parameter mappings to override the values in the preset seaborn
- context dictionaries. This only updates parameters that are
- considered part of the context definition.
-
- Examples
- --------
- >>> set_context("paper")
-
- >>> set_context("talk", font_scale=1.4)
-
- >>> set_context("talk", rc={"lines.linewidth": 2})
-
- See Also
- --------
- plotting_context : return a dictionary of rc parameters, or use in
- a ``with`` statement to temporarily set the context.
- set_style : set the default parameters for figure style
- set_palette : set the default color palette for figures
-
- """
- ...
-
-class _RCAesthetics(dict):
- def __enter__(self): # -> None:
- ...
-
- def __exit__(self, exc_type, exc_value, exc_tb): # -> None:
- ...
-
- def __call__(self, func): # -> _Wrapped[..., Unknown, (*args: Unknown, **kwargs: Unknown), Unknown]:
- ...
-
-
-
-class _AxesStyle(_RCAesthetics):
- """Light wrapper on a dict to set style temporarily."""
- _keys = ...
- _set = ...
-
-
-class _PlottingContext(_RCAesthetics):
- """Light wrapper on a dict to set context temporarily."""
- _keys = ...
- _set = ...
-
-
-def set_palette(palette, n_colors=..., desat=..., color_codes=...): # -> None:
- """Set the matplotlib color cycle using a seaborn palette.
-
- Parameters
- ----------
- palette : seaborn color palette | matplotlib colormap | hls | husl
- Palette definition. Should be something that :func:`color_palette`
- can process.
- n_colors : int
- Number of colors in the cycle. The default number of colors will depend
- on the format of ``palette``, see the :func:`color_palette`
- documentation for more information.
- desat : float
- Proportion to desaturate each color by.
- color_codes : bool
- If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
- color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
-
- Examples
- --------
- >>> set_palette("Reds")
-
- >>> set_palette("Set1", 8, .75)
-
- See Also
- --------
- color_palette : build a color palette or set the color cycle temporarily
- in a ``with`` statement.
- set_context : set parameters to scale plot elements
- set_style : set the default parameters for figure style
-
- """
- ...
-
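A small sketch tying the rcmod entry points together: set the theme once, then override the style temporarily with the `axes_style` context manager.

import matplotlib.pyplot as plt
import seaborn as sns

# Set several theme parameters in one step.
sns.set_theme(context="talk", style="whitegrid", palette="deep",
              font_scale=1.1, rc={"lines.linewidth": 2})

# Temporarily switch to a different axes style for one figure.
with sns.axes_style("white", rc={"xtick.major.size": 8}):
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
plt.show()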
diff --git a/typings/seaborn/regression.pyi b/typings/seaborn/regression.pyi
deleted file mode 100644
index c1a8ffe..0000000
--- a/typings/seaborn/regression.pyi
+++ /dev/null
@@ -1,158 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._decorators import _deprecate_positional_args
-
-"""Plotting functions for linear models (broadly construed)."""
-_has_statsmodels = ...
-__all__ = ["lmplot", "regplot", "residplot"]
-class _LinearPlotter:
- """Base class for plotting relational data in tidy format.
-
- To get anything useful done you'll have to inherit from this, but setup
- code that can be abstracted out should be put here.
-
- """
- def establish_variables(self, data, **kws): # -> None:
- """Extract variables from data or use directly."""
- ...
-
- def dropna(self, *vars): # -> None:
- """Remove observations with missing data."""
- ...
-
- def plot(self, ax):
- ...
-
-
-
-class _RegressionPlotter(_LinearPlotter):
- """Plotter for numeric independent variables with regression model.
-
- This does the computations and drawing for the `regplot` function, and
- is thus also used indirectly by `lmplot`.
- """
- def __init__(self, x, y, data=..., x_estimator=..., x_bins=..., x_ci=..., scatter=..., fit_reg=..., ci=..., n_boot=..., units=..., seed=..., order=..., logistic=..., lowess=..., robust=..., logx=..., x_partial=..., y_partial=..., truncate=..., dropna=..., x_jitter=..., y_jitter=..., color=..., label=...) -> None:
- ...
-
- @property
- def scatter_data(self): # -> tuple[ndarray[Any, dtype[Unknown]] | NDArray[floating[Any]], ndarray[Any, dtype[Unknown]] | NDArray[floating[Any]]]:
- """Data where each observation is a point."""
- ...
-
- @property
- def estimate_data(self): # -> tuple[list[Any], list[Unknown], list[Unknown]]:
- """Data with a point estimate and CI for each discrete x value."""
- ...
-
- def fit_regression(self, ax=..., x_range=..., grid=...): # -> tuple[Unknown, Unknown | NDArray[float64] | Any, Unknown | None]:
- """Fit the regression model."""
- ...
-
- def fit_fast(self, grid): # -> tuple[Any, None] | tuple[Any, Any]:
- """Low-level regression and prediction using linear algebra."""
- ...
-
- def fit_poly(self, grid, order): # -> tuple[Unknown, None] | tuple[Unknown, NDArray[Unknown]]:
- """Regression using numpy polyfit for higher-order trends."""
- ...
-
- def fit_statsmodels(self, grid, model, **kwargs): # -> tuple[Unknown | NDArray[float64], None] | tuple[Unknown | NDArray[float64], NDArray[Unknown]]:
- """More general regression function using statsmodels objects."""
- ...
-
- def fit_lowess(self): # -> tuple[Unknown, Unknown]:
- """Fit a locally-weighted regression, which returns its own grid."""
- ...
-
- def fit_logx(self, grid): # -> tuple[Any, None] | tuple[Any, Any]:
- """Fit the model in log-space."""
- ...
-
- def bin_predictor(self, bins): # -> tuple[Any, Any]:
- """Discretize a predictor by assigning value to closest bin."""
- ...
-
- def regress_out(self, a, b): # -> ndarray[Any, dtype[Unknown]]:
- """Regress b from a keeping a's original mean."""
- ...
-
- def plot(self, ax, scatter_kws, line_kws): # -> None:
- """Draw the full plot."""
- ...
-
- def scatterplot(self, ax, kws): # -> None:
- """Draw the data."""
- ...
-
- def lineplot(self, ax, kws): # -> None:
- """Draw the model."""
- ...
-
-
-
-_regression_docs = ...
-@_deprecate_positional_args
-def lmplot(*, x=..., y=..., data=..., hue=..., col=..., row=..., palette=..., col_wrap=..., height=..., aspect=..., markers=..., sharex=..., sharey=..., hue_order=..., col_order=..., row_order=..., legend=..., legend_out=..., x_estimator=..., x_bins=..., x_ci=..., scatter=..., fit_reg=..., ci=..., n_boot=..., units=..., seed=..., order=..., logistic=..., lowess=..., robust=..., logx=..., x_partial=..., y_partial=..., truncate=..., x_jitter=..., y_jitter=..., scatter_kws=..., line_kws=..., size=...): # -> FacetGrid:
- ...
-
-@_deprecate_positional_args
-def regplot(*, x=..., y=..., data=..., x_estimator=..., x_bins=..., x_ci=..., scatter=..., fit_reg=..., ci=..., n_boot=..., units=..., seed=..., order=..., logistic=..., lowess=..., robust=..., logx=..., x_partial=..., y_partial=..., truncate=..., dropna=..., x_jitter=..., y_jitter=..., label=..., color=..., marker=..., scatter_kws=..., line_kws=..., ax=...): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def residplot(*, x=..., y=..., data=..., lowess=..., x_partial=..., y_partial=..., order=..., robust=..., dropna=..., label=..., color=..., scatter_kws=..., line_kws=..., ax=...): # -> Axes:
- """Plot the residuals of a linear regression.
-
- This function will regress y on x (possibly as a robust or polynomial
- regression) and then draw a scatterplot of the residuals. You can
- optionally fit a lowess smoother to the residual plot, which can
- help in determining if there is structure to the residuals.
-
- Parameters
- ----------
- x : vector or string
- Data or column name in `data` for the predictor variable.
- y : vector or string
- Data or column name in `data` for the response variable.
- data : DataFrame, optional
- DataFrame to use if `x` and `y` are column names.
- lowess : boolean, optional
- Fit a lowess smoother to the residual scatterplot.
- {x, y}_partial : matrix or string(s), optional
- Matrix with same first dimension as `x`, or column name(s) in `data`.
- These variables are treated as confounding and are removed from
- the `x` or `y` variables before plotting.
- order : int, optional
- Order of the polynomial to fit when calculating the residuals.
- robust : boolean, optional
- Fit a robust linear regression when calculating the residuals.
- dropna : boolean, optional
- If True, ignore observations with missing data when fitting and
- plotting.
- label : string, optional
- Label that will be used in any plot legends.
- color : matplotlib color, optional
- Color to use for all elements of the plot.
- {scatter, line}_kws : dictionaries, optional
- Additional keyword arguments passed to scatter() and plot() for drawing
- the components of the plot.
- ax : matplotlib axis, optional
- Plot into this axis, otherwise grab the current axis or make a new
- one if not existing.
-
- Returns
- -------
- ax: matplotlib axes
- Axes with the regression plot.
-
- See Also
- --------
- regplot : Plot a simple linear regression model.
- jointplot : Draw a :func:`residplot` with univariate marginal distributions
- (when used with ``kind="resid"``).
-
- """
- ...
-
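A minimal sketch of `residplot` as described above; `lowess=True` assumes statsmodels is installed.

import numpy as np
import seaborn as sns

rng = np.random.default_rng(0)
x = rng.normal(size=80)
y = 2 * x + rng.normal(size=80)

# Regress y on x and scatter the residuals, with a lowess guide line.
ax = sns.residplot(x=x, y=y, lowess=True, color="g")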
diff --git a/typings/seaborn/relational.pyi b/typings/seaborn/relational.pyi
deleted file mode 100644
index f952891..0000000
--- a/typings/seaborn/relational.pyi
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-from ._core import VectorPlotter
-from ._decorators import _deprecate_positional_args
-
-__all__ = ["relplot", "scatterplot", "lineplot"]
-_relational_narrative = ...
-_relational_docs = ...
-_param_docs = ...
-class _RelationalPlotter(VectorPlotter):
- wide_structure = ...
- sort = ...
- def add_legend_data(self, ax):
- """Add labeled artists to represent the different plot semantics."""
- ...
-
-
-
-class _LinePlotter(_RelationalPlotter):
- _legend_attributes = ...
- _legend_func = ...
- def __init__(self, *, data=..., variables=..., estimator=..., ci=..., n_boot=..., seed=..., sort=..., err_style=..., err_kws=..., legend=...) -> None:
- ...
-
- def aggregate(self, vals, grouper, units=...): # -> tuple[Unknown, Unknown, None] | tuple[Unknown, Unknown, Series | DataFrame | Unknown | None]:
- """Compute an estimate and confidence interval using grouper."""
- ...
-
- def plot(self, ax, kws): # -> None:
- """Draw the plot onto an axes, passing matplotlib kwargs."""
- ...
-
-
-
-class _ScatterPlotter(_RelationalPlotter):
- _legend_attributes = ...
- _legend_func = ...
- def __init__(self, *, data=..., variables=..., x_bins=..., y_bins=..., estimator=..., ci=..., n_boot=..., alpha=..., x_jitter=..., y_jitter=..., legend=...) -> None:
- ...
-
- def plot(self, ax, kws): # -> None:
- ...
-
-
-
-@_deprecate_positional_args
-def lineplot(*, x=..., y=..., hue=..., size=..., style=..., data=..., palette=..., hue_order=..., hue_norm=..., sizes=..., size_order=..., size_norm=..., dashes=..., markers=..., style_order=..., units=..., estimator=..., ci=..., n_boot=..., seed=..., sort=..., err_style=..., err_kws=..., legend=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def scatterplot(*, x=..., y=..., hue=..., style=..., size=..., data=..., palette=..., hue_order=..., hue_norm=..., sizes=..., size_order=..., size_norm=..., markers=..., style_order=..., x_bins=..., y_bins=..., units=..., estimator=..., ci=..., n_boot=..., alpha=..., x_jitter=..., y_jitter=..., legend=..., ax=..., **kwargs): # -> Axes:
- ...
-
-@_deprecate_positional_args
-def relplot(*, x=..., y=..., hue=..., size=..., style=..., data=..., row=..., col=..., col_wrap=..., row_order=..., col_order=..., palette=..., hue_order=..., hue_norm=..., sizes=..., size_order=..., size_norm=..., markers=..., dashes=..., style_order=..., legend=..., kind=..., height=..., aspect=..., facet_kws=..., units=..., **kwargs): # -> FacetGrid:
- ...
-
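For the relational stubs above, a hedged sketch of typical calls (assuming seaborn >= 0.11, where these arguments are keyword-only as the `*` in the signatures indicates, and the "fmri" example dataset is available):

    import seaborn as sns

    fmri = sns.load_dataset("fmri")
    ax = sns.lineplot(x="timepoint", y="signal", hue="event", data=fmri)       # -> Axes
    ax = sns.scatterplot(x="timepoint", y="signal", hue="region", data=fmri)   # -> Axes
    grid = sns.relplot(x="timepoint", y="signal", hue="event", col="region",
                       kind="line", data=fmri)                                 # -> FacetGrid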
diff --git a/typings/seaborn/tests/__init__.pyi b/typings/seaborn/tests/__init__.pyi
deleted file mode 100644
index 006bc27..0000000
--- a/typings/seaborn/tests/__init__.pyi
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
diff --git a/typings/seaborn/utils.pyi b/typings/seaborn/utils.pyi
deleted file mode 100644
index d3178e2..0000000
--- a/typings/seaborn/utils.pyi
+++ /dev/null
@@ -1,344 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-"""Utility functions, mostly for internal use."""
-__all__ = ["desaturate", "saturate", "set_hls_values", "despine", "get_dataset_names", "get_data_home", "load_dataset"]
-def sort_df(df, *args, **kwargs):
- """Wrapper to handle different pandas sorting API pre/post 0.17."""
- ...
-
-def ci_to_errsize(cis, heights): # -> NDArray[Unknown]:
- """Convert intervals to error arguments relative to plot heights.
-
- Parameters
- ----------
- cis : 2 x n sequence
- sequence of confidence interval limits
- heights : n sequence
- sequence of plot heights
-
- Returns
- -------
- errsize : 2 x n array
- sequence of error size relative to height values in correct
- format as argument for plt.bar
-
- """
- ...
-
-def pmf_hist(a, bins=...): # -> tuple[ndarray[Any, dtype[Any]], Any, Any]:
- """Return arguments to plt.bar for pmf-like histogram of an array.
-
- DEPRECATED: will be removed in a future version.
-
- Parameters
- ----------
- a : array-like
- array to make histogram of
- bins : int
- number of bins
-
- Returns
- -------
- x : array
- left x position of bars
- h : array
- height of bars
- w : float
- width of bars
-
- """
- ...
-
-def desaturate(color, prop): # -> tuple[float, float, float]:
- """Decrease the saturation channel of a color by some percent.
-
- Parameters
- ----------
- color : matplotlib color
- hex, rgb-tuple, or html color name
- prop : float
- saturation channel of color will be multiplied by this value
-
- Returns
- -------
- new_color : rgb tuple
- desaturated color code in RGB tuple representation
-
- """
- ...
-
-def saturate(color): # -> tuple[float, float, float]:
- """Return a fully saturated color with the same hue.
-
- Parameters
- ----------
- color : matplotlib color
- hex, rgb-tuple, or html color name
-
- Returns
- -------
- new_color : rgb tuple
- saturated color code in RGB tuple representation
-
- """
- ...
-
-def set_hls_values(color, h=..., l=..., s=...): # -> tuple[float, float, float]:
- """Independently manipulate the h, l, or s channels of a color.
-
- Parameters
- ----------
- color : matplotlib color
- hex, rgb-tuple, or html color name
- h, l, s : floats between 0 and 1, or None
- new values for each channel in hls space
-
- Returns
- -------
- new_color : rgb tuple
- new color code in RGB tuple representation
-
- """
- ...
-
-def axlabel(xlabel, ylabel, **kwargs): # -> None:
- """Grab current axis and label it.
-
- DEPRECATED: will be removed in a future version.
-
- """
- ...
-
-def remove_na(vector):
- """Helper method for removing null values from data vectors.
-
- Parameters
- ----------
- vector : vector object
- Must implement boolean masking with [] subscript syntax.
-
- Returns
- -------
- clean_vector : same type as ``vector``
- Vector of data with null values removed. May be a copy or a view.
-
- """
- ...
-
-def get_color_cycle(): # -> Any | list[str]:
- """Return the list of colors in the current matplotlib color cycle
-
- Parameters
- ----------
- None
-
- Returns
- -------
- colors : list
- List of matplotlib colors in the current cycle, or dark gray if
- the current color cycle is empty.
- """
- ...
-
-def despine(fig=..., ax=..., top=..., right=..., left=..., bottom=..., offset=..., trim=...): # -> None:
- """Remove the top and right spines from plot(s).
-
- Parameters
- ----------
- fig : matplotlib figure, optional
- Figure to despine all axes of, defaults to the current figure.
- ax : matplotlib axes, optional
- Specific axes object to despine. Ignored if fig is provided.
- top, right, left, bottom : boolean, optional
- If True, remove that spine.
- offset : int or dict, optional
- Absolute distance, in points, spines should be moved away
- from the axes (negative values move spines inward). A single value
- applies to all spines; a dict can be used to set offset values per
- side.
- trim : bool, optional
- If True, limit spines to the smallest and largest major tick
- on each non-despined axis.
-
- Returns
- -------
- None
-
- """
- ...
-
-def percentiles(a, pcts, axis=...): # -> ndarray[Any, dtype[Unknown]]:
- """Like scoreatpercentile but can take and return array of percentiles.
-
- DEPRECATED: will be removed in a future version.
-
- Parameters
- ----------
- a : array
- data
- pcts : sequence of percentile values
- percentile or percentiles to find score at
- axis : int or None
- if not None, computes scores over this axis
-
- Returns
- -------
- scores: array
- array of scores at requested percentiles
- first dimension is length of object passed to ``pcts``
-
- """
- ...
-
-def ci(a, which=..., axis=...):
- """Return a percentile range from an array of values."""
- ...
-
-def sig_stars(p): # -> Literal['***', '**', '*', '.', '']:
- """Return a R-style significance string corresponding to p values.
-
- DEPRECATED: will be removed in a future version.
-
- """
- ...
-
-def iqr(a): # -> float | NDArray[floating[Any]] | Any | NDArray[Any]:
- """Calculate the IQR for an array of numbers.
-
- DEPRECATED: will be removed in a future version.
-
- """
- ...
-
-def get_dataset_names(): # -> list[Any]:
- """Report available example datasets, useful for reporting issues.
-
- Requires an internet connection.
-
- """
- ...
-
-def get_data_home(data_home=...): # -> str:
- """Return a path to the cache directory for example datasets.
-
- This directory is then used by :func:`load_dataset`.
-
- If the ``data_home`` argument is not specified, it tries to read from the
- ``SEABORN_DATA`` environment variable and defaults to ``~/seaborn-data``.
-
- """
- ...
-
-def load_dataset(name, cache=..., data_home=..., **kws):
- """Load an example dataset from the online repository (requires internet).
-
- This function provides quick access to a small number of example datasets
- that are useful for documenting seaborn or generating reproducible examples
- for bug reports. It is not necessary for normal usage.
-
- Note that some of the datasets have a small amount of preprocessing applied
- to define a proper ordering for categorical variables.
-
- Use :func:`get_dataset_names` to see a list of available datasets.
-
- Parameters
- ----------
- name : str
- Name of the dataset (``{name}.csv`` on
- https://github.com/mwaskom/seaborn-data).
- cache : boolean, optional
- If True, try to load from the local cache first, and save to the cache
- if a download is required.
- data_home : string, optional
- The directory in which to cache data; see :func:`get_data_home`.
- kws : keys and values, optional
- Additional keyword arguments are passed through to
- :func:`pandas.read_csv`.
-
- Returns
- -------
- df : :class:`pandas.DataFrame`
- Tabular data, possibly with some preprocessing applied.
-
- """
- ...
-
-def axis_ticklabels_overlap(labels): # -> Literal[False]:
- """Return a boolean for whether the list of ticklabels have overlaps.
-
- Parameters
- ----------
- labels : list of matplotlib ticklabels
-
- Returns
- -------
- overlap : boolean
- True if any of the labels overlap.
-
- """
- ...
-
-def axes_ticklabels_overlap(ax): # -> tuple[Unknown | Literal[False], Unknown | Literal[False]]:
- """Return booleans for whether the x and y ticklabels on an Axes overlap.
-
- Parameters
- ----------
- ax : matplotlib Axes
-
- Returns
- -------
- x_overlap, y_overlap : booleans
- True when the labels on that axis overlap.
-
- """
- ...
-
-def locator_to_legend_entries(locator, limits, dtype): # -> tuple[list[Unknown], list[Unknown]]:
- """Return levels and formatted levels for brief numeric legends."""
- class dummy_axis:
- ...
-
-
-
-def relative_luminance(color): # -> Any:
- """Calculate the relative luminance of a color according to W3C standards
-
- Parameters
- ----------
- color : matplotlib color or sequence of matplotlib colors
- Hex code, rgb-tuple, or html color name.
-
- Returns
- -------
- luminance : float(s) between 0 and 1
-
- """
- ...
-
-def to_utf8(obj): # -> str:
- """Return a string representing a Python object.
-
- Strings (i.e. type ``str``) are returned unchanged.
-
- Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.
-
- For other objects, the method ``__str__()`` is called, and the result is
- returned as a string.
-
- Parameters
- ----------
- obj : object
- Any Python object
-
- Returns
- -------
- s : str
- UTF-8-decoded string representation of ``obj``
-
- """
- ...
-
-def adjust_legend_subtitles(legend): # -> None:
- """Make invisible-handle "subtitles" entries look more like titles."""
- ...
-
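A few of the utilities stubbed above in everyday use (illustrative only; assumes seaborn and matplotlib are importable and the example-data cache described in get_data_home is reachable):

    import seaborn as sns
    import matplotlib.pyplot as plt

    tips = sns.load_dataset("tips", cache=True)       # cached under get_data_home()
    fig, ax = plt.subplots()
    ax.scatter(tips["total_bill"], tips["tip"])
    sns.despine(ax=ax, offset=5, trim=True)           # remove top/right spines, offset the rest

    muted   = sns.desaturate("crimson", 0.5)          # rgb tuple at half the original saturation
    full    = sns.saturate("#1f77b4")                 # fully saturated color with the same hue
    lighter = sns.set_hls_values("seagreen", l=0.7)   # adjust only the lightness channel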
diff --git a/typings/seaborn/widgets.pyi b/typings/seaborn/widgets.pyi
deleted file mode 100644
index 122da5b..0000000
--- a/typings/seaborn/widgets.pyi
+++ /dev/null
@@ -1,165 +0,0 @@
-"""
-This type stub file was generated by pyright.
-"""
-
-__all__ = ["choose_colorbrewer_palette", "choose_cubehelix_palette", "choose_dark_palette", "choose_light_palette", "choose_diverging_palette"]
-def choose_colorbrewer_palette(data_type, as_cmap=...): # -> LinearSegmentedColormap | list[Unknown]:
- """Select a palette from the ColorBrewer set.
-
- These palettes are built into matplotlib and can be used by name in
- many seaborn functions, or by passing the object returned by this function.
-
- Parameters
- ----------
- data_type : {'sequential', 'diverging', 'qualitative'}
- This describes the kind of data you want to visualize. See the seaborn
- color palette docs for more information about how to choose this value.
- Note that you can pass substrings (e.g. 'q' for 'qualitative').
-
- as_cmap : bool
- If True, the return value is a matplotlib colormap rather than a
- list of discrete colors.
-
- Returns
- -------
- pal or cmap : list of colors or matplotlib colormap
- Object that can be passed to plotting functions.
-
- See Also
- --------
- dark_palette : Create a sequential palette with dark low values.
- light_palette : Create a sequential palette with bright low values.
- diverging_palette : Create a diverging palette from selected colors.
- cubehelix_palette : Create a sequential palette or colormap using the
- cubehelix system.
-
-
- """
- ...
-
-def choose_dark_palette(input=..., as_cmap=...): # -> LinearSegmentedColormap | list[Unknown]:
- """Launch an interactive widget to create a dark sequential palette.
-
- This corresponds with the :func:`dark_palette` function. This kind
- of palette is good for data that range between relatively uninteresting
- low values and interesting high values.
-
- Requires IPython 2+ and must be used in the notebook.
-
- Parameters
- ----------
- input : {'husl', 'hls', 'rgb'}
- Color space for defining the seed value. Note that the default is
- different than the default input for :func:`dark_palette`.
- as_cmap : bool
- If True, the return value is a matplotlib colormap rather than a
- list of discrete colors.
-
- Returns
- -------
- pal or cmap : list of colors or matplotlib colormap
- Object that can be passed to plotting functions.
-
- See Also
- --------
- dark_palette : Create a sequential palette with dark low values.
- light_palette : Create a sequential palette with bright low values.
- cubehelix_palette : Create a sequential palette or colormap using the
- cubehelix system.
-
- """
- ...
-
-def choose_light_palette(input=..., as_cmap=...): # -> LinearSegmentedColormap | list[Unknown]:
- """Launch an interactive widget to create a light sequential palette.
-
- This corresponds with the :func:`light_palette` function. This kind
- of palette is good for data that range between relatively uninteresting
- low values and interesting high values.
-
- Requires IPython 2+ and must be used in the notebook.
-
- Parameters
- ----------
- input : {'husl', 'hls', 'rgb'}
- Color space for defining the seed value. Note that the default is
- different than the default input for :func:`light_palette`.
- as_cmap : bool
- If True, the return value is a matplotlib colormap rather than a
- list of discrete colors.
-
- Returns
- -------
- pal or cmap : list of colors or matplotlib colormap
- Object that can be passed to plotting functions.
-
- See Also
- --------
- light_palette : Create a sequential palette with bright low values.
- dark_palette : Create a sequential palette with dark low values.
- cubehelix_palette : Create a sequential palette or colormap using the
- cubehelix system.
-
- """
- ...
-
-def choose_diverging_palette(as_cmap=...): # -> LinearSegmentedColormap | list[Unknown]:
- """Launch an interactive widget to choose a diverging color palette.
-
- This corresponds with the :func:`diverging_palette` function. This kind
- of palette is good for data that range between interesting low values
- and interesting high values with a meaningful midpoint. (For example,
- change scores relative to some baseline value).
-
- Requires IPython 2+ and must be used in the notebook.
-
- Parameters
- ----------
- as_cmap : bool
- If True, the return value is a matplotlib colormap rather than a
- list of discrete colors.
-
- Returns
- -------
- pal or cmap : list of colors or matplotlib colormap
- Object that can be passed to plotting functions.
-
- See Also
- --------
- diverging_palette : Create a diverging color palette or colormap.
- choose_colorbrewer_palette : Interactively choose palettes from the
- colorbrewer set, including diverging palettes.
-
- """
- ...
-
-def choose_cubehelix_palette(as_cmap=...): # -> LinearSegmentedColormap | list[Unknown]:
- """Launch an interactive widget to create a sequential cubehelix palette.
-
- This corresponds with the :func:`cubehelix_palette` function. This kind
- of palette is good for data that range between relatively uninteresting
- low values and interesting high values. The cubehelix system allows the
- palette to have more hue variance across the range, which can be helpful
- for distinguishing a wider range of values.
-
- Requires IPython 2+ and must be used in the notebook.
-
- Parameters
- ----------
- as_cmap : bool
- If True, the return value is a matplotlib colormap rather than a
- list of discrete colors.
-
- Returns
- -------
- pal or cmap : list of colors or matplotlib colormap
- Object that can be passed to plotting functions.
-
- See Also
- --------
- cubehelix_palette : Create a sequential palette or colormap using the
- cubehelix system.
-
- """
- ...
-
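The widget helpers stubbed above are interactive and only make sense inside a Jupyter notebook with ipywidgets installed, so treat the following as an assumption-laden sketch rather than something runnable from a plain script:

    import seaborn as sns

    pal  = sns.choose_colorbrewer_palette("sequential")   # list of colors (or a colormap with as_cmap=True)
    cmap = sns.choose_cubehelix_palette(as_cmap=True)     # LinearSegmentedColormap
    div  = sns.choose_diverging_palette()                 # diverging palette chosen via sliders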