mirror of https://github.com/explosion/spaCy.git
synced 2025-01-24 16:24:16 +03:00
Merge branch 'develop' of https://github.com/explosion/spaCy into develop
This commit is contained in:
commit af6cbb29e8
@@ -1,11 +0,0 @@
steps:
  -
    command: "fab env clean make test sdist"
    label: ":dizzy: :python:"
    artifact_paths: "dist/*.tar.gz"
  - wait
  - trigger: "spacy-sdist-against-models"
    label: ":dizzy: :hammer:"
    build:
      env:
        SPACY_VERSION: "{$SPACY_VERSION}"
@@ -1,11 +0,0 @@
steps:
  -
    command: "fab env clean make test wheel"
    label: ":dizzy: :python:"
    artifact_paths: "dist/*.whl"
  - wait
  - trigger: "spacy-train-from-wheel"
    label: ":dizzy: :train:"
    build:
      env:
        SPACY_VERSION: "{$SPACY_VERSION}"
fabfile.py (vendored, 149 lines)
@@ -1,149 +0,0 @@
import contextlib
from pathlib import Path
from fabric.api import local, lcd
from os import path, environ
import shutil
import sys


PWD = path.dirname(__file__)
ENV = environ["VENV_DIR"] if "VENV_DIR" in environ else ".env"
VENV_DIR = Path(PWD) / ENV


@contextlib.contextmanager
def virtualenv(name, create=False, python="/usr/bin/python3.6"):
    python = Path(python).resolve()
    env_path = VENV_DIR
    if create:
        if env_path.exists():
            shutil.rmtree(str(env_path))
        local("{python} -m venv {env_path}".format(python=python, env_path=VENV_DIR))

    def wrapped_local(cmd, env_vars=(), capture=False, direct=False):
        # Run cmd inside the activated virtualenv. env_vars and direct are
        # accepted for call-site compatibility but unused; capture is
        # forwarded (the original hard-coded capture=False, so callers that
        # asked for the command's output never received it).
        return local(
            "source {}/bin/activate && {}".format(env_path, cmd),
            shell="/bin/bash",
            capture=capture,
        )

    yield wrapped_local


def env(lang="python3.6"):
    if VENV_DIR.exists():
        local("rm -rf {env}".format(env=VENV_DIR))
    if lang.startswith("python3"):
        local("{lang} -m venv {env}".format(lang=lang, env=VENV_DIR))
    else:
        local("{lang} -m pip install virtualenv --no-cache-dir".format(lang=lang))
        local(
            "{lang} -m virtualenv {env} --no-cache-dir".format(lang=lang, env=VENV_DIR)
        )
    with virtualenv(VENV_DIR) as venv_local:
        print(venv_local("python --version", capture=True))
        venv_local("pip install --upgrade setuptools --no-cache-dir")
        venv_local("pip install pytest --no-cache-dir")
        venv_local("pip install wheel --no-cache-dir")
        venv_local("pip install -r requirements.txt --no-cache-dir")
        venv_local("pip install pex --no-cache-dir")


def install():
    with virtualenv(VENV_DIR) as venv_local:
        venv_local("pip install dist/*.tar.gz")


def make():
    with lcd(path.dirname(__file__)):
        local(
            "export PYTHONPATH=`pwd` && source .env/bin/activate && python setup.py build_ext --inplace",
            shell="/bin/bash",
        )


def sdist():
    with virtualenv(VENV_DIR) as venv_local:
        with lcd(path.dirname(__file__)):
            venv_local("python -m pip install -U setuptools srsly")
            venv_local("python setup.py sdist")


def wheel():
    with virtualenv(VENV_DIR) as venv_local:
        with lcd(path.dirname(__file__)):
            venv_local("python setup.py bdist_wheel")


def pex():
    with virtualenv(VENV_DIR) as venv_local:
        with lcd(path.dirname(__file__)):
            sha = local("git rev-parse --short HEAD", capture=True)
            venv_local(f"pex dist/*.whl -e spacy -o dist/spacy-{sha}.pex", direct=True)


def clean():
    with lcd(path.dirname(__file__)):
        local("rm -f dist/*.whl")
        local("rm -f dist/*.pex")
        with virtualenv(VENV_DIR) as venv_local:
            venv_local("python setup.py clean --all")


def test():
    with virtualenv(VENV_DIR) as venv_local:
        with lcd(path.dirname(__file__)):
            venv_local("pytest -x spacy/tests")


def train():
    args = environ.get("SPACY_TRAIN_ARGS", "")
    with virtualenv(VENV_DIR) as venv_local:
        venv_local("spacy train {args}".format(args=args))


def conll17(treebank_dir, experiment_dir, vectors_dir, config, corpus=""):
    is_not_clean = local("git status --porcelain", capture=True)
    if is_not_clean:
        print("Repository is not clean")
        print(is_not_clean)
        sys.exit(1)
    git_sha = local("git rev-parse --short HEAD", capture=True)
    config_checksum = local("sha256sum {config}".format(config=config), capture=True)
    experiment_dir = Path(experiment_dir) / "{}--{}".format(
        config_checksum[:6], git_sha
    )
    if not experiment_dir.exists():
        experiment_dir.mkdir()
    test_data_dir = Path(treebank_dir) / "ud-test-v2.0-conll2017"
    assert test_data_dir.exists()
    assert test_data_dir.is_dir()
    if corpus:
        corpora = [corpus]
    else:
        corpora = ["UD_English", "UD_Chinese", "UD_Japanese", "UD_Vietnamese"]

    local(
        "cp {config} {experiment_dir}/config.json".format(
            config=config, experiment_dir=experiment_dir
        )
    )
    with virtualenv(VENV_DIR) as venv_local:
        for corpus in corpora:
            venv_local(
                "spacy ud-train {treebank_dir} {experiment_dir} {config} {corpus} -v {vectors_dir}".format(
                    treebank_dir=treebank_dir,
                    experiment_dir=experiment_dir,
                    config=config,
                    corpus=corpus,
                    vectors_dir=vectors_dir,
                )
            )
            venv_local(
                "spacy ud-run-test {test_data_dir} {experiment_dir} {corpus}".format(
                    test_data_dir=test_data_dir,
                    experiment_dir=experiment_dir,
                    config=config,
                    corpus=corpus,
                )
            )
@@ -1,259 +0,0 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2013 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the product nor the names of its contributors may
//    be used to endorse or promote products derived from this software
//    without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]

#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_

#if _MSC_VER > 1000
#pragma once
#endif

#if _MSC_VER >= 1600 // [
#include <stdint.h>
#else // ] _MSC_VER >= 1600 [

#include <limits.h>

// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
#  include <wchar.h>
#ifdef __cplusplus
}
#endif

// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
#     define _W64 __w64
#  else
#     define _W64
#  endif
#endif


// 7.18.1 Integer types

// 7.18.1.1 Exact-width integer types

// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
   typedef signed char       int8_t;
   typedef signed short      int16_t;
   typedef signed int        int32_t;
   typedef unsigned char     uint8_t;
   typedef unsigned short    uint16_t;
   typedef unsigned int      uint32_t;
#else
   typedef signed __int8     int8_t;
   typedef signed __int16    int16_t;
   typedef signed __int32    int32_t;
   typedef unsigned __int8   uint8_t;
   typedef unsigned __int16  uint16_t;
   typedef unsigned __int32  uint32_t;
#endif
typedef signed __int64       int64_t;
typedef unsigned __int64     uint64_t;


// 7.18.1.2 Minimum-width integer types
typedef int8_t    int_least8_t;
typedef int16_t   int_least16_t;
typedef int32_t   int_least32_t;
typedef int64_t   int_least64_t;
typedef uint8_t   uint_least8_t;
typedef uint16_t  uint_least16_t;
typedef uint32_t  uint_least32_t;
typedef uint64_t  uint_least64_t;

// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t    int_fast8_t;
typedef int16_t   int_fast16_t;
typedef int32_t   int_fast32_t;
typedef int64_t   int_fast64_t;
typedef uint8_t   uint_fast8_t;
typedef uint16_t  uint_fast16_t;
typedef uint32_t  uint_fast32_t;
typedef uint64_t  uint_fast64_t;

// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
   typedef signed __int64    intptr_t;
   typedef unsigned __int64  uintptr_t;
#else // _WIN64 ][
   typedef _W64 signed int   intptr_t;
   typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]

// 7.18.1.5 Greatest-width integer types
typedef int64_t   intmax_t;
typedef uint64_t  uintmax_t;


// 7.18.2 Limits of specified-width integer types

#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259

// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN     ((int8_t)_I8_MIN)
#define INT8_MAX     _I8_MAX
#define INT16_MIN    ((int16_t)_I16_MIN)
#define INT16_MAX    _I16_MAX
#define INT32_MIN    ((int32_t)_I32_MIN)
#define INT32_MAX    _I32_MAX
#define INT64_MIN    ((int64_t)_I64_MIN)
#define INT64_MAX    _I64_MAX
#define UINT8_MAX    _UI8_MAX
#define UINT16_MAX   _UI16_MAX
#define UINT32_MAX   _UI32_MAX
#define UINT64_MAX   _UI64_MAX

// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN    INT8_MIN
#define INT_LEAST8_MAX    INT8_MAX
#define INT_LEAST16_MIN   INT16_MIN
#define INT_LEAST16_MAX   INT16_MAX
#define INT_LEAST32_MIN   INT32_MIN
#define INT_LEAST32_MAX   INT32_MAX
#define INT_LEAST64_MIN   INT64_MIN
#define INT_LEAST64_MAX   INT64_MAX
#define UINT_LEAST8_MAX   UINT8_MAX
#define UINT_LEAST16_MAX  UINT16_MAX
#define UINT_LEAST32_MAX  UINT32_MAX
#define UINT_LEAST64_MAX  UINT64_MAX

// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN    INT8_MIN
#define INT_FAST8_MAX    INT8_MAX
#define INT_FAST16_MIN   INT16_MIN
#define INT_FAST16_MAX   INT16_MAX
#define INT_FAST32_MIN   INT32_MIN
#define INT_FAST32_MAX   INT32_MAX
#define INT_FAST64_MIN   INT64_MIN
#define INT_FAST64_MAX   INT64_MAX
#define UINT_FAST8_MAX   UINT8_MAX
#define UINT_FAST16_MAX  UINT16_MAX
#define UINT_FAST32_MAX  UINT32_MAX
#define UINT_FAST64_MAX  UINT64_MAX

// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
#  define INTPTR_MIN   INT64_MIN
#  define INTPTR_MAX   INT64_MAX
#  define UINTPTR_MAX  UINT64_MAX
#else // _WIN64 ][
#  define INTPTR_MIN   INT32_MIN
#  define INTPTR_MAX   INT32_MAX
#  define UINTPTR_MAX  UINT32_MAX
#endif // _WIN64 ]

// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN   INT64_MIN
#define INTMAX_MAX   INT64_MAX
#define UINTMAX_MAX  UINT64_MAX

// 7.18.3 Limits of other integer types

#ifdef _WIN64 // [
#  define PTRDIFF_MIN  _I64_MIN
#  define PTRDIFF_MAX  _I64_MAX
#else // _WIN64 ][
#  define PTRDIFF_MIN  _I32_MIN
#  define PTRDIFF_MAX  _I32_MAX
#endif // _WIN64 ]

#define SIG_ATOMIC_MIN  INT_MIN
#define SIG_ATOMIC_MAX  INT_MAX

#ifndef SIZE_MAX // [
#  ifdef _WIN64 // [
#     define SIZE_MAX  _UI64_MAX
#  else // _WIN64 ][
#     define SIZE_MAX  _UI32_MAX
#  endif // _WIN64 ]
#endif // SIZE_MAX ]

// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
#  define WCHAR_MIN  0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
#  define WCHAR_MAX  _UI16_MAX
#endif // WCHAR_MAX ]

#define WINT_MIN  0
#define WINT_MAX  _UI16_MAX

#endif // __STDC_LIMIT_MACROS ]


// 7.18.4 Limits of other integer types

#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260

// 7.18.4.1 Macros for minimum-width integer constants

#define INT8_C(val)   val##i8
#define INT16_C(val)  val##i16
#define INT32_C(val)  val##i32
#define INT64_C(val)  val##i64

#define UINT8_C(val)   val##ui8
#define UINT16_C(val)  val##ui16
#define UINT32_C(val)  val##ui32
#define UINT64_C(val)  val##ui64

// 7.18.4.2 Macros for greatest-width integer constants
// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
// Check out Issue 9 for the details.
#ifndef INTMAX_C // [
#  define INTMAX_C   INT64_C
#endif // INTMAX_C ]
#ifndef UINTMAX_C // [
#  define UINTMAX_C  UINT64_C
#endif // UINTMAX_C ]

#endif // __STDC_CONSTANT_MACROS ]

#endif // _MSC_VER >= 1600 ]

#endif // _MSC_STDINT_H_ ]
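A small sketch of what this shim buys on pre-2010 MSVC: the exact-width typedefs, the limit macros, and the INT64_C-style constant suffixes, matching what a C99 <stdint.h> provides. The include path of the vendored header is an assumption here, and <limits.h> supplies CHAR_BIT.

#include <stdio.h>
#include <limits.h>
#include "stdint.h"   /* the vendored header above, on MSVC < 1600 */

int main(void) {
    int64_t big = INT64_C(9000000000);     /* expands to 9000000000i64 on MSVC */
    uint8_t small = UINT8_MAX;             /* 255 via _UI8_MAX */
    printf("big=%lld small=%u ptr-bits=%d\n",
           (long long)big, (unsigned)small,
           (int)(sizeof(intptr_t) * CHAR_BIT));
    return 0;
}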
@@ -1,22 +0,0 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

#ifndef _MURMURHASH2_H_
#define _MURMURHASH2_H_

#include <stdint.h>

//-----------------------------------------------------------------------------

uint32_t MurmurHash2        ( const void * key, int len, uint32_t seed );
uint64_t MurmurHash64A      ( const void * key, int len, uint64_t seed );
uint64_t MurmurHash64B      ( const void * key, int len, uint64_t seed );
uint32_t MurmurHash2A       ( const void * key, int len, uint32_t seed );
uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed );
uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed );

//-----------------------------------------------------------------------------

#endif // _MURMURHASH2_H_
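The header only declares the entry points; a minimal usage sketch, assuming the corresponding MurmurHash2 implementation file is compiled and linked in:

#include <stdio.h>
#include <string.h>
#include "MurmurHash2.h"

int main(void) {
    const char *key = "spacy";
    /* Hash the key bytes with an arbitrary seed; the same key and seed
       always produce the same hash value. */
    uint32_t h32 = MurmurHash2(key, (int)strlen(key), 0xdeadbeef);
    uint64_t h64 = MurmurHash64A(key, (int)strlen(key), 0xdeadbeef);
    printf("32-bit: %08x\n64-bit: %016llx\n", h32, (unsigned long long)h64);
    return 0;
}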
@@ -1,28 +0,0 @@
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

#ifndef _MURMURHASH3_H_
#define _MURMURHASH3_H_

#include <stdint.h>

//-----------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif


void MurmurHash3_x86_32  ( const void * key, int len, uint32_t seed, void * out );

void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );

void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );

#ifdef __cplusplus
}
#endif

//-----------------------------------------------------------------------------

#endif // _MURMURHASH3_H_
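Unlike MurmurHash2, the MurmurHash3 functions return their result through the out pointer, which must be large enough for the variant used (4 bytes for x86_32, 16 bytes for the 128-bit variants). A sketch, again assuming the implementation is linked in:

#include <stdio.h>
#include <string.h>
#include "MurmurHash3.h"

int main(void) {
    const char *key = "spacy";
    uint32_t out32;
    uint8_t out128[16];
    MurmurHash3_x86_32(key, (int)strlen(key), 42, &out32);    /* 4-byte result */
    MurmurHash3_x64_128(key, (int)strlen(key), 42, out128);   /* 16-byte result */
    printf("x86_32: %08x\n", out32);
    return 0;
}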
File diff suppressed because it is too large
@@ -1,323 +0,0 @@

#ifdef _UMATHMODULE

#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
#else
NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
#endif

#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
#else
NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
#endif

NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \
       (PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int);
NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \
       (PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *);
NPY_NO_EXPORT int PyUFunc_GenericFunction \
       (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **);
NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_d_d \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_f_f \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_g_g \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_F_F \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_D_D \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_G_G \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_O_O \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_ff_f \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_dd_d \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_gg_g \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_DD_D \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_FF_F \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_GG_G \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_OO_O \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_O_O_method \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_OO_O_method \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_On_Om \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT int PyUFunc_GetPyValues \
       (char *, int *, int *, PyObject **);
NPY_NO_EXPORT int PyUFunc_checkfperr \
       (int, PyObject *, int *);
NPY_NO_EXPORT void PyUFunc_clearfperr \
       (void);
NPY_NO_EXPORT int PyUFunc_getfperr \
       (void);
NPY_NO_EXPORT int PyUFunc_handlefperr \
       (int, PyObject *, int, int *);
NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \
       (PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *);
NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \
       (PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int, const char *);
NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \
       (void **, size_t);
NPY_NO_EXPORT void PyUFunc_e_e \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_ee_e \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \
       (char **, npy_intp *, npy_intp *, void *);
NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \
       (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **);
NPY_NO_EXPORT int PyUFunc_ValidateCasting \
       (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **);

#else

#if defined(PY_UFUNC_UNIQUE_SYMBOL)
#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
#endif

#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
extern void **PyUFunc_API;
#else
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
void **PyUFunc_API;
#else
static void **PyUFunc_API=NULL;
#endif
#endif

#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
#define PyUFunc_FromFuncAndData \
        (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int)) \
         PyUFunc_API[1])
#define PyUFunc_RegisterLoopForType \
        (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *)) \
         PyUFunc_API[2])
#define PyUFunc_GenericFunction \
        (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \
         PyUFunc_API[3])
#define PyUFunc_f_f_As_d_d \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[4])
#define PyUFunc_d_d \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[5])
#define PyUFunc_f_f \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[6])
#define PyUFunc_g_g \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[7])
#define PyUFunc_F_F_As_D_D \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[8])
#define PyUFunc_F_F \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[9])
#define PyUFunc_D_D \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[10])
#define PyUFunc_G_G \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[11])
#define PyUFunc_O_O \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[12])
#define PyUFunc_ff_f_As_dd_d \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[13])
#define PyUFunc_ff_f \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[14])
#define PyUFunc_dd_d \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[15])
#define PyUFunc_gg_g \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[16])
#define PyUFunc_FF_F_As_DD_D \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[17])
#define PyUFunc_DD_D \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[18])
#define PyUFunc_FF_F \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[19])
#define PyUFunc_GG_G \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[20])
#define PyUFunc_OO_O \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[21])
#define PyUFunc_O_O_method \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[22])
#define PyUFunc_OO_O_method \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[23])
#define PyUFunc_On_Om \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[24])
#define PyUFunc_GetPyValues \
        (*(int (*)(char *, int *, int *, PyObject **)) \
         PyUFunc_API[25])
#define PyUFunc_checkfperr \
        (*(int (*)(int, PyObject *, int *)) \
         PyUFunc_API[26])
#define PyUFunc_clearfperr \
        (*(void (*)(void)) \
         PyUFunc_API[27])
#define PyUFunc_getfperr \
        (*(int (*)(void)) \
         PyUFunc_API[28])
#define PyUFunc_handlefperr \
        (*(int (*)(int, PyObject *, int, int *)) \
         PyUFunc_API[29])
#define PyUFunc_ReplaceLoopBySignature \
        (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)) \
         PyUFunc_API[30])
#define PyUFunc_FromFuncAndDataAndSignature \
        (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int, const char *)) \
         PyUFunc_API[31])
#define PyUFunc_SetUsesArraysAsData \
        (*(int (*)(void **, size_t)) \
         PyUFunc_API[32])
#define PyUFunc_e_e \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[33])
#define PyUFunc_e_e_As_f_f \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[34])
#define PyUFunc_e_e_As_d_d \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[35])
#define PyUFunc_ee_e \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[36])
#define PyUFunc_ee_e_As_ff_f \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[37])
#define PyUFunc_ee_e_As_dd_d \
        (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
         PyUFunc_API[38])
#define PyUFunc_DefaultTypeResolver \
        (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \
         PyUFunc_API[39])
#define PyUFunc_ValidateCasting \
        (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \
         PyUFunc_API[40])

static int
_import_umath(void)
{
  PyObject *numpy = PyImport_ImportModule("numpy.core.umath");
  PyObject *c_api = NULL;

  if (numpy == NULL) {
      PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import");
      return -1;
  }
  c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
  Py_DECREF(numpy);
  if (c_api == NULL) {
      PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
      return -1;
  }

#if PY_VERSION_HEX >= 0x03000000
  if (!PyCapsule_CheckExact(c_api)) {
      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
      Py_DECREF(c_api);
      return -1;
  }
  PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
#else
  if (!PyCObject_Check(c_api)) {
      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object");
      Py_DECREF(c_api);
      return -1;
  }
  PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api);
#endif
  Py_DECREF(c_api);
  if (PyUFunc_API == NULL) {
      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
      return -1;
  }
  return 0;
}

#if PY_VERSION_HEX >= 0x03000000
#define NUMPY_IMPORT_UMATH_RETVAL NULL
#else
#define NUMPY_IMPORT_UMATH_RETVAL
#endif

#define import_umath() \
    do {\
        UFUNC_NOFPE\
        if (_import_umath() < 0) {\
            PyErr_Print();\
            PyErr_SetString(PyExc_ImportError,\
                    "numpy.core.umath failed to import");\
            return NUMPY_IMPORT_UMATH_RETVAL;\
        }\
    } while(0)

#define import_umath1(ret) \
    do {\
        UFUNC_NOFPE\
        if (_import_umath() < 0) {\
            PyErr_Print();\
            PyErr_SetString(PyExc_ImportError,\
                    "numpy.core.umath failed to import");\
            return ret;\
        }\
    } while(0)

#define import_umath2(ret, msg) \
    do {\
        UFUNC_NOFPE\
        if (_import_umath() < 0) {\
            PyErr_Print();\
            PyErr_SetString(PyExc_ImportError, msg);\
            return ret;\
        }\
    } while(0)

#define import_ufunc() \
    do {\
        UFUNC_NOFPE\
        if (_import_umath() < 0) {\
            PyErr_Print();\
            PyErr_SetString(PyExc_ImportError,\
                    "numpy.core.umath failed to import");\
        }\
    } while(0)

#endif
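A minimal sketch of how a client extension consumes this table (the module and function names here are hypothetical): import_umath() is called once in the module init, which fills PyUFunc_API so that the PyUFunc_* macros above resolve through it. Python 3 init shown, where the macro returns NULL on failure.

#include <Python.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"

static struct PyModuleDef examplemodule = {
    PyModuleDef_HEAD_INIT, "example", NULL, -1, NULL
};

PyMODINIT_FUNC
PyInit_example(void)
{
    PyObject *m = PyModule_Create(&examplemodule);
    if (m == NULL) {
        return NULL;
    }
    import_array();  /* fills the core multiarray API table */
    import_umath();  /* fills PyUFunc_API; returns NULL from here on failure */
    return m;
}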
@@ -1,90 +0,0 @@
#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP
#error You should not include this header directly
#endif
/*
 * Private API (here for inline)
 */
static NPY_INLINE int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);

/*
 * Update to next item of the iterator
 *
 * Note: this simply increment the coordinates vector, last dimension
 * incremented first , i.e, for dimension 3
 * ...
 * -1, -1, -1
 * -1, -1,  0
 * -1, -1,  1
 *  ....
 * -1,  0, -1
 * -1,  0,  0
 *  ....
 * 0, -1, -1
 * 0, -1,  0
 *  ....
 */
#define _UPDATE_COORD_ITER(c) \
    wb = iter->coordinates[c] < iter->bounds[c][1]; \
    if (wb) { \
        iter->coordinates[c] += 1; \
        return 0; \
    } \
    else { \
        iter->coordinates[c] = iter->bounds[c][0]; \
    }

static NPY_INLINE int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
{
    npy_intp i, wb;

    for (i = iter->nd - 1; i >= 0; --i) {
        _UPDATE_COORD_ITER(i)
    }

    return 0;
}

/*
 * Version optimized for 2d arrays, manual loop unrolling
 */
static NPY_INLINE int
_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
{
    npy_intp wb;

    _UPDATE_COORD_ITER(1)
    _UPDATE_COORD_ITER(0)

    return 0;
}
#undef _UPDATE_COORD_ITER

/*
 * Advance to the next neighbour
 */
static NPY_INLINE int
PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
{
    _PyArrayNeighborhoodIter_IncrCoord (iter);
    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);

    return 0;
}

/*
 * Reset functions
 */
static NPY_INLINE int
PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
{
    npy_intp i;

    for (i = 0; i < iter->nd; ++i) {
        iter->coordinates[i] = iter->bounds[i][0];
    }
    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);

    return 0;
}
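A hedged sketch of the intended iteration pattern for these inline functions: reset the iterator, then visit every point in the neighborhood through dataptr. The iterator is assumed to have been constructed already (e.g. with PyArray_NeighborhoodIterNew) over a double array; error handling is omitted.

#include <numpy/arrayobject.h>

/* Sum all points in an already-constructed neighborhood iterator. */
static double
sum_neighborhood(PyArrayNeighborhoodIterObject *iter)
{
    npy_intp n;
    double total = 0.0;

    PyArrayNeighborhoodIter_Reset(iter);
    for (n = 0; n < iter->size; n++) {
        /* dataptr points at the current neighbor; out-of-bounds points are
           filled according to the mode the iterator was created with. */
        total += *(double *)iter->dataptr;
        PyArrayNeighborhoodIter_Next(iter);
    }
    return total;
}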
@@ -1,29 +0,0 @@
#define NPY_SIZEOF_SHORT SIZEOF_SHORT
#define NPY_SIZEOF_INT SIZEOF_INT
#define NPY_SIZEOF_LONG SIZEOF_LONG
#define NPY_SIZEOF_FLOAT 4
#define NPY_SIZEOF_COMPLEX_FLOAT 8
#define NPY_SIZEOF_DOUBLE 8
#define NPY_SIZEOF_COMPLEX_DOUBLE 16
#define NPY_SIZEOF_LONGDOUBLE 16
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
#define NPY_SIZEOF_PY_INTPTR_T 8
#define NPY_SIZEOF_PY_LONG_LONG 8
#define NPY_SIZEOF_LONGLONG 8
#define NPY_NO_SMP 0
#define NPY_HAVE_DECL_ISNAN
#define NPY_HAVE_DECL_ISINF
#define NPY_HAVE_DECL_ISFINITE
#define NPY_HAVE_DECL_SIGNBIT
#define NPY_USE_C99_COMPLEX 1
#define NPY_HAVE_COMPLEX_DOUBLE 1
#define NPY_HAVE_COMPLEX_FLOAT 1
#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
#define NPY_USE_C99_FORMATS 1
#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#define NPY_ABI_VERSION 0x01000009
#define NPY_API_VERSION 0x00000007

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
@@ -1,22 +0,0 @@

/* This expects the following variables to be defined (besides
   the usual ones from pyconfig.h

   SIZEOF_LONG_DOUBLE -- sizeof(long double) or sizeof(double) if no
                         long double is present on platform.
   CHAR_BIT -- number of bits in a char (usually 8)
               (should be in limits.h)

*/

#ifndef Py_ARRAYOBJECT_H
#define Py_ARRAYOBJECT_H

#include "ndarrayobject.h"
#include "npy_interrupt.h"

#ifdef NPY_NO_PREFIX
#include "noprefix.h"
#endif

#endif
@@ -1,175 +0,0 @@
#ifndef _NPY_ARRAYSCALARS_H_
#define _NPY_ARRAYSCALARS_H_

#ifndef _MULTIARRAYMODULE
typedef struct {
        PyObject_HEAD
        npy_bool obval;
} PyBoolScalarObject;
#endif


typedef struct {
        PyObject_HEAD
        signed char obval;
} PyByteScalarObject;


typedef struct {
        PyObject_HEAD
        short obval;
} PyShortScalarObject;


typedef struct {
        PyObject_HEAD
        int obval;
} PyIntScalarObject;


typedef struct {
        PyObject_HEAD
        long obval;
} PyLongScalarObject;


typedef struct {
        PyObject_HEAD
        npy_longlong obval;
} PyLongLongScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned char obval;
} PyUByteScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned short obval;
} PyUShortScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned int obval;
} PyUIntScalarObject;


typedef struct {
        PyObject_HEAD
        unsigned long obval;
} PyULongScalarObject;


typedef struct {
        PyObject_HEAD
        npy_ulonglong obval;
} PyULongLongScalarObject;


typedef struct {
        PyObject_HEAD
        npy_half obval;
} PyHalfScalarObject;


typedef struct {
        PyObject_HEAD
        float obval;
} PyFloatScalarObject;


typedef struct {
        PyObject_HEAD
        double obval;
} PyDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        npy_longdouble obval;
} PyLongDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        npy_cfloat obval;
} PyCFloatScalarObject;


typedef struct {
        PyObject_HEAD
        npy_cdouble obval;
} PyCDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        npy_clongdouble obval;
} PyCLongDoubleScalarObject;


typedef struct {
        PyObject_HEAD
        PyObject * obval;
} PyObjectScalarObject;

typedef struct {
        PyObject_HEAD
        npy_datetime obval;
        PyArray_DatetimeMetaData obmeta;
} PyDatetimeScalarObject;

typedef struct {
        PyObject_HEAD
        npy_timedelta obval;
        PyArray_DatetimeMetaData obmeta;
} PyTimedeltaScalarObject;


typedef struct {
        PyObject_HEAD
        char obval;
} PyScalarObject;

#define PyStringScalarObject PyStringObject
#define PyUnicodeScalarObject PyUnicodeObject

typedef struct {
        PyObject_VAR_HEAD
        char *obval;
        PyArray_Descr *descr;
        int flags;
        PyObject *base;
} PyVoidScalarObject;

/* Macros
     Py<Cls><bitsize>ScalarObject
     Py<Cls><bitsize>ArrType_Type
   are defined in ndarrayobject.h
*/

#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))
#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
#define PyArrayScalar_FromLong(i) \
        ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
        return Py_INCREF(PyArrayScalar_FromLong(i)), \
                PyArrayScalar_FromLong(i)
#define PyArrayScalar_RETURN_FALSE \
        return Py_INCREF(PyArrayScalar_False), \
                PyArrayScalar_False
#define PyArrayScalar_RETURN_TRUE \
        return Py_INCREF(PyArrayScalar_True), \
                PyArrayScalar_True

#define PyArrayScalar_New(cls) \
        Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)
#define PyArrayScalar_VAL(obj, cls) \
        ((Py##cls##ScalarObject *)obj)->obval
#define PyArrayScalar_ASSIGN(obj, cls, val) \
        PyArrayScalar_VAL(obj, cls) = val

#endif
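A sketch of the macro protocol at the bottom of this header: allocate a float64 array scalar, store a C double into its obval slot, and hand it back. This assumes import_array() has already run in the module, since the Py*ArrType_Type objects resolve through the C-API table in client extensions.

#include <numpy/arrayobject.h>
#include <numpy/arrayscalars.h>

static PyObject *
make_double_scalar(double value)
{
    /* PyArrayScalar_New(Double) allocates a PyDoubleScalarObject. */
    PyObject *scalar = PyArrayScalar_New(Double);
    if (scalar == NULL) {
        return NULL;
    }
    PyArrayScalar_ASSIGN(scalar, Double, value);  /* scalar->obval = value */
    /* PyArrayScalar_VAL(scalar, Double) would read it back as a C double. */
    return scalar;
}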
@@ -1,69 +0,0 @@
#ifndef __NPY_HALFFLOAT_H__
#define __NPY_HALFFLOAT_H__

#include <Python.h>
#include <numpy/npy_math.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Half-precision routines
 */

/* Conversions */
float npy_half_to_float(npy_half h);
double npy_half_to_double(npy_half h);
npy_half npy_float_to_half(float f);
npy_half npy_double_to_half(double d);
/* Comparisons */
int npy_half_eq(npy_half h1, npy_half h2);
int npy_half_ne(npy_half h1, npy_half h2);
int npy_half_le(npy_half h1, npy_half h2);
int npy_half_lt(npy_half h1, npy_half h2);
int npy_half_ge(npy_half h1, npy_half h2);
int npy_half_gt(npy_half h1, npy_half h2);
/* faster *_nonan variants for when you know h1 and h2 are not NaN */
int npy_half_eq_nonan(npy_half h1, npy_half h2);
int npy_half_lt_nonan(npy_half h1, npy_half h2);
int npy_half_le_nonan(npy_half h1, npy_half h2);
/* Miscellaneous functions */
int npy_half_iszero(npy_half h);
int npy_half_isnan(npy_half h);
int npy_half_isinf(npy_half h);
int npy_half_isfinite(npy_half h);
int npy_half_signbit(npy_half h);
npy_half npy_half_copysign(npy_half x, npy_half y);
npy_half npy_half_spacing(npy_half h);
npy_half npy_half_nextafter(npy_half x, npy_half y);

/*
 * Half-precision constants
 */

#define NPY_HALF_ZERO (0x0000u)
#define NPY_HALF_PZERO (0x0000u)
#define NPY_HALF_NZERO (0x8000u)
#define NPY_HALF_ONE (0x3c00u)
#define NPY_HALF_NEGONE (0xbc00u)
#define NPY_HALF_PINF (0x7c00u)
#define NPY_HALF_NINF (0xfc00u)
#define NPY_HALF_NAN (0x7e00u)

#define NPY_MAX_HALF (0x7bffu)

/*
 * Bit-level conversions
 */

npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);

#ifdef __cplusplus
}
#endif

#endif
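A sketch of round-tripping through these routines: npy_half is a 16-bit pattern, so 1/3 loses precision on the way in. This assumes linking against the npymath library that implements the declarations above.

#include <stdio.h>
#include <numpy/halffloat.h>

int main(void) {
    npy_half h = npy_float_to_half(1.0f / 3.0f);
    float back = npy_half_to_float(h);   /* not exactly 1/3 any more */
    printf("restored: %.6f, isnan: %d, signbit: %d\n",
           back, npy_half_isnan(h), npy_half_signbit(h));
    /* The constants are raw bit patterns of the same npy_half type. */
    printf("one == NPY_HALF_ONE: %d\n",
           npy_half_eq(npy_float_to_half(1.0f), (npy_half)NPY_HALF_ONE));
    return 0;
}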
File diff suppressed because it is too large
@@ -1,244 +0,0 @@
/*
 * DON'T INCLUDE THIS DIRECTLY.
 */

#ifndef NPY_NDARRAYOBJECT_H
#define NPY_NDARRAYOBJECT_H
#ifdef __cplusplus
#define CONFUSE_EMACS {
#define CONFUSE_EMACS2 }
extern "C" CONFUSE_EMACS
#undef CONFUSE_EMACS
#undef CONFUSE_EMACS2
/* ... otherwise a semi-smart identer (like emacs) tries to indent
   everything when you're typing */
#endif

#include "ndarraytypes.h"

/* Includes the "function" C-API -- these are all stored in a
   list of pointers --- one for each file
   The two lists are concatenated into one in multiarray.

   They are available as import_array()
*/

#include "__multiarray_api.h"


/* C-API that requries previous API to be defined */

#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type)

#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)

#define PyArray_HasArrayInterfaceType(op, type, context, out) \
        ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \
         (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \
         (((out)=PyArray_FromArrayAttr(op, type, context)) != \
          Py_NotImplemented))

#define PyArray_HasArrayInterface(op, out) \
        PyArray_HasArrayInterfaceType(op, NULL, NULL, out)

#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \
                               (PyArray_NDIM((PyArrayObject *)op) == 0))

#define PyArray_IsScalar(obj, cls) \
        (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))

#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
                                PyArray_IsZeroDim(m))

#define PyArray_IsPythonNumber(obj) \
        (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \
         PyLong_Check(obj) || PyBool_Check(obj))

#define PyArray_IsPythonScalar(obj) \
        (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \
         PyUnicode_Check(obj))

#define PyArray_IsAnyScalar(obj) \
        (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))

#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \
                                     PyArray_CheckScalar(obj))

#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \
                                      || PyLong_Check(obj) \
                                      || PyArray_IsScalar((obj), Integer))


#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \
                                  Py_INCREF(m), (m) : \
                                  (PyArrayObject *)(PyArray_Copy(m)))

#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \
                                  PyArray_CompareLists(PyArray_DIMS(a1), \
                                                       PyArray_DIMS(a2), \
                                                       PyArray_NDIM(a1)))

#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)

#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \
                                                      NULL)

#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
                                PyArray_DescrFromType(type), 0, 0, 0, NULL);

#define PyArray_FROM_OTF(m, type, flags) \
        PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
                        (((flags) & NPY_ARRAY_ENSURECOPY) ? \
                         ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)

#define PyArray_FROMANY(m, type, min, max, flags) \
        PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \
                        (((flags) & NPY_ARRAY_ENSURECOPY) ? \
                         (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)

#define PyArray_ZEROS(m, dims, type, is_f_order) \
        PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)

#define PyArray_EMPTY(m, dims, type, is_f_order) \
        PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)

#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \
                                           PyArray_NBYTES(obj))

#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt)
#define NPY_REFCOUNT PyArray_REFCOUNT
#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE)

#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
                        max_depth, NPY_ARRAY_DEFAULT, NULL)

#define PyArray_EquivArrTypes(a1, a2) \
        PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))

#define PyArray_EquivByteorders(b1, b2) \
        (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))

#define PyArray_SimpleNew(nd, dims, typenum) \
        PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)

#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
        PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \
                    data, 0, NPY_ARRAY_CARRAY, NULL)

#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
        PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \
                             NULL, NULL, 0, NULL)

#define PyArray_ToScalar(data, arr) \
        PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)


/* These might be faster without the dereferencing of obj
   going on inside -- of course an optimizing compiler should
   inline the constants inside a for loop making it a moot point
*/

#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
                                          (i)*PyArray_STRIDES(obj)[0]))

#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
                                             (i)*PyArray_STRIDES(obj)[0] + \
                                             (j)*PyArray_STRIDES(obj)[1]))

#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
                                                (i)*PyArray_STRIDES(obj)[0] + \
                                                (j)*PyArray_STRIDES(obj)[1] + \
                                                (k)*PyArray_STRIDES(obj)[2]))

#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
                                                   (i)*PyArray_STRIDES(obj)[0] + \
                                                   (j)*PyArray_STRIDES(obj)[1] + \
                                                   (k)*PyArray_STRIDES(obj)[2] + \
                                                   (l)*PyArray_STRIDES(obj)[3]))

static NPY_INLINE void
PyArray_XDECREF_ERR(PyArrayObject *arr)
{
    if (arr != NULL) {
        if (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY) {
            PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr);
            PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE);
            PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
        }
        Py_DECREF(arr);
    }
}

#define PyArray_DESCR_REPLACE(descr) do { \
                PyArray_Descr *_new_; \
                _new_ = PyArray_DescrNew(descr); \
                Py_XDECREF(descr); \
                descr = _new_; \
        } while(0)

/* Copy should always return contiguous array */
#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)

#define PyArray_FromObject(op, type, min_depth, max_depth) \
        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
                        max_depth, NPY_ARRAY_BEHAVED | \
                                   NPY_ARRAY_ENSUREARRAY, NULL)

#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
                        max_depth, NPY_ARRAY_DEFAULT | \
                                   NPY_ARRAY_ENSUREARRAY, NULL)

#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
                        max_depth, NPY_ARRAY_ENSURECOPY | \
                                   NPY_ARRAY_DEFAULT | \
                                   NPY_ARRAY_ENSUREARRAY, NULL)

#define PyArray_Cast(mp, type_num) \
        PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)

#define PyArray_Take(ap, items, axis) \
        PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)

#define PyArray_Put(ap, items, values) \
        PyArray_PutTo(ap, items, values, NPY_RAISE)

/* Compatibility with old Numeric stuff -- don't use in new code */

#define PyArray_FromDimsAndData(nd, d, type, data) \
        PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \
                                        data)


/*
   Check to see if this key in the dictionary is the "title"
   entry of the tuple (i.e. a duplicate dictionary entry in the fields
   dict.
*/

#define NPY_TITLE_KEY(key, value) ((PyTuple_GET_SIZE((value))==3) && \
                                   (PyTuple_GET_ITEM((value), 2) == (key)))


/* Define python version independent deprecation macro */

#if PY_VERSION_HEX >= 0x02050000
#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
#else
#define DEPRECATE(msg) PyErr_Warn(PyExc_DeprecationWarning,msg)
#define DEPRECATE_FUTUREWARNING(msg) PyErr_Warn(PyExc_FutureWarning,msg)
#endif


#ifdef __cplusplus
}
#endif


#endif /* NPY_NDARRAYOBJECT_H */
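A sketch of the element-access macros defined above: walk a 2-D array via PyArray_GETPTR2, which applies the per-axis strides so the loop works for non-contiguous arrays too. The array is assumed to be 2-D with NPY_DOUBLE dtype.

#include <numpy/arrayobject.h>

static double
sum2d(PyArrayObject *arr)
{
    npy_intp i, j;
    double total = 0.0;
    for (i = 0; i < PyArray_DIM(arr, 0); i++) {
        for (j = 0; j < PyArray_DIM(arr, 1); j++) {
            /* GETPTR2 = base pointer + i*stride0 + j*stride1 */
            total += *(double *)PyArray_GETPTR2(arr, i, j);
        }
    }
    return total;
}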
File diff suppressed because it is too large
@@ -1,209 +0,0 @@
#ifndef NPY_NOPREFIX_H
#define NPY_NOPREFIX_H

/*
 * You can directly include noprefix.h as a backward
 * compatibility measure
 */
#ifndef NPY_NO_PREFIX
#include "ndarrayobject.h"
#include "npy_interrupt.h"
#endif

#define SIGSETJMP NPY_SIGSETJMP
#define SIGLONGJMP NPY_SIGLONGJMP
#define SIGJMP_BUF NPY_SIGJMP_BUF

#define MAX_DIMS NPY_MAXDIMS

#define longlong npy_longlong
#define ulonglong npy_ulonglong
#define Bool npy_bool
#define longdouble npy_longdouble
#define byte npy_byte

#ifndef _BSD_SOURCE
#define ushort npy_ushort
#define uint npy_uint
#define ulong npy_ulong
#endif

#define ubyte npy_ubyte
#define ushort npy_ushort
#define uint npy_uint
#define ulong npy_ulong
#define cfloat npy_cfloat
#define cdouble npy_cdouble
#define clongdouble npy_clongdouble
#define Int8 npy_int8
#define UInt8 npy_uint8
#define Int16 npy_int16
#define UInt16 npy_uint16
#define Int32 npy_int32
#define UInt32 npy_uint32
#define Int64 npy_int64
#define UInt64 npy_uint64
#define Int128 npy_int128
#define UInt128 npy_uint128
#define Int256 npy_int256
#define UInt256 npy_uint256
#define Float16 npy_float16
#define Complex32 npy_complex32
#define Float32 npy_float32
#define Complex64 npy_complex64
#define Float64 npy_float64
#define Complex128 npy_complex128
#define Float80 npy_float80
#define Complex160 npy_complex160
#define Float96 npy_float96
#define Complex192 npy_complex192
#define Float128 npy_float128
#define Complex256 npy_complex256
#define intp npy_intp
#define uintp npy_uintp
#define datetime npy_datetime
#define timedelta npy_timedelta

#define SIZEOF_INTP NPY_SIZEOF_INTP
#define SIZEOF_UINTP NPY_SIZEOF_UINTP
#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME
#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA

#define LONGLONG_FMT NPY_LONGLONG_FMT
#define ULONGLONG_FMT NPY_ULONGLONG_FMT
#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX
#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX

#define MAX_INT8 127
#define MIN_INT8 -128
#define MAX_UINT8 255
#define MAX_INT16 32767
#define MIN_INT16 -32768
#define MAX_UINT16 65535
#define MAX_INT32 2147483647
#define MIN_INT32 (-MAX_INT32 - 1)
#define MAX_UINT32 4294967295U
#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807)
#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1))
#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615)
#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864)
#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1))
#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1))
#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)

#define MAX_BYTE NPY_MAX_BYTE
#define MIN_BYTE NPY_MIN_BYTE
#define MAX_UBYTE NPY_MAX_UBYTE
#define MAX_SHORT NPY_MAX_SHORT
#define MIN_SHORT NPY_MIN_SHORT
#define MAX_USHORT NPY_MAX_USHORT
#define MAX_INT NPY_MAX_INT
#define MIN_INT NPY_MIN_INT
#define MAX_UINT NPY_MAX_UINT
#define MAX_LONG NPY_MAX_LONG
#define MIN_LONG NPY_MIN_LONG
#define MAX_ULONG NPY_MAX_ULONG
#define MAX_LONGLONG NPY_MAX_LONGLONG
#define MIN_LONGLONG NPY_MIN_LONGLONG
#define MAX_ULONGLONG NPY_MAX_ULONGLONG
#define MIN_DATETIME NPY_MIN_DATETIME
#define MAX_DATETIME NPY_MAX_DATETIME
#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA
#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA

#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE
#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG
#define SIZEOF_HALF NPY_SIZEOF_HALF
#define BITSOF_BOOL NPY_BITSOF_BOOL
#define BITSOF_CHAR NPY_BITSOF_CHAR
#define BITSOF_SHORT NPY_BITSOF_SHORT
#define BITSOF_INT NPY_BITSOF_INT
#define BITSOF_LONG NPY_BITSOF_LONG
#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG
#define BITSOF_HALF NPY_BITSOF_HALF
#define BITSOF_FLOAT NPY_BITSOF_FLOAT
#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE
#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE
#define BITSOF_DATETIME NPY_BITSOF_DATETIME
#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA

#define _pya_malloc PyArray_malloc
#define _pya_free PyArray_free
#define _pya_realloc PyArray_realloc

#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF
#define BEGIN_THREADS NPY_BEGIN_THREADS
#define END_THREADS NPY_END_THREADS
#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF
#define ALLOW_C_API NPY_ALLOW_C_API
#define DISABLE_C_API NPY_DISABLE_C_API

#define PY_FAIL NPY_FAIL
#define PY_SUCCEED NPY_SUCCEED

#ifndef TRUE
#define TRUE NPY_TRUE
#endif

#ifndef FALSE
#define FALSE NPY_FALSE
#endif

#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT

#define CONTIGUOUS NPY_CONTIGUOUS
#define C_CONTIGUOUS NPY_C_CONTIGUOUS
#define FORTRAN NPY_FORTRAN
#define F_CONTIGUOUS NPY_F_CONTIGUOUS
#define OWNDATA NPY_OWNDATA
#define FORCECAST NPY_FORCECAST
#define ENSURECOPY NPY_ENSURECOPY
#define ENSUREARRAY NPY_ENSUREARRAY
#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES
#define ALIGNED NPY_ALIGNED
#define NOTSWAPPED NPY_NOTSWAPPED
#define WRITEABLE NPY_WRITEABLE
#define UPDATEIFCOPY NPY_UPDATEIFCOPY
#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR
#define BEHAVED NPY_BEHAVED
#define BEHAVED_NS NPY_BEHAVED_NS
#define CARRAY NPY_CARRAY
#define CARRAY_RO NPY_CARRAY_RO
#define FARRAY NPY_FARRAY
#define FARRAY_RO NPY_FARRAY_RO
#define DEFAULT NPY_DEFAULT
#define IN_ARRAY NPY_IN_ARRAY
#define OUT_ARRAY NPY_OUT_ARRAY
#define INOUT_ARRAY NPY_INOUT_ARRAY
#define IN_FARRAY NPY_IN_FARRAY
#define OUT_FARRAY NPY_OUT_FARRAY
#define INOUT_FARRAY NPY_INOUT_FARRAY
#define UPDATE_ALL NPY_UPDATE_ALL

#define OWN_DATA NPY_OWNDATA
#define BEHAVED_FLAGS NPY_BEHAVED
#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS
#define CARRAY_FLAGS_RO NPY_CARRAY_RO
#define CARRAY_FLAGS NPY_CARRAY
#define FARRAY_FLAGS NPY_FARRAY
#define FARRAY_FLAGS_RO NPY_FARRAY_RO
#define DEFAULT_FLAGS NPY_DEFAULT
#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS

#ifndef MIN
#define MIN PyArray_MIN
#endif
#ifndef MAX
#define MAX PyArray_MAX
#endif
#define MAX_INTP NPY_MAX_INTP
#define MIN_INTP NPY_MIN_INTP
#define MAX_UINTP NPY_MAX_UINTP
#define INTP_FMT NPY_INTP_FMT

#define REFCOUNT PyArray_REFCOUNT
#define MAX_ELSIZE NPY_MAX_ELSIZE

#endif
@ -1,417 +0,0 @@
/*
 * This is a convenience header file providing compatibility utilities
 * for supporting Python 2 and Python 3 in the same code base.
 *
 * If you want to use this for your own projects, it's recommended to make a
 * copy of it. Although the stuff below is unlikely to change, we don't provide
 * strong backwards compatibility guarantees at the moment.
 */

#ifndef _NPY_3KCOMPAT_H_
#define _NPY_3KCOMPAT_H_

#include <Python.h>
#include <stdio.h>

#if PY_VERSION_HEX >= 0x03000000
#ifndef NPY_PY3K
#define NPY_PY3K 1
#endif
#endif

#include "numpy/npy_common.h"
#include "numpy/ndarrayobject.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * PyInt -> PyLong
 */

#if defined(NPY_PY3K)
/* Return True only if the long fits in a C long */
static NPY_INLINE int PyInt_Check(PyObject *op) {
    int overflow = 0;
    if (!PyLong_Check(op)) {
        return 0;
    }
    PyLong_AsLongAndOverflow(op, &overflow);
    return (overflow == 0);
}

#define PyInt_FromLong PyLong_FromLong
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AsLong
#define PyInt_AsSsize_t PyLong_AsSsize_t

/* NOTE:
 *
 * Since the PyLong type is very different from the fixed-range PyInt,
 * we don't define PyInt_Type -> PyLong_Type.
 */
#endif /* NPY_PY3K */
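
With these aliases in place, extension code can keep the Python 2 spellings and still build on Python 3. A minimal usage sketch; the helper name get_c_long is illustrative, not part of the header:

static NPY_INLINE int
get_c_long(PyObject *obj, long *out)
{
    if (!PyInt_Check(obj)) {      /* on Py3: a PyLong that fits in a C long */
        return -1;
    }
    *out = PyInt_AsLong(obj);     /* on Py3: expands to PyLong_AsLong */
    return (*out == -1 && PyErr_Occurred()) ? -1 : 0;
}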

/*
 * PyString -> PyBytes
 */

#if defined(NPY_PY3K)

#define PyString_Type PyBytes_Type
#define PyString_Check PyBytes_Check
#define PyStringObject PyBytesObject
#define PyString_FromString PyBytes_FromString
#define PyString_FromStringAndSize PyBytes_FromStringAndSize
#define PyString_AS_STRING PyBytes_AS_STRING
#define PyString_AsStringAndSize PyBytes_AsStringAndSize
#define PyString_FromFormat PyBytes_FromFormat
#define PyString_Concat PyBytes_Concat
#define PyString_ConcatAndDel PyBytes_ConcatAndDel
#define PyString_AsString PyBytes_AsString
#define PyString_GET_SIZE PyBytes_GET_SIZE
#define PyString_Size PyBytes_Size

#define PyUString_Type PyUnicode_Type
#define PyUString_Check PyUnicode_Check
#define PyUStringObject PyUnicodeObject
#define PyUString_FromString PyUnicode_FromString
#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
#define PyUString_FromFormat PyUnicode_FromFormat
#define PyUString_Concat PyUnicode_Concat2
#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel
#define PyUString_GET_SIZE PyUnicode_GET_SIZE
#define PyUString_Size PyUnicode_Size
#define PyUString_InternFromString PyUnicode_InternFromString
#define PyUString_Format PyUnicode_Format

#else

#define PyBytes_Type PyString_Type
#define PyBytes_Check PyString_Check
#define PyBytesObject PyStringObject
#define PyBytes_FromString PyString_FromString
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
#define PyBytes_AS_STRING PyString_AS_STRING
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
#define PyBytes_FromFormat PyString_FromFormat
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#define PyBytes_AsString PyString_AsString
#define PyBytes_GET_SIZE PyString_GET_SIZE
#define PyBytes_Size PyString_Size

#define PyUString_Type PyString_Type
#define PyUString_Check PyString_Check
#define PyUStringObject PyStringObject
#define PyUString_FromString PyString_FromString
#define PyUString_FromStringAndSize PyString_FromStringAndSize
#define PyUString_FromFormat PyString_FromFormat
#define PyUString_Concat PyString_Concat
#define PyUString_ConcatAndDel PyString_ConcatAndDel
#define PyUString_GET_SIZE PyString_GET_SIZE
#define PyUString_Size PyString_Size
#define PyUString_InternFromString PyString_InternFromString
#define PyUString_Format PyString_Format

#endif /* NPY_PY3K */


static NPY_INLINE void
PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
{
    PyObject *newobj;
    newobj = PyUnicode_Concat(*left, right);
    Py_DECREF(*left);
    Py_DECREF(right);
    *left = newobj;
}

static NPY_INLINE void
PyUnicode_Concat2(PyObject **left, PyObject *right)
{
    PyObject *newobj;
    newobj = PyUnicode_Concat(*left, right);
    Py_DECREF(*left);
    *left = newobj;
}
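
A sketch of how the PyUString_* names get used; join_names is illustrative. Note that PyUString_ConcatAndDel steals the right-hand reference on both Python versions:

static NPY_INLINE PyObject *
join_names(const char *base, const char *attr)
{
    PyObject *s = PyUString_FromString(base);
    PyObject *a = PyUString_FromString(attr);
    if (s == NULL || a == NULL) {
        Py_XDECREF(s);
        Py_XDECREF(a);
        return NULL;
    }
    PyUString_ConcatAndDel(&s, a);    /* s becomes base + attr, a is consumed */
    return s;
}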

/*
 * PyFile_* compatibility
 */
#if defined(NPY_PY3K)

/*
 * Get a FILE* handle to the file represented by the Python object
 */
static NPY_INLINE FILE*
npy_PyFile_Dup(PyObject *file, char *mode)
{
    int fd, fd2;
    PyObject *ret, *os;
    Py_ssize_t pos;
    FILE *handle;
    /* Flush first to ensure things end up in the file in the correct order */
    ret = PyObject_CallMethod(file, "flush", "");
    if (ret == NULL) {
        return NULL;
    }
    Py_DECREF(ret);
    fd = PyObject_AsFileDescriptor(file);
    if (fd == -1) {
        return NULL;
    }
    os = PyImport_ImportModule("os");
    if (os == NULL) {
        return NULL;
    }
    ret = PyObject_CallMethod(os, "dup", "i", fd);
    Py_DECREF(os);
    if (ret == NULL) {
        return NULL;
    }
    fd2 = PyNumber_AsSsize_t(ret, NULL);
    Py_DECREF(ret);
#ifdef _WIN32
    handle = _fdopen(fd2, mode);
#else
    handle = fdopen(fd2, mode);
#endif
    if (handle == NULL) {
        PyErr_SetString(PyExc_IOError,
                        "Getting a FILE* from a Python file object failed");
        return NULL;    /* bail out; the fclose() calls below assume a valid handle */
    }
    ret = PyObject_CallMethod(file, "tell", "");
    if (ret == NULL) {
        fclose(handle);
        return NULL;
    }
    pos = PyNumber_AsSsize_t(ret, PyExc_OverflowError);
    Py_DECREF(ret);
    if (PyErr_Occurred()) {
        fclose(handle);
        return NULL;
    }
    npy_fseek(handle, pos, SEEK_SET);
    return handle;
}

/*
 * Close the dup-ed file handle, and seek the Python one to the current position
 */
static NPY_INLINE int
npy_PyFile_DupClose(PyObject *file, FILE* handle)
{
    PyObject *ret;
    Py_ssize_t position;
    position = npy_ftell(handle);
    fclose(handle);

    ret = PyObject_CallMethod(file, "seek", NPY_SSIZE_T_PYFMT "i", position, 0);
    if (ret == NULL) {
        return -1;
    }
    Py_DECREF(ret);
    return 0;
}

static NPY_INLINE int
npy_PyFile_Check(PyObject *file)
{
    int fd;
    fd = PyObject_AsFileDescriptor(file);
    if (fd == -1) {
        PyErr_Clear();
        return 0;
    }
    return 1;
}

#else

#define npy_PyFile_Dup(file, mode) PyFile_AsFile(file)
#define npy_PyFile_DupClose(file, handle) (0)
#define npy_PyFile_Check PyFile_Check

#endif

static NPY_INLINE PyObject*
npy_PyFile_OpenFile(PyObject *filename, const char *mode)
{
    PyObject *open;
    open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
    if (open == NULL) {
        return NULL;
    }
    return PyObject_CallFunction(open, "Os", filename, mode);
}

static NPY_INLINE int
npy_PyFile_CloseFile(PyObject *file)
{
    PyObject *ret;

    ret = PyObject_CallMethod(file, "close", NULL);
    if (ret == NULL) {
        return -1;
    }
    Py_DECREF(ret);
    return 0;
}
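
Taken together, these give a portable pattern for doing C-level I/O on a Python file object; read_n_bytes is an illustrative caller, not part of the header:

static int
read_n_bytes(PyObject *pyfile, void *buf, size_t n)
{
    FILE *fp = npy_PyFile_Dup(pyfile, "rb");
    if (fp == NULL) {
        return -1;
    }
    if (fread(buf, 1, n, fp) != n) {
        npy_PyFile_DupClose(pyfile, fp);   /* still re-syncs the Python file */
        return -1;
    }
    return npy_PyFile_DupClose(pyfile, fp);
}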

/*
 * PyObject_Cmp
 */
#if defined(NPY_PY3K)
static NPY_INLINE int
PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
{
    int v;
    /* PyObject_RichCompareBool returns 1 when the relation holds,
       0 when it doesn't, and -1 on error */
    v = PyObject_RichCompareBool(i1, i2, Py_LT);
    if (v == 1) {
        *cmp = -1;
        return 1;
    }
    else if (v == -1) {
        return -1;
    }

    v = PyObject_RichCompareBool(i1, i2, Py_GT);
    if (v == 1) {
        *cmp = 1;
        return 1;
    }
    else if (v == -1) {
        return -1;
    }

    v = PyObject_RichCompareBool(i1, i2, Py_EQ);
    if (v == 1) {
        *cmp = 0;
        return 1;
    }
    else {
        *cmp = 0;
        return -1;
    }
}
#endif
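
With the shim in place, three-way comparisons can be written once for both Python versions; describe_order is an illustrative caller:

static int
describe_order(PyObject *a, PyObject *b, const char **out)
{
    int cmp;
    if (PyObject_Cmp(a, b, &cmp) == -1) {
        return -1;                  /* the comparison itself raised */
    }
    *out = (cmp < 0) ? "lt" : (cmp > 0) ? "gt" : "eq";
    return 0;
}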

/*
 * PyCObject functions adapted to PyCapsules.
 *
 * The main job here is to get rid of the improved error handling
 * of PyCapsules. It's a shame...
 */
#if PY_VERSION_HEX >= 0x03000000

static NPY_INLINE PyObject *
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
{
    PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
    if (ret == NULL) {
        PyErr_Clear();
    }
    return ret;
}

static NPY_INLINE PyObject *
NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
{
    PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
    if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
        PyErr_Clear();
        Py_DECREF(ret);
        ret = NULL;
    }
    return ret;
}

static NPY_INLINE void *
NpyCapsule_AsVoidPtr(PyObject *obj)
{
    void *ret = PyCapsule_GetPointer(obj, NULL);
    if (ret == NULL) {
        PyErr_Clear();
    }
    return ret;
}

static NPY_INLINE void *
NpyCapsule_GetDesc(PyObject *obj)
{
    return PyCapsule_GetContext(obj);
}

static NPY_INLINE int
NpyCapsule_Check(PyObject *ptr)
{
    return PyCapsule_CheckExact(ptr);
}

static NPY_INLINE void
simple_capsule_dtor(PyObject *cap)
{
    PyArray_free(PyCapsule_GetPointer(cap, NULL));
}

#else

static NPY_INLINE PyObject *
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
{
    return PyCObject_FromVoidPtr(ptr, dtor);
}

static NPY_INLINE PyObject *
NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context,
                              void (*dtor)(void *, void *))
{
    return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor);
}

static NPY_INLINE void *
NpyCapsule_AsVoidPtr(PyObject *ptr)
{
    return PyCObject_AsVoidPtr(ptr);
}

static NPY_INLINE void *
NpyCapsule_GetDesc(PyObject *obj)
{
    return PyCObject_GetDesc(obj);
}

static NPY_INLINE int
NpyCapsule_Check(PyObject *ptr)
{
    return PyCObject_Check(ptr);
}

static NPY_INLINE void
simple_capsule_dtor(void *ptr)
{
    PyArray_free(ptr);
}

#endif
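
A sketch of the intended round trip; make_table_capsule is illustrative. Consumers recover the pointer later with NpyCapsule_AsVoidPtr(cap):

static PyObject *
make_table_capsule(void)
{
    PyObject *cap;
    double *table = (double *)PyArray_malloc(16 * sizeof(double));
    if (table == NULL) {
        return PyErr_NoMemory();
    }
    /* simple_capsule_dtor frees the pointer when the capsule is collected */
    cap = NpyCapsule_FromVoidPtr(table, simple_capsule_dtor);
    if (cap == NULL) {
        PyArray_free(table);    /* the shim cleared the error, so set one */
        PyErr_SetString(PyExc_RuntimeError, "could not wrap table");
    }
    return cap;
}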

/*
 * Hash value compatibility.
 * As of Python 3.2 hash values are of type Py_hash_t.
 * Previous versions use C long.
 */
#if PY_VERSION_HEX < 0x03020000
typedef long npy_hash_t;
#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG
#else
typedef Py_hash_t npy_hash_t;
#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
#endif
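
This lets hash slots be written against npy_hash_t and stay correctly sized on every supported Python; identity_hash is an illustrative sketch:

static npy_hash_t
identity_hash(void *ptr)
{
    npy_hash_t h = (npy_hash_t)(npy_uintp)ptr;
    return (h == -1) ? -2 : h;      /* CPython reserves -1 for errors */
}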

#ifdef __cplusplus
}
#endif

#endif /* _NPY_3KCOMPAT_H_ */
@ -1,930 +0,0 @@
#ifndef _NPY_COMMON_H_
#define _NPY_COMMON_H_

/* numpyconfig.h is auto-generated */
#include "numpyconfig.h"

#if defined(_MSC_VER)
    #define NPY_INLINE __inline
#elif defined(__GNUC__)
    #if defined(__STRICT_ANSI__)
        #define NPY_INLINE __inline__
    #else
        #define NPY_INLINE inline
    #endif
#else
    #define NPY_INLINE
#endif

/* Enable 64 bit file position support on win-amd64. Ticket #1660 */
#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400)
    #define npy_fseek _fseeki64
    #define npy_ftell _ftelli64
#else
    #define npy_fseek fseek
    #define npy_ftell ftell
#endif
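
A sketch of why the wrappers matter: offsets never pass through a 32-bit long on win-amd64, so files beyond 2 GiB stay addressable. The helper name npy_file_size is illustrative:

static long long
npy_file_size(FILE *fp)
{
    long long pos, size;
    pos = (long long)npy_ftell(fp);            /* remember the caller's offset */
    if (pos == -1 || npy_fseek(fp, 0, SEEK_END) != 0) {
        return -1;
    }
    size = (long long)npy_ftell(fp);
    npy_fseek(fp, pos, SEEK_SET);              /* restore it */
    return size;
}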

/* enums for detected endianness */
enum {
    NPY_CPU_UNKNOWN_ENDIAN,
    NPY_CPU_LITTLE,
    NPY_CPU_BIG
};

/*
 * This is to typedef npy_intp to the appropriate pointer size for
 * this platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h.
 */
typedef Py_intptr_t npy_intp;
typedef Py_uintptr_t npy_uintp;
#define NPY_SIZEOF_CHAR 1
#define NPY_SIZEOF_BYTE 1
#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T
#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T
#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE

#ifdef constchar
#undef constchar
#endif

#if (PY_VERSION_HEX < 0x02050000)
    #ifndef PY_SSIZE_T_MIN
        typedef int Py_ssize_t;
        #define PY_SSIZE_T_MAX INT_MAX
        #define PY_SSIZE_T_MIN INT_MIN
    #endif
#define NPY_SSIZE_T_PYFMT "i"
#define constchar const char
#else
#define NPY_SSIZE_T_PYFMT "n"
#define constchar char
#endif

/* NPY_INTP_FMT Note:
 * Unlike the other NPY_*_FMT macros, which are used with
 * PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and
 * PyString_Format. These functions use different formatting
 * codes, which are portably specified according to the Python
 * documentation. See ticket #1795.
 *
 * On Windows x64, the LONGLONG formatter should be used, but
 * in Python 2.6 the %lld formatter is not supported. In this
 * case we work around the problem by using the %zd formatter.
 */
#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT
    #define NPY_INTP NPY_INT
    #define NPY_UINTP NPY_UINT
    #define PyIntpArrType_Type PyIntArrType_Type
    #define PyUIntpArrType_Type PyUIntArrType_Type
    #define NPY_MAX_INTP NPY_MAX_INT
    #define NPY_MIN_INTP NPY_MIN_INT
    #define NPY_MAX_UINTP NPY_MAX_UINT
    #define NPY_INTP_FMT "d"
#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG
    #define NPY_INTP NPY_LONG
    #define NPY_UINTP NPY_ULONG
    #define PyIntpArrType_Type PyLongArrType_Type
    #define PyUIntpArrType_Type PyULongArrType_Type
    #define NPY_MAX_INTP NPY_MAX_LONG
    #define NPY_MIN_INTP NPY_MIN_LONG
    #define NPY_MAX_UINTP NPY_MAX_ULONG
    #define NPY_INTP_FMT "ld"
#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG)
    #define NPY_INTP NPY_LONGLONG
    #define NPY_UINTP NPY_ULONGLONG
    #define PyIntpArrType_Type PyLongLongArrType_Type
    #define PyUIntpArrType_Type PyULongLongArrType_Type
    #define NPY_MAX_INTP NPY_MAX_LONGLONG
    #define NPY_MIN_INTP NPY_MIN_LONGLONG
    #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
    #if (PY_VERSION_HEX >= 0x02070000)
        #define NPY_INTP_FMT "lld"
    #else
        #define NPY_INTP_FMT "zd"
    #endif
#endif

/*
 * We can only use C99 formats for npy_intp if it is the same as
 * intptr_t, hence the condition on HAVE_UINTPTR_T
 */
#if (NPY_USE_C99_FORMATS) == 1 \
        && (defined HAVE_UINTPTR_T) \
        && (defined HAVE_INTTYPES_H)
    #include <inttypes.h>
    #undef NPY_INTP_FMT
    #define NPY_INTP_FMT PRIdPTR
#endif
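
For example, error messages format npy_intp values by splicing the macro into the format string; raise_bad_axis is an illustrative helper:

static void
raise_bad_axis(npy_intp axis, npy_intp ndim)
{
    PyErr_Format(PyExc_ValueError,
                 "axis %" NPY_INTP_FMT " is out of bounds for an array with %"
                 NPY_INTP_FMT " dimensions", axis, ndim);
}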

/*
 * Some platforms don't define bool, long long, or long double.
 * Handle that here.
 */
#define NPY_BYTE_FMT "hhd"
#define NPY_UBYTE_FMT "hhu"
#define NPY_SHORT_FMT "hd"
#define NPY_USHORT_FMT "hu"
#define NPY_INT_FMT "d"
#define NPY_UINT_FMT "u"
#define NPY_LONG_FMT "ld"
#define NPY_ULONG_FMT "lu"
#define NPY_HALF_FMT "g"
#define NPY_FLOAT_FMT "g"
#define NPY_DOUBLE_FMT "g"

#ifdef PY_LONG_LONG
typedef PY_LONG_LONG npy_longlong;
typedef unsigned PY_LONG_LONG npy_ulonglong;
#  ifdef _MSC_VER
#    define NPY_LONGLONG_FMT "I64d"
#    define NPY_ULONGLONG_FMT "I64u"
#  elif defined(__APPLE__) || defined(__FreeBSD__)
/* "%Ld" only parses 4 bytes -- "L" is a floating modifier on MacOS X/BSD */
#    define NPY_LONGLONG_FMT "lld"
#    define NPY_ULONGLONG_FMT "llu"
/*
 * Another possible variant -- *quad_t works on *BSD, but is deprecated:
 * #define LONGLONG_FMT "qd"
 * #define ULONGLONG_FMT "qu"
 */
#  else
#    define NPY_LONGLONG_FMT "Ld"
#    define NPY_ULONGLONG_FMT "Lu"
#  endif
#  ifdef _MSC_VER
#    define NPY_LONGLONG_SUFFIX(x) (x##i64)
#    define NPY_ULONGLONG_SUFFIX(x) (x##Ui64)
#  else
#    define NPY_LONGLONG_SUFFIX(x) (x##LL)
#    define NPY_ULONGLONG_SUFFIX(x) (x##ULL)
#  endif
#else
typedef long npy_longlong;
typedef unsigned long npy_ulonglong;
#  define NPY_LONGLONG_SUFFIX(x) (x##L)
#  define NPY_ULONGLONG_SUFFIX(x) (x##UL)
#endif
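
The suffix macros keep wide integer literals portable between MSVC (i64/Ui64) and other compilers (LL/ULL); the constant name here is illustrative:

#define EXAMPLE_FOUR_GIB NPY_LONGLONG_SUFFIX(4294967296)   /* 2^32, needs more than 32 bits */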

typedef unsigned char npy_bool;
#define NPY_FALSE 0
#define NPY_TRUE 1

#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
    typedef double npy_longdouble;
    #define NPY_LONGDOUBLE_FMT "g"
#else
    typedef long double npy_longdouble;
    #define NPY_LONGDOUBLE_FMT "Lg"
#endif

#ifndef Py_USING_UNICODE
#error Must use Python with unicode enabled.
#endif

typedef signed char npy_byte;
typedef unsigned char npy_ubyte;
typedef unsigned short npy_ushort;
typedef unsigned int npy_uint;
typedef unsigned long npy_ulong;

/* These are for completeness */
typedef char npy_char;
typedef short npy_short;
typedef int npy_int;
typedef long npy_long;
typedef float npy_float;
typedef double npy_double;

/*
 * Disabling C99 complex usage: a lot of C code in numpy/scipy relies on being
 * able to do .real/.imag. Will have to convert code first.
 */
#if 0
#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE)
typedef complex npy_cdouble;
#else
typedef struct { double real, imag; } npy_cdouble;
#endif

#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT)
typedef complex float npy_cfloat;
#else
typedef struct { float real, imag; } npy_cfloat;
#endif

#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE)
typedef complex long double npy_clongdouble;
#else
typedef struct { npy_longdouble real, imag; } npy_clongdouble;
#endif
#endif
#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE
#error npy_cdouble definition is not compatible with C99 complex definition ! \
       Please contact NumPy maintainers and give detailed information about your \
       compiler and platform
#endif
typedef struct { double real, imag; } npy_cdouble;

#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT
#error npy_cfloat definition is not compatible with C99 complex definition ! \
       Please contact NumPy maintainers and give detailed information about your \
       compiler and platform
#endif
typedef struct { float real, imag; } npy_cfloat;

#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE
#error npy_clongdouble definition is not compatible with C99 complex definition ! \
       Please contact NumPy maintainers and give detailed information about your \
       compiler and platform
#endif
typedef struct { npy_longdouble real, imag; } npy_clongdouble;

/*
 * numarray-style bit-width typedefs
 */
#define NPY_MAX_INT8 127
#define NPY_MIN_INT8 -128
#define NPY_MAX_UINT8 255
#define NPY_MAX_INT16 32767
#define NPY_MIN_INT16 -32768
#define NPY_MAX_UINT16 65535
#define NPY_MAX_INT32 2147483647
#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
#define NPY_MAX_UINT32 4294967295U
#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1))
#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
#define NPY_MIN_DATETIME NPY_MIN_INT64
#define NPY_MAX_DATETIME NPY_MAX_INT64
#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
#define NPY_MAX_TIMEDELTA NPY_MAX_INT64

/* Need to find the number of bits for each type and
   make definitions accordingly.

   C states that sizeof(char) == 1 by definition,
   so just using the sizeof keyword won't help.

   It also looks like Python itself uses sizeof(char) quite a
   bit, which by definition should be 1 all the time.

   Idea: make use of CHAR_BIT, which tells us how many
   bits there are per character.
*/

/* Include platform definitions -- These are in the C89/90 standard */
#include <limits.h>
#define NPY_MAX_BYTE SCHAR_MAX
#define NPY_MIN_BYTE SCHAR_MIN
#define NPY_MAX_UBYTE UCHAR_MAX
#define NPY_MAX_SHORT SHRT_MAX
#define NPY_MIN_SHORT SHRT_MIN
#define NPY_MAX_USHORT USHRT_MAX
#define NPY_MAX_INT INT_MAX
#ifndef INT_MIN
#define INT_MIN (-INT_MAX - 1)
#endif
#define NPY_MIN_INT INT_MIN
#define NPY_MAX_UINT UINT_MAX
#define NPY_MAX_LONG LONG_MAX
#define NPY_MIN_LONG LONG_MIN
#define NPY_MAX_ULONG ULONG_MAX

#define NPY_SIZEOF_HALF 2
#define NPY_SIZEOF_DATETIME 8
#define NPY_SIZEOF_TIMEDELTA 8

#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
#define NPY_BITSOF_CHAR CHAR_BIT
#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
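
Because the NPY_BITSOF_* values are integer constant expressions, they also work in C89-style compile-time checks; the typedef name is illustrative:

typedef char example_longlong_holds_64_bits[(NPY_BITSOF_LONGLONG >= 64) ? 1 : -1];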

#if NPY_BITSOF_LONG == 8
#define NPY_INT8 NPY_LONG
#define NPY_UINT8 NPY_ULONG
typedef long npy_int8;
typedef unsigned long npy_uint8;
#define PyInt8ScalarObject PyLongScalarObject
#define PyInt8ArrType_Type PyLongArrType_Type
#define PyUInt8ScalarObject PyULongScalarObject
#define PyUInt8ArrType_Type PyULongArrType_Type
#define NPY_INT8_FMT NPY_LONG_FMT
#define NPY_UINT8_FMT NPY_ULONG_FMT
#elif NPY_BITSOF_LONG == 16
#define NPY_INT16 NPY_LONG
#define NPY_UINT16 NPY_ULONG
typedef long npy_int16;
typedef unsigned long npy_uint16;
#define PyInt16ScalarObject PyLongScalarObject
#define PyInt16ArrType_Type PyLongArrType_Type
#define PyUInt16ScalarObject PyULongScalarObject
#define PyUInt16ArrType_Type PyULongArrType_Type
#define NPY_INT16_FMT NPY_LONG_FMT
#define NPY_UINT16_FMT NPY_ULONG_FMT
#elif NPY_BITSOF_LONG == 32
#define NPY_INT32 NPY_LONG
#define NPY_UINT32 NPY_ULONG
typedef long npy_int32;
typedef unsigned long npy_uint32;
typedef unsigned long npy_ucs4;
#define PyInt32ScalarObject PyLongScalarObject
#define PyInt32ArrType_Type PyLongArrType_Type
#define PyUInt32ScalarObject PyULongScalarObject
#define PyUInt32ArrType_Type PyULongArrType_Type
#define NPY_INT32_FMT NPY_LONG_FMT
#define NPY_UINT32_FMT NPY_ULONG_FMT
#elif NPY_BITSOF_LONG == 64
#define NPY_INT64 NPY_LONG
#define NPY_UINT64 NPY_ULONG
typedef long npy_int64;
typedef unsigned long npy_uint64;
#define PyInt64ScalarObject PyLongScalarObject
#define PyInt64ArrType_Type PyLongArrType_Type
#define PyUInt64ScalarObject PyULongScalarObject
#define PyUInt64ArrType_Type PyULongArrType_Type
#define NPY_INT64_FMT NPY_LONG_FMT
#define NPY_UINT64_FMT NPY_ULONG_FMT
#define MyPyLong_FromInt64 PyLong_FromLong
#define MyPyLong_AsInt64 PyLong_AsLong
#elif NPY_BITSOF_LONG == 128
#define NPY_INT128 NPY_LONG
#define NPY_UINT128 NPY_ULONG
typedef long npy_int128;
typedef unsigned long npy_uint128;
#define PyInt128ScalarObject PyLongScalarObject
#define PyInt128ArrType_Type PyLongArrType_Type
#define PyUInt128ScalarObject PyULongScalarObject
#define PyUInt128ArrType_Type PyULongArrType_Type
#define NPY_INT128_FMT NPY_LONG_FMT
#define NPY_UINT128_FMT NPY_ULONG_FMT
#endif

#if NPY_BITSOF_LONGLONG == 8
#  ifndef NPY_INT8
#    define NPY_INT8 NPY_LONGLONG
#    define NPY_UINT8 NPY_ULONGLONG
typedef npy_longlong npy_int8;
typedef npy_ulonglong npy_uint8;
#    define PyInt8ScalarObject PyLongLongScalarObject
#    define PyInt8ArrType_Type PyLongLongArrType_Type
#    define PyUInt8ScalarObject PyULongLongScalarObject
#    define PyUInt8ArrType_Type PyULongLongArrType_Type
#    define NPY_INT8_FMT NPY_LONGLONG_FMT
#    define NPY_UINT8_FMT NPY_ULONGLONG_FMT
#  endif
#  define NPY_MAX_LONGLONG NPY_MAX_INT8
#  define NPY_MIN_LONGLONG NPY_MIN_INT8
#  define NPY_MAX_ULONGLONG NPY_MAX_UINT8
#elif NPY_BITSOF_LONGLONG == 16
#  ifndef NPY_INT16
#    define NPY_INT16 NPY_LONGLONG
#    define NPY_UINT16 NPY_ULONGLONG
typedef npy_longlong npy_int16;
typedef npy_ulonglong npy_uint16;
#    define PyInt16ScalarObject PyLongLongScalarObject
#    define PyInt16ArrType_Type PyLongLongArrType_Type
#    define PyUInt16ScalarObject PyULongLongScalarObject
#    define PyUInt16ArrType_Type PyULongLongArrType_Type
#    define NPY_INT16_FMT NPY_LONGLONG_FMT
#    define NPY_UINT16_FMT NPY_ULONGLONG_FMT
#  endif
#  define NPY_MAX_LONGLONG NPY_MAX_INT16
#  define NPY_MIN_LONGLONG NPY_MIN_INT16
#  define NPY_MAX_ULONGLONG NPY_MAX_UINT16
#elif NPY_BITSOF_LONGLONG == 32
#  ifndef NPY_INT32
#    define NPY_INT32 NPY_LONGLONG
#    define NPY_UINT32 NPY_ULONGLONG
typedef npy_longlong npy_int32;
typedef npy_ulonglong npy_uint32;
typedef npy_ulonglong npy_ucs4;
#    define PyInt32ScalarObject PyLongLongScalarObject
#    define PyInt32ArrType_Type PyLongLongArrType_Type
#    define PyUInt32ScalarObject PyULongLongScalarObject
#    define PyUInt32ArrType_Type PyULongLongArrType_Type
#    define NPY_INT32_FMT NPY_LONGLONG_FMT
#    define NPY_UINT32_FMT NPY_ULONGLONG_FMT
#  endif
#  define NPY_MAX_LONGLONG NPY_MAX_INT32
#  define NPY_MIN_LONGLONG NPY_MIN_INT32
#  define NPY_MAX_ULONGLONG NPY_MAX_UINT32
#elif NPY_BITSOF_LONGLONG == 64
#  ifndef NPY_INT64
#    define NPY_INT64 NPY_LONGLONG
#    define NPY_UINT64 NPY_ULONGLONG
typedef npy_longlong npy_int64;
typedef npy_ulonglong npy_uint64;
#    define PyInt64ScalarObject PyLongLongScalarObject
#    define PyInt64ArrType_Type PyLongLongArrType_Type
#    define PyUInt64ScalarObject PyULongLongScalarObject
#    define PyUInt64ArrType_Type PyULongLongArrType_Type
#    define NPY_INT64_FMT NPY_LONGLONG_FMT
#    define NPY_UINT64_FMT NPY_ULONGLONG_FMT
#    define MyPyLong_FromInt64 PyLong_FromLongLong
#    define MyPyLong_AsInt64 PyLong_AsLongLong
#  endif
#  define NPY_MAX_LONGLONG NPY_MAX_INT64
#  define NPY_MIN_LONGLONG NPY_MIN_INT64
#  define NPY_MAX_ULONGLONG NPY_MAX_UINT64
#elif NPY_BITSOF_LONGLONG == 128
#  ifndef NPY_INT128
#    define NPY_INT128 NPY_LONGLONG
#    define NPY_UINT128 NPY_ULONGLONG
typedef npy_longlong npy_int128;
typedef npy_ulonglong npy_uint128;
#    define PyInt128ScalarObject PyLongLongScalarObject
#    define PyInt128ArrType_Type PyLongLongArrType_Type
#    define PyUInt128ScalarObject PyULongLongScalarObject
#    define PyUInt128ArrType_Type PyULongLongArrType_Type
#    define NPY_INT128_FMT NPY_LONGLONG_FMT
#    define NPY_UINT128_FMT NPY_ULONGLONG_FMT
#  endif
#  define NPY_MAX_LONGLONG NPY_MAX_INT128
#  define NPY_MIN_LONGLONG NPY_MIN_INT128
#  define NPY_MAX_ULONGLONG NPY_MAX_UINT128
#elif NPY_BITSOF_LONGLONG == 256
#  define NPY_INT256 NPY_LONGLONG
#  define NPY_UINT256 NPY_ULONGLONG
typedef npy_longlong npy_int256;
typedef npy_ulonglong npy_uint256;
#  define PyInt256ScalarObject PyLongLongScalarObject
#  define PyInt256ArrType_Type PyLongLongArrType_Type
#  define PyUInt256ScalarObject PyULongLongScalarObject
#  define PyUInt256ArrType_Type PyULongLongArrType_Type
#  define NPY_INT256_FMT NPY_LONGLONG_FMT
#  define NPY_UINT256_FMT NPY_ULONGLONG_FMT
#  define NPY_MAX_LONGLONG NPY_MAX_INT256
#  define NPY_MIN_LONGLONG NPY_MIN_INT256
#  define NPY_MAX_ULONGLONG NPY_MAX_UINT256
#endif

#if NPY_BITSOF_INT == 8
#ifndef NPY_INT8
#define NPY_INT8 NPY_INT
#define NPY_UINT8 NPY_UINT
typedef int npy_int8;
typedef unsigned int npy_uint8;
#  define PyInt8ScalarObject PyIntScalarObject
#  define PyInt8ArrType_Type PyIntArrType_Type
#  define PyUInt8ScalarObject PyUIntScalarObject
#  define PyUInt8ArrType_Type PyUIntArrType_Type
#define NPY_INT8_FMT NPY_INT_FMT
#define NPY_UINT8_FMT NPY_UINT_FMT
#endif
#elif NPY_BITSOF_INT == 16
#ifndef NPY_INT16
#define NPY_INT16 NPY_INT
#define NPY_UINT16 NPY_UINT
typedef int npy_int16;
typedef unsigned int npy_uint16;
#  define PyInt16ScalarObject PyIntScalarObject
#  define PyInt16ArrType_Type PyIntArrType_Type
#  define PyUInt16ScalarObject PyUIntScalarObject
#  define PyUInt16ArrType_Type PyUIntArrType_Type
#define NPY_INT16_FMT NPY_INT_FMT
#define NPY_UINT16_FMT NPY_UINT_FMT
#endif
#elif NPY_BITSOF_INT == 32
#ifndef NPY_INT32
#define NPY_INT32 NPY_INT
#define NPY_UINT32 NPY_UINT
typedef int npy_int32;
typedef unsigned int npy_uint32;
typedef unsigned int npy_ucs4;
#  define PyInt32ScalarObject PyIntScalarObject
#  define PyInt32ArrType_Type PyIntArrType_Type
#  define PyUInt32ScalarObject PyUIntScalarObject
#  define PyUInt32ArrType_Type PyUIntArrType_Type
#define NPY_INT32_FMT NPY_INT_FMT
#define NPY_UINT32_FMT NPY_UINT_FMT
#endif
#elif NPY_BITSOF_INT == 64
#ifndef NPY_INT64
#define NPY_INT64 NPY_INT
#define NPY_UINT64 NPY_UINT
typedef int npy_int64;
typedef unsigned int npy_uint64;
#  define PyInt64ScalarObject PyIntScalarObject
#  define PyInt64ArrType_Type PyIntArrType_Type
#  define PyUInt64ScalarObject PyUIntScalarObject
#  define PyUInt64ArrType_Type PyUIntArrType_Type
#define NPY_INT64_FMT NPY_INT_FMT
#define NPY_UINT64_FMT NPY_UINT_FMT
#  define MyPyLong_FromInt64 PyLong_FromLong
#  define MyPyLong_AsInt64 PyLong_AsLong
#endif
#elif NPY_BITSOF_INT == 128
#ifndef NPY_INT128
#define NPY_INT128 NPY_INT
#define NPY_UINT128 NPY_UINT
typedef int npy_int128;
typedef unsigned int npy_uint128;
#  define PyInt128ScalarObject PyIntScalarObject
#  define PyInt128ArrType_Type PyIntArrType_Type
#  define PyUInt128ScalarObject PyUIntScalarObject
#  define PyUInt128ArrType_Type PyUIntArrType_Type
#define NPY_INT128_FMT NPY_INT_FMT
#define NPY_UINT128_FMT NPY_UINT_FMT
#endif
#endif

#if NPY_BITSOF_SHORT == 8
#ifndef NPY_INT8
#define NPY_INT8 NPY_SHORT
#define NPY_UINT8 NPY_USHORT
typedef short npy_int8;
typedef unsigned short npy_uint8;
#  define PyInt8ScalarObject PyShortScalarObject
#  define PyInt8ArrType_Type PyShortArrType_Type
#  define PyUInt8ScalarObject PyUShortScalarObject
#  define PyUInt8ArrType_Type PyUShortArrType_Type
#define NPY_INT8_FMT NPY_SHORT_FMT
#define NPY_UINT8_FMT NPY_USHORT_FMT
#endif
#elif NPY_BITSOF_SHORT == 16
#ifndef NPY_INT16
#define NPY_INT16 NPY_SHORT
#define NPY_UINT16 NPY_USHORT
typedef short npy_int16;
typedef unsigned short npy_uint16;
#  define PyInt16ScalarObject PyShortScalarObject
#  define PyInt16ArrType_Type PyShortArrType_Type
#  define PyUInt16ScalarObject PyUShortScalarObject
#  define PyUInt16ArrType_Type PyUShortArrType_Type
#define NPY_INT16_FMT NPY_SHORT_FMT
#define NPY_UINT16_FMT NPY_USHORT_FMT
#endif
#elif NPY_BITSOF_SHORT == 32
#ifndef NPY_INT32
#define NPY_INT32 NPY_SHORT
#define NPY_UINT32 NPY_USHORT
typedef short npy_int32;
typedef unsigned short npy_uint32;
typedef unsigned short npy_ucs4;
#  define PyInt32ScalarObject PyShortScalarObject
#  define PyInt32ArrType_Type PyShortArrType_Type
#  define PyUInt32ScalarObject PyUShortScalarObject
#  define PyUInt32ArrType_Type PyUShortArrType_Type
#define NPY_INT32_FMT NPY_SHORT_FMT
#define NPY_UINT32_FMT NPY_USHORT_FMT
#endif
#elif NPY_BITSOF_SHORT == 64
#ifndef NPY_INT64
#define NPY_INT64 NPY_SHORT
#define NPY_UINT64 NPY_USHORT
typedef short npy_int64;
typedef unsigned short npy_uint64;
#  define PyInt64ScalarObject PyShortScalarObject
#  define PyInt64ArrType_Type PyShortArrType_Type
#  define PyUInt64ScalarObject PyUShortScalarObject
#  define PyUInt64ArrType_Type PyUShortArrType_Type
#define NPY_INT64_FMT NPY_SHORT_FMT
#define NPY_UINT64_FMT NPY_USHORT_FMT
#  define MyPyLong_FromInt64 PyLong_FromLong
#  define MyPyLong_AsInt64 PyLong_AsLong
#endif
#elif NPY_BITSOF_SHORT == 128
#ifndef NPY_INT128
#define NPY_INT128 NPY_SHORT
#define NPY_UINT128 NPY_USHORT
typedef short npy_int128;
typedef unsigned short npy_uint128;
#  define PyInt128ScalarObject PyShortScalarObject
#  define PyInt128ArrType_Type PyShortArrType_Type
#  define PyUInt128ScalarObject PyUShortScalarObject
#  define PyUInt128ArrType_Type PyUShortArrType_Type
#define NPY_INT128_FMT NPY_SHORT_FMT
#define NPY_UINT128_FMT NPY_USHORT_FMT
#endif
#endif

#if NPY_BITSOF_CHAR == 8
#ifndef NPY_INT8
#define NPY_INT8 NPY_BYTE
#define NPY_UINT8 NPY_UBYTE
typedef signed char npy_int8;
typedef unsigned char npy_uint8;
#  define PyInt8ScalarObject PyByteScalarObject
#  define PyInt8ArrType_Type PyByteArrType_Type
#  define PyUInt8ScalarObject PyUByteScalarObject
#  define PyUInt8ArrType_Type PyUByteArrType_Type
#define NPY_INT8_FMT NPY_BYTE_FMT
#define NPY_UINT8_FMT NPY_UBYTE_FMT
#endif
#elif NPY_BITSOF_CHAR == 16
#ifndef NPY_INT16
#define NPY_INT16 NPY_BYTE
#define NPY_UINT16 NPY_UBYTE
typedef signed char npy_int16;
typedef unsigned char npy_uint16;
#  define PyInt16ScalarObject PyByteScalarObject
#  define PyInt16ArrType_Type PyByteArrType_Type
#  define PyUInt16ScalarObject PyUByteScalarObject
#  define PyUInt16ArrType_Type PyUByteArrType_Type
#define NPY_INT16_FMT NPY_BYTE_FMT
#define NPY_UINT16_FMT NPY_UBYTE_FMT
#endif
#elif NPY_BITSOF_CHAR == 32
#ifndef NPY_INT32
#define NPY_INT32 NPY_BYTE
#define NPY_UINT32 NPY_UBYTE
typedef signed char npy_int32;
typedef unsigned char npy_uint32;
typedef unsigned char npy_ucs4;
#  define PyInt32ScalarObject PyByteScalarObject
#  define PyInt32ArrType_Type PyByteArrType_Type
#  define PyUInt32ScalarObject PyUByteScalarObject
#  define PyUInt32ArrType_Type PyUByteArrType_Type
#define NPY_INT32_FMT NPY_BYTE_FMT
#define NPY_UINT32_FMT NPY_UBYTE_FMT
#endif
#elif NPY_BITSOF_CHAR == 64
#ifndef NPY_INT64
#define NPY_INT64 NPY_BYTE
#define NPY_UINT64 NPY_UBYTE
typedef signed char npy_int64;
typedef unsigned char npy_uint64;
#  define PyInt64ScalarObject PyByteScalarObject
#  define PyInt64ArrType_Type PyByteArrType_Type
#  define PyUInt64ScalarObject PyUByteScalarObject
#  define PyUInt64ArrType_Type PyUByteArrType_Type
#define NPY_INT64_FMT NPY_BYTE_FMT
#define NPY_UINT64_FMT NPY_UBYTE_FMT
#  define MyPyLong_FromInt64 PyLong_FromLong
#  define MyPyLong_AsInt64 PyLong_AsLong
#endif
#elif NPY_BITSOF_CHAR == 128
#ifndef NPY_INT128
#define NPY_INT128 NPY_BYTE
#define NPY_UINT128 NPY_UBYTE
typedef signed char npy_int128;
typedef unsigned char npy_uint128;
#  define PyInt128ScalarObject PyByteScalarObject
#  define PyInt128ArrType_Type PyByteArrType_Type
#  define PyUInt128ScalarObject PyUByteScalarObject
#  define PyUInt128ArrType_Type PyUByteArrType_Type
#define NPY_INT128_FMT NPY_BYTE_FMT
#define NPY_UINT128_FMT NPY_UBYTE_FMT
#endif
#endif

#if NPY_BITSOF_DOUBLE == 32
#ifndef NPY_FLOAT32
#define NPY_FLOAT32 NPY_DOUBLE
#define NPY_COMPLEX64 NPY_CDOUBLE
typedef double npy_float32;
typedef npy_cdouble npy_complex64;
#  define PyFloat32ScalarObject PyDoubleScalarObject
#  define PyComplex64ScalarObject PyCDoubleScalarObject
#  define PyFloat32ArrType_Type PyDoubleArrType_Type
#  define PyComplex64ArrType_Type PyCDoubleArrType_Type
#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT
#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT
#endif
#elif NPY_BITSOF_DOUBLE == 64
#ifndef NPY_FLOAT64
#define NPY_FLOAT64 NPY_DOUBLE
#define NPY_COMPLEX128 NPY_CDOUBLE
typedef double npy_float64;
typedef npy_cdouble npy_complex128;
#  define PyFloat64ScalarObject PyDoubleScalarObject
#  define PyComplex128ScalarObject PyCDoubleScalarObject
#  define PyFloat64ArrType_Type PyDoubleArrType_Type
#  define PyComplex128ArrType_Type PyCDoubleArrType_Type
#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT
#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT
#endif
#elif NPY_BITSOF_DOUBLE == 80
#ifndef NPY_FLOAT80
#define NPY_FLOAT80 NPY_DOUBLE
#define NPY_COMPLEX160 NPY_CDOUBLE
typedef double npy_float80;
typedef npy_cdouble npy_complex160;
#  define PyFloat80ScalarObject PyDoubleScalarObject
#  define PyComplex160ScalarObject PyCDoubleScalarObject
#  define PyFloat80ArrType_Type PyDoubleArrType_Type
#  define PyComplex160ArrType_Type PyCDoubleArrType_Type
#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT
#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT
#endif
#elif NPY_BITSOF_DOUBLE == 96
#ifndef NPY_FLOAT96
#define NPY_FLOAT96 NPY_DOUBLE
#define NPY_COMPLEX192 NPY_CDOUBLE
typedef double npy_float96;
typedef npy_cdouble npy_complex192;
#  define PyFloat96ScalarObject PyDoubleScalarObject
#  define PyComplex192ScalarObject PyCDoubleScalarObject
#  define PyFloat96ArrType_Type PyDoubleArrType_Type
#  define PyComplex192ArrType_Type PyCDoubleArrType_Type
#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT
#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT
#endif
#elif NPY_BITSOF_DOUBLE == 128
#ifndef NPY_FLOAT128
#define NPY_FLOAT128 NPY_DOUBLE
#define NPY_COMPLEX256 NPY_CDOUBLE
typedef double npy_float128;
typedef npy_cdouble npy_complex256;
#  define PyFloat128ScalarObject PyDoubleScalarObject
#  define PyComplex256ScalarObject PyCDoubleScalarObject
#  define PyFloat128ArrType_Type PyDoubleArrType_Type
#  define PyComplex256ArrType_Type PyCDoubleArrType_Type
#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT
#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT
#endif
#endif

#if NPY_BITSOF_FLOAT == 32
#ifndef NPY_FLOAT32
#define NPY_FLOAT32 NPY_FLOAT
#define NPY_COMPLEX64 NPY_CFLOAT
typedef float npy_float32;
typedef npy_cfloat npy_complex64;
#  define PyFloat32ScalarObject PyFloatScalarObject
#  define PyComplex64ScalarObject PyCFloatScalarObject
#  define PyFloat32ArrType_Type PyFloatArrType_Type
#  define PyComplex64ArrType_Type PyCFloatArrType_Type
#define NPY_FLOAT32_FMT NPY_FLOAT_FMT
#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT
#endif
#elif NPY_BITSOF_FLOAT == 64
#ifndef NPY_FLOAT64
#define NPY_FLOAT64 NPY_FLOAT
#define NPY_COMPLEX128 NPY_CFLOAT
typedef float npy_float64;
typedef npy_cfloat npy_complex128;
#  define PyFloat64ScalarObject PyFloatScalarObject
#  define PyComplex128ScalarObject PyCFloatScalarObject
#  define PyFloat64ArrType_Type PyFloatArrType_Type
#  define PyComplex128ArrType_Type PyCFloatArrType_Type
#define NPY_FLOAT64_FMT NPY_FLOAT_FMT
#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT
#endif
#elif NPY_BITSOF_FLOAT == 80
#ifndef NPY_FLOAT80
#define NPY_FLOAT80 NPY_FLOAT
#define NPY_COMPLEX160 NPY_CFLOAT
typedef float npy_float80;
typedef npy_cfloat npy_complex160;
#  define PyFloat80ScalarObject PyFloatScalarObject
#  define PyComplex160ScalarObject PyCFloatScalarObject
#  define PyFloat80ArrType_Type PyFloatArrType_Type
#  define PyComplex160ArrType_Type PyCFloatArrType_Type
#define NPY_FLOAT80_FMT NPY_FLOAT_FMT
#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT
#endif
#elif NPY_BITSOF_FLOAT == 96
#ifndef NPY_FLOAT96
#define NPY_FLOAT96 NPY_FLOAT
#define NPY_COMPLEX192 NPY_CFLOAT
typedef float npy_float96;
typedef npy_cfloat npy_complex192;
#  define PyFloat96ScalarObject PyFloatScalarObject
#  define PyComplex192ScalarObject PyCFloatScalarObject
#  define PyFloat96ArrType_Type PyFloatArrType_Type
#  define PyComplex192ArrType_Type PyCFloatArrType_Type
#define NPY_FLOAT96_FMT NPY_FLOAT_FMT
#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT
#endif
#elif NPY_BITSOF_FLOAT == 128
#ifndef NPY_FLOAT128
#define NPY_FLOAT128 NPY_FLOAT
#define NPY_COMPLEX256 NPY_CFLOAT
typedef float npy_float128;
typedef npy_cfloat npy_complex256;
#  define PyFloat128ScalarObject PyFloatScalarObject
#  define PyComplex256ScalarObject PyCFloatScalarObject
#  define PyFloat128ArrType_Type PyFloatArrType_Type
#  define PyComplex256ArrType_Type PyCFloatArrType_Type
#define NPY_FLOAT128_FMT NPY_FLOAT_FMT
#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT
#endif
#endif

/* half/float16 isn't a floating-point type in C */
#define NPY_FLOAT16 NPY_HALF
typedef npy_uint16 npy_half;
typedef npy_half npy_float16;

#if NPY_BITSOF_LONGDOUBLE == 32
#ifndef NPY_FLOAT32
#define NPY_FLOAT32 NPY_LONGDOUBLE
#define NPY_COMPLEX64 NPY_CLONGDOUBLE
typedef npy_longdouble npy_float32;
typedef npy_clongdouble npy_complex64;
#  define PyFloat32ScalarObject PyLongDoubleScalarObject
#  define PyComplex64ScalarObject PyCLongDoubleScalarObject
#  define PyFloat32ArrType_Type PyLongDoubleArrType_Type
#  define PyComplex64ArrType_Type PyCLongDoubleArrType_Type
#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT
#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT
#endif
#elif NPY_BITSOF_LONGDOUBLE == 64
#ifndef NPY_FLOAT64
#define NPY_FLOAT64 NPY_LONGDOUBLE
#define NPY_COMPLEX128 NPY_CLONGDOUBLE
typedef npy_longdouble npy_float64;
typedef npy_clongdouble npy_complex128;
#  define PyFloat64ScalarObject PyLongDoubleScalarObject
#  define PyComplex128ScalarObject PyCLongDoubleScalarObject
#  define PyFloat64ArrType_Type PyLongDoubleArrType_Type
#  define PyComplex128ArrType_Type PyCLongDoubleArrType_Type
#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT
#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT
#endif
#elif NPY_BITSOF_LONGDOUBLE == 80
#ifndef NPY_FLOAT80
#define NPY_FLOAT80 NPY_LONGDOUBLE
#define NPY_COMPLEX160 NPY_CLONGDOUBLE
typedef npy_longdouble npy_float80;
typedef npy_clongdouble npy_complex160;
#  define PyFloat80ScalarObject PyLongDoubleScalarObject
#  define PyComplex160ScalarObject PyCLongDoubleScalarObject
#  define PyFloat80ArrType_Type PyLongDoubleArrType_Type
#  define PyComplex160ArrType_Type PyCLongDoubleArrType_Type
#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT
#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT
#endif
#elif NPY_BITSOF_LONGDOUBLE == 96
#ifndef NPY_FLOAT96
#define NPY_FLOAT96 NPY_LONGDOUBLE
#define NPY_COMPLEX192 NPY_CLONGDOUBLE
typedef npy_longdouble npy_float96;
typedef npy_clongdouble npy_complex192;
#  define PyFloat96ScalarObject PyLongDoubleScalarObject
#  define PyComplex192ScalarObject PyCLongDoubleScalarObject
#  define PyFloat96ArrType_Type PyLongDoubleArrType_Type
#  define PyComplex192ArrType_Type PyCLongDoubleArrType_Type
#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT
#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT
#endif
#elif NPY_BITSOF_LONGDOUBLE == 128
#ifndef NPY_FLOAT128
#define NPY_FLOAT128 NPY_LONGDOUBLE
#define NPY_COMPLEX256 NPY_CLONGDOUBLE
typedef npy_longdouble npy_float128;
typedef npy_clongdouble npy_complex256;
#  define PyFloat128ScalarObject PyLongDoubleScalarObject
#  define PyComplex256ScalarObject PyCLongDoubleScalarObject
#  define PyFloat128ArrType_Type PyLongDoubleArrType_Type
#  define PyComplex256ArrType_Type PyCLongDoubleArrType_Type
#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT
#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT
#endif
#elif NPY_BITSOF_LONGDOUBLE == 256
#define NPY_FLOAT256 NPY_LONGDOUBLE
#define NPY_COMPLEX512 NPY_CLONGDOUBLE
typedef npy_longdouble npy_float256;
typedef npy_clongdouble npy_complex512;
#  define PyFloat256ScalarObject PyLongDoubleScalarObject
#  define PyComplex512ScalarObject PyCLongDoubleScalarObject
#  define PyFloat256ArrType_Type PyLongDoubleArrType_Type
#  define PyComplex512ArrType_Type PyCLongDoubleArrType_Type
#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT
#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT
#endif

/* datetime typedefs */
typedef npy_int64 npy_timedelta;
typedef npy_int64 npy_datetime;
#define NPY_DATETIME_FMT NPY_INT64_FMT
#define NPY_TIMEDELTA_FMT NPY_INT64_FMT
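
Both typedefs are plain 64-bit integers, so they format through the matching macro; format_timestamp is an illustrative helper:

static void
format_timestamp(npy_datetime t, char *buf, size_t buflen)
{
    PyOS_snprintf(buf, buflen, "t = %" NPY_DATETIME_FMT, t);
}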

/* End of typedefs for numarray style bit-width names */

#endif
@ -1,109 +0,0 @@
/*
 * This sets (target) CPU-specific macros:
 * - Possible values:
 *       NPY_CPU_X86
 *       NPY_CPU_AMD64
 *       NPY_CPU_PPC
 *       NPY_CPU_PPC64
 *       NPY_CPU_SPARC
 *       NPY_CPU_S390
 *       NPY_CPU_IA64
 *       NPY_CPU_HPPA
 *       NPY_CPU_ALPHA
 *       NPY_CPU_ARMEL
 *       NPY_CPU_ARMEB
 *       NPY_CPU_SH_LE
 *       NPY_CPU_SH_BE
 */
#ifndef _NPY_CPUARCH_H_
#define _NPY_CPUARCH_H_

#include "numpyconfig.h"

#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
    /*
     * __i386__ is defined by gcc and Intel compiler on Linux,
     * _M_IX86 by VS compiler,
     * i386 by Sun compilers on opensolaris at least
     */
    #define NPY_CPU_X86
#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
    /*
     * both __x86_64__ and __amd64__ are defined by gcc
     * __x86_64 defined by sun compiler on opensolaris at least
     * _M_AMD64 defined by MS compiler
     */
    #define NPY_CPU_AMD64
#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
    /*
     * __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
     * but can't find it at the moment
     * _ARCH_PPC is used by at least gcc on AIX
     */
    #define NPY_CPU_PPC
#elif defined(__ppc64__)
    #define NPY_CPU_PPC64
#elif defined(__sparc__) || defined(__sparc)
    /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
    #define NPY_CPU_SPARC
#elif defined(__s390__)
    #define NPY_CPU_S390
#elif defined(__ia64)
    #define NPY_CPU_IA64
#elif defined(__hppa)
    #define NPY_CPU_HPPA
#elif defined(__alpha__)
    #define NPY_CPU_ALPHA
#elif defined(__arm__) && defined(__ARMEL__)
    #define NPY_CPU_ARMEL
#elif defined(__arm__) && defined(__ARMEB__)
    #define NPY_CPU_ARMEB
#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
    #define NPY_CPU_SH_LE
#elif defined(__sh__) && defined(__BIG_ENDIAN__)
    #define NPY_CPU_SH_BE
#elif defined(__MIPSEL__)
    #define NPY_CPU_MIPSEL
#elif defined(__MIPSEB__)
    #define NPY_CPU_MIPSEB
#elif defined(__aarch64__)
    #define NPY_CPU_AARCH64
#else
    #error Unknown CPU, please report this to numpy maintainers with \
    information about your platform (OS, CPU and compiler)
#endif

/*
   This "white-lists" the architectures that we know don't require
   pointer alignment. We white-list, since the memcpy version will
   work everywhere, whereas assignment will only work where pointer
   dereferencing doesn't require alignment.

   TODO: There may be more architectures we can white list.
*/
#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)
    #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src)))
#else
    #if NPY_SIZEOF_PY_INTPTR_T == 4
        #define NPY_COPY_PYOBJECT_PTR(dst, src) \
            ((char*)(dst))[0] = ((char*)(src))[0]; \
            ((char*)(dst))[1] = ((char*)(src))[1]; \
            ((char*)(dst))[2] = ((char*)(src))[2]; \
            ((char*)(dst))[3] = ((char*)(src))[3];
    #elif NPY_SIZEOF_PY_INTPTR_T == 8
        #define NPY_COPY_PYOBJECT_PTR(dst, src) \
            ((char*)(dst))[0] = ((char*)(src))[0]; \
            ((char*)(dst))[1] = ((char*)(src))[1]; \
            ((char*)(dst))[2] = ((char*)(src))[2]; \
            ((char*)(dst))[3] = ((char*)(src))[3]; \
            ((char*)(dst))[4] = ((char*)(src))[4]; \
            ((char*)(dst))[5] = ((char*)(src))[5]; \
            ((char*)(dst))[6] = ((char*)(src))[6]; \
            ((char*)(dst))[7] = ((char*)(src))[7];
    #else
        #error Unknown architecture, please report this to numpy maintainers with \
        information about your platform (OS, CPU and compiler)
    #endif
#endif
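
A sketch of the call site this macro is designed for; move_object_item is illustrative:

static void
move_object_item(char *dst, char *src)
{
    /* byte-wise copy on strict-alignment CPUs, a single store on x86/amd64 */
    NPY_COPY_PYOBJECT_PTR(dst, src);
}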

#endif
@ -1,129 +0,0 @@
#ifndef _NPY_DEPRECATED_API_H
#define _NPY_DEPRECATED_API_H

#if defined(_WIN32)
#define _WARN___STR2__(x) #x
#define _WARN___STR1__(x) _WARN___STR2__(x)
#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it by " \
                "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
#elif defined(__GNUC__)
#warning "Using deprecated NumPy API, disable it by #defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
#endif
/* TODO: How to do this warning message for other compilers? */
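
Downstream code opts out of the warning (and of this whole header) by pinning the API version before the first NumPy include:

#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>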

/*
 * This header exists to collect all dangerous/deprecated NumPy API.
 *
 * This is an attempt to remove bad API, the proliferation of macros,
 * and namespace pollution currently produced by the NumPy headers.
 */

#if defined(NPY_NO_DEPRECATED_API)
#error Should never include npy_deprecated_api directly.
#endif

/* These array flags are deprecated as of NumPy 1.7 */
#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS

/*
 * The consistent NPY_ARRAY_* names which don't pollute the NPY_*
 * namespace were added in NumPy 1.7.
 *
 * These versions of the carray flags are deprecated, but
 * probably should only be removed after two releases instead of one.
 */
#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS
#define NPY_OWNDATA NPY_ARRAY_OWNDATA
#define NPY_FORCECAST NPY_ARRAY_FORCECAST
#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY
#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY
#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES
#define NPY_ALIGNED NPY_ARRAY_ALIGNED
#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED
#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE
#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY
#define NPY_BEHAVED NPY_ARRAY_BEHAVED
#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS
#define NPY_CARRAY NPY_ARRAY_CARRAY
#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO
#define NPY_FARRAY NPY_ARRAY_FARRAY
#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO
#define NPY_DEFAULT NPY_ARRAY_DEFAULT
#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY
#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY
#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY
#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY
#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY
#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY
#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL
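
Migration is a pure rename; both spellings request the same flags, as in this illustrative helper:

static PyObject *
as_input_array(PyObject *obj)
{
    /* deprecated spelling: PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_IN_ARRAY) */
    return PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
}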
|
||||
|
||||
/* This way of accessing the default type is deprecated as of NumPy 1.7 */
|
||||
#define PyArray_DEFAULT NPY_DEFAULT_TYPE
|
||||
|
||||
/* These DATETIME bits aren't used internally */
|
||||
#if PY_VERSION_HEX >= 0x03000000
|
||||
#define PyDataType_GetDatetimeMetaData(descr) \
|
||||
((descr->metadata == NULL) ? NULL : \
|
||||
((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \
|
||||
PyDict_GetItemString( \
|
||||
descr->metadata, NPY_METADATA_DTSTR), NULL))))
|
||||
#else
|
||||
#define PyDataType_GetDatetimeMetaData(descr) \
|
||||
((descr->metadata == NULL) ? NULL : \
|
||||
((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \
|
||||
PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR)))))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Deprecated as of NumPy 1.7, this kind of shortcut doesn't
|
||||
* belong in the public API.
|
||||
*/
|
||||
#define NPY_AO PyArrayObject
|
||||
|
||||
/*
|
||||
* Deprecated as of NumPy 1.7, an all-lowercase macro doesn't
|
||||
* belong in the public API.
|
||||
*/
|
||||
#define fortran fortran_
|
||||
|
||||
/*
|
||||
* Deprecated as of NumPy 1.7, as it is a namespace-polluting
|
||||
* macro.
|
||||
*/
|
||||
#define FORTRAN_IF PyArray_FORTRAN_IF
|
||||
|
||||
/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */
|
||||
#define NPY_METADATA_DTSTR "__timeunit__"
|
||||
|
||||
/*
|
||||
* Deprecated as of NumPy 1.7.
|
||||
* The reasoning:
|
||||
* - These are for datetime, but there's no datetime "namespace".
|
||||
* - They just turn NPY_STR_<x> into "<x>", which is just
|
||||
* making something simple be indirected.
|
||||
*/
|
||||
#define NPY_STR_Y "Y"
|
||||
#define NPY_STR_M "M"
|
||||
#define NPY_STR_W "W"
|
||||
#define NPY_STR_D "D"
|
||||
#define NPY_STR_h "h"
|
||||
#define NPY_STR_m "m"
|
||||
#define NPY_STR_s "s"
|
||||
#define NPY_STR_ms "ms"
|
||||
#define NPY_STR_us "us"
|
||||
#define NPY_STR_ns "ns"
|
||||
#define NPY_STR_ps "ps"
|
||||
#define NPY_STR_fs "fs"
|
||||
#define NPY_STR_as "as"
|
||||
|
||||
/*
|
||||
* The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be
|
||||
* removed in the next major release.
|
||||
*/
|
||||
#include "old_defines.h"
|
||||
|
||||
|
||||
#endif
|
|
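The warning machinery at the top of this header implies the corresponding opt-out on the user side: defining NPY_NO_DEPRECATED_API before the first NumPy include silences the pragma/#warning and hides the aliases collected here. A minimal sketch, assuming NumPy 1.7+ headers on the include path:

    /* Must come before the first NumPy include in the translation unit. */
    #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
    #include <numpy/arrayobject.h>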
@ -1,46 +0,0 @@
#ifndef _NPY_ENDIAN_H_
#define _NPY_ENDIAN_H_

/*
 * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
 * endian.h
 */

#ifdef NPY_HAVE_ENDIAN_H
    /* Use endian.h if available */
    #include <endian.h>

    #define NPY_BYTE_ORDER    __BYTE_ORDER
    #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
    #define NPY_BIG_ENDIAN    __BIG_ENDIAN
#else
    /* Set endianness info using target CPU */
    #include "npy_cpu.h"

    #define NPY_LITTLE_ENDIAN 1234
    #define NPY_BIG_ENDIAN    4321

    #if defined(NPY_CPU_X86) \
            || defined(NPY_CPU_AMD64) \
            || defined(NPY_CPU_IA64) \
            || defined(NPY_CPU_ALPHA) \
            || defined(NPY_CPU_ARMEL) \
            || defined(NPY_CPU_AARCH64) \
            || defined(NPY_CPU_SH_LE) \
            || defined(NPY_CPU_MIPSEL)
        #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
    #elif defined(NPY_CPU_PPC) \
            || defined(NPY_CPU_SPARC) \
            || defined(NPY_CPU_S390) \
            || defined(NPY_CPU_HPPA) \
            || defined(NPY_CPU_PPC64) \
            || defined(NPY_CPU_ARMEB) \
            || defined(NPY_CPU_SH_BE) \
            || defined(NPY_CPU_MIPSEB)
        #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
    #else
        #error Unknown CPU: can not set endianness
    #endif
#endif

#endif
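Because the endianness decision above happens entirely at preprocessing time, downstream code can branch on it with no runtime cost. A minimal sketch of how a consumer might use these macros (the helper macro name is illustrative):

    #include <numpy/npy_endian.h>

    /* Pick a byte-swapping strategy at compile time. */
    #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
    #define NEEDS_SWAP_FOR_BE 1
    #else
    #define NEEDS_SWAP_FOR_BE 0
    #endif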
@ -1,117 +0,0 @@

/* Signal handling:

   This header file defines macros that allow your code to handle
   interrupts received during processing. Interrupts that
   could reasonably be handled:

   SIGINT, SIGABRT, SIGALRM, SIGSEGV

   ****Warning***************

   Do not allow code that creates temporary memory or increases reference
   counts of Python objects to be interrupted unless you handle it
   differently.

   **************************

   The mechanism for handling interrupts is conceptually simple:

     - replace the signal handler with our own home-grown version
       and store the old one.
     - run the code to be interrupted -- if an interrupt occurs
       the handler should basically just cause a return to the
       calling function to finish work.
     - restore the old signal handler

   Of course, every piece of code that allows interrupts must account for
   returning via the interrupt and handle clean-up correctly. But,
   even still, the simple paradigm is complicated by at least three
   factors.

    1) platform portability (i.e. Microsoft says not to use longjmp
       to return from signal handling. They have a __try and __except
       extension to C instead but what about mingw?).

    2) how to handle threads: apparently whether signals are delivered to
       every thread of the process or the "invoking" thread is platform
       dependent. --- we don't handle threads for now.

    3) do we need to worry about re-entrance? For now, assume the
       code will not call back into itself.

   Ideas:

    1) Start by implementing an approach that works on platforms that
       can use setjmp and longjmp functionality and does nothing
       on other platforms.

    2) Ignore threads --- i.e. do not mix interrupt handling and threads

    3) Add a default signal_handler function to the C-API but have the rest
       use macros.


   Simple Interface:


   In your C-extension, around a block of code you want to be interruptible
   with a SIGINT:

   NPY_SIGINT_ON
   [code]
   NPY_SIGINT_OFF

   In order for this to work correctly, the
   [code] block must not allocate any memory or alter the reference count of any
   Python objects. In other words [code] must be interruptible so that continuation
   after NPY_SIGINT_OFF will only be "missing some computations".

   Interrupt handling does not work well with threads.

*/

/* Add signal handling macros
   Make the global variable and signal handler part of the C-API
*/

#ifndef NPY_INTERRUPT_H
#define NPY_INTERRUPT_H

#ifndef NPY_NO_SIGNAL

#include <setjmp.h>
#include <signal.h>

#ifndef sigsetjmp

#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1)
#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2)
#define NPY_SIGJMP_BUF jmp_buf

#else

#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2)
#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2)
#define NPY_SIGJMP_BUF sigjmp_buf

#endif

#define NPY_SIGINT_ON {                                                 \
        PyOS_sighandler_t _npy_sig_save;                                \
        _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler);    \
        if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \
                          1) == 0) {                                    \

#define NPY_SIGINT_OFF }                        \
        PyOS_setsig(SIGINT, _npy_sig_save);     \
        }

#else /* NPY_NO_SIGNAL */

#define NPY_SIGINT_ON
#define NPY_SIGINT_OFF

#endif /* HAVE_SIGSETJMP */

#endif /* NPY_INTERRUPT_H */
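The "Simple Interface" described above is easiest to see in context. A minimal sketch of an interruptible inner loop, assuming the NumPy C-API has been initialized via import_array() so the internal handler is available (function and buffer names are hypothetical; per the warning, the guarded block allocates nothing and touches no Python reference counts):

    #include <Python.h>
    #include <numpy/npy_interrupt.h>

    /* Sum a plain C buffer; Ctrl-C simply abandons the loop early,
       so the result is at worst "missing some computations". */
    static double sum_buffer(const double *buf, long n)
    {
        double acc = 0.0;
        long i;
        NPY_SIGINT_ON
        for (i = 0; i < n; i++) {
            acc += buf[i];
        }
        NPY_SIGINT_OFF
        return acc;
    }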
@ -1,438 +0,0 @@
#ifndef __NPY_MATH_C99_H_
#define __NPY_MATH_C99_H_

#include <math.h>
#ifdef __SUNPRO_CC
#include <sunmath.h>
#endif
#include <numpy/npy_common.h>

/*
 * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99
 * for INFINITY)
 *
 * XXX: I should test whether INFINITY and NAN are available on the platform
 */
NPY_INLINE static float __npy_inff(void)
{
    const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
    return __bint.__f;
}

NPY_INLINE static float __npy_nanf(void)
{
    const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
    return __bint.__f;
}

NPY_INLINE static float __npy_pzerof(void)
{
    const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
    return __bint.__f;
}

NPY_INLINE static float __npy_nzerof(void)
{
    const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
    return __bint.__f;
}

#define NPY_INFINITYF __npy_inff()
#define NPY_NANF __npy_nanf()
#define NPY_PZEROF __npy_pzerof()
#define NPY_NZEROF __npy_nzerof()

#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
#define NPY_NAN ((npy_double)NPY_NANF)
#define NPY_PZERO ((npy_double)NPY_PZEROF)
#define NPY_NZERO ((npy_double)NPY_NZEROF)

#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
#define NPY_NANL ((npy_longdouble)NPY_NANF)
#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)

/*
 * Useful constants
 */
#define NPY_E 2.718281828459045235360287471352662498 /* e */
#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */

#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */

#define NPY_El 2.718281828459045235360287471352662498L /* e */
#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */

/*
 * C99 double math funcs
 */
double npy_sin(double x);
double npy_cos(double x);
double npy_tan(double x);
double npy_sinh(double x);
double npy_cosh(double x);
double npy_tanh(double x);

double npy_asin(double x);
double npy_acos(double x);
double npy_atan(double x);
double npy_aexp(double x);
double npy_alog(double x);
double npy_asqrt(double x);
double npy_afabs(double x);

double npy_log(double x);
double npy_log10(double x);
double npy_exp(double x);
double npy_sqrt(double x);

double npy_fabs(double x);
double npy_ceil(double x);
double npy_fmod(double x, double y);
double npy_floor(double x);

double npy_expm1(double x);
double npy_log1p(double x);
double npy_hypot(double x, double y);
double npy_acosh(double x);
double npy_asinh(double x);
double npy_atanh(double x);
double npy_rint(double x);
double npy_trunc(double x);
double npy_exp2(double x);
double npy_log2(double x);

double npy_atan2(double x, double y);
double npy_pow(double x, double y);
double npy_modf(double x, double* y);

double npy_copysign(double x, double y);
double npy_nextafter(double x, double y);
double npy_spacing(double x);

/*
 * IEEE 754 fpu handling. Those are guaranteed to be macros
 */
#ifndef NPY_HAVE_DECL_ISNAN
    #define npy_isnan(x) ((x) != (x))
#else
    #ifdef _MSC_VER
        #define npy_isnan(x) _isnan((x))
    #else
        #define npy_isnan(x) isnan((x))
    #endif
#endif

#ifndef NPY_HAVE_DECL_ISFINITE
    #ifdef _MSC_VER
        #define npy_isfinite(x) _finite((x))
    #else
        #define npy_isfinite(x) !npy_isnan((x) + (-x))
    #endif
#else
    #define npy_isfinite(x) isfinite((x))
#endif

#ifndef NPY_HAVE_DECL_ISINF
    #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x))
#else
    #ifdef _MSC_VER
        #define npy_isinf(x) (!_finite((x)) && !_isnan((x)))
    #else
        #define npy_isinf(x) isinf((x))
    #endif
#endif

#ifndef NPY_HAVE_DECL_SIGNBIT
    int _npy_signbit_f(float x);
    int _npy_signbit_d(double x);
    int _npy_signbit_ld(long double x);
    #define npy_signbit(x) \
        (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \
         : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \
         : _npy_signbit_f (x))
#else
    #define npy_signbit(x) signbit((x))
#endif

/*
 * float C99 math functions
 */

float npy_sinf(float x);
float npy_cosf(float x);
float npy_tanf(float x);
float npy_sinhf(float x);
float npy_coshf(float x);
float npy_tanhf(float x);
float npy_fabsf(float x);
float npy_floorf(float x);
float npy_ceilf(float x);
float npy_rintf(float x);
float npy_truncf(float x);
float npy_sqrtf(float x);
float npy_log10f(float x);
float npy_logf(float x);
float npy_expf(float x);
float npy_expm1f(float x);
float npy_asinf(float x);
float npy_acosf(float x);
float npy_atanf(float x);
float npy_asinhf(float x);
float npy_acoshf(float x);
float npy_atanhf(float x);
float npy_log1pf(float x);
float npy_exp2f(float x);
float npy_log2f(float x);

float npy_atan2f(float x, float y);
float npy_hypotf(float x, float y);
float npy_powf(float x, float y);
float npy_fmodf(float x, float y);

float npy_modff(float x, float* y);

float npy_copysignf(float x, float y);
float npy_nextafterf(float x, float y);
float npy_spacingf(float x);

/*
 * long double C99 math functions
 */

npy_longdouble npy_sinl(npy_longdouble x);
npy_longdouble npy_cosl(npy_longdouble x);
npy_longdouble npy_tanl(npy_longdouble x);
npy_longdouble npy_sinhl(npy_longdouble x);
npy_longdouble npy_coshl(npy_longdouble x);
npy_longdouble npy_tanhl(npy_longdouble x);
npy_longdouble npy_fabsl(npy_longdouble x);
npy_longdouble npy_floorl(npy_longdouble x);
npy_longdouble npy_ceill(npy_longdouble x);
npy_longdouble npy_rintl(npy_longdouble x);
npy_longdouble npy_truncl(npy_longdouble x);
npy_longdouble npy_sqrtl(npy_longdouble x);
npy_longdouble npy_log10l(npy_longdouble x);
npy_longdouble npy_logl(npy_longdouble x);
npy_longdouble npy_expl(npy_longdouble x);
npy_longdouble npy_expm1l(npy_longdouble x);
npy_longdouble npy_asinl(npy_longdouble x);
npy_longdouble npy_acosl(npy_longdouble x);
npy_longdouble npy_atanl(npy_longdouble x);
npy_longdouble npy_asinhl(npy_longdouble x);
npy_longdouble npy_acoshl(npy_longdouble x);
npy_longdouble npy_atanhl(npy_longdouble x);
npy_longdouble npy_log1pl(npy_longdouble x);
npy_longdouble npy_exp2l(npy_longdouble x);
npy_longdouble npy_log2l(npy_longdouble x);

npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y);

npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);

npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_spacingl(npy_longdouble x);

/*
 * Non standard functions
 */
double npy_deg2rad(double x);
double npy_rad2deg(double x);
double npy_logaddexp(double x, double y);
double npy_logaddexp2(double x, double y);

float npy_deg2radf(float x);
float npy_rad2degf(float x);
float npy_logaddexpf(float x, float y);
float npy_logaddexp2f(float x, float y);

npy_longdouble npy_deg2radl(npy_longdouble x);
npy_longdouble npy_rad2degl(npy_longdouble x);
npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);

#define npy_degrees npy_rad2deg
#define npy_degreesf npy_rad2degf
#define npy_degreesl npy_rad2degl

#define npy_radians npy_deg2rad
#define npy_radiansf npy_deg2radf
#define npy_radiansl npy_deg2radl

/*
 * Complex declarations
 */

/*
 * C99 specifies that complex numbers have the same representation as
 * an array of two elements, where the first element is the real part
 * and the second element is the imaginary part.
 */
#define __NPY_CPACK_IMP(x, y, type, ctype) \
    union {                                \
        ctype z;                           \
        type a[2];                         \
    } z1;                                  \
                                           \
    z1.a[0] = (x);                         \
    z1.a[1] = (y);                         \
                                           \
    return z1.z;

static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
{
    __NPY_CPACK_IMP(x, y, double, npy_cdouble);
}

static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
{
    __NPY_CPACK_IMP(x, y, float, npy_cfloat);
}

static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
{
    __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
}
#undef __NPY_CPACK_IMP

/*
 * Same remark as above, but in the other direction: extract first/second
 * member of complex number, assuming a C99-compatible representation
 *
 * Those are defined as static inline, such that a reasonable compiler would
 * most likely compile this to one or two instructions (on CISC at least)
 */
#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \
    union {                                       \
        ctype z;                                  \
        type a[2];                                \
    } __z_repr;                                   \
    __z_repr.z = z;                               \
                                                  \
    return __z_repr.a[index];

static NPY_INLINE double npy_creal(npy_cdouble z)
{
    __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
}

static NPY_INLINE double npy_cimag(npy_cdouble z)
{
    __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
}

static NPY_INLINE float npy_crealf(npy_cfloat z)
{
    __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
}

static NPY_INLINE float npy_cimagf(npy_cfloat z)
{
    __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
}

static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
{
    __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
}

static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
{
    __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
}
#undef __NPY_CEXTRACT_IMP

/*
 * Double precision complex functions
 */
double npy_cabs(npy_cdouble z);
double npy_carg(npy_cdouble z);

npy_cdouble npy_cexp(npy_cdouble z);
npy_cdouble npy_clog(npy_cdouble z);
npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);

npy_cdouble npy_csqrt(npy_cdouble z);

npy_cdouble npy_ccos(npy_cdouble z);
npy_cdouble npy_csin(npy_cdouble z);

/*
 * Single precision complex functions
 */
float npy_cabsf(npy_cfloat z);
float npy_cargf(npy_cfloat z);

npy_cfloat npy_cexpf(npy_cfloat z);
npy_cfloat npy_clogf(npy_cfloat z);
npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);

npy_cfloat npy_csqrtf(npy_cfloat z);

npy_cfloat npy_ccosf(npy_cfloat z);
npy_cfloat npy_csinf(npy_cfloat z);

/*
 * Extended precision complex functions
 */
npy_longdouble npy_cabsl(npy_clongdouble z);
npy_longdouble npy_cargl(npy_clongdouble z);

npy_clongdouble npy_cexpl(npy_clongdouble z);
npy_clongdouble npy_clogl(npy_clongdouble z);
npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);

npy_clongdouble npy_csqrtl(npy_clongdouble z);

npy_clongdouble npy_ccosl(npy_clongdouble z);
npy_clongdouble npy_csinl(npy_clongdouble z);

/*
 * Functions that set the floating point error
 * status word.
 */

void npy_set_floatstatus_divbyzero(void);
void npy_set_floatstatus_overflow(void);
void npy_set_floatstatus_underflow(void);
void npy_set_floatstatus_invalid(void);

#endif
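A short sketch of how extension code typically consumes this header: the npy_* wrappers and constants behave identically across platforms, so no feature tests are needed at the call site, and npy_cpack gives a C99-layout complex value. (The function name is hypothetical.)

    #include <numpy/npy_math.h>

    /* Portable special-value handling for a ratio, returned as complex. */
    static npy_cdouble complex_ratio(double num, double den)
    {
        double r;
        if (npy_isnan(num) || npy_isnan(den)) {
            r = NPY_NAN;
        } else if (den == 0.0) {
            r = npy_copysign(NPY_INFINITY, num);
        } else {
            r = num / den;
        }
        /* Pack (real, imag) into the two-element C99 representation. */
        return npy_cpack(r, 0.0);
    }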
@ -1,19 +0,0 @@
/*
 * This include file is provided for inclusion in Cython *.pyd files where
 * one would like to define the NPY_NO_DEPRECATED_API macro. It can be
 * included by
 *
 * cdef extern from "npy_no_deprecated_api.h": pass
 *
 */
#ifndef NPY_NO_DEPRECATED_API

/* put this check here since there may be multiple includes in C extensions. */
#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \
        defined(OLD_DEFINES_H)
#error "npy_no_deprecated_api.h" must be first among numpy includes.
#else
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#endif

#endif
@ -1,30 +0,0 @@
#ifndef _NPY_OS_H_
#define _NPY_OS_H_

#if defined(linux) || defined(__linux) || defined(__linux__)
    #define NPY_OS_LINUX
#elif defined(__FreeBSD__) || defined(__NetBSD__) || \
            defined(__OpenBSD__) || defined(__DragonFly__)
    #define NPY_OS_BSD
    #ifdef __FreeBSD__
        #define NPY_OS_FREEBSD
    #elif defined(__NetBSD__)
        #define NPY_OS_NETBSD
    #elif defined(__OpenBSD__)
        #define NPY_OS_OPENBSD
    #elif defined(__DragonFly__)
        #define NPY_OS_DRAGONFLY
    #endif
#elif defined(sun) || defined(__sun)
    #define NPY_OS_SOLARIS
#elif defined(__CYGWIN__)
    #define NPY_OS_CYGWIN
#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
    #define NPY_OS_WIN32
#elif defined(__APPLE__)
    #define NPY_OS_DARWIN
#else
    #define NPY_OS_UNKNOWN
#endif

#endif
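One NPY_OS_* macro (plus a BSD-specific refinement) ends up defined by the cascade above, so consumers can dispatch at compile time. A minimal sketch (the constant is illustrative):

    #include <numpy/npy_os.h>

    /* Compile-time OS dispatch. */
    #if defined(NPY_OS_WIN32)
    static const char os_name[] = "windows";
    #elif defined(NPY_OS_DARWIN)
    static const char os_name[] = "darwin";
    #else
    static const char os_name[] = "other";
    #endif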
@ -1,33 +0,0 @@
#ifndef _NPY_NUMPYCONFIG_H_
#define _NPY_NUMPYCONFIG_H_

#include "_numpyconfig.h"

/*
 * On Mac OS X, because there is only one configuration stage for all the archs
 * in universal builds, any macro which depends on the arch needs to be
 * hardcoded
 */
#ifdef __APPLE__
    #undef NPY_SIZEOF_LONG
    #undef NPY_SIZEOF_PY_INTPTR_T

    #ifdef __LP64__
        #define NPY_SIZEOF_LONG 8
        #define NPY_SIZEOF_PY_INTPTR_T 8
    #else
        #define NPY_SIZEOF_LONG 4
        #define NPY_SIZEOF_PY_INTPTR_T 4
    #endif
#endif

/**
 * To help with the NPY_NO_DEPRECATED_API macro, we include API version
 * numbers for specific versions of NumPy. To exclude all API that was
 * deprecated as of 1.7, add the following before #including any NumPy
 * headers:
 *   #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
 */
#define NPY_1_7_API_VERSION 0x00000007

#endif
@ -1,187 +0,0 @@
/* This header is deprecated as of NumPy 1.7 */
#ifndef OLD_DEFINES_H
#define OLD_DEFINES_H

#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION
#error The header "old_defines.h" is deprecated as of NumPy 1.7.
#endif

#define NDARRAY_VERSION NPY_VERSION

#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE
#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE
#define PyArray_BUFSIZE NPY_BUFSIZE

#define PyArray_PRIORITY NPY_PRIORITY
#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY
#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE

#define NPY_MAX PyArray_MAX
#define NPY_MIN PyArray_MIN

#define PyArray_TYPES NPY_TYPES
#define PyArray_BOOL NPY_BOOL
#define PyArray_BYTE NPY_BYTE
#define PyArray_UBYTE NPY_UBYTE
#define PyArray_SHORT NPY_SHORT
#define PyArray_USHORT NPY_USHORT
#define PyArray_INT NPY_INT
#define PyArray_UINT NPY_UINT
#define PyArray_LONG NPY_LONG
#define PyArray_ULONG NPY_ULONG
#define PyArray_LONGLONG NPY_LONGLONG
#define PyArray_ULONGLONG NPY_ULONGLONG
#define PyArray_HALF NPY_HALF
#define PyArray_FLOAT NPY_FLOAT
#define PyArray_DOUBLE NPY_DOUBLE
#define PyArray_LONGDOUBLE NPY_LONGDOUBLE
#define PyArray_CFLOAT NPY_CFLOAT
#define PyArray_CDOUBLE NPY_CDOUBLE
#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE
#define PyArray_OBJECT NPY_OBJECT
#define PyArray_STRING NPY_STRING
#define PyArray_UNICODE NPY_UNICODE
#define PyArray_VOID NPY_VOID
#define PyArray_DATETIME NPY_DATETIME
#define PyArray_TIMEDELTA NPY_TIMEDELTA
#define PyArray_NTYPES NPY_NTYPES
#define PyArray_NOTYPE NPY_NOTYPE
#define PyArray_CHAR NPY_CHAR
#define PyArray_USERDEF NPY_USERDEF
#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES

#define PyArray_INTP NPY_INTP
#define PyArray_UINTP NPY_UINTP

#define PyArray_INT8 NPY_INT8
#define PyArray_UINT8 NPY_UINT8
#define PyArray_INT16 NPY_INT16
#define PyArray_UINT16 NPY_UINT16
#define PyArray_INT32 NPY_INT32
#define PyArray_UINT32 NPY_UINT32

#ifdef NPY_INT64
#define PyArray_INT64 NPY_INT64
#define PyArray_UINT64 NPY_UINT64
#endif

#ifdef NPY_INT128
#define PyArray_INT128 NPY_INT128
#define PyArray_UINT128 NPY_UINT128
#endif

#ifdef NPY_FLOAT16
#define PyArray_FLOAT16 NPY_FLOAT16
#define PyArray_COMPLEX32 NPY_COMPLEX32
#endif

#ifdef NPY_FLOAT80
#define PyArray_FLOAT80 NPY_FLOAT80
#define PyArray_COMPLEX160 NPY_COMPLEX160
#endif

#ifdef NPY_FLOAT96
#define PyArray_FLOAT96 NPY_FLOAT96
#define PyArray_COMPLEX192 NPY_COMPLEX192
#endif

#ifdef NPY_FLOAT128
#define PyArray_FLOAT128 NPY_FLOAT128
#define PyArray_COMPLEX256 NPY_COMPLEX256
#endif

#define PyArray_FLOAT32 NPY_FLOAT32
#define PyArray_COMPLEX64 NPY_COMPLEX64
#define PyArray_FLOAT64 NPY_FLOAT64
#define PyArray_COMPLEX128 NPY_COMPLEX128


#define PyArray_TYPECHAR NPY_TYPECHAR
#define PyArray_BOOLLTR NPY_BOOLLTR
#define PyArray_BYTELTR NPY_BYTELTR
#define PyArray_UBYTELTR NPY_UBYTELTR
#define PyArray_SHORTLTR NPY_SHORTLTR
#define PyArray_USHORTLTR NPY_USHORTLTR
#define PyArray_INTLTR NPY_INTLTR
#define PyArray_UINTLTR NPY_UINTLTR
#define PyArray_LONGLTR NPY_LONGLTR
#define PyArray_ULONGLTR NPY_ULONGLTR
#define PyArray_LONGLONGLTR NPY_LONGLONGLTR
#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR
#define PyArray_HALFLTR NPY_HALFLTR
#define PyArray_FLOATLTR NPY_FLOATLTR
#define PyArray_DOUBLELTR NPY_DOUBLELTR
#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR
#define PyArray_CFLOATLTR NPY_CFLOATLTR
#define PyArray_CDOUBLELTR NPY_CDOUBLELTR
#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR
#define PyArray_OBJECTLTR NPY_OBJECTLTR
#define PyArray_STRINGLTR NPY_STRINGLTR
#define PyArray_STRINGLTR2 NPY_STRINGLTR2
#define PyArray_UNICODELTR NPY_UNICODELTR
#define PyArray_VOIDLTR NPY_VOIDLTR
#define PyArray_DATETIMELTR NPY_DATETIMELTR
#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR
#define PyArray_CHARLTR NPY_CHARLTR
#define PyArray_INTPLTR NPY_INTPLTR
#define PyArray_UINTPLTR NPY_UINTPLTR
#define PyArray_GENBOOLLTR NPY_GENBOOLLTR
#define PyArray_SIGNEDLTR NPY_SIGNEDLTR
#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR
#define PyArray_FLOATINGLTR NPY_FLOATINGLTR
#define PyArray_COMPLEXLTR NPY_COMPLEXLTR

#define PyArray_QUICKSORT NPY_QUICKSORT
#define PyArray_HEAPSORT NPY_HEAPSORT
#define PyArray_MERGESORT NPY_MERGESORT
#define PyArray_SORTKIND NPY_SORTKIND
#define PyArray_NSORTS NPY_NSORTS

#define PyArray_NOSCALAR NPY_NOSCALAR
#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR
#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR
#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR
#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR
#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR
#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR
#define PyArray_SCALARKIND NPY_SCALARKIND
#define PyArray_NSCALARKINDS NPY_NSCALARKINDS

#define PyArray_ANYORDER NPY_ANYORDER
#define PyArray_CORDER NPY_CORDER
#define PyArray_FORTRANORDER NPY_FORTRANORDER
#define PyArray_ORDER NPY_ORDER

#define PyDescr_ISBOOL PyDataType_ISBOOL
#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED
#define PyDescr_ISSIGNED PyDataType_ISSIGNED
#define PyDescr_ISINTEGER PyDataType_ISINTEGER
#define PyDescr_ISFLOAT PyDataType_ISFLOAT
#define PyDescr_ISNUMBER PyDataType_ISNUMBER
#define PyDescr_ISSTRING PyDataType_ISSTRING
#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX
#define PyDescr_ISPYTHON PyDataType_ISPYTHON
#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE
#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF
#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED
#define PyDescr_ISOBJECT PyDataType_ISOBJECT
#define PyDescr_HASFIELDS PyDataType_HASFIELDS

#define PyArray_LITTLE NPY_LITTLE
#define PyArray_BIG NPY_BIG
#define PyArray_NATIVE NPY_NATIVE
#define PyArray_SWAP NPY_SWAP
#define PyArray_IGNORE NPY_IGNORE

#define PyArray_NATBYTE NPY_NATBYTE
#define PyArray_OPPBYTE NPY_OPPBYTE

#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE

#define PyArray_USE_PYMEM NPY_USE_PYMEM

#define PyArray_RemoveLargest PyArray_RemoveSmallest

#define PyArray_UCS4 npy_ucs4

#endif
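Since every entry above is a straight alias, porting off this header is mechanical. A sketch of the same dtype test in both spellings (the function name is hypothetical):

    #include <numpy/arrayobject.h>

    static int is_double_array(PyArrayObject *arr)
    {
        /* pre-1.7 spelling: PyArray_TYPE(arr) == PyArray_DOUBLE */
        /* 1.7+ spelling:    PyArray_TYPE(arr) == NPY_DOUBLE     */
        return PyArray_TYPE(arr) == NPY_DOUBLE;
    }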
@ -1,23 +0,0 @@
#include "arrayobject.h"

#ifndef REFCOUNT
#  define REFCOUNT NPY_REFCOUNT
#  define MAX_ELSIZE 16
#endif

#define PyArray_UNSIGNED_TYPES
#define PyArray_SBYTE NPY_BYTE
#define PyArray_CopyArray PyArray_CopyInto
#define _PyArray_multiply_list PyArray_MultiplyIntList
#define PyArray_ISSPACESAVER(m) NPY_FALSE
#define PyScalarArray_Check PyArray_CheckScalar

#define CONTIGUOUS NPY_CONTIGUOUS
#define OWN_DIMENSIONS 0
#define OWN_STRIDES 0
#define OWN_DATA NPY_OWNDATA
#define SAVESPACE 0
#define SAVESPACEBIT 0

#undef import_array
#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } }
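The redefined import_array() above is meant to be called once during module initialization; a sketch of the classic Python 2-era pattern this compatibility header targets (the module name and method table are hypothetical):

    #include <Python.h>
    #include <numpy/oldnumeric.h>

    static PyMethodDef ExampleMethods[] = {
        {NULL, NULL, 0, NULL}
    };

    PyMODINIT_FUNC initexample(void)
    {
        (void) Py_InitModule("example", ExampleMethods);
        import_array();  /* prints and converts failure to ImportError */
    }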
@ -1,312 +0,0 @@

=================
Numpy Ufunc C-API
=================
::

  PyObject *
  PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void
                          **data, char *types, int ntypes, int nin, int
                          nout, int identity, char *name, char *doc, int
                          check_return)


::

  int
  PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int
                              usertype, PyUFuncGenericFunction
                              function, int *arg_types, void *data)


::

  int
  PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject
                          *kwds, PyArrayObject **op)


This generic function is called with the ufunc object, the arguments to it,
and an array of (pointers to) PyArrayObjects which are NULL.

'op' is an array of at least NPY_MAXARGS PyArrayObject *.

::

  void
  PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp
                     *steps, void *func)


::

  void
  PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp
                     *steps, void *func)


::

  void
  PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp
                       *steps, void *func)


::

  void
  PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp
                       *steps, void *func)


::

  void
  PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp
                     *steps, void *func)


::

  void
  PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp
                      *steps, void *func)


::

  void
  PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void
                *func)


::

  int
  PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject
                      **errobj)


On return, if errobj is populated with a non-NULL value, the caller
owns a new reference to errobj.

::

  int
  PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)


::

  void
  PyUFunc_clearfperr()


::

  int
  PyUFunc_getfperr(void)


::

  int
  PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int
                      *first)


::

  int
  PyUFunc_ReplaceLoopBySignature(PyUFuncObject
                                 *func, PyUFuncGenericFunction
                                 newfunc, int
                                 *signature, PyUFuncGenericFunction
                                 *oldfunc)


::

  PyObject *
  PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void
                                      **data, char *types, int
                                      ntypes, int nin, int nout, int
                                      identity, char *name, char
                                      *doc, int check_return, const char
                                      *signature)


::

  int
  PyUFunc_SetUsesArraysAsData(void **data, size_t i)


::

  void
  PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void
              *func)


::

  void
  PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp
                     *steps, void *func)


::

  void
  PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp
                     *steps, void *func)


::

  void
  PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void
               *func)


::

  void
  PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp
                       *steps, void *func)


::

  void
  PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp
                       *steps, void *func)


::

  int
  PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING
                              casting, PyArrayObject
                              **operands, PyObject
                              *type_tup, PyArray_Descr **out_dtypes)


This function applies the default type resolution rules
for the provided ufunc.

Returns 0 on success, -1 on error.

::

  int
  PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING
                          casting, PyArrayObject
                          **operands, PyArray_Descr **dtypes)


Validates that the input operands can be cast to
the input types, and the output types can be cast to
the output operands where provided.

Returns 0 on success, -1 (with exception raised) on validation failure.
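The listing above gives only the signatures; the registration entry point is easiest to understand with a concrete loop. A minimal sketch following the PyUFunc_FromFuncAndData signature documented above (the loop and table names are hypothetical, and module-init error handling is omitted):

    #include <numpy/ndarraytypes.h>
    #include <numpy/ufuncobject.h>

    /* 1-d inner loop: out[i] = 2*in[i] over strided double data. */
    static void double_it(char **args, npy_intp *dimensions,
                          npy_intp *steps, void *data)
    {
        npy_intp i, n = dimensions[0];
        char *in = args[0], *out = args[1];
        for (i = 0; i < n; i++) {
            *(double *)out = 2.0 * (*(double *)in);
            in += steps[0];
            out += steps[1];
        }
    }

    static PyUFuncGenericFunction funcs[1] = {&double_it};
    static char types[2] = {NPY_DOUBLE, NPY_DOUBLE};
    static void *data[1] = {NULL};

    /* At module init time, after import_array() and import_umath():
     *   PyObject *uf = PyUFunc_FromFuncAndData(funcs, data, types,
     *                      1, 1, 1, PyUFunc_None, "double_it",
     *                      "doubles its input", 0);
     */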
@ -1,446 +0,0 @@
#ifndef Py_UFUNCOBJECT_H
#define Py_UFUNCOBJECT_H

#include <numpy/npy_math.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * The legacy generic inner loop for a standard element-wise or
 * generalized ufunc.
 */
typedef void (*PyUFuncGenericFunction)
            (char **args,
             npy_intp *dimensions,
             npy_intp *strides,
             void *innerloopdata);

/*
 * The most generic one-dimensional inner loop for
 * a standard element-wise ufunc. This typedef is also
 * more consistent with the other NumPy function pointer typedefs
 * than PyUFuncGenericFunction.
 */
typedef void (PyUFunc_StridedInnerLoopFunc)(
                char **dataptrs, npy_intp *strides,
                npy_intp count,
                NpyAuxData *innerloopdata);

/*
 * The most generic one-dimensional inner loop for
 * a masked standard element-wise ufunc. "Masked" here means that it skips
 * doing calculations on any items for which the maskptr array has a true
 * value.
 */
typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
                char **dataptrs, npy_intp *strides,
                char *maskptr, npy_intp mask_stride,
                npy_intp count,
                NpyAuxData *innerloopdata);

/* Forward declaration for the type resolver and loop selector typedefs */
struct _tagPyUFuncObject;

/*
 * Given the operands for calling a ufunc, should determine the
 * calculation input and output data types and return an inner loop function.
 * This function should validate that the casting rule is being followed,
 * and fail if it is not.
 *
 * For backwards compatibility, the regular type resolution function does not
 * support auxiliary data with object semantics. The type resolution call
 * which returns a masked generic function returns a standard NpyAuxData
 * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
 * work.
 *
 * ufunc:             The ufunc object.
 * casting:           The 'casting' parameter provided to the ufunc.
 * operands:          An array of length (ufunc->nin + ufunc->nout),
 *                    with the output parameters possibly NULL.
 * type_tup:          Either NULL, or the type_tup passed to the ufunc.
 * out_dtypes:        An array which should be populated with new
 *                    references to (ufunc->nin + ufunc->nout) new
 *                    dtypes, one for each input and output. These
 *                    dtypes should all be in native-endian format.
 *
 * Should return 0 on success, -1 on failure (with exception set),
 * or -2 if Py_NotImplemented should be returned.
 */
typedef int (PyUFunc_TypeResolutionFunc)(
                struct _tagPyUFuncObject *ufunc,
                NPY_CASTING casting,
                PyArrayObject **operands,
                PyObject *type_tup,
                PyArray_Descr **out_dtypes);

/*
 * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc,
 * and an array of fixed strides (the array will contain NPY_MAX_INTP for
 * strides which are not necessarily fixed), returns an inner loop
 * with associated auxiliary data.
 *
 * For backwards compatibility, there is a variant of the inner loop
 * selection which returns an inner loop irrespective of the strides,
 * and with a void* static auxiliary data instead of an NpyAuxData *
 * dynamically allocatable auxiliary data.
 *
 * ufunc:             The ufunc object.
 * dtypes:            An array which has been populated with dtypes,
 *                    in most cases by the type resolution function
 *                    for the same ufunc.
 * fixed_strides:     For each input/output, either the stride that
 *                    will be used every time the function is called
 *                    or NPY_MAX_INTP if the stride might change or
 *                    is not known ahead of time. The loop selection
 *                    function may use this stride to pick inner loops
 *                    which are optimized for contiguous or 0-stride
 *                    cases.
 * out_innerloop:     Should be populated with the correct ufunc inner
 *                    loop for the given type.
 * out_innerloopdata: Should be populated with the void* data to
 *                    be passed into the out_innerloop function.
 * out_needs_api:     If the inner loop needs to use the Python API,
 *                    should set this to 1, otherwise should leave
 *                    this untouched.
 */
typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
                struct _tagPyUFuncObject *ufunc,
                PyArray_Descr **dtypes,
                PyUFuncGenericFunction *out_innerloop,
                void **out_innerloopdata,
                int *out_needs_api);
typedef int (PyUFunc_InnerLoopSelectionFunc)(
                struct _tagPyUFuncObject *ufunc,
                PyArray_Descr **dtypes,
                npy_intp *fixed_strides,
                PyUFunc_StridedInnerLoopFunc **out_innerloop,
                NpyAuxData **out_innerloopdata,
                int *out_needs_api);
typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)(
                struct _tagPyUFuncObject *ufunc,
                PyArray_Descr **dtypes,
                PyArray_Descr *mask_dtype,
                npy_intp *fixed_strides,
                npy_intp fixed_mask_stride,
                PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop,
                NpyAuxData **out_innerloopdata,
                int *out_needs_api);

typedef struct _tagPyUFuncObject {
        PyObject_HEAD
        /*
         * nin:   Number of inputs
         * nout:  Number of outputs
         * nargs: Always nin + nout (Why is it stored?)
         */
        int nin, nout, nargs;

        /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */
        int identity;

        /* Array of one-dimensional core loops */
        PyUFuncGenericFunction *functions;
        /* Array of funcdata that gets passed into the functions */
        void **data;
        /* The number of elements in 'functions' and 'data' */
        int ntypes;

        /* Does not appear to be used */
        int check_return;

        /* The name of the ufunc */
        char *name;

        /* Array of type numbers, of size ('nargs' * 'ntypes') */
        char *types;

        /* Documentation string */
        char *doc;

        void *ptr;
        PyObject *obj;
        PyObject *userloops;

        /* generalized ufunc parameters */

        /* 0 for scalar ufunc; 1 for generalized ufunc */
        int core_enabled;
        /* number of distinct dimension names in signature */
        int core_num_dim_ix;

        /*
         * dimension indices of input/output argument k are stored in
         * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
         */

        /* numbers of core dimensions of each argument */
        int *core_num_dims;
        /*
         * dimension indices in a flattened form; indices
         * are in the range of [0,core_num_dim_ix)
         */
        int *core_dim_ixs;
        /*
         * positions of 1st core dimensions of each
         * argument in core_dim_ixs
         */
        int *core_offsets;
        /* signature string for printing purpose */
        char *core_signature;

        /*
         * A function which resolves the types and fills an array
         * with the dtypes for the inputs and outputs.
         */
        PyUFunc_TypeResolutionFunc *type_resolver;
        /*
         * A function which returns an inner loop written for
         * NumPy 1.6 and earlier ufuncs. This is for backwards
         * compatibility, and may be NULL if inner_loop_selector
         * is specified.
         */
        PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
        /*
         * A function which returns an inner loop for the new mechanism
         * in NumPy 1.7 and later. If provided, this is used, otherwise
         * if NULL the legacy_inner_loop_selector is used instead.
         */
        PyUFunc_InnerLoopSelectionFunc *inner_loop_selector;
        /*
         * A function which returns a masked inner loop for the ufunc.
         */
        PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
} PyUFuncObject;

#include "arrayobject.h"

#define UFUNC_ERR_IGNORE 0
#define UFUNC_ERR_WARN 1
#define UFUNC_ERR_RAISE 2
#define UFUNC_ERR_CALL 3
#define UFUNC_ERR_PRINT 4
#define UFUNC_ERR_LOG 5

/* Python side integer mask */

#define UFUNC_MASK_DIVIDEBYZERO 0x07
#define UFUNC_MASK_OVERFLOW 0x3f
#define UFUNC_MASK_UNDERFLOW 0x1ff
#define UFUNC_MASK_INVALID 0xfff

#define UFUNC_SHIFT_DIVIDEBYZERO 0
#define UFUNC_SHIFT_OVERFLOW 3
#define UFUNC_SHIFT_UNDERFLOW 6
#define UFUNC_SHIFT_INVALID 9


/* platform-dependent code translates floating point
   status to an integer sum of these values
*/
#define UFUNC_FPE_DIVIDEBYZERO 1
#define UFUNC_FPE_OVERFLOW 2
#define UFUNC_FPE_UNDERFLOW 4
#define UFUNC_FPE_INVALID 8

/* Error mode that avoids look-up (no checking) */
#define UFUNC_ERR_DEFAULT 0

#define UFUNC_OBJ_ISOBJECT 1
#define UFUNC_OBJ_NEEDS_API 2

/* Default user error mode */
#define UFUNC_ERR_DEFAULT2                               \
        (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) +   \
        (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) +       \
        (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID)

#if NPY_ALLOW_THREADS
#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
#define NPY_LOOP_END_THREADS   do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
#else
#define NPY_LOOP_BEGIN_THREADS
#define NPY_LOOP_END_THREADS
#endif

/*
 * UFunc has unit of 1, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.
 */
#define PyUFunc_One 1
/*
 * UFunc has unit of 0, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.
 */
#define PyUFunc_Zero 0
/*
 * UFunc has no unit, and the order of operations cannot be reordered.
 * This case does not allow reduction with multiple axes at once.
 */
#define PyUFunc_None -1
/*
 * UFunc has no unit, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.
 */
#define PyUFunc_ReorderableNone -2

#define UFUNC_REDUCE 0
#define UFUNC_ACCUMULATE 1
#define UFUNC_REDUCEAT 2
#define UFUNC_OUTER 3


typedef struct {
        int nin;
        int nout;
        PyObject *callable;
} PyUFunc_PyFuncData;

/* A linked-list of function information for
   user-defined 1-d loops.
*/
typedef struct _loop1d_info {
        PyUFuncGenericFunction func;
        void *data;
        int *arg_types;
        struct _loop1d_info *next;
} PyUFunc_Loop1d;


#include "__ufunc_api.h"

#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"

#define UFUNC_CHECK_ERROR(arg)                                          \
        do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \
            ((arg)->errormask &&                                        \
             PyUFunc_checkfperr((arg)->errormask,                       \
                                (arg)->errobj,                          \
                                &(arg)->first)))                        \
                goto fail;} while (0)

/* This code checks the IEEE status flags in a platform-dependent way */
/* Adapted from Numarray */

#if (defined(__unix__) || defined(unix)) && !defined(USG)
#include <sys/param.h>
#endif

/* OSF/Alpha (Tru64) ---------------------------------------------*/
#if defined(__osf__) && defined(__alpha)

#include <machine/fpu.h>

#define UFUNC_CHECK_STATUS(ret) {                                       \
        unsigned long fpstatus;                                         \
                                                                        \
        fpstatus = ieee_get_fp_control();                               \
        /* clear status bits as well as disable exception mode if on */ \
        ieee_set_fp_control( 0 );                                       \
        ret = ((IEEE_STATUS_DZE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
                | ((IEEE_STATUS_OVF & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \
                | ((IEEE_STATUS_UNF & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
                | ((IEEE_STATUS_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \
        }

/* MS Windows -----------------------------------------------------*/
#elif defined(_MSC_VER)

#include <float.h>

/* Clear the floating point exception default of Borland C++ */
#if defined(__BORLANDC__)
#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
#endif

#define UFUNC_CHECK_STATUS(ret) {                                       \
        int fpstatus = (int) _clearfp();                                \
                                                                        \
        ret = ((SW_ZERODIVIDE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
                | ((SW_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0)   \
                | ((SW_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
                | ((SW_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0);    \
        }

/* Solaris --------------------------------------------------------*/
/* --------ignoring SunOS ieee_flags approach, someone else can
**   deal with that! */
#elif defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \
      (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \
      defined(__NetBSD__)
#include <ieeefp.h>

#define UFUNC_CHECK_STATUS(ret) {                                       \
        int fpstatus;                                                   \
                                                                        \
        fpstatus = (int) fpgetsticky();                                 \
        ret = ((FP_X_DZ & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0)       \
                | ((FP_X_OFL & fpstatus) ? UFUNC_FPE_OVERFLOW : 0)      \
                | ((FP_X_UFL & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0)     \
                | ((FP_X_INV & fpstatus) ? UFUNC_FPE_INVALID : 0);      \
        (void) fpsetsticky(0);                                          \
        }

#elif defined(__GLIBC__) || defined(__APPLE__) || \
      defined(__CYGWIN__) || defined(__MINGW32__) || \
      (defined(__FreeBSD__) && (__FreeBSD_version >= 502114))

#if defined(__GLIBC__) || defined(__APPLE__) || \
    defined(__MINGW32__) || defined(__FreeBSD__)
#include <fenv.h>
#endif

#define UFUNC_CHECK_STATUS(ret) {                                       \
        int fpstatus = (int) fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |  \
                                          FE_UNDERFLOW | FE_INVALID);   \
        ret = ((FE_DIVBYZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0)  \
                | ((FE_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0)   \
                | ((FE_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
                | ((FE_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0);    \
        (void) feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |               \
                             FE_UNDERFLOW | FE_INVALID);                \
        }

#elif defined(_AIX)

#include <float.h>
#include <fpxcp.h>

#define UFUNC_CHECK_STATUS(ret) {                                       \
        fpflag_t fpstatus;                                              \
                                                                        \
        fpstatus = fp_read_flag();                                      \
        ret = ((FP_DIV_BY_ZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
                | ((FP_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0)   \
                | ((FP_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
                | ((FP_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0);    \
        fp_swap_flag(0);                                                \
        }

#else

#define NO_FLOATING_POINT_SUPPORT
#define UFUNC_CHECK_STATUS(ret) {   \
        ret = 0;                    \
        }

#endif

/*
 * THESE MACROS ARE DEPRECATED.
 * Use npy_set_floatstatus_* in the npymath library.
 */
#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
#define generate_overflow_error() npy_set_floatstatus_overflow()

/* Make sure it gets defined if it isn't already */
#ifndef UFUNC_NOFPE
#define UFUNC_NOFPE
#endif


#ifdef __cplusplus
}
#endif
#endif /* !Py_UFUNCOBJECT_H */
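Whichever platform branch is chosen above, UFUNC_CHECK_STATUS always reduces the hardware state to the same UFUNC_FPE_* bit sum and clears the sticky flags, so calling code stays platform-independent. A minimal sketch (the helper name is hypothetical):

    #include <numpy/ufuncobject.h>

    /* Harvest floating-point errors accumulated by an inner loop. */
    static int loop_had_fpe(void)
    {
        int status;
        UFUNC_CHECK_STATUS(status);
        return (status & (UFUNC_FPE_DIVIDEBYZERO | UFUNC_FPE_OVERFLOW |
                          UFUNC_FPE_UNDERFLOW | UFUNC_FPE_INVALID)) != 0;
    }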
@ -1,19 +0,0 @@
#ifndef __NUMPY_UTILS_HEADER__
#define __NUMPY_UTILS_HEADER__

#ifndef __COMP_NPY_UNUSED
    #if defined(__GNUC__)
        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
    #elif defined(__ICC)
        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
    #else
        #define __COMP_NPY_UNUSED
    #endif
#endif

/* Use this to tag a variable as not used. It will remove unused variable
 * warnings on supported platforms (see __COMP_NPY_UNUSED) and mangle the variable
 * to avoid accidental use */
#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED

#endif
7	setup.py

@@ -4,7 +4,6 @@ import sys
import platform
from distutils.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
from distutils import ccompiler, msvccompiler
import numpy
from pathlib import Path
import shutil

@@ -195,13 +194,7 @@ def setup_package():
    include_dirs = [
        get_python_inc(plat_specific=True),
        numpy.get_include(),
        str(ROOT / "include"),
    ]
    if (
        ccompiler.new_compiler().compiler_type == "msvc"
        and msvccompiler.get_build_version() == 9
    ):
        include_dirs.append(str(ROOT / "include" / "msvc9"))
    ext_modules = []
    for name in MOD_NAMES:
        mod_path = name.replace(".", "/") + ".pyx"
@@ -27,18 +27,23 @@ if sys.maxunicode == 65535:

def load(
    name: Union[str, Path],
    disable: Iterable[str] = tuple(),
    disable: Iterable[str] = util.SimpleFrozenList(),
    exclude: Iterable[str] = util.SimpleFrozenList(),
    config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
) -> Language:
    """Load a spaCy model from an installed package or a local path.

    name (str): Package name or model path.
    disable (Iterable[str]): Names of pipeline components to disable.
    disable (Iterable[str]): Names of pipeline components to disable. Disabled
        pipes will be loaded but they won't be run unless you explicitly
        enable them by calling nlp.enable_pipe.
    exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
        components won't be loaded.
    config (Dict[str, Any] / Config): Config overrides as nested dict or dict
        keyed by section values in dot notation.
    RETURNS (Language): The loaded nlp object.
    """
    return util.load_model(name, disable=disable, config=config)
    return util.load_model(name, disable=disable, exclude=exclude, config=config)


def blank(name: str, **overrides) -> Language:
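A minimal usage sketch of the disable/exclude distinction introduced above, assuming an installed pipeline package named en_core_web_sm with "tagger" and "parser" components (the package and component names are illustrative, not part of this commit):

import spacy

nlp = spacy.load("en_core_web_sm", disable=["tagger"], exclude=["parser"])

# Disabled components are loaded but not run...
assert "tagger" in nlp.component_names   # still present on the nlp object
assert "tagger" not in nlp.pipe_names    # but not active
nlp.enable_pipe("tagger")                # opt back in at runtime

# ...while excluded components are never loaded at all.
assert "parser" not in nlp.component_names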
@@ -2,7 +2,6 @@ from typing import Optional
from pathlib import Path
from wasabi import msg
import subprocess
import shutil
import re

from ... import about
@@ -1,6 +1,6 @@
"""This module contains helpers and subcommands for integrating spaCy projects
with Data Version Control (DVC). https://dvc.org"""
from typing import Dict, Any, List, Optional
from typing import Dict, Any, List, Optional, Iterable
import subprocess
from pathlib import Path
from wasabi import msg

@@ -8,6 +8,7 @@ from wasabi import msg
from .._util import PROJECT_FILE, load_project_config, get_hash, project_cli
from .._util import Arg, Opt, NAME, COMMAND
from ...util import working_dir, split_command, join_command, run_command
from ...util import SimpleFrozenList


DVC_CONFIG = "dvc.yaml"

@@ -130,7 +131,7 @@ def update_dvc_config(


def run_dvc_commands(
    commands: List[str] = tuple(), flags: Dict[str, bool] = {},
    commands: Iterable[str] = SimpleFrozenList(), flags: Dict[str, bool] = {},
) -> None:
    """Run a sequence of DVC commands in a subprocess, in order.
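The signature changes in this commit systematically swap `tuple()`, `[]` and `{}` defaults for SimpleFrozenList/SimpleFrozenDict. The point is safe shared defaults: a mutable default is a single object shared across all calls, while a frozen list keeps the convenient list type but fails loudly on mutation. A self-contained sketch of the pitfall (plain Python, not spaCy code):

def broken(commands=[]):
    # The same list object is reused on every call, so state leaks
    # between calls -- the classic mutable-default-argument bug.
    commands.append("run")
    return commands

assert broken() == ["run"]
assert broken() == ["run", "run"]  # surprise: the previous call leaked in

class FrozenList(list):
    # Simplified stand-in for spacy.util.SimpleFrozenList.
    def append(self, *args, **kwargs):
        raise NotImplementedError("frozen list")

def safe(commands=FrozenList()):
    # Accidentally mutating the shared default now raises instead of
    # silently corrupting state; reading it is still fine.
    return list(commands) + ["run"]

assert safe() == ["run"]
assert safe() == ["run"]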
@@ -1,10 +1,11 @@
from typing import Optional, List, Dict, Sequence, Any
from typing import Optional, List, Dict, Sequence, Any, Iterable
from pathlib import Path
from wasabi import msg
import sys
import srsly

from ...util import working_dir, run_command, split_command, is_cwd, join_command
from ...util import SimpleFrozenList
from .._util import PROJECT_FILE, PROJECT_LOCK, load_project_config, get_hash
from .._util import get_checksum, project_cli, Arg, Opt, COMMAND

@@ -115,7 +116,9 @@ def print_run_help(project_dir: Path, subcommand: Optional[str] = None) -> None:


def run_commands(
    commands: List[str] = tuple(), silent: bool = False, dry: bool = False,
    commands: Iterable[str] = SimpleFrozenList(),
    silent: bool = False,
    dry: bool = False,
) -> None:
    """Run a sequence of commands in a subprocess, in order.
@@ -11,6 +11,7 @@ use_pytorch_for_gpu_memory = false
[nlp]
lang = null
pipeline = []
disabled = []
load_vocab_data = true
before_creation = null
after_creation = null
@@ -128,7 +128,8 @@ class Errors:
            "got {component} (name: '{name}'). If you're using a custom "
            "component factory, double-check that it correctly returns your "
            "initialized component.")
    E004 = ("Can't set up pipeline component: a factory for '{name}' already exists.")
    E004 = ("Can't set up pipeline component: a factory for '{name}' already "
            "exists. Existing factory: {func}. New factory: {new_func}")
    E005 = ("Pipeline component '{name}' returned None. If you're using a "
            "custom component, maybe you forgot to return the processed Doc?")
    E006 = ("Invalid constraints for adding pipeline component. You can only "

@@ -136,11 +137,10 @@ class Errors:
            "after (component name or index), first (True) or last (True). "
            "Invalid configuration: {args}. Existing components: {opts}")
    E007 = ("'{name}' already exists in pipeline. Existing names: {opts}")
    E008 = ("Some current components would be lost when restoring previous "
            "pipeline state. If you added components after calling "
            "`nlp.select_pipes()`, you should remove them explicitly with "
            "`nlp.remove_pipe()` before the pipeline is restored. Names of "
            "the new components: {names}")
    E008 = ("Can't restore disabled pipeline component '{name}' because it "
            "doesn't exist in the pipeline anymore. If you want to remove "
            "components from the pipeline, you should do it before calling "
            "`nlp.select_pipes()` or after restoring the disabled components.")
    E010 = ("Word vectors set to length 0. This may be because you don't have "
            "a model installed or loaded, or because your model doesn't "
            "include word vectors. For more info, see the docs:\n"

@@ -473,6 +473,13 @@ class Errors:
    E199 = ("Unable to merge 0-length span at doc[{start}:{end}].")

    # TODO: fix numbering after merging develop into master
    E926 = ("It looks like you're trying to modify nlp.{attr} directly. This "
            "doesn't work because it's an immutable computed property. If you "
            "need to modify the pipeline, use the built-in methods like "
            "nlp.add_pipe, nlp.remove_pipe, nlp.disable_pipe or nlp.enable_pipe "
            "instead.")
    E927 = ("Can't write to frozen list. Maybe you're trying to modify a computed "
            "property or default function argument?")
    E928 = ("A 'KnowledgeBase' should be written to / read from a file, but the "
            "provided argument {loc} is an existing directory.")
    E929 = ("A 'KnowledgeBase' could not be read from {loc} - the path does "
@@ -9,7 +9,7 @@ from wasabi import msg
@registry.loggers("spacy.ConsoleLogger.v1")
def console_logger():
    def setup_printer(
        nlp: "Language"
        nlp: "Language",
    ) -> Tuple[Callable[[Dict[str, Any]], None], Callable]:
        score_cols = list(nlp.config["training"]["score_weights"])
        score_widths = [max(len(col), 6) for col in score_cols]

@@ -73,7 +73,7 @@ def wandb_logger(project_name: str, remove_config_values: List[str] = []):
    console = console_logger()

    def setup_logger(
        nlp: "Language"
        nlp: "Language",
    ) -> Tuple[Callable[[Dict[str, Any]], None], Callable]:
        config = nlp.config.interpolate()
        config_dot = util.dict_to_dot(config)
@@ -6,7 +6,7 @@ import itertools
import weakref
import functools
from contextlib import contextmanager
from copy import copy, deepcopy
from copy import deepcopy
from pathlib import Path
import warnings
from thinc.api import get_current_ops, Config, require_gpu, Optimizer

@@ -20,7 +20,7 @@ from .vocab import Vocab, create_vocab
from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
from .gold import Example, validate_examples
from .scorer import Scorer
from .util import create_default_optimizer, registry
from .util import create_default_optimizer, registry, SimpleFrozenList
from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER
from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS
from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES

@@ -159,7 +159,8 @@ class Language:
        self.vocab: Vocab = vocab
        if self.lang is None:
            self.lang = self.vocab.lang
        self.pipeline = []
        self._components = []
        self._disabled = set()
        self.max_length = max_length
        self.resolved = {}
        # Create the default tokenizer from the default config

@@ -206,10 +207,11 @@ class Language:
                "keys": self.vocab.vectors.n_keys,
                "name": self.vocab.vectors.name,
            }
        self._meta["labels"] = self.pipe_labels
        self._meta["labels"] = dict(self.pipe_labels)
        # TODO: Adding this back to prevent breaking people's code etc., but
        # we should consider removing it
        self._meta["pipeline"] = self.pipe_names
        self._meta["pipeline"] = list(self.pipe_names)
        self._meta["disabled"] = list(self.disabled)
        return self._meta

    @meta.setter

@@ -232,13 +234,14 @@ class Language:
        # we can populate the config again later
        pipeline = {}
        score_weights = []
        for pipe_name in self.pipe_names:
        for pipe_name in self.component_names:
            pipe_meta = self.get_pipe_meta(pipe_name)
            pipe_config = self.get_pipe_config(pipe_name)
            pipeline[pipe_name] = {"factory": pipe_meta.factory, **pipe_config}
            if pipe_meta.default_score_weights:
                score_weights.append(pipe_meta.default_score_weights)
        self._config["nlp"]["pipeline"] = self.pipe_names
        self._config["nlp"]["pipeline"] = list(self.component_names)
        self._config["nlp"]["disabled"] = list(self.disabled)
        self._config["components"] = pipeline
        self._config["training"]["score_weights"] = combine_score_weights(score_weights)
        if not srsly.is_json_serializable(self._config):
@@ -249,21 +252,64 @@ class Language:
    def config(self, value: Config) -> None:
        self._config = value

    @property
    def disabled(self) -> List[str]:
        """Get the names of all disabled components.

        RETURNS (List[str]): The disabled components.
        """
        # Make sure the disabled components are returned in the order they
        # appear in the pipeline (which isn't guaranteed by the set)
        names = [name for name, _ in self._components if name in self._disabled]
        return SimpleFrozenList(names, error=Errors.E926.format(attr="disabled"))

    @property
    def factory_names(self) -> List[str]:
        """Get names of all available factories.

        RETURNS (List[str]): The factory names.
        """
        return list(self.factories.keys())
        names = list(self.factories.keys())
        return SimpleFrozenList(names)

    @property
    def pipe_names(self) -> List[str]:
        """Get names of available pipeline components.
    def components(self) -> List[Tuple[str, Callable[[Doc], Doc]]]:
        """Get all (name, component) tuples in the pipeline, including the
        currently disabled components.
        """
        return SimpleFrozenList(
            self._components, error=Errors.E926.format(attr="components")
        )

    @property
    def component_names(self) -> List[str]:
        """Get the names of the available pipeline components. Includes all
        active and inactive pipeline components.

        RETURNS (List[str]): List of component name strings, in order.
        """
        return [pipe_name for pipe_name, _ in self.pipeline]
        names = [pipe_name for pipe_name, _ in self._components]
        return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names"))

    @property
    def pipeline(self) -> List[Tuple[str, Callable[[Doc], Doc]]]:
        """The processing pipeline consisting of (name, component) tuples. The
        components are called on the Doc in order as it passes through the
        pipeline.

        RETURNS (List[Tuple[str, Callable[[Doc], Doc]]]): The pipeline.
        """
        pipes = [(n, p) for n, p in self._components if n not in self._disabled]
        return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline"))

    @property
    def pipe_names(self) -> List[str]:
        """Get names of available active pipeline components.

        RETURNS (List[str]): List of component name strings, in order.
        """
        names = [pipe_name for pipe_name, _ in self.pipeline]
        return SimpleFrozenList(names, error=Errors.E926.format(attr="pipe_names"))

    @property
    def pipe_factories(self) -> Dict[str, str]:
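The net effect of these properties, in a short sketch grounded in the tests added later in this commit (component names reuse the built-in "ner" and "tagger" factories for illustration):

from spacy.language import Language

nlp = Language()
nlp.add_pipe("ner")
nlp.add_pipe("tagger")
nlp.disable_pipe("tagger")

# components/component_names cover everything, active or not;
# pipeline/pipe_names cover only the components that will actually run.
assert nlp.component_names == ["ner", "tagger"]
assert nlp.pipe_names == ["ner"]
assert nlp.disabled == ["tagger"]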
@@ -272,9 +318,9 @@ class Language:
        RETURNS (Dict[str, str]): Factory names, keyed by component names.
        """
        factories = {}
        for pipe_name, pipe in self.pipeline:
        for pipe_name, pipe in self._components:
            factories[pipe_name] = self.get_pipe_meta(pipe_name).factory
        return factories
        return SimpleFrozenDict(factories)

    @property
    def pipe_labels(self) -> Dict[str, List[str]]:

@@ -284,10 +330,10 @@ class Language:
        RETURNS (Dict[str, List[str]]): Labels keyed by component name.
        """
        labels = {}
        for name, pipe in self.pipeline:
        for name, pipe in self._components:
            if hasattr(pipe, "labels"):
                labels[name] = list(pipe.labels)
        return labels
        return SimpleFrozenDict(labels)

    @classmethod
    def has_factory(cls, name: str) -> bool:

@@ -358,10 +404,10 @@ class Language:
        name: str,
        *,
        default_config: Dict[str, Any] = SimpleFrozenDict(),
        assigns: Iterable[str] = tuple(),
        requires: Iterable[str] = tuple(),
        assigns: Iterable[str] = SimpleFrozenList(),
        requires: Iterable[str] = SimpleFrozenList(),
        retokenizes: bool = False,
        scores: Iterable[str] = tuple(),
        scores: Iterable[str] = SimpleFrozenList(),
        default_score_weights: Dict[str, float] = SimpleFrozenDict(),
        func: Optional[Callable] = None,
    ) -> Callable:

@@ -396,13 +442,21 @@ class Language:
                style="default config", name=name, cfg_type=type(default_config)
            )
            raise ValueError(err)
        internal_name = cls.get_factory_name(name)
        if internal_name in registry.factories:
            # We only check for the internal name here – it's okay if it's a
            # subclass and the base class has a factory of the same name
            raise ValueError(Errors.E004.format(name=name))

        def add_factory(factory_func: Callable) -> Callable:
            internal_name = cls.get_factory_name(name)
            if internal_name in registry.factories:
                # We only check for the internal name here – it's okay if it's a
                # subclass and the base class has a factory of the same name. We
                # also only raise if the function is different to prevent raising
                # if module is reloaded.
                existing_func = registry.factories.get(internal_name)
                if not util.is_same_func(factory_func, existing_func):
                    err = Errors.E004.format(
                        name=name, func=existing_func, new_func=factory_func
                    )
                    raise ValueError(err)

            arg_names = util.get_arg_names(factory_func)
            if "nlp" not in arg_names or "name" not in arg_names:
                raise ValueError(Errors.E964.format(name=name))
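The practical effect of moving the duplicate-name check inside add_factory is that re-registering the same function (e.g. after a live module reload) becomes a no-op, while registering a different function under a taken name still raises E004. A sketch mirroring the idempotency test further down in this commit; the factory name "my_factory" is made up:

from spacy.language import Language

def make_component(nlp, name):
    return lambda doc: doc

# Re-running the decorator with the *same* function is now allowed:
Language.factory("my_factory", func=make_component)
Language.factory("my_factory", func=make_component)  # no E004 raised

# A *different* function under the same name would still raise:
# Language.factory("my_factory", func=lambda nlp, name: lambda doc: doc)
# -> ValueError (E004, now reporting both the existing and the new factory)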
@@ -439,8 +493,8 @@ class Language:
        cls,
        name: Optional[str] = None,
        *,
        assigns: Iterable[str] = tuple(),
        requires: Iterable[str] = tuple(),
        assigns: Iterable[str] = SimpleFrozenList(),
        requires: Iterable[str] = SimpleFrozenList(),
        retokenizes: bool = False,
        func: Optional[Callable[[Doc], Doc]] = None,
    ) -> Callable:

@@ -472,6 +526,21 @@ class Language:
        def factory_func(nlp: cls, name: str) -> Callable[[Doc], Doc]:
            return component_func

        internal_name = cls.get_factory_name(name)
        if internal_name in registry.factories:
            # We only check for the internal name here – it's okay if it's a
            # subclass and the base class has a factory of the same name. We
            # also only raise if the function is different to prevent raising
            # if module is reloaded. It's hacky, but we need to check the
            # existing function for a closure and whether that's identical
            # to the component function (because factory_func created above
            # will always be different, even for the same function)
            existing_func = registry.factories.get(internal_name)
            closure = existing_func.__closure__
            wrapped = [c.cell_contents for c in closure][0] if closure else None
            if util.is_same_func(wrapped, component_func):
                factory_func = existing_func  # noqa: F811

        cls.factory(
            component_name,
            assigns=assigns,
@@ -512,10 +581,10 @@ class Language:

        DOCS: https://spacy.io/api/language#get_pipe
        """
        for pipe_name, component in self.pipeline:
        for pipe_name, component in self._components:
            if pipe_name == name:
                return component
        raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
        raise KeyError(Errors.E001.format(name=name, opts=self.component_names))

    def create_pipe(
        self,

@@ -660,8 +729,8 @@ class Language:
            err = Errors.E966.format(component=bad_val, name=name)
            raise ValueError(err)
        name = name if name is not None else factory_name
        if name in self.pipe_names:
            raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names))
        if name in self.component_names:
            raise ValueError(Errors.E007.format(name=name, opts=self.component_names))
        if source is not None:
            # We're loading the component from a model. After loading the
            # component, we know its real factory name

@@ -686,7 +755,7 @@ class Language:
            )
        pipe_index = self._get_pipe_index(before, after, first, last)
        self._pipe_meta[name] = self.get_factory_meta(factory_name)
        self.pipeline.insert(pipe_index, (name, pipe_component))
        self._components.insert(pipe_index, (name, pipe_component))
        return pipe_component

    def _get_pipe_index(

@@ -707,32 +776,42 @@ class Language:
        """
        all_args = {"before": before, "after": after, "first": first, "last": last}
        if sum(arg is not None for arg in [before, after, first, last]) >= 2:
            raise ValueError(Errors.E006.format(args=all_args, opts=self.pipe_names))
            raise ValueError(
                Errors.E006.format(args=all_args, opts=self.component_names)
            )
        if last or not any(value is not None for value in [first, before, after]):
            return len(self.pipeline)
            return len(self._components)
        elif first:
            return 0
        elif isinstance(before, str):
            if before not in self.pipe_names:
                raise ValueError(Errors.E001.format(name=before, opts=self.pipe_names))
            return self.pipe_names.index(before)
            if before not in self.component_names:
                raise ValueError(
                    Errors.E001.format(name=before, opts=self.component_names)
                )
            return self.component_names.index(before)
        elif isinstance(after, str):
            if after not in self.pipe_names:
                raise ValueError(Errors.E001.format(name=after, opts=self.pipe_names))
            return self.pipe_names.index(after) + 1
            if after not in self.component_names:
                raise ValueError(
                    Errors.E001.format(name=after, opts=self.component_names)
                )
            return self.component_names.index(after) + 1
        # We're only accepting indices referring to components that exist
        # (can't just do isinstance here because bools are instance of int, too)
        elif type(before) == int:
            if before >= len(self.pipeline) or before < 0:
                err = Errors.E959.format(dir="before", idx=before, opts=self.pipe_names)
            if before >= len(self._components) or before < 0:
                err = Errors.E959.format(
                    dir="before", idx=before, opts=self.component_names
                )
                raise ValueError(err)
            return before
        elif type(after) == int:
            if after >= len(self.pipeline) or after < 0:
                err = Errors.E959.format(dir="after", idx=after, opts=self.pipe_names)
            if after >= len(self._components) or after < 0:
                err = Errors.E959.format(
                    dir="after", idx=after, opts=self.component_names
                )
                raise ValueError(err)
            return after + 1
        raise ValueError(Errors.E006.format(args=all_args, opts=self.pipe_names))
        raise ValueError(Errors.E006.format(args=all_args, opts=self.component_names))

    def has_pipe(self, name: str) -> bool:
        """Check if a component name is present in the pipeline. Equivalent to

@@ -773,7 +852,7 @@ class Language:
        # to Language.pipeline to make sure the configs are handled correctly
        pipe_index = self.pipe_names.index(name)
        self.remove_pipe(name)
        if not len(self.pipeline) or pipe_index == len(self.pipeline):
        if not len(self._components) or pipe_index == len(self._components):
            # we have no components to insert before/after, or we're replacing the last component
            self.add_pipe(factory_name, name=name, config=config, validate=validate)
        else:

@@ -793,12 +872,16 @@ class Language:

        DOCS: https://spacy.io/api/language#rename_pipe
        """
        if old_name not in self.pipe_names:
            raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
        if new_name in self.pipe_names:
            raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
        i = self.pipe_names.index(old_name)
        self.pipeline[i] = (new_name, self.pipeline[i][1])
        if old_name not in self.component_names:
            raise ValueError(
                Errors.E001.format(name=old_name, opts=self.component_names)
            )
        if new_name in self.component_names:
            raise ValueError(
                Errors.E007.format(name=new_name, opts=self.component_names)
            )
        i = self.component_names.index(old_name)
        self._components[i] = (new_name, self._components[i][1])
        self._pipe_meta[new_name] = self._pipe_meta.pop(old_name)
        self._pipe_configs[new_name] = self._pipe_configs.pop(old_name)


@@ -810,20 +893,45 @@ class Language:

        DOCS: https://spacy.io/api/language#remove_pipe
        """
        if name not in self.pipe_names:
            raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
        removed = self.pipeline.pop(self.pipe_names.index(name))
        if name not in self.component_names:
            raise ValueError(Errors.E001.format(name=name, opts=self.component_names))
        removed = self._components.pop(self.component_names.index(name))
        # We're only removing the component itself from the metas/configs here
        # because factory may be used for something else
        self._pipe_meta.pop(name)
        self._pipe_configs.pop(name)
        # Make sure the name is also removed from the set of disabled components
        if name in self.disabled:
            self._disabled.remove(name)
        return removed

    def disable_pipe(self, name: str) -> None:
        """Disable a pipeline component. The component will still exist on
        the nlp object, but it won't be run as part of the pipeline. Does
        nothing if the component is already disabled.

        name (str): The name of the component to disable.
        """
        if name not in self.component_names:
            raise ValueError(Errors.E001.format(name=name, opts=self.component_names))
        self._disabled.add(name)

    def enable_pipe(self, name: str) -> None:
        """Enable a previously disabled pipeline component so it's run as part
        of the pipeline. Does nothing if the component is already enabled.

        name (str): The name of the component to enable.
        """
        if name not in self.component_names:
            raise ValueError(Errors.E001.format(name=name, opts=self.component_names))
        if name in self.disabled:
            self._disabled.remove(name)

    def __call__(
        self,
        text: str,
        *,
        disable: Iterable[str] = tuple(),
        disable: Iterable[str] = SimpleFrozenList(),
        component_cfg: Optional[Dict[str, Dict[str, Any]]] = None,
    ) -> Doc:
        """Apply the pipeline to some text. The text can span multiple sentences,
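A sketch of the runtime toggling the new disable_pipe/enable_pipe methods enable, assuming a trivial custom component (the component name "noisy" is made up):

from spacy.language import Language

@Language.component("noisy")
def noisy(doc):
    print("ran noisy on:", doc.text)
    return doc

nlp = Language()
nlp.add_pipe("noisy")
nlp("hello")                 # prints: ran noisy on: hello
nlp.disable_pipe("noisy")
nlp("world")                 # prints nothing: the component is skipped, not removed
nlp.enable_pipe("noisy")
nlp("again")                 # runs again
nlp.disable_pipe("noisy")    # idempotent: per the docstring, disabling an
nlp.disable_pipe("noisy")    # already-disabled component does nothing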
@@ -869,7 +977,7 @@ class Language:
        warnings.warn(Warnings.W096, DeprecationWarning)
        if len(names) == 1 and isinstance(names[0], (list, tuple)):
            names = names[0]  # support list of names instead of spread
        return DisabledPipes(self, names)
        return self.select_pipes(disable=names)

    def select_pipes(
        self,

@@ -922,7 +1030,7 @@ class Language:
        sgd: Optional[Optimizer] = None,
        losses: Optional[Dict[str, float]] = None,
        component_cfg: Optional[Dict[str, Dict[str, Any]]] = None,
        exclude: Iterable[str] = tuple(),
        exclude: Iterable[str] = SimpleFrozenList(),
    ):
        """Update the models in the pipeline.


@@ -976,7 +1084,7 @@ class Language:
        sgd: Optional[Optimizer] = None,
        losses: Optional[Dict[str, float]] = None,
        component_cfg: Optional[Dict[str, Dict[str, Any]]] = None,
        exclude: Iterable[str] = tuple(),
        exclude: Iterable[str] = SimpleFrozenList(),
    ) -> Dict[str, float]:
        """Make a "rehearsal" update to the models in the pipeline, to prevent
        forgetting. Rehearsal updates run an initial copy of the model over some

@@ -1205,7 +1313,7 @@ class Language:
        *,
        as_tuples: bool = False,
        batch_size: int = 1000,
        disable: Iterable[str] = tuple(),
        disable: Iterable[str] = SimpleFrozenList(),
        cleanup: bool = False,
        component_cfg: Optional[Dict[str, Dict[str, Any]]] = None,
        n_process: int = 1,

@@ -1365,7 +1473,8 @@ class Language:
        config: Union[Dict[str, Any], Config] = {},
        *,
        vocab: Union[Vocab, bool] = True,
        disable: Iterable[str] = tuple(),
        disable: Iterable[str] = SimpleFrozenList(),
        exclude: Iterable[str] = SimpleFrozenList(),
        auto_fill: bool = True,
        validate: bool = True,
    ) -> "Language":

@@ -1375,7 +1484,11 @@ class Language:

        config (Dict[str, Any] / Config): The loaded config.
        vocab (Vocab): A Vocab object. If True, a vocab is created.
        disable (Iterable[str]): List of pipeline component names to disable.
        disable (Iterable[str]): Names of pipeline components to disable.
            Disabled pipes will be loaded but they won't be run unless you
            explicitly enable them by calling nlp.enable_pipe.
        exclude (Iterable[str]): Names of pipeline components to exclude.
            Excluded components won't be loaded.
        auto_fill (bool): Automatically fill in missing values in config based
            on defaults and function argument annotations.
        validate (bool): Validate the component config and arguments against

@@ -1448,7 +1561,7 @@ class Language:
                raise ValueError(Errors.E956.format(name=pipe_name, opts=opts))
            pipe_cfg = util.copy_config(pipeline[pipe_name])
            raw_config = Config(filled["components"][pipe_name])
            if pipe_name not in disable:
            if pipe_name not in exclude:
                if "factory" not in pipe_cfg and "source" not in pipe_cfg:
                    err = Errors.E984.format(name=pipe_name, config=pipe_cfg)
                    raise ValueError(err)

@@ -1473,6 +1586,8 @@ class Language:
                    )
                    source_name = pipe_cfg.get("component", pipe_name)
                    nlp.add_pipe(source_name, source=source_nlps[model], name=pipe_name)
        disabled_pipes = [*config["nlp"]["disabled"], *disable]
        nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
        nlp.config = filled if auto_fill else config
        nlp.resolved = resolved
        if after_pipeline_creation is not None:

@@ -1484,7 +1599,7 @@ class Language:
        return nlp

    def to_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = tuple()
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> None:
        """Save the current state to a directory. If a model is loaded, this
        will include the model.

@@ -1502,9 +1617,7 @@ class Language:
        )
        serializers["meta.json"] = lambda p: srsly.write_json(p, self.meta)
        serializers["config.cfg"] = lambda p: self.config.to_disk(p)
        for name, proc in self.pipeline:
            if not hasattr(proc, "name"):
                continue
        for name, proc in self._components:
            if name in exclude:
                continue
            if not hasattr(proc, "to_disk"):

@@ -1514,7 +1627,7 @@ class Language:
        util.to_disk(path, serializers, exclude)

    def from_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = tuple()
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "Language":
        """Loads state from a directory. Modifies the object in place and
        returns it. If the saved `Language` object contains a model, the

@@ -1550,7 +1663,7 @@ class Language:
        deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(
            p, exclude=["vocab"]
        )
        for name, proc in self.pipeline:
        for name, proc in self._components:
            if name in exclude:
                continue
            if not hasattr(proc, "from_disk"):

@@ -1566,7 +1679,7 @@ class Language:
        self._link_components()
        return self

    def to_bytes(self, *, exclude: Iterable[str] = tuple()) -> bytes:
    def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
        """Serialize the current state to a binary string.

        exclude (list): Names of components or serialization fields to exclude.

@@ -1579,7 +1692,7 @@ class Language:
        serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
        serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
        serializers["config.cfg"] = lambda: self.config.to_bytes()
        for name, proc in self.pipeline:
        for name, proc in self._components:
            if name in exclude:
                continue
            if not hasattr(proc, "to_bytes"):

@@ -1588,7 +1701,7 @@ class Language:
        return util.to_bytes(serializers, exclude)

    def from_bytes(
        self, bytes_data: bytes, *, exclude: Iterable[str] = tuple()
        self, bytes_data: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "Language":
        """Load state from a binary string.


@@ -1615,7 +1728,7 @@ class Language:
        deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(
            b, exclude=["vocab"]
        )
        for name, proc in self.pipeline:
        for name, proc in self._components:
            if name in exclude:
                continue
            if not hasattr(proc, "from_bytes"):

@@ -1651,14 +1764,10 @@ class DisabledPipes(list):
    def __init__(self, nlp: Language, names: List[str]) -> None:
        self.nlp = nlp
        self.names = names
        # Important! Not deep copy -- we just want the container (but we also
        # want to support people providing arbitrarily typed nlp.pipeline
        # objects.)
        self.original_pipeline = copy(nlp.pipeline)
        self.metas = {name: nlp.get_pipe_meta(name) for name in names}
        self.configs = {name: nlp.get_pipe_config(name) for name in names}
        for name in self.names:
            self.nlp.disable_pipe(name)
        list.__init__(self)
        self.extend(nlp.remove_pipe(name) for name in names)
        self.extend(self.names)

    def __enter__(self):
        return self

@@ -1668,14 +1777,10 @@ class DisabledPipes(list):

    def restore(self) -> None:
        """Restore the pipeline to its state when DisabledPipes was created."""
        current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
        unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
        if unexpected:
            # Don't change the pipeline if we're raising an error.
            self.nlp.pipeline = current
            raise ValueError(Errors.E008.format(names=unexpected))
        self.nlp._pipe_meta.update(self.metas)
        self.nlp._pipe_configs.update(self.configs)
        for name in self.names:
            if name not in self.nlp.component_names:
                raise ValueError(Errors.E008.format(name=name))
            self.nlp.enable_pipe(name)
        self[:] = []
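Under the reworked DisabledPipes, a select_pipes block toggles the disabled set rather than popping components, so removing a disabled component inside the block makes restore() raise E008, which is exactly what the test added in this commit asserts. A sketch (built-in factory names used for illustration):

from spacy.language import Language

nlp = Language()
nlp.add_pipe("ner")
nlp.add_pipe("tagger")

with nlp.select_pipes(disable=["tagger"]):
    assert nlp.pipe_names == ["ner"]         # tagger is skipped inside the block
assert nlp.pipe_names == ["ner", "tagger"]   # restored on exit

# Removing a disabled component during the block breaks restore():
disabled = nlp.select_pipes(disable=["tagger"])
nlp.remove_pipe("tagger")
# disabled.restore() would now raise ValueError (E008)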
@@ -47,7 +47,6 @@ def init(model, X=None, Y=None):


def resize_output(model, new_nO):
    tok2vec = model.get_ref("tok2vec")
    lower = model.get_ref("lower")
    upper = model.get_ref("upper")
    if not model.attrs["has_upper"]:
@@ -12,6 +12,7 @@ from ..symbols import IDS, TAG, POS, MORPH, LEMMA
from ..tokens import Doc, Span
from ..tokens._retokenize import normalize_token_attrs, set_token_attrs
from ..vocab import Vocab
from ..util import SimpleFrozenList
from .. import util


@@ -78,7 +79,7 @@ class AttributeRuler(Pipe):

        DOCS: https://spacy.io/api/attributeruler#call
        """
        matches = self.matcher(doc)
        matches = sorted(self.matcher(doc))

        for match_id, start, end in matches:
            span = Span(doc, start, end, label=match_id)

@@ -220,7 +221,7 @@ class AttributeRuler(Pipe):
            results.update(Scorer.score_token_attr(examples, "lemma", **kwargs))
        return results

    def to_bytes(self, exclude: Iterable[str] = tuple()) -> bytes:
    def to_bytes(self, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
        """Serialize the AttributeRuler to a bytestring.

        exclude (Iterable[str]): String names of serialization fields to exclude.

@@ -230,13 +231,12 @@ class AttributeRuler(Pipe):
        """
        serialize = {}
        serialize["vocab"] = self.vocab.to_bytes
        patterns = {k: self.matcher.get(k)[1] for k in range(len(self.attrs))}
        serialize["patterns"] = lambda: srsly.msgpack_dumps(patterns)
        serialize["attrs"] = lambda: srsly.msgpack_dumps(self.attrs)
        serialize["indices"] = lambda: srsly.msgpack_dumps(self.indices)
        serialize["patterns"] = lambda: srsly.msgpack_dumps(self.patterns)
        return util.to_bytes(serialize, exclude)

    def from_bytes(self, bytes_data: bytes, exclude: Iterable[str] = tuple()):
    def from_bytes(
        self, bytes_data: bytes, exclude: Iterable[str] = SimpleFrozenList()
    ):
        """Load the AttributeRuler from a bytestring.

        bytes_data (bytes): The data to load.

@@ -245,51 +245,35 @@ class AttributeRuler(Pipe):

        DOCS: https://spacy.io/api/attributeruler#from_bytes
        """
        data = {"patterns": b""}

        def load_patterns(b):
            data["patterns"] = srsly.msgpack_loads(b)

        def load_attrs(b):
            self.attrs = srsly.msgpack_loads(b)

        def load_indices(b):
            self.indices = srsly.msgpack_loads(b)
            self.add_patterns(srsly.msgpack_loads(b))

        deserialize = {
            "vocab": lambda b: self.vocab.from_bytes(b),
            "patterns": load_patterns,
            "attrs": load_attrs,
            "indices": load_indices,
        }
        util.from_bytes(bytes_data, deserialize, exclude)

        if data["patterns"]:
            for key, pattern in data["patterns"].items():
                self.matcher.add(key, pattern)
            assert len(self.attrs) == len(data["patterns"])
            assert len(self.indices) == len(data["patterns"])

        return self

    def to_disk(self, path: Union[Path, str], exclude: Iterable[str] = tuple()) -> None:
    def to_disk(
        self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()
    ) -> None:
        """Serialize the AttributeRuler to disk.

        path (Union[Path, str]): A path to a directory.
        exclude (Iterable[str]): String names of serialization fields to exclude.
        DOCS: https://spacy.io/api/attributeruler#to_disk
        """
        patterns = {k: self.matcher.get(k)[1] for k in range(len(self.attrs))}
        serialize = {
            "vocab": lambda p: self.vocab.to_disk(p),
            "patterns": lambda p: srsly.write_msgpack(p, patterns),
            "attrs": lambda p: srsly.write_msgpack(p, self.attrs),
            "indices": lambda p: srsly.write_msgpack(p, self.indices),
            "patterns": lambda p: srsly.write_msgpack(p, self.patterns),
        }
        util.to_disk(path, serialize, exclude)

    def from_disk(
        self, path: Union[Path, str], exclude: Iterable[str] = tuple()
        self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()
    ) -> None:
        """Load the AttributeRuler from disk.


@@ -297,31 +281,16 @@ class AttributeRuler(Pipe):
        exclude (Iterable[str]): String names of serialization fields to exclude.
        DOCS: https://spacy.io/api/attributeruler#from_disk
        """
        data = {"patterns": b""}

        def load_patterns(p):
            data["patterns"] = srsly.read_msgpack(p)

        def load_attrs(p):
            self.attrs = srsly.read_msgpack(p)

        def load_indices(p):
            self.indices = srsly.read_msgpack(p)
            self.add_patterns(srsly.read_msgpack(p))

        deserialize = {
            "vocab": lambda p: self.vocab.from_disk(p),
            "patterns": load_patterns,
            "attrs": load_attrs,
            "indices": load_indices,
        }
        util.from_disk(path, deserialize, exclude)

        if data["patterns"]:
            for key, pattern in data["patterns"].items():
                self.matcher.add(key, pattern)
            assert len(self.attrs) == len(data["patterns"])
            assert len(self.indices) == len(data["patterns"])

        return self
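Sorting the matcher output makes rule application deterministic: with the two patterns below, both rules fire in insertion order and the later-added rule wins, which is what the new test_attributeruler_rule_order asserts. A sketch with pattern contents copied from that test (the import path for AttributeRuler is assumed):

from spacy.lang.en import English
from spacy.pipeline import AttributeRuler  # import path assumed

nlp = English()
ruler = AttributeRuler(nlp.vocab)
ruler.add_patterns([
    {"patterns": [[{"TAG": "VBZ"}]], "attrs": {"POS": "VERB"}},
    {"patterns": [[{"TAG": "VBZ"}]], "attrs": {"POS": "NOUN"}},
])
# With matches sorted, the second rule overwrites the first, so a
# VBZ-tagged token deterministically ends up with POS "NOUN".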
@@ -2,7 +2,7 @@ from typing import Optional, Iterable, Callable, Dict, Iterator, Union, List, Tu
from pathlib import Path
import srsly
import random
from thinc.api import CosineDistance, get_array_module, Model, Optimizer, Config
from thinc.api import CosineDistance, Model, Optimizer, Config
from thinc.api import set_dropout_rate
import warnings


@@ -13,6 +13,7 @@ from ..language import Language
from ..vocab import Vocab
from ..gold import Example, validate_examples
from ..errors import Errors, Warnings
from ..util import SimpleFrozenList
from .. import util


@@ -404,7 +405,7 @@ class EntityLinker(Pipe):
            token.ent_kb_id_ = kb_id

    def to_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = tuple()
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList(),
    ) -> None:
        """Serialize the pipe to disk.


@@ -421,7 +422,7 @@ class EntityLinker(Pipe):
        util.to_disk(path, serialize, exclude)

    def from_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = tuple()
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList(),
    ) -> "EntityLinker":
        """Load the pipe from disk. Modifies the object in place and returns it.
@@ -5,7 +5,7 @@ import srsly

from ..language import Language
from ..errors import Errors
from ..util import ensure_path, to_disk, from_disk
from ..util import ensure_path, to_disk, from_disk, SimpleFrozenList
from ..tokens import Doc, Span
from ..matcher import Matcher, PhraseMatcher
from ..scorer import Scorer

@@ -317,7 +317,7 @@ class EntityRuler:
        return Scorer.score_spans(examples, "ents", **kwargs)

    def from_bytes(
        self, patterns_bytes: bytes, *, exclude: Iterable[str] = tuple()
        self, patterns_bytes: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "EntityRuler":
        """Load the entity ruler from a bytestring.


@@ -341,7 +341,7 @@ class EntityRuler:
            self.add_patterns(cfg)
        return self

    def to_bytes(self, *, exclude: Iterable[str] = tuple()) -> bytes:
    def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
        """Serialize the entity ruler patterns to a bytestring.

        RETURNS (bytes): The serialized patterns.

@@ -357,7 +357,7 @@ class EntityRuler:
        return srsly.msgpack_dumps(serial)

    def from_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = tuple()
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "EntityRuler":
        """Load the entity ruler from a file. Expects a file containing
        newline-delimited JSON (JSONL) with one entry per line.

@@ -394,7 +394,7 @@ class EntityRuler:
        return self

    def to_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = tuple()
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> None:
        """Save the entity ruler patterns to a directory. The patterns will be
        saved as newline-delimited JSON (JSONL).
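For reference, the JSONL round trip those docstrings describe, as a sketch; the path name and pattern are made up:

from pathlib import Path
from spacy.lang.en import English

nlp = English()
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns([{"label": "ORG", "pattern": "Explosion"}])
ruler.to_disk(Path("patterns"))    # writes newline-delimited JSON (JSONL)
ruler.from_disk(Path("patterns"))  # loads the same patterns back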
@@ -223,6 +223,7 @@ class ConfigSchemaNlp(BaseModel):
    # fmt: off
    lang: StrictStr = Field(..., title="The base language to use")
    pipeline: List[StrictStr] = Field(..., title="The pipeline component names in order")
    disabled: List[StrictStr] = Field(..., title="Pipeline components to disable by default")
    tokenizer: Callable = Field(..., title="The tokenizer to use")
    load_vocab_data: StrictBool = Field(..., title="Whether to load additional vocab data from spacy-lookups-data")
    before_creation: Optional[Callable[[Type["Language"]], Type["Language"]]] = Field(..., title="Optional callback to modify Language class before initialization")
@@ -1,10 +1,10 @@
from typing import Optional, Iterable, Dict, Any, Callable, Tuple, TYPE_CHECKING
from typing import Optional, Iterable, Dict, Any, Callable, TYPE_CHECKING
import numpy as np

from .gold import Example
from .tokens import Token, Doc, Span
from .errors import Errors
from .util import get_lang_class
from .util import get_lang_class, SimpleFrozenList
from .morphology import Morphology

if TYPE_CHECKING:

@@ -317,7 +317,7 @@ class Scorer:
        attr: str,
        *,
        getter: Callable[[Doc, str], Any] = getattr,
        labels: Iterable[str] = tuple(),
        labels: Iterable[str] = SimpleFrozenList(),
        multi_label: bool = True,
        positive_label: Optional[str] = None,
        threshold: Optional[float] = None,

@@ -447,7 +447,7 @@ class Scorer:
        getter: Callable[[Token, str], Any] = getattr,
        head_attr: str = "head",
        head_getter: Callable[[Token, str], Token] = getattr,
        ignore_labels: Tuple[str] = tuple(),
        ignore_labels: Iterable[str] = SimpleFrozenList(),
        **cfg,
    ) -> Dict[str, Any]:
        """Returns the UAS, LAS, and LAS per type scores for dependency
@@ -104,7 +104,11 @@ def test_attributeruler_score(nlp, pattern_dicts):
    assert doc[3].lemma_ == "cat"
    assert doc[3].morph_ == "Case=Nom|Number=Sing"

    dev_examples = [Example.from_dict(nlp.make_doc("This is a test."), {"lemmas": ["this", "is", "a", "cat", "."]})]
    dev_examples = [
        Example.from_dict(
            nlp.make_doc("This is a test."), {"lemmas": ["this", "is", "a", "cat", "."]}
        )
    ]
    scores = nlp.evaluate(dev_examples)
    # "cat" is the only correct lemma
    assert scores["lemma_acc"] == pytest.approx(0.2)

@@ -112,6 +116,22 @@ def test_attributeruler_score(nlp, pattern_dicts):
    assert scores["morph_acc"] == pytest.approx(0.6)


def test_attributeruler_rule_order(nlp):
    a = AttributeRuler(nlp.vocab)
    patterns = [
        {"patterns": [[{"TAG": "VBZ"}]], "attrs": {"POS": "VERB"}},
        {"patterns": [[{"TAG": "VBZ"}]], "attrs": {"POS": "NOUN"}},
    ]
    a.add_patterns(patterns)
    doc = get_doc(
        nlp.vocab,
        words=["This", "is", "a", "test", "."],
        tags=["DT", "VBZ", "DT", "NN", "."],
    )
    doc = a(doc)
    assert doc[1].pos_ == "NOUN"


def test_attributeruler_tag_map(nlp, tag_map):
    a = AttributeRuler(nlp.vocab)
    a.load_from_tag_map(tag_map)

@@ -215,6 +235,7 @@ def test_attributeruler_serialize(nlp, pattern_dicts):
    assert a.to_bytes() == a_reloaded.to_bytes()
    doc1 = a_reloaded(nlp.make_doc(text))
    numpy.array_equal(doc.to_array(attrs), doc1.to_array(attrs))
    assert a.patterns == a_reloaded.patterns

    # disk roundtrip
    with make_tempdir() as tmp_dir:

@@ -223,3 +244,4 @@ def test_attributeruler_serialize(nlp, pattern_dicts):
        doc2 = nlp2(text)
        assert nlp2.get_pipe("attribute_ruler").to_bytes() == a.to_bytes()
        assert numpy.array_equal(doc.to_array(attrs), doc2.to_array(attrs))
        assert a.patterns == nlp2.get_pipe("attribute_ruler").patterns
@@ -438,3 +438,26 @@ def test_pipe_factories_from_source_config():
    config = nlp.config["components"]["custom"]
    assert config["factory"] == name
    assert config["arg"] == "world"


def test_pipe_factories_decorator_idempotent():
    """Check that decorator can be run multiple times if the function is the
    same. This is especially relevant for live reloading because we don't
    want spaCy to raise an error if a module registering components is reloaded.
    """
    name = "test_pipe_factories_decorator_idempotent"
    func = lambda nlp, name: lambda doc: doc
    for i in range(5):
        Language.factory(name, func=func)
    nlp = Language()
    nlp.add_pipe(name)
    Language.factory(name, func=func)
    # Make sure it also works for component decorator, which creates the
    # factory function
    name2 = f"{name}2"
    func2 = lambda doc: doc
    for i in range(5):
        Language.component(name2, func=func2)
    nlp = Language()
    nlp.add_pipe(name)
    Language.component(name2, func=func2)
@@ -1,5 +1,6 @@
import pytest
from spacy.language import Language
from spacy.util import SimpleFrozenList


@pytest.fixture

@@ -181,6 +182,11 @@ def test_select_pipes_errors(nlp):
    with pytest.raises(ValueError):
        nlp.select_pipes(enable=[], disable=["c3"])

    disabled = nlp.select_pipes(disable=["c2"])
    nlp.remove_pipe("c2")
    with pytest.raises(ValueError):
        disabled.restore()


@pytest.mark.parametrize("n_pipes", [100])
def test_add_lots_of_pipes(nlp, n_pipes):

@@ -249,3 +255,94 @@ def test_add_pipe_before_after():
        nlp.add_pipe("entity_ruler", before=True)
    with pytest.raises(ValueError):
        nlp.add_pipe("entity_ruler", first=False)


def test_disable_enable_pipes():
    name = "test_disable_enable_pipes"
    results = {}

    def make_component(name):
        results[name] = ""

        def component(doc):
            nonlocal results
            results[name] = doc.text
            return doc

        return component

    c1 = Language.component(f"{name}1", func=make_component(f"{name}1"))
    c2 = Language.component(f"{name}2", func=make_component(f"{name}2"))

    nlp = Language()
    nlp.add_pipe(f"{name}1")
    nlp.add_pipe(f"{name}2")
    assert results[f"{name}1"] == ""
    assert results[f"{name}2"] == ""
    assert nlp.pipeline == [(f"{name}1", c1), (f"{name}2", c2)]
    assert nlp.pipe_names == [f"{name}1", f"{name}2"]
    nlp.disable_pipe(f"{name}1")
    assert nlp.disabled == [f"{name}1"]
    assert nlp.component_names == [f"{name}1", f"{name}2"]
    assert nlp.pipe_names == [f"{name}2"]
    assert nlp.config["nlp"]["disabled"] == [f"{name}1"]
    nlp("hello")
    assert results[f"{name}1"] == ""  # didn't run
    assert results[f"{name}2"] == "hello"  # ran
    nlp.enable_pipe(f"{name}1")
    assert nlp.disabled == []
    assert nlp.pipe_names == [f"{name}1", f"{name}2"]
    assert nlp.config["nlp"]["disabled"] == []
    nlp("world")
    assert results[f"{name}1"] == "world"
    assert results[f"{name}2"] == "world"
    nlp.disable_pipe(f"{name}2")
    nlp.remove_pipe(f"{name}2")
    assert nlp.components == [(f"{name}1", c1)]
    assert nlp.pipeline == [(f"{name}1", c1)]
    assert nlp.component_names == [f"{name}1"]
    assert nlp.pipe_names == [f"{name}1"]
    assert nlp.disabled == []
    assert nlp.config["nlp"]["disabled"] == []
    nlp.rename_pipe(f"{name}1", name)
    assert nlp.components == [(name, c1)]
    assert nlp.component_names == [name]
    nlp("!")
    assert results[f"{name}1"] == "!"
    assert results[f"{name}2"] == "world"
    with pytest.raises(ValueError):
        nlp.disable_pipe(f"{name}2")
    nlp.disable_pipe(name)
    assert nlp.component_names == [name]
    assert nlp.pipe_names == []
    assert nlp.config["nlp"]["disabled"] == [name]
    nlp("?")
    assert results[f"{name}1"] == "!"


def test_pipe_methods_frozen():
    """Test that spaCy raises custom error messages if "frozen" properties are
    accessed. We still want to use a list here to not break backwards
    compatibility, but users should see an error if they're trying to append
    to nlp.pipeline etc."""
    nlp = Language()
    ner = nlp.add_pipe("ner")
    assert nlp.pipe_names == ["ner"]
    for prop in [
        nlp.pipeline,
        nlp.pipe_names,
        nlp.components,
        nlp.component_names,
        nlp.disabled,
        nlp.factory_names,
    ]:
        assert isinstance(prop, list)
        assert isinstance(prop, SimpleFrozenList)
    with pytest.raises(NotImplementedError):
        nlp.pipeline.append(("ner2", ner))
    with pytest.raises(NotImplementedError):
        nlp.pipe_names.pop()
    with pytest.raises(NotImplementedError):
        nlp.components.sort()
    with pytest.raises(NotImplementedError):
        nlp.component_names.clear()
@@ -161,6 +161,7 @@ def test_issue4674():
    assert kb2.get_size_entities() == 1


@pytest.mark.skip(reason="API change: disable just disables, new exclude arg")
def test_issue4707():
    """Tests that disabled component names are also excluded from nlp.from_disk
    by default when loading a model.
@@ -6,6 +6,8 @@ from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.tagger import DEFAULT_TAGGER_MODEL
from spacy.pipeline.textcat import DEFAULT_TEXTCAT_MODEL
from spacy.pipeline.senter import DEFAULT_SENTER_MODEL
from spacy.lang.en import English
import spacy

from ..util import make_tempdir


@@ -173,3 +175,34 @@ def test_serialize_sentencerecognizer(en_vocab):
    sr_b = sr.to_bytes()
    sr_d = SentenceRecognizer(en_vocab, model).from_bytes(sr_b)
    assert sr.to_bytes() == sr_d.to_bytes()


def test_serialize_pipeline_disable_enable():
    nlp = English()
    nlp.add_pipe("ner")
    nlp.add_pipe("tagger")
    nlp.disable_pipe("tagger")
    assert nlp.config["nlp"]["disabled"] == ["tagger"]
    config = nlp.config.copy()
    nlp2 = English.from_config(config)
    assert nlp2.pipe_names == ["ner"]
    assert nlp2.component_names == ["ner", "tagger"]
    assert nlp2.disabled == ["tagger"]
    assert nlp2.config["nlp"]["disabled"] == ["tagger"]
    with make_tempdir() as d:
        nlp2.to_disk(d)
        nlp3 = spacy.load(d)
    assert nlp3.pipe_names == ["ner"]
    assert nlp3.component_names == ["ner", "tagger"]
    with make_tempdir() as d:
        nlp3.to_disk(d)
        nlp4 = spacy.load(d, disable=["ner"])
    assert nlp4.pipe_names == []
    assert nlp4.component_names == ["ner", "tagger"]
    assert nlp4.disabled == ["ner", "tagger"]
    with make_tempdir() as d:
        nlp.to_disk(d)
        nlp5 = spacy.load(d, exclude=["tagger"])
    assert nlp5.pipe_names == ["ner"]
    assert nlp5.component_names == ["ner"]
    assert nlp5.disabled == []
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"args",
|
||||
[["--foo"], ["--x.foo", "bar", "--baz"]],
|
||||
"args", [["--foo"], ["--x.foo", "bar", "--baz"]],
|
||||
)
|
||||
def test_parse_config_overrides_invalid(args):
|
||||
with pytest.raises(NoSuchOption):
|
||||
|
@ -382,8 +381,7 @@ def test_parse_config_overrides_invalid(args):
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"args",
|
||||
[["--x.foo", "bar", "baz"], ["x.foo"]],
|
||||
"args", [["--x.foo", "bar", "baz"], ["x.foo"]],
|
||||
)
|
||||
def test_parse_config_overrides_invalid_2(args):
|
||||
with pytest.raises(SystemExit):
|
||||
|
|
|
@ -3,10 +3,9 @@ import pytest
|
|||
from .util import get_random_doc
|
||||
|
||||
from spacy import util
|
||||
from spacy.util import dot_to_object
|
||||
from spacy.util import dot_to_object, SimpleFrozenList
|
||||
from thinc.api import Config, Optimizer
|
||||
from spacy.gold.batchers import minibatch_by_words
|
||||
|
||||
from ..lang.en import English
|
||||
from ..lang.nl import Dutch
|
||||
from ..language import DEFAULT_CONFIG_PATH
|
||||
|
@ -106,3 +105,20 @@ def test_util_dot_section():
|
|||
assert not dot_to_object(en_config, "nlp.load_vocab_data")
|
||||
assert dot_to_object(nl_config, "nlp.load_vocab_data")
|
||||
assert isinstance(dot_to_object(nl_config, "training.optimizer"), Optimizer)
|
||||
|
||||
|
||||
def test_simple_frozen_list():
|
||||
t = SimpleFrozenList(["foo", "bar"])
|
||||
assert t == ["foo", "bar"]
|
||||
assert t.index("bar") == 1 # okay method
|
||||
with pytest.raises(NotImplementedError):
|
||||
t.append("baz")
|
||||
with pytest.raises(NotImplementedError):
|
||||
t.sort()
|
||||
with pytest.raises(NotImplementedError):
|
||||
t.extend(["baz"])
|
||||
with pytest.raises(NotImplementedError):
|
||||
t.pop()
|
||||
t = SimpleFrozenList(["foo", "bar"], error="Error!")
|
||||
with pytest.raises(NotImplementedError):
|
||||
t.append("baz")
|
||||
|
|
|
@ -10,7 +10,7 @@ from ..vocab import Vocab
|
|||
from ..compat import copy_reg
|
||||
from ..attrs import SPACY, ORTH, intify_attr
|
||||
from ..errors import Errors
|
||||
from ..util import ensure_path
|
||||
from ..util import ensure_path, SimpleFrozenList
|
||||
|
||||
# fmt: off
|
||||
ALL_ATTRS = ("ORTH", "TAG", "HEAD", "DEP", "ENT_IOB", "ENT_TYPE", "ENT_KB_ID", "LEMMA", "MORPH", "POS")
|
||||
|
@ -52,7 +52,7 @@ class DocBin:
|
|||
self,
|
||||
attrs: Iterable[str] = ALL_ATTRS,
|
||||
store_user_data: bool = False,
|
||||
docs: Iterable[Doc] = tuple(),
|
||||
docs: Iterable[Doc] = SimpleFrozenList(),
|
||||
) -> None:
|
||||
"""Create a DocBin object to hold serialized annotations.
|
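# Usage sketch (added for illustration; `nlp` is assumed to be a loaded
# pipeline). A DocBin with a restricted attribute set round-trips via bytes:
#
#     doc_bin = DocBin(attrs=["ORTH", "TAG"])
#     doc_bin.add(nlp("Some text."))
#     data = doc_bin.to_bytes()
#     docs = list(DocBin().from_bytes(data).get_docs(nlp.vocab))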
||||
|
||||
|
|
123
spacy/util.py
|
@ -120,6 +120,47 @@ class SimpleFrozenDict(dict):
|
|||
raise NotImplementedError(self.error)
|
||||
|
||||
|
||||
class SimpleFrozenList(list):
|
||||
"""Wrapper class around a list that lets us raise custom errors if certain
|
||||
attributes/methods are accessed. Mostly used for properties like
|
||||
Language.pipeline that return an immutable list (and that we don't want to
|
||||
convert to a tuple, to avoid breaking backwards compatibility). If a user
|
||||
accidentally calls nlp.pipeline.append(), we can raise a more helpful error.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, error: str = Errors.E927) -> None:
|
||||
"""Initialize the frozen list.
|
||||
|
||||
error (str): The error message when user tries to mutate the list.
|
||||
"""
|
||||
self.error = error
|
||||
super().__init__(*args)
|
||||
|
||||
def append(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
||||
|
||||
def clear(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
||||
|
||||
def extend(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
||||
|
||||
def insert(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
||||
|
||||
def pop(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
||||
|
||||
def remove(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
||||
|
||||
def reverse(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
||||
|
||||
def sort(self, *args, **kwargs):
|
||||
raise NotImplementedError(self.error)
|
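# Behavior sketch (mirrors the intent described in the docstring above):
#
#     pipeline = SimpleFrozenList(["tagger", "ner"])
#     pipeline.index("ner")      # read access works like a normal list
#     pipeline.append("parser")  # raises NotImplementedError with self.error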
||||
|
||||
|
||||
def lang_class_is_loaded(lang: str) -> bool:
|
||||
"""Check whether a Language class is already loaded. Language classes are
|
||||
loaded lazily, to avoid expensive setup code associated with the language
|
||||
|
@ -215,7 +256,8 @@ def load_model(
|
|||
name: Union[str, Path],
|
||||
*,
|
||||
vocab: Union["Vocab", bool] = True,
|
||||
disable: Iterable[str] = tuple(),
|
||||
disable: Iterable[str] = SimpleFrozenList(),
|
||||
exclude: Iterable[str] = SimpleFrozenList(),
|
||||
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
|
||||
) -> "Language":
|
||||
"""Load a model from a package or data path.
|
||||
|
@ -228,7 +270,7 @@ def load_model(
|
|||
keyed by section values in dot notation.
|
||||
RETURNS (Language): The loaded nlp object.
|
||||
"""
|
||||
kwargs = {"vocab": vocab, "disable": disable, "config": config}
|
||||
kwargs = {"vocab": vocab, "disable": disable, "exclude": exclude, "config": config}
|
||||
if isinstance(name, str): # name or string path
|
||||
if name.startswith("blank:"): # shortcut for blank model
|
||||
return get_lang_class(name.replace("blank:", ""))()
|
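# e.g. spacy.load("blank:en") becomes get_lang_class("en")(), i.e. a blank
# English pipeline without any trained components (comment added for clarity).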
||||
|
@ -247,7 +289,8 @@ def load_model_from_package(
|
|||
name: str,
|
||||
*,
|
||||
vocab: Union["Vocab", bool] = True,
|
||||
disable: Iterable[str] = tuple(),
|
||||
disable: Iterable[str] = SimpleFrozenList(),
|
||||
exclude: Iterable[str] = SimpleFrozenList(),
|
||||
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
|
||||
) -> "Language":
|
||||
"""Load a model from an installed package.
|
||||
|
@ -255,13 +298,17 @@ def load_model_from_package(
|
|||
name (str): The package name.
|
||||
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
|
||||
a new Vocab object will be created.
|
||||
disable (Iterable[str]): Names of pipeline components to disable.
|
||||
disable (Iterable[str]): Names of pipeline components to disable. Disabled
|
||||
pipes will be loaded but they won't be run unless you explicitly
|
||||
enable them by calling nlp.enable_pipe.
|
||||
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
|
||||
components won't be loaded.
|
||||
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
|
||||
keyed by section values in dot notation.
|
||||
RETURNS (Language): The loaded nlp object.
|
||||
"""
|
||||
cls = importlib.import_module(name)
|
||||
return cls.load(vocab=vocab, disable=disable, config=config)
|
||||
return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config)
|
||||
|
||||
|
||||
def load_model_from_path(
|
||||
|
@ -269,7 +316,8 @@ def load_model_from_path(
|
|||
*,
|
||||
meta: Optional[Dict[str, Any]] = None,
|
||||
vocab: Union["Vocab", bool] = True,
|
||||
disable: Iterable[str] = tuple(),
|
||||
disable: Iterable[str] = SimpleFrozenList(),
|
||||
exclude: Iterable[str] = SimpleFrozenList(),
|
||||
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
|
||||
) -> "Language":
|
||||
"""Load a model from a data directory path. Creates Language class with
|
||||
|
@ -279,7 +327,11 @@ def load_model_from_path(
|
|||
meta (Dict[str, Any]): Optional model meta.
|
||||
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
|
||||
a new Vocab object will be created.
|
||||
disable (Iterable[str]): Names of pipeline components to disable.
|
||||
disable (Iterable[str]): Names of pipeline components to disable. Disabled
|
||||
pipes will be loaded but they won't be run unless you explicitly
|
||||
enable them by calling nlp.enable_pipe.
|
||||
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
|
||||
components won't be loaded.
|
||||
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
|
||||
keyed by section values in dot notation.
|
||||
RETURNS (Language): The loaded nlp object.
|
||||
|
@ -290,15 +342,18 @@ def load_model_from_path(
|
|||
meta = get_model_meta(model_path)
|
||||
config_path = model_path / "config.cfg"
|
||||
config = load_config(config_path, overrides=dict_to_dot(config))
|
||||
nlp, _ = load_model_from_config(config, vocab=vocab, disable=disable)
|
||||
return nlp.from_disk(model_path, exclude=disable)
|
||||
nlp, _ = load_model_from_config(
|
||||
config, vocab=vocab, disable=disable, exclude=exclude
|
||||
)
|
||||
return nlp.from_disk(model_path, exclude=exclude)
|
||||
|
||||
|
||||
def load_model_from_config(
|
||||
config: Union[Dict[str, Any], Config],
|
||||
*,
|
||||
vocab: Union["Vocab", bool] = True,
|
||||
disable: Iterable[str] = tuple(),
|
||||
disable: Iterable[str] = SimpleFrozenList(),
|
||||
exclude: Iterable[str] = SimpleFrozenList(),
|
||||
auto_fill: bool = False,
|
||||
validate: bool = True,
|
||||
) -> Tuple["Language", Config]:
|
||||
|
@ -309,7 +364,11 @@ def load_model_from_config(
|
|||
meta (Dict[str, Any]): Optional model meta.
|
||||
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
|
||||
a new Vocab object will be created.
|
||||
disable (Iterable[str]): Names of pipeline components to disable.
|
||||
disable (Iterable[str]): Names of pipeline components to disable. Disabled
|
||||
pipes will be loaded but they won't be run unless you explicitly
|
||||
enable them by calling nlp.enable_pipe.
|
||||
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
|
||||
components won't be loaded.
|
||||
auto_fill (bool): Whether to auto-fill config with missing defaults.
|
||||
validate (bool): Whether to show config validation errors.
|
||||
RETURNS (Language): The loaded nlp object.
|
||||
|
@ -323,7 +382,12 @@ def load_model_from_config(
|
|||
# registry, including custom subclasses provided via entry points
|
||||
lang_cls = get_lang_class(nlp_config["lang"])
|
||||
nlp = lang_cls.from_config(
|
||||
config, vocab=vocab, disable=disable, auto_fill=auto_fill, validate=validate,
|
||||
config,
|
||||
vocab=vocab,
|
||||
disable=disable,
|
||||
exclude=exclude,
|
||||
auto_fill=auto_fill,
|
||||
validate=validate,
|
||||
)
|
||||
return nlp, nlp.resolved
|
||||
|
||||
|
@ -332,7 +396,8 @@ def load_model_from_init_py(
|
|||
init_file: Union[Path, str],
|
||||
*,
|
||||
vocab: Union["Vocab", bool] = True,
|
||||
disable: Iterable[str] = tuple(),
|
||||
disable: Iterable[str] = SimpleFrozenList(),
|
||||
exclude: Iterable[str] = SimpleFrozenList(),
|
||||
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
|
||||
) -> "Language":
|
||||
"""Helper function to use in the `load()` method of a model package's
|
||||
|
@ -340,7 +405,11 @@ def load_model_from_init_py(
|
|||
|
||||
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
|
||||
a new Vocab object will be created.
|
||||
disable (Iterable[str]): Names of pipeline components to disable.
|
||||
disable (Iterable[str]): Names of pipeline components to disable. Disabled
|
||||
pipes will be loaded but they won't be run unless you explicitly
|
||||
enable them by calling nlp.enable_pipe.
|
||||
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
|
||||
components won't be loaded.
|
||||
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
|
||||
keyed by section values in dot notation.
|
||||
RETURNS (Language): The loaded nlp object.
|
||||
|
@ -352,7 +421,12 @@ def load_model_from_init_py(
|
|||
if not model_path.exists():
|
||||
raise IOError(Errors.E052.format(path=data_path))
|
||||
return load_model_from_path(
|
||||
data_path, vocab=vocab, meta=meta, disable=disable, config=config
|
||||
data_path,
|
||||
vocab=vocab,
|
||||
meta=meta,
|
||||
disable=disable,
|
||||
exclude=exclude,
|
||||
config=config,
|
||||
)
|
||||
|
||||
|
||||
|
@ -673,6 +747,25 @@ def get_object_name(obj: Any) -> str:
|
|||
return repr(obj)
|
||||
|
||||
|
||||
def is_same_func(func1: Callable, func2: Callable) -> bool:
|
||||
"""Approximately decide whether two functions are the same, even if their
|
||||
identity is different (e.g. after they have been live reloaded). Mostly
|
||||
used in the @Language.component and @Language.factory decorators to decide
|
||||
whether to raise if a factory already exists. Allows the decorator to run
|
||||
multiple times with the same function.
|
||||
|
||||
func1 (Callable): The first function.
|
||||
func2 (Callable): The second function.
|
||||
RETURNS (bool): Whether it's the same function (most likely).
|
||||
"""
|
||||
if not callable(func1) or not callable(func2):
|
||||
return False
|
||||
same_name = func1.__qualname__ == func2.__qualname__
|
||||
same_file = inspect.getfile(func1) == inspect.getfile(func2)
|
||||
same_code = inspect.getsourcelines(func1) == inspect.getsourcelines(func2)
|
||||
return same_name and same_file and same_code
|
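# Illustrative sketch (added for clarity): a definition compares equal to
# itself, while two distinct functions differ by qualname:
#
#     def a(doc): return doc
#     def b(doc): return doc
#     assert is_same_func(a, a)
#     assert not is_same_func(a, b)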
||||
|
||||
|
||||
def get_cuda_stream(
|
||||
require: bool = False, non_blocking: bool = True
|
||||
) -> Optional[CudaStream]:
|
||||
|
|
|
@ -12,7 +12,8 @@ The attribute ruler lets you set token attributes for tokens identified by
|
|||
[`Matcher` patterns](/usage/rule-based-matching#matcher). The attribute ruler is
|
||||
typically used to handle exceptions for token attributes and to map values
|
||||
between attributes such as mapping fine-grained POS tags to coarse-grained POS
|
||||
tags.
|
||||
tags. See the [usage guide](/usage/linguistic-features/#mappings-exceptions) for
|
||||
examples.
|
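For a quick sense of the API (a minimal sketch; the pattern and attribute
values are illustrative), the ruler maps a `Matcher` pattern to attribute
values applied to the matched token:

```python
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("attribute_ruler")
# assign TAG/POS to the token at index 0 of each matched pattern
ruler.add(patterns=[[{"ORTH": "lol"}]], attrs={"TAG": "UH", "POS": "INTJ"})
doc = nlp("lol that is funny")
print(doc[0].tag_, doc[0].pos_)  # UH INTJ
```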
||||
|
||||
## Config and implementation {#config}
|
||||
|
||||
|
|
|
@ -74,15 +74,16 @@ your config and check that it's valid, you can run the
|
|||
Defines the `nlp` object, its tokenizer and
|
||||
[processing pipeline](/usage/processing-pipelines) component names.
|
||||
|
||||
| Name | Description |
|
||||
| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `lang` | Model language [ISO code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes). Defaults to `null`. ~~str~~ |
|
||||
| `pipeline` | Names of pipeline components in order. Should correspond to sections in the `[components]` block, e.g. `[components.ner]`. See docs on [defining components](/usage/training#config-components). Defaults to `[]`. ~~List[str]~~ |
|
||||
| `load_vocab_data` | Whether to load additional lexeme and vocab data from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) if available. Defaults to `true`. ~~bool~~ |
|
||||
| `before_creation` | Optional [callback](/usage/training#custom-code-nlp-callbacks) to modify `Language` subclass before it's initialized. Defaults to `null`. ~~Optional[Callable[[Type[Language]], Type[Language]]]~~ |
|
||||
| `after_creation` | Optional [callback](/usage/training#custom-code-nlp-callbacks) to modify `nlp` object right after it's initialized. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
|
||||
| `after_pipeline_creation` | Optional [callback](/usage/training#custom-code-nlp-callbacks) to modify `nlp` object after the pipeline components have been added. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
|
||||
| `tokenizer` | The tokenizer to use. Defaults to [`Tokenizer`](/api/tokenizer). ~~Callable[[str], Doc]~~ |
|
||||
| Name | Description |
|
||||
| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `lang` | Model language [ISO code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes). Defaults to `null`. ~~str~~ |
|
||||
| `pipeline` | Names of pipeline components in order. Should correspond to sections in the `[components]` block, e.g. `[components.ner]`. See docs on [defining components](/usage/training#config-components). Defaults to `[]`. ~~List[str]~~ |
|
||||
| `disabled` | Names of pipeline components that are loaded but disabled by default and not run as part of the pipeline. Should correspond to components listed in `pipeline`. After a model is loaded, disabled components can be enabled using [`Language.enable_pipe`](/api/language#enable_pipe). ~~List[str]~~ |
|
||||
| `load_vocab_data` | Whether to load additional lexeme and vocab data from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) if available. Defaults to `true`. ~~bool~~ |
|
||||
| `before_creation` | Optional [callback](/usage/training#custom-code-nlp-callbacks) to modify `Language` subclass before it's initialized. Defaults to `null`. ~~Optional[Callable[[Type[Language]], Type[Language]]]~~ |
|
||||
| `after_creation` | Optional [callback](/usage/training#custom-code-nlp-callbacks) to modify `nlp` object right after it's initialized. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
|
||||
| `after_pipeline_creation` | Optional [callback](/usage/training#custom-code-nlp-callbacks) to modify `nlp` object after the pipeline components have been added. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
|
||||
| `tokenizer` | The tokenizer to use. Defaults to [`Tokenizer`](/api/tokenizer). ~~Callable[[str], Doc]~~ |
|
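To illustrate the new `disabled` setting (a minimal sketch; the component
names are placeholders), a config can list components in `pipeline` but keep
some of them switched off by default:

```ini
[nlp]
lang = "en"
pipeline = ["tagger", "ner"]
disabled = ["tagger"]
```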
||||
|
||||
### components {#config-components tag="section"}
|
||||
|
||||
|
|
|
@ -357,35 +357,6 @@ their original weights after the block.
|
|||
| -------- | ------------------------------------------------------ |
|
||||
| `params` | A dictionary of parameters keyed by model ID. ~~dict~~ |
|
||||
|
||||
## Language.create_pipe {#create_pipe tag="method" new="2"}
|
||||
|
||||
Create a pipeline component from a factory.
|
||||
|
||||
<Infobox title="Changed in v3.0" variant="warning">
|
||||
|
||||
As of v3.0, the [`Language.add_pipe`](/api/language#add_pipe) method also takes
|
||||
the string name of the factory, creates the component, adds it to the pipeline
|
||||
and returns it. The `Language.create_pipe` method is now mostly used internally.
|
||||
To create a component and add it to the pipeline, you should always use
|
||||
`Language.add_pipe`.
|
||||
|
||||
</Infobox>
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> parser = nlp.create_pipe("parser")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `factory_name` | Name of the registered component factory. ~~str~~ |
|
||||
| `name` | Optional unique name of pipeline component instance. If not set, the factory name is used. An error is raised if the name already exists in the pipeline. ~~Optional[str]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `config` <Tag variant="new">3</Tag> | Optional config parameters to use for this component. Will be merged with the `default_config` specified by the component factory. ~~Optional[Dict[str, Any]]~~ |
|
||||
| `validate` <Tag variant="new">3</Tag> | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
|
||||
| **RETURNS** | The pipeline component. ~~Callable[[Doc], Doc]~~ |
|
||||
|
||||
## Language.add_pipe {#add_pipe tag="method" new="2"}
|
||||
|
||||
Add a component to the processing pipeline. Expects a name that maps to a
|
||||
|
@ -434,6 +405,35 @@ component, adds it to the pipeline and returns it.
|
|||
| `validate` <Tag variant="new">3</Tag> | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
|
||||
| **RETURNS** | The pipeline component. ~~Callable[[Doc], Doc]~~ |
|
||||
|
||||
## Language.create_pipe {#create_pipe tag="method" new="2"}
|
||||
|
||||
Create a pipeline component from a factory.
|
||||
|
||||
<Infobox title="Changed in v3.0" variant="warning">
|
||||
|
||||
As of v3.0, the [`Language.add_pipe`](/api/language#add_pipe) method also takes
|
||||
the string name of the factory, creates the component, adds it to the pipeline
|
||||
and returns it. The `Language.create_pipe` method is now mostly used internally.
|
||||
To create a component and add it to the pipeline, you should always use
|
||||
`Language.add_pipe`.
|
||||
|
||||
</Infobox>
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> parser = nlp.create_pipe("parser")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `factory_name` | Name of the registered component factory. ~~str~~ |
|
||||
| `name` | Optional unique name of pipeline component instance. If not set, the factory name is used. An error is raised if the name already exists in the pipeline. ~~Optional[str]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `config` <Tag variant="new">3</Tag> | Optional config parameters to use for this component. Will be merged with the `default_config` specified by the component factory. ~~Optional[Dict[str, Any]]~~ |
|
||||
| `validate` <Tag variant="new">3</Tag> | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
|
||||
| **RETURNS** | The pipeline component. ~~Callable[[Doc], Doc]~~ |
|
||||
|
||||
## Language.has_factory {#has_factory tag="classmethod" new="3"}
|
||||
|
||||
Check whether a factory name is registered on the `Language` class or subclass.
|
||||
|
@ -561,6 +561,54 @@ component function.
|
|||
| `name` | Name of the component to remove. ~~str~~ |
|
||||
| **RETURNS** | A `(name, component)` tuple of the removed component. ~~Tuple[str, Callable[[Doc], Doc]]~~ |
|
||||
|
||||
## Language.disable_pipe {#disable_pipe tag="method" new="3"}
|
||||
|
||||
Temporarily disable a pipeline component so it's not run as part of the
|
||||
pipeline. Disabled components are listed in
|
||||
[`nlp.disabled`](/api/language#attributes) and included in
|
||||
[`nlp.components`](/api/language#attributes), but not in
|
||||
[`nlp.pipeline`](/api/language#pipeline), so they're not run when you process a
|
||||
`Doc` with the `nlp` object. If the component is already disabled, this method
|
||||
does nothing.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> nlp.add_pipe("ner")
|
||||
> nlp.add_pipe("textcat")
|
||||
> assert nlp.pipe_names == ["ner", "textcat"]
|
||||
> nlp.disable_pipe("ner")
|
||||
> assert nlp.pipe_names == ["textcat"]
|
||||
> assert nlp.component_names == ["ner", "textcat"]
|
||||
> assert nlp.disabled == ["ner"]
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ------ | ----------------------------------------- |
|
||||
| `name` | Name of the component to disable. ~~str~~ |
|
||||
|
||||
## Language.enable_pipe {#enable_pipe tag="method" new="3"}
|
||||
|
||||
Enable a previously disabled component (e.g. via
|
||||
[`Language.disable_pipes`](/api/language#disable_pipes)) so it's run as part of
|
||||
the pipeline, [`nlp.pipeline`](/api/language#pipeline). If the component is
|
||||
already enabled, this method does nothing.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> nlp.disable_pipe("ner")
|
||||
> assert "ner" in nlp.disabled
|
||||
> assert "ner" not in nlp.pipe_names
|
||||
> nlp.enable_pipe("ner")
|
||||
> assert "ner" not in nlp.disabled
|
||||
> assert "ner" in nlp.pipe_names
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ------ | ---------------------------------------- |
|
||||
| `name` | Name of the component to enable. ~~str~~ |
|
||||
|
||||
## Language.select_pipes {#select_pipes tag="contextmanager, method" new="3"}
|
||||
|
||||
Disable one or more pipeline components. If used as a context manager, the
|
||||
|
@ -568,7 +616,9 @@ pipeline will be restored to the initial state at the end of the block.
|
|||
Otherwise, a `DisabledPipes` object is returned, that has a `.restore()` method
|
||||
you can use to undo your changes. You can specify either `disable` (as a list or
|
||||
string), or `enable`. In the latter case, all components not in the `enable`
|
||||
list, will be disabled.
|
||||
list will be disabled. Under the hood, this method calls into
|
||||
[`disable_pipe`](/api/language#disable_pipe) and
|
||||
[`enable_pipe`](/api/language#enable_pipe).
|
||||
|
||||
> #### Example
|
||||
>
|
||||
|
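> ```python
> # Usage sketch: components disabled inside the block are restored afterwards
> with nlp.select_pipes(disable=["tagger", "parser"]):
>     doc = nlp("Only the remaining components run here.")
> ```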
@ -860,18 +910,21 @@ available to the loaded object.
|
|||
|
||||
## Attributes {#attributes}
|
||||
|
||||
| Name | Description |
|
||||
| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `vocab` | A container for the lexical types. ~~Vocab~~ |
|
||||
| `tokenizer` | The tokenizer. ~~Tokenizer~~ |
|
||||
| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ |
|
||||
| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[str, Callable[[Doc], Doc]]~~ |
|
||||
| `pipe_names` <Tag variant="new">2</Tag> | List of pipeline component names, in order. ~~List[str]~~ |
|
||||
| `pipe_labels` <Tag variant="new">2.2</Tag> | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ |
|
||||
| `pipe_factories` <Tag variant="new">2.2</Tag> | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ |
|
||||
| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ |
|
||||
| `factory_names` <Tag variant="new">3</Tag> | List of all available factory names. ~~List[str]~~ |
|
||||
| `path` <Tag variant="new">2</Tag> | Path to the model data directory, if a model is loaded. Otherwise `None`. ~~Optional[Path]~~ |
|
||||
| Name | Description |
|
||||
| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `vocab` | A container for the lexical types. ~~Vocab~~ |
|
||||
| `tokenizer` | The tokenizer. ~~Tokenizer~~ |
|
||||
| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ |
|
||||
| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
|
||||
| `pipe_names` <Tag variant="new">2</Tag> | List of pipeline component names, in order. ~~List[str]~~ |
|
||||
| `pipe_labels` <Tag variant="new">2.2</Tag> | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ |
|
||||
| `pipe_factories` <Tag variant="new">2.2</Tag> | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ |
|
||||
| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ |
|
||||
| `factory_names` <Tag variant="new">3</Tag> | List of all available factory names. ~~List[str]~~ |
|
||||
| `components` <Tag variant="new">3</Tag> | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
|
||||
| `component_names` <Tag variant="new">3</Tag> | List of all available component names, including components that are currently disabled. ~~List[str]~~ |
|
||||
| `disabled` <Tag variant="new">3</Tag> | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ |
|
||||
| `path` <Tag variant="new">2</Tag> | Path to the model data directory, if a model is loaded. Otherwise `None`. ~~Optional[Path]~~ |
|
||||
|
||||
## Class attributes {#class-attributes}
|
||||
|
||||
|
|
|
@ -25,9 +25,10 @@ added to your pipeline, and not a hidden part of the vocab that runs behind the
|
|||
scenes. This makes it easier to customize how lemmas should be assigned in your
|
||||
pipeline.
|
||||
|
||||
If the lemmatization mode is set to `"rule"` and requires part-of-speech tags to
|
||||
be assigned, make sure a [`Tagger`](/api/tagger) or another component assigning
|
||||
tags is available in the pipeline and runs _before_ the lemmatizer.
|
||||
If the lemmatization mode is set to `"rule"`, which requires coarse-grained POS
|
||||
(`Token.pos`) to be assigned, make sure a [`Tagger`](/api/tagger),
|
||||
[`Morphologizer`](/api/morphologizer) or another component assigning POS is
|
||||
available in the pipeline and runs _before_ the lemmatizer.
|
||||
|
||||
</Infobox>
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ api_trainable: true
|
|||
---
|
||||
|
||||
A trainable pipeline component for sentence segmentation. For a simpler,
|
||||
ruse-based strategy, see the [`Sentencizer`](/api/sentencizer).
|
||||
rule-based strategy, see the [`Sentencizer`](/api/sentencizer).
|
||||
|
||||
## Config and implementation {#config}
|
||||
|
||||
|
|
|
@ -23,6 +23,14 @@ path, spaCy will assume it's a data directory, load its
|
|||
information to construct the `Language` class. The data will be loaded in via
|
||||
[`Language.from_disk`](/api/language#from_disk).
|
||||
|
||||
<Infobox variant="warning" title="Changed in v3.0">
|
||||
|
||||
As of v3.0, the `disable` keyword argument specifies components to load but
|
||||
disable, instead of components to not load at all. Those components can now be
|
||||
specified separately using the new `exclude` keyword argument.
|
||||
|
||||
</Infobox>
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
|
@ -30,16 +38,17 @@ information to construct the `Language` class. The data will be loaded in via
|
|||
> nlp = spacy.load("/path/to/en") # string path
|
||||
> nlp = spacy.load(Path("/path/to/en")) # pathlib Path
|
||||
>
|
||||
> nlp = spacy.load("en_core_web_sm", disable=["parser", "tagger"])
|
||||
> nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"])
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `name` | Model to load, i.e. package name or path. ~~Union[str, Path]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
|
||||
| `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
|
||||
| **RETURNS** | A `Language` object with the loaded model. ~~Language~~ |
|
||||
| Name | Description |
|
||||
| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `name` | Model to load, i.e. package name or path. ~~Union[str, Path]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
|
||||
| `exclude` <Tag variant="new">3</Tag> | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
|
||||
| `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
|
||||
| **RETURNS** | A `Language` object with the loaded model. ~~Language~~ |
|
||||
|
||||
Essentially, `spacy.load()` is a convenience wrapper that reads the model's
|
||||
[`config.cfg`](/api/data-formats#config), uses the language and pipeline
|
||||
|
@ -562,17 +571,18 @@ and create a `Language` object. The model data will then be loaded in via
|
|||
>
|
||||
> ```python
|
||||
> nlp = util.load_model("en_core_web_sm")
|
||||
> nlp = util.load_model("en_core_web_sm", disable=["ner"])
|
||||
> nlp = util.load_model("en_core_web_sm", exclude=["ner"])
|
||||
> nlp = util.load_model("/path/to/data")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `name` | Package name or model path. ~~str~~ |
|
||||
| `vocab` <Tag variant="new">3</Tag> | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~. |
|
||||
| `disable` | Names of pipeline components to disable. ~~Iterable[str]~~ |
|
||||
| `config` <Tag variant="new">3</Tag> | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
|
||||
| **RETURNS** | `Language` class with the loaded model. ~~Language~~ |
|
||||
| Name | Description |
|
||||
| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `name` | Package name or model path. ~~str~~ |
|
||||
| `vocab` <Tag variant="new">3</Tag> | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~. |
|
||||
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
|
||||
| `exclude` <Tag variant="new">3</Tag> | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
|
||||
| `config` <Tag variant="new">3</Tag> | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
|
||||
| **RETURNS** | `Language` class with the loaded model. ~~Language~~ |
|
||||
|
||||
### util.load_model_from_init_py {#util.load_model_from_init_py tag="function" new="2"}
|
||||
|
||||
|
@ -588,13 +598,14 @@ A helper function to use in the `load()` method of a model package's
|
|||
> return load_model_from_init_py(__file__, **overrides)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `init_file` | Path to model's `__init__.py`, i.e. `__file__`. ~~Union[str, Path]~~ |
|
||||
| `vocab` <Tag variant="new">3</Tag> | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~. |
|
||||
| `disable` | Names of pipeline components to disable. ~~Iterable[str]~~ |
|
||||
| `config` <Tag variant="new">3</Tag> | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
|
||||
| **RETURNS** | `Language` class with the loaded model. ~~Language~~ |
|
||||
| Name | Description |
|
||||
| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `init_file` | Path to model's `__init__.py`, i.e. `__file__`. ~~Union[str, Path]~~ |
|
||||
| `vocab` <Tag variant="new">3</Tag> | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~. |
|
||||
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
|
||||
| `exclude` <Tag variant="new">3</Tag> | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
|
||||
| `config` <Tag variant="new">3</Tag> | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
|
||||
| **RETURNS** | `Language` class with the loaded model. ~~Language~~ |
|
||||
|
||||
### util.load_config {#util.load_config tag="function" new="3"}
|
||||
|
||||
|
|
|
@ -22,15 +22,15 @@ values are defined in the [`Language.Defaults`](/api/language#defaults).
|
|||
> nlp_de = German() # Includes German data
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ---------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Stop words**<br />[`stop_words.py`][stop_words.py] | List of most common words of a language that are often useful to filter out, for example "and" or "I". Matching tokens will return `True` for `is_stop`. |
|
||||
| **Tokenizer exceptions**<br />[`tokenizer_exceptions.py`][tokenizer_exceptions.py] | Special-case rules for the tokenizer, for example, contractions like "can't" and abbreviations with punctuation, like "U.K.". |
|
||||
| **Punctuation rules**<br />[`punctuation.py`][punctuation.py] | Regular expressions for splitting tokens, e.g. on punctuation or special characters like emoji. Includes rules for prefixes, suffixes and infixes. |
|
||||
| **Character classes**<br />[`char_classes.py`][char_classes.py] | Character classes to be used in regular expressions, for example, latin characters, quotes, hyphens or icons. |
|
||||
| **Lexical attributes**<br />[`lex_attrs.py`][lex_attrs.py] | Custom functions for setting lexical attributes on tokens, e.g. `like_num`, which includes language-specific words like "ten" or "hundred". |
|
||||
| **Syntax iterators**<br />[`syntax_iterators.py`][syntax_iterators.py] | Functions that compute views of a `Doc` object based on its syntax. At the moment, only used for [noun chunks](/usage/linguistic-features#noun-chunks). |
|
||||
| **Lemmatizer**<br />[`spacy-lookups-data`][spacy-lookups-data] | Lemmatization rules or a lookup-based lemmatization table to assign base forms, for example "be" for "was". |
|
||||
| Name | Description |
|
||||
| ----------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Stop words**<br />[`stop_words.py`][stop_words.py] | List of most common words of a language that are often useful to filter out, for example "and" or "I". Matching tokens will return `True` for `is_stop`. |
|
||||
| **Tokenizer exceptions**<br />[`tokenizer_exceptions.py`][tokenizer_exceptions.py] | Special-case rules for the tokenizer, for example, contractions like "can't" and abbreviations with punctuation, like "U.K.". |
|
||||
| **Punctuation rules**<br />[`punctuation.py`][punctuation.py] | Regular expressions for splitting tokens, e.g. on punctuation or special characters like emoji. Includes rules for prefixes, suffixes and infixes. |
|
||||
| **Character classes**<br />[`char_classes.py`][char_classes.py] | Character classes to be used in regular expressions, for example, Latin characters, quotes, hyphens or icons. |
|
||||
| **Lexical attributes**<br />[`lex_attrs.py`][lex_attrs.py] | Custom functions for setting lexical attributes on tokens, e.g. `like_num`, which includes language-specific words like "ten" or "hundred". |
|
||||
| **Syntax iterators**<br />[`syntax_iterators.py`][syntax_iterators.py] | Functions that compute views of a `Doc` object based on its syntax. At the moment, only used for [noun chunks](/usage/linguistic-features#noun-chunks). |
|
||||
| **Lemmatizer**<br />[`lemmatizer.py`][lemmatizer.py] [`spacy-lookups-data`][spacy-lookups-data] | Custom lemmatizer implementation and lemmatization tables. |
|
||||
|
||||
[stop_words.py]:
|
||||
https://github.com/explosion/spaCy/tree/master/spacy/lang/en/stop_words.py
|
||||
|
@ -44,4 +44,6 @@ values are defined in the [`Language.Defaults`](/api/language#defaults).
|
|||
https://github.com/explosion/spaCy/tree/master/spacy/lang/en/lex_attrs.py
|
||||
[syntax_iterators.py]:
|
||||
https://github.com/explosion/spaCy/tree/master/spacy/lang/en/syntax_iterators.py
|
||||
[lemmatizer.py]:
|
||||
https://github.com/explosion/spaCy/tree/master/spacy/lang/fr/lemmatizer.py
|
||||
[spacy-lookups-data]: https://github.com/explosion/spacy-lookups-data
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
When you call `nlp` on a text, spaCy first tokenizes the text to produce a `Doc`
|
||||
object. The `Doc` is then processed in several different steps – this is also
|
||||
referred to as the **processing pipeline**. The pipeline used by the
|
||||
[default models](/models) consists of a tagger, a parser and an entity
|
||||
recognizer. Each pipeline component returns the processed `Doc`, which is then
|
||||
passed on to the next component.
|
||||
[default models](/models) typically include a tagger, a lemmatizer, a parser and
|
||||
an entity recognizer. Each pipeline component returns the processed `Doc`, which
|
||||
is then passed on to the next component.
|
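In code, this corresponds to calling each entry of
[`nlp.pipeline`](/api/language#pipeline) in order. A minimal sketch (assuming
the small English model is installed):

```python
import spacy

nlp = spacy.load("en_core_web_sm")
print(nlp.pipe_names)  # the component names, in order
doc = nlp.make_doc("Apple is looking at buying a U.K. startup.")
for name, component in nlp.pipeline:
    doc = component(doc)  # each component returns the processed Doc
```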
||||
|
||||
![The processing pipeline](../../images/pipeline.svg)
|
||||
|
||||
|
@ -12,15 +12,16 @@ passed on to the next component.
|
|||
> - **Creates:** Objects, attributes and properties modified and set by the
|
||||
> component.
|
||||
|
||||
| Name | Component | Creates | Description |
|
||||
| -------------- | ------------------------------------------------------------------ | --------------------------------------------------------- | ------------------------------------------------ |
|
||||
| **tokenizer** | [`Tokenizer`](/api/tokenizer) | `Doc` | Segment text into tokens. |
|
||||
| **tagger** | [`Tagger`](/api/tagger) | `Token.tag` | Assign part-of-speech tags. |
|
||||
| **parser** | [`DependencyParser`](/api/dependencyparser) | `Token.head`, `Token.dep`, `Doc.sents`, `Doc.noun_chunks` | Assign dependency labels. |
|
||||
| **ner** | [`EntityRecognizer`](/api/entityrecognizer) | `Doc.ents`, `Token.ent_iob`, `Token.ent_type` | Detect and label named entities. |
|
||||
| **lemmatizer** | [`Lemmatizer`](/api/lemmatizer) | `Token.lemma` | Assign base forms. |
|
||||
| **textcat** | [`TextCategorizer`](/api/textcategorizer) | `Doc.cats` | Assign document labels. |
|
||||
| **custom** | [custom components](/usage/processing-pipelines#custom-components) | `Doc._.xxx`, `Token._.xxx`, `Span._.xxx` | Assign custom attributes, methods or properties. |
|
||||
| Name | Component | Creates | Description |
|
||||
| --------------------- | ------------------------------------------------------------------ | --------------------------------------------------------- | ------------------------------------------------ |
|
||||
| **tokenizer** | [`Tokenizer`](/api/tokenizer) | `Doc` | Segment text into tokens. |
|
||||
| _processing pipeline_ | | |
|
||||
| **tagger** | [`Tagger`](/api/tagger) | `Token.tag` | Assign part-of-speech tags. |
|
||||
| **parser** | [`DependencyParser`](/api/dependencyparser) | `Token.head`, `Token.dep`, `Doc.sents`, `Doc.noun_chunks` | Assign dependency labels. |
|
||||
| **ner** | [`EntityRecognizer`](/api/entityrecognizer) | `Doc.ents`, `Token.ent_iob`, `Token.ent_type` | Detect and label named entities. |
|
||||
| **lemmatizer** | [`Lemmatizer`](/api/lemmatizer) | `Token.lemma` | Assign base forms. |
|
||||
| **textcat** | [`TextCategorizer`](/api/textcategorizer) | `Doc.cats` | Assign document labels. |
|
||||
| **custom** | [custom components](/usage/processing-pipelines#custom-components) | `Doc._.xxx`, `Token._.xxx`, `Span._.xxx` | Assign custom attributes, methods or properties. |
|
||||
|
||||
The processing pipeline always **depends on the statistical model** and its
|
||||
capabilities. For example, a pipeline can only include an entity recognizer
|
||||
|
|
|
@ -179,7 +179,7 @@ interoperates with [PyTorch](https://pytorch.org) and the
|
|||
giving you access to thousands of pretrained models for your pipelines. There
|
||||
are many [great guides](http://jalammar.github.io/illustrated-transformer/) to
|
||||
transformer models, but for practical purposes, you can simply think of them as
|
||||
a drop-in replacement that let you achieve **higher accuracy** in exchange for
|
||||
drop-in replacements that let you achieve **higher accuracy** in exchange for
|
||||
**higher training and runtime costs**.
|
||||
|
||||
### Setup and installation {#transformers-installation}
|
||||
|
@ -225,10 +225,12 @@ transformers as subnetworks directly, you can also use them via the
|
|||
|
||||
![The processing pipeline with the transformer component](../images/pipeline_transformer.svg)
|
||||
|
||||
The `Transformer` component sets the
|
||||
By default, the `Transformer` component sets the
|
||||
[`Doc._.trf_data`](/api/transformer#custom_attributes) extension attribute,
|
||||
which lets you access the transformer's outputs at runtime.
|
||||
|
||||
<!-- TODO: update/confirm once we have final models trained -->
|
||||
|
||||
```cli
|
||||
$ python -m spacy download en_core_trf_lg
|
||||
```
|
||||
|
@ -249,8 +251,8 @@ for doc in nlp.pipe(["some text", "some other text"]):
|
|||
tokvecs = doc._.trf_data.tensors[-1]
|
||||
```
|
||||
|
||||
You can also customize how the [`Transformer`](/api/transformer) component sets
|
||||
annotations onto the [`Doc`](/api/doc), by customizing the `annotation_setter`.
|
||||
You can customize how the [`Transformer`](/api/transformer) component sets
|
||||
annotations onto the [`Doc`](/api/doc), by changing the `annotation_setter`.
|
||||
This callback will be called with the raw input and output data for the whole
|
||||
batch, along with the batch of `Doc` objects, allowing you to implement whatever
|
||||
you need. The annotation setter is called with a batch of [`Doc`](/api/doc)
|
||||
|
@ -259,13 +261,15 @@ containing the transformers data for the batch.
|
|||
|
||||
```python
|
||||
def custom_annotation_setter(docs, trf_data):
|
||||
# TODO:
|
||||
...
|
||||
doc_data = list(trf_data.doc_data)
|
||||
for doc, data in zip(docs, doc_data):
|
||||
doc._.custom_attr = data
|
||||
|
||||
nlp = spacy.load("en_core_trf_lg")
|
||||
nlp.get_pipe("transformer").annotation_setter = custom_annotation_setter
|
||||
doc = nlp("This is a text")
|
||||
print() # TODO:
|
||||
assert isinstance(doc._.custom_attr, TransformerData)
|
||||
print(doc._.custom_attr.tensors)
|
||||
```
|
||||
|
||||
### Training usage {#transformers-training}
|
||||
|
@ -299,7 +303,7 @@ component:
|
|||
>
|
||||
> ```python
|
||||
> from spacy_transformers import Transformer, TransformerModel
|
||||
> from spacy_transformers.annotation_setters import null_annotation_setter
|
||||
> from spacy_transformers.annotation_setters import configure_trfdata_setter
|
||||
> from spacy_transformers.span_getters import get_doc_spans
|
||||
>
|
||||
> trf = Transformer(
|
||||
|
@ -309,7 +313,7 @@ component:
|
|||
> get_spans=get_doc_spans,
|
||||
> tokenizer_config={"use_fast": True},
|
||||
> ),
|
||||
> annotation_setter=null_annotation_setter,
|
||||
> annotation_setter=configure_trfdata_setter(),
|
||||
> max_batch_items=4096,
|
||||
> )
|
||||
> ```
|
||||
|
@ -329,7 +333,7 @@ tokenizer_config = {"use_fast": true}
|
|||
@span_getters = "doc_spans.v1"
|
||||
|
||||
[components.transformer.annotation_setter]
|
||||
@annotation_setters = "spacy-transformers.null_annotation_setter.v1"
|
||||
@annotation_setters = "spacy-transformers.trfdata_setter.v1"
|
||||
|
||||
```
|
||||
|
||||
|
@ -343,9 +347,9 @@ in a block starts with `@`, it's **resolved to a function** and all other
|
|||
settings are passed to the function as arguments. In this case, `name`,
|
||||
`tokenizer_config` and `get_spans`.
|
||||
|
||||
`get_spans` is a function that takes a batch of `Doc` object and returns lists
|
||||
`get_spans` is a function that takes a batch of `Doc` objects and returns lists
|
||||
of potentially overlapping `Span` objects to process by the transformer. Several
|
||||
[built-in functions](/api/transformer#span-getters) are available – for example,
|
||||
[built-in functions](/api/transformer#span_getters) are available – for example,
|
||||
to process the whole document or individual sentences. When the config is
|
||||
resolved, the function is created and passed into the model as an argument.
|
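For intuition, the simplest possible span getter returns a single `Span`
covering each full `Doc` (a hypothetical minimal version, not the built-in
implementation):

```python
def get_whole_doc_spans(docs):
    # one list of spans per Doc; here a single Span over the whole text
    return [[doc[:]] for doc in docs]
```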
||||
|
||||
|
@ -366,13 +370,17 @@ To change any of the settings, you can edit the `config.cfg` and re-run the
|
|||
training. To change any of the functions, like the span getter, you can replace
|
||||
the name of the referenced function – e.g. `@span_getters = "sent_spans.v1"` to
|
||||
process sentences. You can also register your own functions using the
|
||||
`span_getters` registry:
|
||||
[`span_getters` registry](/api/top-level#registry). For instance, the following
|
||||
custom function returns [`Span`](/api/span) objects following sentence
|
||||
boundaries, unless a sentence exceeds a certain number of tokens, in which case
|
||||
subsentences of at most `max_length` tokens are returned.
|
||||
|
||||
> #### config.cfg
|
||||
>
|
||||
> ```ini
|
||||
> [components.transformer.model.get_spans]
|
||||
> @span_getters = "custom_sent_spans"
|
||||
> max_length = 25
|
||||
> ```
|
||||
|
||||
```python
|
||||
|
@ -380,18 +388,29 @@ process sentences. You can also register your own functions using the
|
|||
import spacy_transformers
|
||||
|
||||
@spacy_transformers.registry.span_getters("custom_sent_spans")
|
||||
def configure_custom_sent_spans():
|
||||
# TODO: write custom example
|
||||
def get_sent_spans(docs):
|
||||
return [list(doc.sents) for doc in docs]
|
||||
def configure_custom_sent_spans(max_length: int):
|
||||
def get_custom_sent_spans(docs):
|
||||
spans = []
|
||||
for doc in docs:
|
||||
spans.append([])
|
||||
for sent in doc.sents:
|
||||
start = 0
|
||||
end = max_length
|
||||
while end <= len(sent):
|
||||
spans[-1].append(sent[start:end])
|
||||
start += max_length
|
||||
end += max_length
|
||||
if start < len(sent):
|
||||
spans[-1].append(sent[start:len(sent)])
|
||||
return spans
|
||||
|
||||
return get_sent_spans
|
||||
return get_custom_sent_spans
|
||||
```
|
||||
|
||||
To resolve the config during training, spaCy needs to know about your custom
|
||||
function. You can make it available via the `--code` argument that can point to
|
||||
a Python file. For more details on training with custom code, see the
|
||||
[training documentation](/usage/training#custom-code).
|
||||
[training documentation](/usage/training#custom-functions).
|
||||
|
||||
```cli
|
||||
python -m spacy train ./config.cfg --code ./code.py
|
||||
|
@ -412,8 +431,8 @@ The same idea applies to task models that power the **downstream components**.
|
|||
Most of spaCy's built-in model creation functions support a `tok2vec` argument,
|
||||
which should be a Thinc layer of type ~~Model[List[Doc], List[Floats2d]]~~. This
|
||||
is where we'll plug in our transformer model, using the
|
||||
[Tok2VecListener](/api/architectures#Tok2VecListener) layer, which sneakily
|
||||
delegates to the `Transformer` pipeline component.
|
||||
[TransformerListener](/api/architectures#TransformerListener) layer, which
|
||||
sneakily delegates to the `Transformer` pipeline component.
|
||||
|
||||
```ini
|
||||
### config.cfg (excerpt) {highlight="12"}
|
||||
|
@ -428,18 +447,18 @@ maxout_pieces = 3
|
|||
use_upper = false
|
||||
|
||||
[nlp.pipeline.ner.model.tok2vec]
|
||||
@architectures = "spacy-transformers.Tok2VecListener.v1"
|
||||
@architectures = "spacy-transformers.TransformerListener.v1"
|
||||
grad_factor = 1.0
|
||||
|
||||
[nlp.pipeline.ner.model.tok2vec.pooling]
|
||||
@layers = "reduce_mean.v1"
|
||||
```
|
||||
|
||||
The [Tok2VecListener](/api/architectures#Tok2VecListener) layer expects a
|
||||
[pooling layer](https://thinc.ai/docs/api-layers#reduction-ops) as the argument
|
||||
`pooling`, which needs to be of type ~~Model[Ragged, Floats2d]~~. This layer
|
||||
determines how the vector for each spaCy token will be computed from the zero or
|
||||
more source rows the token is aligned against. Here we use the
|
||||
The [TransformerListener](/api/architectures#TransformerListener) layer expects
|
||||
a [pooling layer](https://thinc.ai/docs/api-layers#reduction-ops) as the
|
||||
argument `pooling`, which needs to be of type ~~Model[Ragged, Floats2d]~~. This
|
||||
layer determines how the vector for each spaCy token will be computed from the
|
||||
zero or more source rows the token is aligned against. Here we use the
|
||||
[`reduce_mean`](https://thinc.ai/docs/api-layers#reduce_mean) layer, which
|
||||
averages the wordpiece rows. We could instead use
|
||||
[`reduce_max`](https://thinc.ai/docs/api-layers#reduce_max), or a custom
|
||||
|
@ -535,8 +554,9 @@ vectors, but combines them via summation with a smaller table of learned
|
|||
embeddings.
|
||||
|
||||
```python
|
||||
from thinc.api import add, chain, remap_ids, Embed
|
||||
from thinc.api import add, chain, remap_ids, Embed, FeatureExtractor
|
||||
from spacy.ml.staticvectors import StaticVectors
|
||||
from spacy.util import registry
|
||||
|
||||
@registry.architectures("my_example.MyEmbedding.v1")
|
||||
def MyCustomVectors(
|
||||
|
|
|
@ -52,9 +52,9 @@ $ pip install -U spacy
|
|||
To install additional data tables for lemmatization you can run
|
||||
`pip install spacy[lookups]` or install
|
||||
[`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data)
|
||||
separately. The lookups package is needed to create blank models with
|
||||
lemmatization data, and to lemmatize in languages that don't yet come with
|
||||
pretrained models and aren't powered by third-party libraries.
|
||||
separately. The lookups package is needed to provide normalization and
|
||||
lemmatization data for new models and to lemmatize in languages that don't yet
|
||||
come with pretrained models and aren't powered by third-party libraries.
|
||||
|
||||
</Infobox>
|
||||
|
||||
|
|
|
@ -3,6 +3,8 @@ title: Linguistic Features
|
|||
next: /usage/rule-based-matching
|
||||
menu:
|
||||
- ['POS Tagging', 'pos-tagging']
|
||||
- ['Morphology', 'morphology']
|
||||
- ['Lemmatization', 'lemmatization']
|
||||
- ['Dependency Parse', 'dependency-parse']
|
||||
- ['Named Entities', 'named-entities']
|
||||
- ['Entity Linking', 'entity-linking']
|
||||
|
@ -10,7 +12,8 @@ menu:
|
|||
- ['Merging & Splitting', 'retokenization']
|
||||
- ['Sentence Segmentation', 'sbd']
|
||||
- ['Vectors & Similarity', 'vectors-similarity']
|
||||
- ['Language data', 'language-data']
|
||||
- ['Mappings & Exceptions', 'mappings-exceptions']
|
||||
- ['Language Data', 'language-data']
|
||||
---
|
||||
|
||||
Processing raw text intelligently is difficult: most words are rare, and it's
|
||||
|
@ -37,7 +40,7 @@ in the [models directory](/models).
|
|||
|
||||
</Infobox>
|
||||
|
||||
### Rule-based morphology {#rule-based-morphology}
|
||||
## Morphology {#morphology}
|
||||
|
||||
Inflectional morphology is the process by which a root form of a word is
|
||||
modified by adding prefixes or suffixes that specify its grammatical function
|
||||
|
@ -45,33 +48,157 @@ but do not change its part-of-speech. We say that a **lemma** (root form) is
|
|||
**inflected** (modified/combined) with one or more **morphological features** to
|
||||
create a surface form. Here are some examples:
|
||||
|
||||
| Context | Surface | Lemma | POS | Morphological Features |
|
||||
| ---------------------------------------- | ------- | ----- | ---- | ---------------------------------------- |
|
||||
| I was reading the paper | reading | read | verb | `VerbForm=Ger` |
|
||||
| I don't watch the news, I read the paper | read | read | verb | `VerbForm=Fin`, `Mood=Ind`, `Tense=Pres` |
|
||||
| I read the paper yesterday | read | read | verb | `VerbForm=Fin`, `Mood=Ind`, `Tense=Past` |
|
||||
| Context | Surface | Lemma | POS | Morphological Features |
|
||||
| ---------------------------------------- | ------- | ----- | ------ | ---------------------------------------- |
|
||||
| I was reading the paper | reading | read | `VERB` | `VerbForm=Ger` |
|
||||
| I don't watch the news, I read the paper | read | read | `VERB` | `VerbForm=Fin`, `Mood=Ind`, `Tense=Pres` |
|
||||
| I read the paper yesterday | read | read | `VERB` | `VerbForm=Fin`, `Mood=Ind`, `Tense=Past` |
|
||||
|
||||
English has a relatively simple morphological system, which spaCy handles using
|
||||
rules that can be keyed by the token, the part-of-speech tag, or the combination
|
||||
of the two. The system works as follows:
|
||||

Morphological features are stored in the [`MorphAnalysis`](/api/morphanalysis)
under `Token.morph`, which allows you to access individual morphological
features. The attribute `Token.morph_` provides the morphological analysis in
the Universal Dependencies
[FEATS](https://universaldependencies.org/format.html#morphological-annotation)
format.

> #### 📝 Things to try
>
> 1. Change "I" to "She". You should see that the morphological features change
>    and express that it's a pronoun in the third person.
> 2. Inspect `token.morph_` for the other tokens.

```python
### {executable="true"}
import spacy

nlp = spacy.load("en_core_web_sm")
print("Pipeline:", nlp.pipe_names)
doc = nlp("I was reading the paper.")
token = doc[0]  # 'I'
print(token.morph_)  # 'Case=Nom|Number=Sing|Person=1|PronType=Prs'
print(token.morph.get("PronType"))  # ['Prs']
```

### Statistical morphology {#morphologizer new="3" model="morphologizer"}

spaCy's statistical [`Morphologizer`](/api/morphologizer) component assigns the
morphological features and coarse-grained part-of-speech tags as `Token.morph`
and `Token.pos`.

```python
### {executable="true"}
import spacy

nlp = spacy.load("de_core_news_sm")
doc = nlp("Wo bist du?")  # English: 'Where are you?'
print(doc[2].morph_)  # 'Case=Nom|Number=Sing|Person=2|PronType=Prs'
print(doc[2].pos_)  # 'PRON'
```

### Rule-based morphology {#rule-based-morphology}

For languages with relatively simple morphological systems like English, spaCy
can assign morphological features through a rule-based approach, which uses the
**token text** and **fine-grained part-of-speech tags** to produce
coarse-grained part-of-speech tags and morphological features.

1. The part-of-speech tagger assigns each token a **fine-grained part-of-speech
   tag**. In the API, these tags are known as `Token.tag`. They express the
   part-of-speech (e.g. verb) and some amount of morphological information,
   e.g. that the verb is past tense (e.g. `VBD` for a past tense verb in the
   Penn Treebank).
2. For words whose coarse-grained POS is not set by a prior process, a
   [mapping table](#mappings-exceptions) maps the fine-grained tags to
   coarse-grained POS tags and morphological features.

```python
### {executable="true"}
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Where are you?")
print(doc[2].morph_)  # 'Case=Nom|Person=2|PronType=Prs'
print(doc[2].pos_)  # 'PRON'
```

## Lemmatization {#lemmatization model="lemmatizer" new="3"}

The [`Lemmatizer`](/api/lemmatizer) is a pipeline component that provides lookup
and rule-based lemmatization methods in a configurable component. An individual
language can extend the `Lemmatizer` as part of its
[language data](#language-data).

```python
### {executable="true"}
import spacy

# English models include a rule-based lemmatizer
nlp = spacy.load("en_core_web_sm")
lemmatizer = nlp.get_pipe("lemmatizer")
print(lemmatizer.mode)  # 'rule'

doc = nlp("I was reading the paper.")
print([token.lemma_ for token in doc])
# ['I', 'be', 'read', 'the', 'paper', '.']
```

<Infobox title="Changed in v3.0" variant="warning">

Unlike spaCy v2, spaCy v3 models do _not_ provide lemmas by default or switch
automatically between lookup and rule-based lemmas depending on whether a tagger
is in the pipeline. To have lemmas in a `Doc`, the pipeline needs to include a
[`Lemmatizer`](/api/lemmatizer) component. The lemmatizer component is
configured to use a single mode such as `"lookup"` or `"rule"` on
initialization. The `"rule"` mode requires `Token.pos` to be set by a previous
component.

</Infobox>

The data for spaCy's lemmatizers is distributed in the package
[`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data). The
provided models already include all the required tables, but if you are creating
new models, you'll probably want to install `spacy-lookups-data` to provide the
data when the lemmatizer is initialized.
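
As a quick sanity check after installing the package, you can inspect which mode
and tables the lemmatizer ended up with. A minimal sketch, assuming an installed
English model; the exact table names depend on the language and mode:

```python
import spacy

nlp = spacy.load("en_core_web_sm")
lemmatizer = nlp.get_pipe("lemmatizer")
print(lemmatizer.mode)            # e.g. 'rule'
print(lemmatizer.lookups.tables)  # e.g. ['lemma_rules', 'lemma_exc', 'lemma_index']
```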

### Lookup lemmatizer {#lemmatizer-lookup}

For models without a tagger or morphologizer, a lookup lemmatizer can be added
to the pipeline as long as a lookup table is provided, typically through
[`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data). The
lookup lemmatizer looks up the token surface form in the lookup table without
reference to the token's part-of-speech or context.

```python
# pip install spacy-lookups-data
import spacy

nlp = spacy.blank("sv")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
```

### Rule-based lemmatizer {#lemmatizer-rule}

When training models that include a component that assigns POS (a morphologizer
or a tagger with a [POS mapping](#mappings-exceptions)), a rule-based lemmatizer
can be added using rule tables from
[`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data):

```python
# pip install spacy-lookups-data
import spacy

nlp = spacy.blank("de")
# Morphologizer (note: model is not yet trained!)
nlp.add_pipe("morphologizer")
# Rule-based lemmatizer
nlp.add_pipe("lemmatizer", config={"mode": "rule"})
```

The rule-based deterministic lemmatizer maps the surface form to a lemma in
light of the previously assigned coarse-grained part-of-speech and morphological
information, without consulting the context of the token. The rule-based
lemmatizer also accepts list-based exception files. For English, these are
acquired from [WordNet](https://wordnet.princeton.edu/).
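
Because the rule-based mode keys off `Token.pos`, the same surface form can
receive different lemmas in different contexts. A minimal sketch, assuming an
installed English model (the exact predictions depend on the tagger):

```python
import spacy

nlp = spacy.load("en_core_web_sm")
for text in ["I am meeting him tomorrow", "The meeting ran long"]:
    doc = nlp(text)
    token = next(t for t in doc if t.text == "meeting")
    print(token.text, token.pos_, token.lemma_)
# meeting VERB meet
# meeting NOUN meeting
```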

## Dependency Parsing {#dependency-parse model="parser"}

@ -420,7 +547,7 @@ on a token, it will return an empty string.

> #### BILUO Scheme
>
> - `B` – Token is the **beginning** of a multi-token entity.
> - `I` – Token is **inside** a multi-token entity.
> - `L` – Token is the **last** token of a multi-token entity.
> - `U` – Token is a single-token **unit** entity.
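
To see the scheme in action, you can convert character offsets to BILUO tags. A
small sketch; in spaCy v3 this helper lives in `spacy.training` (older versions
expose it as `spacy.gold.biluo_tags_from_offsets`):

```python
import spacy
from spacy.training import offsets_to_biluo_tags

nlp = spacy.blank("en")
doc = nlp("I like London and Berlin")
entities = [(7, 13, "LOC"), (18, 24, "LOC")]
print(offsets_to_biluo_tags(doc, entities))
# ['O', 'O', 'U-LOC', 'O', 'U-LOC']
```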

@ -750,14 +877,6 @@ subclass.

---

### Adding special case tokenization rules {#special-cases}

Most domains have at least some idiosyncrasies that require custom tokenization

@ -1472,28 +1591,46 @@ print("After:", [(token.text, token._.is_musician) for token in doc])

## Sentence Segmentation {#sbd}

A [`Doc`](/api/doc) object's sentences are available via the `Doc.sents`
property. To view a `Doc`'s sentences, you can iterate over the `Doc.sents`, a
generator that yields [`Span`](/api/span) objects. You can check whether a `Doc`
has sentence boundaries with the `doc.is_sentenced` attribute.

```python
### {executable="true"}
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence. This is another sentence.")
assert doc.is_sentenced
for sent in doc.sents:
    print(sent.text)
```

spaCy provides four alternatives for sentence segmentation:

1. [Dependency parser](#sbd-parser): the statistical
   [`DependencyParser`](/api/dependencyparser) provides the most accurate
   sentence boundaries based on full dependency parses.
2. [Statistical sentence segmenter](#sbd-senter): the statistical
   [`SentenceRecognizer`](/api/sentencerecognizer) is a simpler and faster
   alternative to the parser that only sets sentence boundaries.
3. [Rule-based pipeline component](#sbd-component): the rule-based
   [`Sentencizer`](/api/sentencizer) sets sentence boundaries using a
   customizable list of sentence-final punctuation.
4. [Custom function](#sbd-custom): your own custom function added to the
   processing pipeline can set sentence boundaries by writing to
   `Token.is_sent_start`.

### Default: Using the dependency parse {#sbd-parser model="parser"}

Unlike other libraries, spaCy uses the dependency parse to determine sentence
boundaries. This is usually the most accurate approach, but it requires a
**statistical model** that provides accurate predictions. If your texts are
closer to general-purpose news or web text, this should work well out-of-the-box
with spaCy's provided models. For social media or conversational text that
doesn't follow the same rules, your application may benefit from a custom model
or rule-based component.

```python
### {executable="true"}
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence. This is another sentence.")
for sent in doc.sents:
    print(sent.text)
```

spaCy's dependency parser respects already set boundaries, so you can preprocess
your `Doc` using custom components _before_ it's parsed. Depending on your text,
this may also improve parse accuracy, since the parser is constrained to predict
parses consistent with the sentence boundaries.
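
As an illustration, here's a minimal sketch of a rule-based component that
forces a sentence start after an ellipsis and runs before the parser. The
component name is hypothetical; the custom function approach is covered in more
detail below:

```python
import spacy
from spacy.language import Language

@Language.component("force_sentence_start")  # hypothetical name
def force_sentence_start(doc):
    for i, token in enumerate(doc[:-1]):
        if token.text == "...":
            doc[i + 1].is_sent_start = True
    return doc

nlp = spacy.load("en_core_web_sm")
nlp.add_pipe("force_sentence_start", before="parser")
doc = nlp("this is a sentence...hello...and another sentence.")
print([sent.text for sent in doc.sents])
```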

### Statistical sentence segmenter {#sbd-senter model="senter" new="3"}

The [`SentenceRecognizer`](/api/sentencerecognizer) is a simple statistical
component that only provides sentence boundaries. Along with being faster and
smaller than the parser, its primary advantage is that it's easier to train
custom models because it only requires annotated sentence boundaries rather than
full dependency parses.

<!-- TODO: update/confirm usage once we have final models trained -->

> #### senter vs. parser
>
> The recall for the `senter` is typically slightly lower than for the parser,
> which is better at predicting sentence boundaries when punctuation is not
> present.

```python
### {executable="true"}
import spacy

nlp = spacy.load("en_core_web_sm", enable=["senter"], disable=["parser"])
doc = nlp("This is a sentence. This is another sentence.")
for sent in doc.sents:
    print(sent.text)
```

### Rule-based pipeline component {#sbd-component}

The [`Sentencizer`](/api/sentencizer) component is a
[pipeline component](/usage/processing-pipelines) that splits sentences on
punctuation like `.`, `!` or `?`. You can plug it into your pipeline if you only
need sentence boundaries without dependency parses.

```python
### {executable="true"}
import spacy

nlp = spacy.blank("en")  # blank pipeline with no statistical components
nlp.add_pipe("sentencizer")
doc = nlp("This is a sentence. This is another sentence.")
for sent in doc.sents:
    print(sent.text)
```
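
The punctuation list is configurable. A minimal sketch using the documented
`punct_chars` setting (treating `|` as sentence-final here is an arbitrary
choice for illustration):

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe("sentencizer", config={"punct_chars": [".", "!", "?", "|"]})
doc = nlp("This is one sentence|this is another")
print([sent.text for sent in doc.sents])
```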

@ -1537,7 +1705,7 @@ and can still be overwritten by the parser.

<Infobox title="Important note" variant="warning">

To prevent inconsistent state, you can only set boundaries **before** a document
is parsed (and `doc.is_parsed` is `False`). To ensure that your component is
added in the right place, you can set `before='parser'` or `first=True` when
adding it to the pipeline using [`nlp.add_pipe`](/api/language#add_pipe).

@ -1574,6 +1742,77 @@ doc = nlp(text)

print("After:", [sent.text for sent in doc.sents])
```

## Mappings & Exceptions {#mappings-exceptions new="3"}

The [`AttributeRuler`](/api/attributeruler) manages **rule-based mappings and
exceptions** for all token-level attributes. As the number of
[pipeline components](/api/#architecture-pipeline) has grown from spaCy v2 to
v3, handling rules and exceptions in each component individually has become
impractical, so the `AttributeRuler` provides a single component with a unified
pattern format for all token attribute mappings and exceptions.

The `AttributeRuler` uses
[`Matcher` patterns](/usage/rule-based-matching#adding-patterns) to identify
tokens and then assigns them the provided attributes. If needed, the
[`Matcher`](/api/matcher) patterns can include context around the target token.
For example, the attribute ruler can:

- provide exceptions for any **token attributes**
- map **fine-grained tags** to **coarse-grained tags** for languages without
  statistical morphologizers (replacing the v2.x `tag_map` in the
  [language data](#language-data))
- map token **surface form + fine-grained tags** to **morphological features**
  (replacing the v2.x `morph_rules` in the [language data](#language-data))
- specify the **tags for space tokens** (replacing hard-coded behavior in the
  tagger)

The following example shows how the tag and POS `NNP`/`PROPN` can be specified
for the phrase `"The Who"`, overriding the tags provided by the statistical
tagger and the POS tag map.

```python
### {executable="true"}
import spacy

nlp = spacy.load("en_core_web_sm")
text = "I saw The Who perform. Who did you see?"
doc1 = nlp(text)
print(doc1[2].tag_, doc1[2].pos_)  # DT DET
print(doc1[3].tag_, doc1[3].pos_)  # WP PRON

# Add attribute ruler with exception for "The Who" as NNP/PROPN NNP/PROPN
ruler = nlp.get_pipe("attribute_ruler")
# Pattern to match "The Who"
patterns = [[{"LOWER": "the"}, {"TEXT": "Who"}]]
# The attributes to assign to the matched token
attrs = {"TAG": "NNP", "POS": "PROPN"}
# Add rules to the attribute ruler
ruler.add(patterns=patterns, attrs=attrs, index=0)  # "The" in "The Who"
ruler.add(patterns=patterns, attrs=attrs, index=1)  # "Who" in "The Who"

doc2 = nlp(text)
print(doc2[2].tag_, doc2[2].pos_)  # NNP PROPN
print(doc2[3].tag_, doc2[3].pos_)  # NNP PROPN
# The second "Who" remains unmodified
print(doc2[5].tag_, doc2[5].pos_)  # WP PRON
```

<Infobox variant="warning" title="Migrating from spaCy v2.x">

For easy migration from spaCy v2 to v3, the
[`AttributeRuler`](/api/attributeruler) can import a **tag map and morph rules**
in the v2 format with the methods
[`load_from_tag_map`](/api/attributeruler#load_from_tag_map) and
[`load_from_morph_rules`](/api/attributeruler#load_from_morph_rules).

```diff
nlp = spacy.blank("en")
+ ruler = nlp.add_pipe("attribute_ruler")
+ ruler.load_from_tag_map(YOUR_TAG_MAP)
```

</Infobox>

## Word vectors and semantic similarity {#vectors-similarity}

import Vectors101 from 'usage/101/\_vectors-similarity.md'

@ -1703,7 +1942,7 @@ for word, vector in vector_data.items():

    vocab.set_vector(word, vector)
```

## Language Data {#language-data}

import LanguageData101 from 'usage/101/\_language-data.md'

@ -220,53 +220,70 @@ available pipeline components and component functions.

> ruler = nlp.add_pipe("entity_ruler")
> ```

| String name       | Component                                       | Description                                                                               |
| ----------------- | ----------------------------------------------- | ----------------------------------------------------------------------------------------- |
| `tagger`          | [`Tagger`](/api/tagger)                         | Assign part-of-speech tags.                                                               |
| `parser`          | [`DependencyParser`](/api/dependencyparser)     | Assign dependency labels.                                                                 |
| `ner`             | [`EntityRecognizer`](/api/entityrecognizer)     | Assign named entities.                                                                    |
| `entity_linker`   | [`EntityLinker`](/api/entitylinker)             | Assign knowledge base IDs to named entities. Should be added after the entity recognizer. |
| `entity_ruler`    | [`EntityRuler`](/api/entityruler)               | Assign named entities based on pattern rules and dictionaries.                            |
| `textcat`         | [`TextCategorizer`](/api/textcategorizer)       | Assign text categories.                                                                   |
| `lemmatizer`      | [`Lemmatizer`](/api/lemmatizer)                 | Assign base forms to words.                                                               |
| `morphologizer`   | [`Morphologizer`](/api/morphologizer)           | Assign morphological features and coarse-grained POS tags.                                |
| `attribute_ruler` | [`AttributeRuler`](/api/attributeruler)         | Assign token attribute mappings and rule-based exceptions.                                |
| `senter`          | [`SentenceRecognizer`](/api/sentencerecognizer) | Assign sentence boundaries.                                                               |
| `sentencizer`     | [`Sentencizer`](/api/sentencizer)               | Add rule-based sentence segmentation without the dependency parse.                        |
| `tok2vec`         | [`Tok2Vec`](/api/tok2vec)                       | Assign token-to-vector embeddings.                                                        |
| `transformer`     | [`Transformer`](/api/transformer)               | Assign the tokens and outputs of a transformer model.                                     |

### Disabling, excluding and modifying components {#disabling}

If you don't need a particular component of the pipeline – for example, the
tagger or the parser, you can **disable or exclude** it. This can sometimes make
a big difference and improve loading and inference speed. There are two
different mechanisms you can use:

1. **Disable:** The component and its data will be loaded with the model, but it
   will be disabled by default and not run as part of the processing pipeline.
   To run it, you can explicitly enable it by calling
   [`nlp.enable_pipe`](/api/language#enable_pipe). When you save out the `nlp`
   object, the disabled component will be included but disabled by default.
2. **Exclude:** Don't load the component and its data with the model. Once the
   model is loaded, there will be no reference to the excluded component.

Disabled and excluded component names can be provided to
[`spacy.load`](/api/top-level#spacy.load) as a list.

<!-- TODO: update with info on our models shipped with optional components -->

> #### 💡 Models with optional components
>
> The `disable` mechanism makes it easy to distribute models with optional
> components that you can enable or disable at runtime. For instance, your model
> may include a statistical _and_ a rule-based component for sentence
> segmentation, and you can choose which one to run depending on your use case.

```python
### Disable loading
# Load the model without the entity recognizer
nlp = spacy.load("en_core_web_sm", exclude=["ner"])

# Load the tagger and parser but don't enable them
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"])
# Explicitly enable the tagger later on
nlp.enable_pipe("tagger")
```

<Infobox variant="warning" title="Changed in v3.0">

As of v3.0, the `disable` keyword argument specifies components to load but
disable, instead of components to not load at all. Those components can now be
specified separately using the new `exclude` keyword argument.

</Infobox>
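
To make the distinction concrete, here's a small sketch (assuming an installed
English model) using the pipeline introspection attributes described below:

```python
import spacy

nlp = spacy.load("en_core_web_sm", disable=["ner"])
assert "ner" in nlp.component_names   # loaded, but not run
assert "ner" not in nlp.pipe_names

nlp = spacy.load("en_core_web_sm", exclude=["ner"])
assert "ner" not in nlp.component_names  # not loaded at all
```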

As a shortcut, you can use the [`nlp.select_pipes`](/api/language#select_pipes)
context manager to temporarily disable certain components for a given block. At
the end of the `with` block, the disabled pipeline components will be restored
automatically. Alternatively, `select_pipes` returns an object that lets you
call its `restore()` method to restore the disabled components when needed. This
can be useful if you want to prevent unnecessary code indentation of large

@ -295,6 +312,14 @@ with nlp.select_pipes(enable="parser"):

    doc = nlp("I will only be parsed")
```

The [`nlp.pipe`](/api/language#pipe) method also supports a `disable` keyword
argument if you only want to disable components during processing:

```python
for doc in nlp.pipe(texts, disable=["tagger", "parser"]):
    # Do something with the doc here
```

Finally, you can also use the [`remove_pipe`](/api/language#remove_pipe) method
to remove pipeline components from an existing pipeline, the
[`rename_pipe`](/api/language#rename_pipe) method to rename them, or the

@ -308,6 +333,31 @@ nlp.rename_pipe("ner", "entityrecognizer")

nlp.replace_pipe("tagger", my_custom_tagger)
```

The `Language` object exposes different [attributes](/api/language#attributes)
that let you inspect all available components and the components that currently
run as part of the pipeline.

> #### Example
>
> ```python
> nlp = spacy.blank("en")
> nlp.add_pipe("ner")
> nlp.add_pipe("textcat")
> assert nlp.pipe_names == ["ner", "textcat"]
> nlp.disable_pipe("ner")
> assert nlp.pipe_names == ["textcat"]
> assert nlp.component_names == ["ner", "textcat"]
> assert nlp.disabled == ["ner"]
> ```

| Name                  | Description                                                      |
| --------------------- | ---------------------------------------------------------------- |
| `nlp.pipeline`        | `(name, component)` tuples of the processing pipeline, in order. |
| `nlp.pipe_names`      | Pipeline component names, in order.                              |
| `nlp.components`      | All `(name, component)` tuples, including disabled components.   |
| `nlp.component_names` | All component names, including disabled components.              |
| `nlp.disabled`        | Names of components that are currently disabled.                 |

### Sourcing pipeline components from existing models {#sourced-components new="3"}

Pipeline components that are independent can also be reused across models.
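
A minimal sketch of the idea, assuming an installed English model as the source
(`source` is the `nlp.add_pipe` argument for copying a component from another
pipeline):

```python
import spacy

source_nlp = spacy.load("en_core_web_sm")
nlp = spacy.blank("en")
# Copy the trained NER component from the source pipeline
nlp.add_pipe("ner", source=source_nlp)
print(nlp.pipe_names)  # ['ner']
```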

@ -142,6 +142,7 @@ add to your pipeline and customize for your use case:

> #### Example
>
> ```python
> # pip install spacy-lookups-data
> nlp = spacy.blank("en")
> nlp.add_pipe("lemmatizer")
> ```

@ -249,23 +250,26 @@ in your config and see validation errors if the argument values don't match.

The following methods, attributes and commands are new in spaCy v3.0.

| Name | Description |
| ----------- | ----------- |
| [`Token.lex`](/api/token#attributes) | Access a token's [`Lexeme`](/api/lexeme). |
| [`Token.morph`](/api/token#attributes), [`Token.morph_`](/api/token#attributes) | Access a token's morphological analysis. |
| [`Language.select_pipes`](/api/language#select_pipes) | Context manager for enabling or disabling specific pipeline components for a block. |
| [`Language.disable_pipe`](/api/language#disable_pipe), [`Language.enable_pipe`](/api/language#enable_pipe) | Disable or enable a loaded pipeline component (but don't remove it). |
| [`Language.analyze_pipes`](/api/language#analyze_pipes) | [Analyze](/usage/processing-pipelines#analysis) components and their interdependencies. |
| [`Language.resume_training`](/api/language#resume_training) | Experimental: continue training a pretrained model and initialize "rehearsal" for components that implement a `rehearse` method to prevent catastrophic forgetting. |
| [`@Language.factory`](/api/language#factory), [`@Language.component`](/api/language#component) | Decorators for [registering](/usage/processing-pipelines#custom-components) pipeline component factories and simple stateless component functions. |
| [`Language.has_factory`](/api/language#has_factory) | Check whether a component factory is registered on a language class. |
| [`Language.get_factory_meta`](/api/language#get_factory_meta), [`Language.get_pipe_meta`](/api/language#get_factory_meta) | Get the [`FactoryMeta`](/api/language#factorymeta) with component metadata for a factory or instance name. |
| [`Language.config`](/api/language#config) | The [config](/usage/training#config) used to create the current `nlp` object. An instance of [`Config`](https://thinc.ai/docs/api-config#config) that can be saved to disk and used for training. |
| [`Language.components`](/api/language#attributes), [`Language.component_names`](/api/language#attributes) | All available components and component names, including disabled components that are not run as part of the pipeline. |
| [`Language.disabled`](/api/language#attributes) | Names of disabled components that are not run as part of the pipeline. |
| [`Pipe.score`](/api/pipe#score) | Method on pipeline components that returns a dictionary of evaluation scores. |
| [`registry`](/api/top-level#registry) | Function registry to map functions to string names that can be referenced in [configs](/usage/training#config). |
| [`util.load_meta`](/api/top-level#util.load_meta), [`util.load_config`](/api/top-level#util.load_config) | Updated helpers for loading a model's [`meta.json`](/api/data-formats#meta) and [`config.cfg`](/api/data-formats#config). |
| [`util.get_installed_models`](/api/top-level#util.get_installed_models) | Names of all models installed in the environment. |
| [`init config`](/api/cli#init-config), [`init fill-config`](/api/cli#init-fill-config), [`debug config`](/api/cli#debug-config) | CLI commands for initializing, auto-filling and debugging [training configs](/usage/training). |
| [`project`](/api/cli#project) | Suite of CLI commands for cloning, running and managing [spaCy projects](/usage/projects). |

### New and updated documentation {#new-docs}

@ -300,7 +304,10 @@ format for documenting argument and return types.

  [Layers & Architectures](/usage/layers-architectures),
  [Projects](/usage/projects),
  [Custom pipeline components](/usage/processing-pipelines#custom-components),
  [Custom tokenizers](/usage/linguistic-features#custom-tokenizer),
  [Morphology](/usage/linguistic-features#morphology),
  [Lemmatization](/usage/linguistic-features#lemmatization),
  [Mappings & Exceptions](/usage/linguistic-features#mappings-exceptions)
- **API Reference:** [Library architecture](/api),
  [Model architectures](/api/architectures), [Data formats](/api/data-formats)
- **New Classes:** [`Example`](/api/example), [`Tok2Vec`](/api/tok2vec),

@ -367,19 +374,25 @@ Note that spaCy v3.0 now requires **Python 3.6+**.

  arguments). The `on_match` callback becomes an optional keyword argument.
- The `PRON_LEMMA` symbol and `-PRON-` as an indicator for pronoun lemmas have
  been removed.
- The `TAG_MAP` and `MORPH_RULES` in the language data have been replaced by the
  more flexible [`AttributeRuler`](/api/attributeruler).
- The [`Lemmatizer`](/api/lemmatizer) is now a standalone pipeline component and
  doesn't provide lemmas by default or switch automatically between lookup and
  rule-based lemmas. You can now add it to your pipeline explicitly and set its
  mode on initialization.

### Removed or renamed API {#incompat-removed}

| Removed | Replacement |
| ------- | ----------- |
| `Language.disable_pipes` | [`Language.select_pipes`](/api/language#select_pipes), [`Language.disable_pipe`](/api/language#disable_pipe) |
| `GoldParse` | [`Example`](/api/example) |
| `GoldCorpus` | [`Corpus`](/api/corpus) |
| `KnowledgeBase.load_bulk`, `KnowledgeBase.dump` | [`KnowledgeBase.from_disk`](/api/kb#from_disk), [`KnowledgeBase.to_disk`](/api/kb#to_disk) |
| `spacy init-model` | [`spacy init model`](/api/cli#init-model) |
| `spacy debug-data` | [`spacy debug data`](/api/cli#debug-data) |
| `spacy profile` | [`spacy debug profile`](/api/cli#debug-profile) |
| `spacy link`, `util.set_data_path`, `util.get_data_path` | not needed, model symlinks are deprecated |

The following deprecated methods, attributes and arguments were removed in v3.0.
Most of them have been **deprecated for a while** and many would previously

@ -396,7 +409,7 @@ on them.

| keyword-arguments like `vocab=False` on `to_disk`, `from_disk`, `to_bytes`, `from_bytes` | `exclude=["vocab"]` |
| `n_threads` argument on [`Tokenizer`](/api/tokenizer), [`Matcher`](/api/matcher), [`PhraseMatcher`](/api/phrasematcher) | `n_process` |
| `verbose` argument on [`Language.evaluate`](/api/language#evaluate) | logging (`DEBUG`) |
| `SentenceSegmenter` hook, `SimilarityHook` | [user hooks](/usage/processing-pipelines#custom-components-user-hooks), [`Sentencizer`](/api/sentencizer), [`SentenceRecognizer`](/api/sentencerecognizer) |

## Migrating from v2.x {#migrating}

@ -553,6 +566,24 @@ patterns = [nlp("health care reform"), nlp("healthcare reform")]

+ matcher.add("HEALTH", patterns, on_match=on_match)
```

### Migrating tag maps and morph rules {#migrating-training-mappings-exceptions}

Instead of defining a `tag_map` and `morph_rules` in the language data, spaCy
v3.0 now manages mappings and exceptions with a separate and more flexible
pipeline component, the [`AttributeRuler`](/api/attributeruler). See the
[usage guide](/usage/linguistic-features#mappings-exceptions) for examples. The
`AttributeRuler` provides two handy helper methods
[`load_from_tag_map`](/api/attributeruler#load_from_tag_map) and
[`load_from_morph_rules`](/api/attributeruler#load_from_morph_rules) that let
you load in your existing tag map or morph rules:

```diff
nlp = spacy.blank("en")
- nlp.vocab.morphology.load_tag_map(YOUR_TAG_MAP)
+ ruler = nlp.add_pipe("attribute_ruler")
+ ruler.load_from_tag_map(YOUR_TAG_MAP)
```

### Training models {#migrating-training}

To train your models, you should now pretty much always use the

@ -598,8 +629,8 @@ If you've exported a starter config from our

values. You can then use the auto-generated `config.cfg` for training:

```diff
### {wrap="true"}
- python -m spacy train en ./output ./train.json ./dev.json
--pipeline tagger,parser --cnn-window 1 --bilstm-depth 0
+ python -m spacy train ./config.cfg --output ./output
```

@ -169,7 +169,13 @@ function formatCode(html, lang, prompt) {

    }
    const result = html
        .split('\n')
        .map((line, i) => {
            let newLine = prompt ? replacePrompt(line, prompt, i === 0) : line
            if (lang === 'diff' && !line.startsWith('<')) {
                newLine = highlightCode('python', line)
            }
            return newLine
        })
        .join('\n')
    return htmlToReact(result)
}

@ -28,7 +28,6 @@ export default class Juniper extends React.Component {

            mode: this.props.lang,
            theme: this.props.theme,
        })

        const runCode = () => this.execute(outputArea, cm.getValue())
        cm.setOption('extraKeys', { 'Shift-Enter': runCode })
        Widget.attach(outputArea, this.outputRef)

@ -65,12 +65,12 @@

    --color-subtle-dark: hsl(162, 5%, 60%)

    --color-green-medium: hsl(108, 66%, 63%)
    --color-green-transparent: hsla(108, 66%, 63%, 0.12)
    --color-red-light: hsl(355, 100%, 96%)
    --color-red-medium: hsl(346, 84%, 61%)
    --color-red-dark: hsl(332, 64%, 34%)
    --color-red-opaque: hsl(346, 96%, 89%)
    --color-red-transparent: hsla(346, 84%, 61%, 0.12)
    --color-yellow-light: hsl(46, 100%, 95%)
    --color-yellow-medium: hsl(45, 90%, 55%)
    --color-yellow-dark: hsl(44, 94%, 27%)

@ -79,11 +79,11 @@

    // Syntax Highlighting
    --syntax-comment: hsl(162, 5%, 60%)
    --syntax-tag: hsl(266, 72%, 72%)
    --syntax-number: var(--syntax-tag)
    --syntax-selector: hsl(31, 100%, 71%)
    --syntax-function: hsl(195, 70%, 54%)
    --syntax-keyword: hsl(343, 100%, 68%)
    --syntax-operator: var(--syntax-keyword)
    --syntax-regex: hsl(45, 90%, 55%)

    // Other

@ -354,6 +354,7 @@ body [id]:target

    &.inserted, &.deleted
        padding: 2px 0
        border-radius: 2px
        opacity: 0.9

    &.inserted
        color: var(--color-green-medium)

@ -388,7 +389,6 @@ body [id]:target

.token
    color: var(--color-subtle)

.gatsby-highlight-code-line
    background-color: var(--color-dark-secondary)
    border-left: 0.35em solid var(--color-theme)

@ -409,6 +409,7 @@ body [id]:target

    color: var(--color-subtle)

.CodeMirror-line
    color: var(--syntax-comment)
    padding: 0

.CodeMirror-selected

@ -418,26 +419,25 @@ body [id]:target

.CodeMirror-cursor
    border-left-color: currentColor

.cm-property, .cm-variable, .cm-variable-2, .cm-meta // decorators
    color: var(--color-subtle)

.cm-comment
    color: var(--syntax-comment)

.cm-keyword, .cm-builtin
    color: var(--syntax-keyword)

.cm-operator
    color: var(--syntax-operator)

.cm-string
    color: var(--syntax-selector)

.cm-number
    color: var(--syntax-number)

.cm-def
    color: var(--syntax-function)

// Jupyter