Mirror of https://github.com/explosion/spaCy.git
Merge pull request #5798 from explosion/feature/language-data-config
Commit: cdbd6ba912

@@ -1,4 +1,5 @@
[training]
max_steps = 0
patience = 10000
eval_frequency = 200
dropout = 0.2
@@ -8,13 +9,20 @@ max_epochs = 100
orth_variant_level = 0.0
gold_preproc = true
max_length = 0
use_gpu = -1
scores = ["tags_acc", "uas", "las"]
score_weights = {"las": 0.8, "tags_acc": 0.2}
limit = 0
seed = 0
accumulate_gradient = 2
discard_oversize = false
raw_text = null
tag_map = null
morph_rules = null
base_model = null

eval_batch_size = 128
use_pytorch_for_gpu_memory = false
batch_by = "padded"

[training.batch_size]
@schedules = "compounding.v1"
@@ -30,41 +38,48 @@ beta2 = 0.999

[nlp]
lang = "en"
vectors = ${training:vectors}
pipeline = ["tok2vec", "tagger", "parser"]
load_vocab_data = false

[nlp.pipeline.tok2vec]
[nlp.tokenizer]
@tokenizers = "spacy.Tokenizer.v1"

[nlp.lemmatizer]
@lemmatizers = "spacy.Lemmatizer.v1"

[components]

[components.tok2vec]
factory = "tok2vec"

[nlp.pipeline.tagger]
[components.tagger]
factory = "tagger"

[nlp.pipeline.parser]
[components.parser]
factory = "parser"
learn_tokens = false
min_action_freq = 1
beam_width = 1
beam_update_prob = 1.0

[nlp.pipeline.tagger.model]
[components.tagger.model]
@architectures = "spacy.Tagger.v1"

[nlp.pipeline.tagger.model.tok2vec]
[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${nlp.pipeline.tok2vec.model:width}
width = ${components.tok2vec.model:width}

[nlp.pipeline.parser.model]
[components.parser.model]
@architectures = "spacy.TransitionBasedParser.v1"
nr_feature_tokens = 8
hidden_width = 64
maxout_pieces = 3

[nlp.pipeline.parser.model.tok2vec]
[components.parser.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${nlp.pipeline.tok2vec.model:width}
width = ${components.tok2vec.model:width}

[nlp.pipeline.tok2vec.model]
[components.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = ${nlp:vectors}
pretrained_vectors = ${training:vectors}
width = 96
depth = 4
window_size = 1

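The hunks above rename the per-component blocks from [nlp.pipeline.*] to [components.*] and point the interpolated references at the new section names (for example ${components.tok2vec.model:width}). Below is a minimal sketch of how a config string in this shape can be parsed with thinc's Config, which this diff itself uses via Config().from_str(); the section contents are a trimmed, illustrative subset rather than the full training config.

```python
from thinc.api import Config

# Trimmed, illustrative subset of the config above: [components.*] blocks
# plus a reference that points at the shared tok2vec width.
CONFIG_STR = """
[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
width = 96

[components.tagger]
factory = "tagger"

[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${components.tok2vec.model:width}
"""

config = Config().from_str(CONFIG_STR)
# With interpolation, the tagger's tok2vec width should resolve to the
# shared value (96) rather than staying a literal "${...}" string.
print(config["components"]["tagger"]["model"]["tok2vec"]["width"])
```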
@@ -104,7 +104,6 @@ exclude =
    .git,
    __pycache__,
    _tokenizer_exceptions_list.py,
    spacy/__init__.py

[tool:pytest]
markers =

@@ -1,32 +1,50 @@
from typing import Union, Iterable, Dict, Any
from pathlib import Path
import warnings
import sys

warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.dtype size changed")  # noqa
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")  # noqa

# These are imported as part of the API
from thinc.api import prefer_gpu, require_gpu
from thinc.api import prefer_gpu, require_gpu  # noqa: F401

from . import pipeline
from .cli.info import info
from .glossary import explain
from .about import __version__
from .errors import Errors, Warnings
from . import pipeline  # noqa: F401
from .cli.info import info  # noqa: F401
from .glossary import explain  # noqa: F401
from .about import __version__  # noqa: F401
from .util import registry  # noqa: F401

from .errors import Errors
from .language import Language
from . import util
from .util import registry


if sys.maxunicode == 65535:
    raise SystemError(Errors.E130)


config = registry
def load(
    name: Union[str, Path],
    disable: Iterable[str] = tuple(),
    component_cfg: Dict[str, Dict[str, Any]] = util.SimpleFrozenDict(),
) -> Language:
    """Load a spaCy model from an installed package or a local path.

    name (str): Package name or model path.
    disable (Iterable[str]): Names of pipeline components to disable.
    component_cfg (Dict[str, dict]): Config overrides for pipeline components,
        keyed by component names.
    RETURNS (Language): The loaded nlp object.
    """
    return util.load_model(name, disable=disable, component_cfg=component_cfg)


def load(name, **overrides):
    return util.load_model(name, **overrides)
def blank(name: str, **overrides) -> Language:
    """Create a blank nlp object for a given language code.


def blank(name, **kwargs):
    name (str): The language code, e.g. "en".
    **overrides: Keyword arguments passed to language subclass on init.
    RETURNS (Language): The nlp object.
    """
    LangClass = util.get_lang_class(name)
    return LangClass(**kwargs)
    return LangClass(**overrides)

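For reference, the new typed load() and blank() shown above replace the old **overrides-based versions: load() forwards disable and component_cfg to util.load_model(), and blank() forwards keyword arguments to the Language subclass returned by util.get_lang_class(). A short usage sketch; the package name en_core_web_sm is illustrative and assumed to be installed.

```python
import spacy

# Load an installed package (or a model directory), disabling one component.
nlp = spacy.load("en_core_web_sm", disable=["parser"])

# Create a blank pipeline for a language code.
nlp_blank = spacy.blank("en")

doc = nlp_blank("This is a sentence.")
print([token.text for token in doc])
```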
@@ -41,7 +41,6 @@ def init_model_cli(
    truncate_vectors: int = Opt(0, "--truncate-vectors", "-t", help="Optional number of vectors to truncate to when reading in vectors file"),
    vectors_name: Optional[str] = Opt(None, "--vectors-name", "-vn", help="Optional name for the word vectors, e.g. en_core_web_lg.vectors"),
    model_name: Optional[str] = Opt(None, "--model-name", "-mn", help="Optional name for the model meta"),
    omit_extra_lookups: bool = Opt(False, "--omit-extra-lookups", "-OEL", help="Don't include extra lookups in model"),
    base_model: Optional[str] = Opt(None, "--base-model", "-b", help="Base model (for languages with custom tokenizers)")
    # fmt: on
):
@@ -60,7 +59,6 @@ def init_model_cli(
        truncate_vectors=truncate_vectors,
        vectors_name=vectors_name,
        model_name=model_name,
        omit_extra_lookups=omit_extra_lookups,
        base_model=base_model,
        silent=False,
    )
@@ -77,7 +75,6 @@ def init_model(
    truncate_vectors: int = 0,
    vectors_name: Optional[str] = None,
    model_name: Optional[str] = None,
    omit_extra_lookups: bool = False,
    base_model: Optional[str] = None,
    silent: bool = True,
) -> Language:
@@ -109,14 +106,6 @@ def init_model(
    with msg.loading("Creating model..."):
        nlp = create_model(lang, lex_attrs, name=model_name, base_model=base_model)

    # Create empty extra lexeme tables so the data from spacy-lookups-data
    # isn't loaded if these features are accessed
    if omit_extra_lookups:
        nlp.vocab.lookups_extra = Lookups()
        nlp.vocab.lookups_extra.add_table("lexeme_cluster")
        nlp.vocab.lookups_extra.add_table("lexeme_prob")
        nlp.vocab.lookups_extra.add_table("lexeme_settings")

    msg.good("Successfully created model")
    if vectors_loc is not None:
        add_vectors(

@@ -120,14 +120,6 @@ def train(
    # Load morph rules
    nlp.vocab.morphology.load_morph_exceptions(morph_rules)

    # Create empty extra lexeme tables so the data from spacy-lookups-data
    # isn't loaded if these features are accessed
    if config["training"]["omit_extra_lookups"]:
        nlp.vocab.lookups_extra = Lookups()
        nlp.vocab.lookups_extra.add_table("lexeme_cluster")
        nlp.vocab.lookups_extra.add_table("lexeme_prob")
        nlp.vocab.lookups_extra.add_table("lexeme_settings")

    # Load a pretrained tok2vec model - cf. CLI command 'pretrain'
    if weights_data is not None:
        tok2vec_path = config.get("pretraining", {}).get("tok2vec_model", None)

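Both init_model() and train() above use the same guard: registering empty extra lexeme tables so that later attribute access finds an (empty) table instead of pulling the full data from spacy-lookups-data. A small sketch of that Lookups pattern in isolation; the table names mirror the hunks above.

```python
from spacy.lookups import Lookups

# Pre-register empty tables so lookups succeed without loading
# spacy-lookups-data.
lookups = Lookups()
for table_name in ("lexeme_cluster", "lexeme_prob", "lexeme_settings"):
    lookups.add_table(table_name)

table = lookups.get_table("lexeme_prob")
print(len(table))  # 0: the table exists but holds no data
```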
@@ -1,8 +1,7 @@
[nlp]
lang = null
stop_words = []
lex_attr_getters = {}
pipeline = []
load_vocab_data = true

[nlp.tokenizer]
@tokenizers = "spacy.Tokenizer.v1"
@@ -10,11 +9,6 @@ pipeline = []
[nlp.lemmatizer]
@lemmatizers = "spacy.Lemmatizer.v1"

[nlp.writing_system]
direction = "ltr"
has_case = true
has_letters = true

[components]

# Training hyper-parameters and additional features.
@@ -45,7 +39,6 @@ score_weights = {"tag_acc": 0.2, "dep_las": 0.4, "ents_f": 0.4}
# These settings are invalid for the transformer models.
init_tok2vec = null
discard_oversize = false
omit_extra_lookups = false
batch_by = "sequences"
raw_text = null
tag_map = null

@@ -83,7 +83,7 @@ class Warnings:
            "doesn't have a normalization table, please ignore this warning. "
            "If this is surprising, make sure you have the spacy-lookups-data "
            "package installed. The languages with lexeme normalization tables "
            "are currently: da, de, el, en, id, lb, pt, ru, sr, ta, th.")
            "are currently: {langs}")

    # TODO: fix numbering after merging develop into master
    W091 = ("Could not clean/remove the temp directory at {dir}: {msg}.")
@@ -434,9 +434,6 @@ class Errors:
    E170 = ("Cannot apply transition {name}: invalid for the current state.")
    E171 = ("Matcher.add received invalid on_match callback argument: expected "
            "callable or None, but got: {arg_type}")
    E172 = ("The Lemmatizer.load classmethod is deprecated. To create a "
            "Lemmatizer, initialize the class directly. See the docs for "
            "details: https://spacy.io/api/lemmatizer")
    E175 = ("Can't remove rule for unknown match pattern ID: {key}")
    E176 = ("Alias '{alias}' is not defined in the Knowledge Base.")
    E177 = ("Ill-formed IOB input detected: {tag}")
@@ -486,6 +483,7 @@ class Errors:
    E199 = ("Unable to merge 0-length span at doc[{start}:{end}].")

    # TODO: fix numbering after merging develop into master
    E955 = ("Can't find table '{table}' for language '{lang}' in spacy-lookups-data.")
    E956 = ("Can't find component '{name}' in [components] block in the config. "
            "Available components: {opts}")
    E957 = ("Writing directly to Language.factories isn't needed anymore in "
@@ -601,7 +599,7 @@ class Errors:
            "the same `Vocab`.")
    E1000 = ("No pkuseg model available. Provide a pkuseg model when "
             "initializing the pipeline:\n"
             'cfg = {"tokenizer": {"segmenter": "pkuseg", "pkuseg_model": name_or_path}}\m'
             'cfg = {"tokenizer": {"segmenter": "pkuseg", "pkuseg_model": name_or_path}}\n'
             'nlp = Chinese(config=cfg)')

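The warning and error strings above are plain format templates; callers fill the named placeholders when they raise. A tiny sketch using E955 from this hunk; the table and language values are illustrative.

```python
from spacy.errors import Errors

# Placeholders such as {table} and {lang} are filled at raise time.
message = Errors.E955.format(table="lemma_lookup", lang="en")
print(message)
```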
@@ -25,8 +25,9 @@ def make_orth_variants(nlp, raw_text, orig_token_dict, orth_variant_level=0.0):
        lower = True
        if raw is not None:
            raw = raw.lower()
    ndsv = nlp.Defaults.single_orth_variants
    ndpv = nlp.Defaults.paired_orth_variants
    orth_variants = nlp.vocab.lookups.get_table("orth_variants", {})
    ndsv = orth_variants.get("single", [])
    ndpv = orth_variants.get("paired", [])
    words = token_dict.get("words", [])
    tags = token_dict.get("tags", [])
    # keep unmodified if words or tags are not defined

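make_orth_variants() now reads its variant definitions from an "orth_variants" table in the vocab's lookups (keys "single" and "paired") instead of nlp.Defaults. A hedged sketch of populating such a table; the variant data below is illustrative, not shipped defaults.

```python
import spacy

nlp = spacy.blank("en")

# Illustrative variant data in the shape the augmenter expects.
orth_variants = {
    "single": [{"tags": ["NFP"], "variants": ["…", "..."]}],
    "paired": [{"tags": ["``", "''"], "variants": [("'", "'"), ('"', '"')]}],
}
nlp.vocab.lookups.add_table("orth_variants", orth_variants)

table = nlp.vocab.lookups.get_table("orth_variants", {})
print(table.get("single", []))
```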
@@ -1,26 +1,14 @@
from typing import Set
from thinc.api import Config

from .stop_words import STOP_WORDS
from ...language import Language
from ...util import registry


DEFAULT_CONFIG = """
[nlp]
lang = "af"
stop_words = {"@language_data": "spacy.af.stop_words"}
"""


@registry.language_data("spacy.af.stop_words")
def stop_words() -> Set[str]:
    return STOP_WORDS
class AfrikaansDefaults(Language.Defaults):
    stop_words = STOP_WORDS


class Afrikaans(Language):
    lang = "af"
    default_config = Config().from_str(DEFAULT_CONFIG)
    Defaults = AfrikaansDefaults


__all__ = ["Afrikaans"]

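The Afrikaans module above illustrates the pattern used throughout this PR: plain class attributes on a Language.Defaults subclass replace the registered @language_data functions and the per-language DEFAULT_CONFIG string. A hedged sketch of the same shape for a hypothetical custom language; the class names, the "zz" code and the data are illustrative.

```python
from spacy.language import Language


class CustomDefaults(Language.Defaults):
    # Class attributes replace the old @registry.language_data functions.
    stop_words = {"foo", "bar"}
    writing_system = {"direction": "ltr", "has_case": True, "has_letters": True}


class Custom(Language):
    lang = "zz"  # hypothetical language code
    Defaults = CustomDefaults


__all__ = ["Custom"]
```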
@ -1,48 +1,21 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .punctuation import TOKENIZER_SUFFIXES
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ar"
|
||||
stop_words = {"@language_data": "spacy.ar.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.ar.lex_attr_getters"}
|
||||
|
||||
[nlp.writing_system]
|
||||
direction = "rtl"
|
||||
has_case = false
|
||||
has_letters = true
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ar.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.ar.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class ArabicDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
stop_words = STOP_WORDS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
|
||||
|
||||
|
||||
class Arabic(Language):
|
||||
lang = "ar"
|
||||
Defaults = ArabicDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
lang = "ar"
|
||||
|
||||
|
||||
__all__ = ["Arabic"]
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
@ -6,41 +8,41 @@ _exc = {}
|
|||
|
||||
# Time
|
||||
for exc_data in [
|
||||
{LEMMA: "قبل الميلاد", ORTH: "ق.م"},
|
||||
{LEMMA: "بعد الميلاد", ORTH: "ب. م"},
|
||||
{LEMMA: "ميلادي", ORTH: ".م"},
|
||||
{LEMMA: "هجري", ORTH: ".هـ"},
|
||||
{LEMMA: "توفي", ORTH: ".ت"},
|
||||
{NORM: "قبل الميلاد", ORTH: "ق.م"},
|
||||
{NORM: "بعد الميلاد", ORTH: "ب. م"},
|
||||
{NORM: "ميلادي", ORTH: ".م"},
|
||||
{NORM: "هجري", ORTH: ".هـ"},
|
||||
{NORM: "توفي", ORTH: ".ت"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
# Scientific abv.
|
||||
for exc_data in [
|
||||
{LEMMA: "صلى الله عليه وسلم", ORTH: "صلعم"},
|
||||
{LEMMA: "الشارح", ORTH: "الشـ"},
|
||||
{LEMMA: "الظاهر", ORTH: "الظـ"},
|
||||
{LEMMA: "أيضًا", ORTH: "أيضـ"},
|
||||
{LEMMA: "إلى آخره", ORTH: "إلخ"},
|
||||
{LEMMA: "انتهى", ORTH: "اهـ"},
|
||||
{LEMMA: "حدّثنا", ORTH: "ثنا"},
|
||||
{LEMMA: "حدثني", ORTH: "ثنى"},
|
||||
{LEMMA: "أنبأنا", ORTH: "أنا"},
|
||||
{LEMMA: "أخبرنا", ORTH: "نا"},
|
||||
{LEMMA: "مصدر سابق", ORTH: "م. س"},
|
||||
{LEMMA: "مصدر نفسه", ORTH: "م. ن"},
|
||||
{NORM: "صلى الله عليه وسلم", ORTH: "صلعم"},
|
||||
{NORM: "الشارح", ORTH: "الشـ"},
|
||||
{NORM: "الظاهر", ORTH: "الظـ"},
|
||||
{NORM: "أيضًا", ORTH: "أيضـ"},
|
||||
{NORM: "إلى آخره", ORTH: "إلخ"},
|
||||
{NORM: "انتهى", ORTH: "اهـ"},
|
||||
{NORM: "حدّثنا", ORTH: "ثنا"},
|
||||
{NORM: "حدثني", ORTH: "ثنى"},
|
||||
{NORM: "أنبأنا", ORTH: "أنا"},
|
||||
{NORM: "أخبرنا", ORTH: "نا"},
|
||||
{NORM: "مصدر سابق", ORTH: "م. س"},
|
||||
{NORM: "مصدر نفسه", ORTH: "م. ن"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
# Other abv.
|
||||
for exc_data in [
|
||||
{LEMMA: "دكتور", ORTH: "د."},
|
||||
{LEMMA: "أستاذ دكتور", ORTH: "أ.د"},
|
||||
{LEMMA: "أستاذ", ORTH: "أ."},
|
||||
{LEMMA: "بروفيسور", ORTH: "ب."},
|
||||
{NORM: "دكتور", ORTH: "د."},
|
||||
{NORM: "أستاذ دكتور", ORTH: "أ.د"},
|
||||
{NORM: "أستاذ", ORTH: "أ."},
|
||||
{NORM: "بروفيسور", ORTH: "ب."},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
for exc_data in [{LEMMA: "تلفون", ORTH: "ت."}, {LEMMA: "صندوق بريد", ORTH: "ص.ب"}]:
|
||||
for exc_data in [{NORM: "تلفون", ORTH: "ت."}, {NORM: "صندوق بريد", ORTH: "ص.ب"}]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
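Across the exception tables in this PR, LEMMA attributes are replaced by NORM and the base exceptions are merged in the module itself via update_exc(BASE_EXCEPTIONS, _exc). The same ORTH/NORM shape can also be added to a tokenizer at runtime; a minimal sketch with an illustrative English contraction.

```python
import spacy
from spacy.symbols import ORTH, NORM

nlp = spacy.blank("en")

# Each special case keeps the surface form (ORTH) and may set a
# normalized form (NORM); LEMMA is no longer part of exception tables.
special_case = [{ORTH: "do"}, {ORTH: "n't", NORM: "not"}]
nlp.tokenizer.add_special_case("don't", special_case)

doc = nlp("don't")
print([(token.text, token.norm_) for token in doc])
```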
@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "bg"
|
||||
stop_words = {"@language_data": "spacy.bg.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.bg.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class BulgarianDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Bulgarian(Language):
|
||||
lang = "bg"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = BulgarianDefaults
|
||||
|
||||
|
||||
__all__ = ["Bulgarian"]
|
||||
|
|
|
@ -1,44 +1,20 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
|
||||
from .stop_words import STOP_WORDS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "bn"
|
||||
stop_words = {"@language_data": "spacy.bn.stop_words"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.bn.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class BengaliDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Bengali(Language):
|
||||
lang = "bn"
|
||||
Defaults = BengaliDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Bengali"]
|
||||
|
|
|
@ -1,24 +1,26 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "ডঃ", LEMMA: "ডক্টর"},
|
||||
{ORTH: "ডাঃ", LEMMA: "ডাক্তার"},
|
||||
{ORTH: "ড.", LEMMA: "ডক্টর"},
|
||||
{ORTH: "ডা.", LEMMA: "ডাক্তার"},
|
||||
{ORTH: "মোঃ", LEMMA: "মোহাম্মদ"},
|
||||
{ORTH: "মো.", LEMMA: "মোহাম্মদ"},
|
||||
{ORTH: "সে.", LEMMA: "সেলসিয়াস"},
|
||||
{ORTH: "কি.মি.", LEMMA: "কিলোমিটার"},
|
||||
{ORTH: "কি.মি", LEMMA: "কিলোমিটার"},
|
||||
{ORTH: "সে.মি.", LEMMA: "সেন্টিমিটার"},
|
||||
{ORTH: "সে.মি", LEMMA: "সেন্টিমিটার"},
|
||||
{ORTH: "মি.লি.", LEMMA: "মিলিলিটার"},
|
||||
{ORTH: "ডঃ", NORM: "ডক্টর"},
|
||||
{ORTH: "ডাঃ", NORM: "ডাক্তার"},
|
||||
{ORTH: "ড.", NORM: "ডক্টর"},
|
||||
{ORTH: "ডা.", NORM: "ডাক্তার"},
|
||||
{ORTH: "মোঃ", NORM: "মোহাম্মদ"},
|
||||
{ORTH: "মো.", NORM: "মোহাম্মদ"},
|
||||
{ORTH: "সে.", NORM: "সেলসিয়াস"},
|
||||
{ORTH: "কি.মি.", NORM: "কিলোমিটার"},
|
||||
{ORTH: "কি.মি", NORM: "কিলোমিটার"},
|
||||
{ORTH: "সে.মি.", NORM: "সেন্টিমিটার"},
|
||||
{ORTH: "সে.মি", NORM: "সেন্টিমিটার"},
|
||||
{ORTH: "মি.লি.", NORM: "মিলিলিটার"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@ -1,49 +1,20 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_INFIXES
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
from .punctuation import TOKENIZER_INFIXES
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ca"
|
||||
stop_words = {"@language_data": "spacy.ca.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.ca.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ca.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.ca.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class CatalanDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
stop_words = STOP_WORDS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
|
||||
|
||||
class Catalan(Language):
|
||||
lang = "ca"
|
||||
Defaults = CatalanDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Catalan"]
|
||||
|
|
|
@ -1,38 +1,40 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "aprox.", LEMMA: "aproximadament"},
|
||||
{ORTH: "pàg.", LEMMA: "pàgina"},
|
||||
{ORTH: "p.ex.", LEMMA: "per exemple"},
|
||||
{ORTH: "gen.", LEMMA: "gener"},
|
||||
{ORTH: "feb.", LEMMA: "febrer"},
|
||||
{ORTH: "abr.", LEMMA: "abril"},
|
||||
{ORTH: "jul.", LEMMA: "juliol"},
|
||||
{ORTH: "set.", LEMMA: "setembre"},
|
||||
{ORTH: "oct.", LEMMA: "octubre"},
|
||||
{ORTH: "nov.", LEMMA: "novembre"},
|
||||
{ORTH: "dec.", LEMMA: "desembre"},
|
||||
{ORTH: "Dr.", LEMMA: "doctor"},
|
||||
{ORTH: "Sr.", LEMMA: "senyor"},
|
||||
{ORTH: "Sra.", LEMMA: "senyora"},
|
||||
{ORTH: "Srta.", LEMMA: "senyoreta"},
|
||||
{ORTH: "núm", LEMMA: "número"},
|
||||
{ORTH: "St.", LEMMA: "sant"},
|
||||
{ORTH: "Sta.", LEMMA: "santa"},
|
||||
{ORTH: "aprox.", NORM: "aproximadament"},
|
||||
{ORTH: "pàg.", NORM: "pàgina"},
|
||||
{ORTH: "p.ex.", NORM: "per exemple"},
|
||||
{ORTH: "gen.", NORM: "gener"},
|
||||
{ORTH: "feb.", NORM: "febrer"},
|
||||
{ORTH: "abr.", NORM: "abril"},
|
||||
{ORTH: "jul.", NORM: "juliol"},
|
||||
{ORTH: "set.", NORM: "setembre"},
|
||||
{ORTH: "oct.", NORM: "octubre"},
|
||||
{ORTH: "nov.", NORM: "novembre"},
|
||||
{ORTH: "dec.", NORM: "desembre"},
|
||||
{ORTH: "Dr.", NORM: "doctor"},
|
||||
{ORTH: "Sr.", NORM: "senyor"},
|
||||
{ORTH: "Sra.", NORM: "senyora"},
|
||||
{ORTH: "Srta.", NORM: "senyoreta"},
|
||||
{ORTH: "núm", NORM: "número"},
|
||||
{ORTH: "St.", NORM: "sant"},
|
||||
{ORTH: "Sta.", NORM: "santa"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
# Times
|
||||
_exc["12m."] = [{ORTH: "12"}, {ORTH: "m.", LEMMA: "p.m."}]
|
||||
_exc["12m."] = [{ORTH: "12"}, {ORTH: "m.", NORM: "p.m."}]
|
||||
|
||||
for h in range(1, 12 + 1):
|
||||
for period in ["a.m.", "am"]:
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, LEMMA: "a.m."}]
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, NORM: "a.m."}]
|
||||
for period in ["p.m.", "pm"]:
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, LEMMA: "p.m."}]
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, NORM: "p.m."}]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "cs"
|
||||
stop_words = {"@language_data": "spacy.cs.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.cs.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class CzechDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Czech(Language):
|
||||
lang = "cs"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = CzechDefaults
|
||||
|
||||
|
||||
__all__ = ["Czech"]
|
||||
|
|
|
@ -1,50 +1,21 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "da"
|
||||
stop_words = {"@language_data": "spacy.da.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.da.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.da.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.da.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class DanishDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Danish(Language):
|
||||
lang = "da"
|
||||
Defaults = DanishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Danish"]
|
||||
|
|
|
@ -2,7 +2,9 @@
|
|||
Tokenizer Exceptions.
|
||||
Source: https://forkortelse.dk/ and various others.
|
||||
"""
|
||||
from ...symbols import ORTH, LEMMA, NORM
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
@ -11,44 +13,44 @@ _exc = {}
|
|||
# (for "torsdag") are left out because they are ambiguous. The same is the case
|
||||
# for abbreviations "jul." and "Jul." ("juli").
|
||||
for exc_data in [
|
||||
{ORTH: "Kbh.", LEMMA: "København", NORM: "København"},
|
||||
{ORTH: "jan.", LEMMA: "januar"},
|
||||
{ORTH: "febr.", LEMMA: "februar"},
|
||||
{ORTH: "feb.", LEMMA: "februar"},
|
||||
{ORTH: "mar.", LEMMA: "marts"},
|
||||
{ORTH: "apr.", LEMMA: "april"},
|
||||
{ORTH: "jun.", LEMMA: "juni"},
|
||||
{ORTH: "aug.", LEMMA: "august"},
|
||||
{ORTH: "sept.", LEMMA: "september"},
|
||||
{ORTH: "sep.", LEMMA: "september"},
|
||||
{ORTH: "okt.", LEMMA: "oktober"},
|
||||
{ORTH: "nov.", LEMMA: "november"},
|
||||
{ORTH: "dec.", LEMMA: "december"},
|
||||
{ORTH: "man.", LEMMA: "mandag"},
|
||||
{ORTH: "tirs.", LEMMA: "tirsdag"},
|
||||
{ORTH: "ons.", LEMMA: "onsdag"},
|
||||
{ORTH: "tor.", LEMMA: "torsdag"},
|
||||
{ORTH: "tors.", LEMMA: "torsdag"},
|
||||
{ORTH: "fre.", LEMMA: "fredag"},
|
||||
{ORTH: "lør.", LEMMA: "lørdag"},
|
||||
{ORTH: "Jan.", LEMMA: "januar"},
|
||||
{ORTH: "Febr.", LEMMA: "februar"},
|
||||
{ORTH: "Feb.", LEMMA: "februar"},
|
||||
{ORTH: "Mar.", LEMMA: "marts"},
|
||||
{ORTH: "Apr.", LEMMA: "april"},
|
||||
{ORTH: "Jun.", LEMMA: "juni"},
|
||||
{ORTH: "Aug.", LEMMA: "august"},
|
||||
{ORTH: "Sept.", LEMMA: "september"},
|
||||
{ORTH: "Sep.", LEMMA: "september"},
|
||||
{ORTH: "Okt.", LEMMA: "oktober"},
|
||||
{ORTH: "Nov.", LEMMA: "november"},
|
||||
{ORTH: "Dec.", LEMMA: "december"},
|
||||
{ORTH: "Man.", LEMMA: "mandag"},
|
||||
{ORTH: "Tirs.", LEMMA: "tirsdag"},
|
||||
{ORTH: "Ons.", LEMMA: "onsdag"},
|
||||
{ORTH: "Fre.", LEMMA: "fredag"},
|
||||
{ORTH: "Lør.", LEMMA: "lørdag"},
|
||||
{ORTH: "og/eller", LEMMA: "og/eller", NORM: "og/eller"},
|
||||
{ORTH: "Kbh.", NORM: "København"},
|
||||
{ORTH: "jan.", NORM: "januar"},
|
||||
{ORTH: "febr.", NORM: "februar"},
|
||||
{ORTH: "feb.", NORM: "februar"},
|
||||
{ORTH: "mar.", NORM: "marts"},
|
||||
{ORTH: "apr.", NORM: "april"},
|
||||
{ORTH: "jun.", NORM: "juni"},
|
||||
{ORTH: "aug.", NORM: "august"},
|
||||
{ORTH: "sept.", NORM: "september"},
|
||||
{ORTH: "sep.", NORM: "september"},
|
||||
{ORTH: "okt.", NORM: "oktober"},
|
||||
{ORTH: "nov.", NORM: "november"},
|
||||
{ORTH: "dec.", NORM: "december"},
|
||||
{ORTH: "man.", NORM: "mandag"},
|
||||
{ORTH: "tirs.", NORM: "tirsdag"},
|
||||
{ORTH: "ons.", NORM: "onsdag"},
|
||||
{ORTH: "tor.", NORM: "torsdag"},
|
||||
{ORTH: "tors.", NORM: "torsdag"},
|
||||
{ORTH: "fre.", NORM: "fredag"},
|
||||
{ORTH: "lør.", NORM: "lørdag"},
|
||||
{ORTH: "Jan.", NORM: "januar"},
|
||||
{ORTH: "Febr.", NORM: "februar"},
|
||||
{ORTH: "Feb.", NORM: "februar"},
|
||||
{ORTH: "Mar.", NORM: "marts"},
|
||||
{ORTH: "Apr.", NORM: "april"},
|
||||
{ORTH: "Jun.", NORM: "juni"},
|
||||
{ORTH: "Aug.", NORM: "august"},
|
||||
{ORTH: "Sept.", NORM: "september"},
|
||||
{ORTH: "Sep.", NORM: "september"},
|
||||
{ORTH: "Okt.", NORM: "oktober"},
|
||||
{ORTH: "Nov.", NORM: "november"},
|
||||
{ORTH: "Dec.", NORM: "december"},
|
||||
{ORTH: "Man.", NORM: "mandag"},
|
||||
{ORTH: "Tirs.", NORM: "tirsdag"},
|
||||
{ORTH: "Ons.", NORM: "onsdag"},
|
||||
{ORTH: "Fre.", NORM: "fredag"},
|
||||
{ORTH: "Lør.", NORM: "lørdag"},
|
||||
{ORTH: "og/eller", NORM: "og/eller"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@ -548,22 +550,22 @@ for orth in [
|
|||
_exc[capitalized] = [{ORTH: capitalized}]
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "s'gu", LEMMA: "s'gu", NORM: "s'gu"},
|
||||
{ORTH: "S'gu", LEMMA: "s'gu", NORM: "s'gu"},
|
||||
{ORTH: "sgu'", LEMMA: "s'gu", NORM: "s'gu"},
|
||||
{ORTH: "Sgu'", LEMMA: "s'gu", NORM: "s'gu"},
|
||||
{ORTH: "sku'", LEMMA: "skal", NORM: "skulle"},
|
||||
{ORTH: "ku'", LEMMA: "kan", NORM: "kunne"},
|
||||
{ORTH: "Ku'", LEMMA: "kan", NORM: "kunne"},
|
||||
{ORTH: "ka'", LEMMA: "kan", NORM: "kan"},
|
||||
{ORTH: "Ka'", LEMMA: "kan", NORM: "kan"},
|
||||
{ORTH: "gi'", LEMMA: "give", NORM: "giv"},
|
||||
{ORTH: "Gi'", LEMMA: "give", NORM: "giv"},
|
||||
{ORTH: "li'", LEMMA: "lide", NORM: "lide"},
|
||||
{ORTH: "ha'", LEMMA: "have", NORM: "have"},
|
||||
{ORTH: "Ha'", LEMMA: "have", NORM: "have"},
|
||||
{ORTH: "ik'", LEMMA: "ikke", NORM: "ikke"},
|
||||
{ORTH: "Ik'", LEMMA: "ikke", NORM: "ikke"},
|
||||
{ORTH: "s'gu", NORM: "s'gu"},
|
||||
{ORTH: "S'gu", NORM: "s'gu"},
|
||||
{ORTH: "sgu'", NORM: "s'gu"},
|
||||
{ORTH: "Sgu'", NORM: "s'gu"},
|
||||
{ORTH: "sku'", NORM: "skulle"},
|
||||
{ORTH: "ku'", NORM: "kunne"},
|
||||
{ORTH: "Ku'", NORM: "kunne"},
|
||||
{ORTH: "ka'", NORM: "kan"},
|
||||
{ORTH: "Ka'", NORM: "kan"},
|
||||
{ORTH: "gi'", NORM: "giv"},
|
||||
{ORTH: "Gi'", NORM: "giv"},
|
||||
{ORTH: "li'", NORM: "lide"},
|
||||
{ORTH: "ha'", NORM: "have"},
|
||||
{ORTH: "Ha'", NORM: "have"},
|
||||
{ORTH: "ik'", NORM: "ikke"},
|
||||
{ORTH: "Ik'", NORM: "ikke"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@ -573,7 +575,7 @@ for h in range(1, 31 + 1):
|
|||
for period in ["."]:
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}."}]
|
||||
|
||||
_custom_base_exc = {"i.": [{ORTH: "i", LEMMA: "i", NORM: "i"}, {ORTH: "."}]}
|
||||
_custom_base_exc = {"i.": [{ORTH: "i", NORM: "i"}, {ORTH: "."}]}
|
||||
_exc.update(_custom_base_exc)
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@ -1,61 +1,22 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
|
||||
from .punctuation import TOKENIZER_INFIXES
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
|
||||
from .stop_words import STOP_WORDS
|
||||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "de"
|
||||
stop_words = {"@language_data": "spacy.de.stop_words"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.de.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class GermanDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
single_orth_variants = [
|
||||
{"tags": ["$("], "variants": ["…", "..."]},
|
||||
{"tags": ["$("], "variants": ["-", "—", "–", "--", "---", "——"]},
|
||||
]
|
||||
paired_orth_variants = [
|
||||
{
|
||||
"tags": ["$("],
|
||||
"variants": [("'", "'"), (",", "'"), ("‚", "‘"), ("›", "‹"), ("‹", "›")],
|
||||
},
|
||||
{
|
||||
"tags": ["$("],
|
||||
"variants": [("``", "''"), ('"', '"'), ("„", "“"), ("»", "«"), ("«", "»")],
|
||||
},
|
||||
]
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class German(Language):
|
||||
lang = "de"
|
||||
Defaults = GermanDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["German"]
|
||||
|
|
|
@ -1,39 +1,26 @@
|
|||
from typing import Union, Iterator
|
||||
|
||||
from ...symbols import NOUN, PROPN, PRON
|
||||
from ...errors import Errors
|
||||
from ...tokens import Doc, Span
|
||||
|
||||
|
||||
def noun_chunks(doclike):
|
||||
"""
|
||||
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
|
||||
"""
|
||||
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
|
||||
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
|
||||
# this iterator extracts spans headed by NOUNs starting from the left-most
|
||||
# syntactic dependent until the NOUN itself for close apposition and
|
||||
# measurement construction, the span is sometimes extended to the right of
|
||||
# the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee"
|
||||
# and not just "eine Tasse", same for "das Thema Familie".
|
||||
labels = [
|
||||
"sb",
|
||||
"oa",
|
||||
"da",
|
||||
"nk",
|
||||
"mo",
|
||||
"ag",
|
||||
"ROOT",
|
||||
"root",
|
||||
"cj",
|
||||
"pd",
|
||||
"og",
|
||||
"app",
|
||||
]
|
||||
# fmt: off
|
||||
labels = ["sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app"]
|
||||
# fmt: on
|
||||
doc = doclike.doc # Ensure works on both Doc and Span.
|
||||
|
||||
if not doc.is_parsed:
|
||||
raise ValueError(Errors.E029)
|
||||
|
||||
np_label = doc.vocab.strings.add("NP")
|
||||
np_deps = set(doc.vocab.strings.add(label) for label in labels)
|
||||
close_app = doc.vocab.strings.add("nk")
|
||||
|
||||
rbracket = 0
|
||||
for i, word in enumerate(doclike):
|
||||
if i < rbracket:
|
||||
|
|
|
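The German noun_chunks iterator above is now typed to take a Doc or Span and yield Span objects, and it raises Errors.E029 when the document has no parse. It is consumed through Doc.noun_chunks; a short hedged sketch, assuming a parsed German pipeline such as de_core_news_sm is installed (the package name is illustrative).

```python
import spacy

nlp = spacy.load("de_core_news_sm")  # assumed to be installed
doc = nlp("Eine Tasse Tee steht auf dem Tisch.")

# The syntax iterator yields base noun phrases as Span objects.
for chunk in doc.noun_chunks:
    print(chunk.text, chunk.root.dep_)
```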
@ -1,157 +1,135 @@
|
|||
from ...symbols import ORTH, LEMMA, TAG, NORM, PRON_LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {
|
||||
"auf'm": [{ORTH: "auf", LEMMA: "auf"}, {ORTH: "'m", LEMMA: "der", NORM: "dem"}],
|
||||
"du's": [
|
||||
{ORTH: "du", LEMMA: PRON_LEMMA, TAG: "PPER"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, TAG: "PPER", NORM: "es"},
|
||||
],
|
||||
"er's": [
|
||||
{ORTH: "er", LEMMA: PRON_LEMMA, TAG: "PPER"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, TAG: "PPER", NORM: "es"},
|
||||
],
|
||||
"hinter'm": [
|
||||
{ORTH: "hinter", LEMMA: "hinter"},
|
||||
{ORTH: "'m", LEMMA: "der", NORM: "dem"},
|
||||
],
|
||||
"ich's": [
|
||||
{ORTH: "ich", LEMMA: PRON_LEMMA, TAG: "PPER"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, TAG: "PPER", NORM: "es"},
|
||||
],
|
||||
"ihr's": [
|
||||
{ORTH: "ihr", LEMMA: PRON_LEMMA, TAG: "PPER"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, TAG: "PPER", NORM: "es"},
|
||||
],
|
||||
"sie's": [
|
||||
{ORTH: "sie", LEMMA: PRON_LEMMA, TAG: "PPER"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, TAG: "PPER", NORM: "es"},
|
||||
],
|
||||
"unter'm": [
|
||||
{ORTH: "unter", LEMMA: "unter"},
|
||||
{ORTH: "'m", LEMMA: "der", NORM: "dem"},
|
||||
],
|
||||
"vor'm": [{ORTH: "vor", LEMMA: "vor"}, {ORTH: "'m", LEMMA: "der", NORM: "dem"}],
|
||||
"wir's": [
|
||||
{ORTH: "wir", LEMMA: PRON_LEMMA, TAG: "PPER"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, TAG: "PPER", NORM: "es"},
|
||||
],
|
||||
"über'm": [{ORTH: "über", LEMMA: "über"}, {ORTH: "'m", LEMMA: "der", NORM: "dem"}],
|
||||
"auf'm": [{ORTH: "auf"}, {ORTH: "'m", NORM: "dem"}],
|
||||
"du's": [{ORTH: "du"}, {ORTH: "'s", NORM: "es"}],
|
||||
"er's": [{ORTH: "er"}, {ORTH: "'s", NORM: "es"}],
|
||||
"hinter'm": [{ORTH: "hinter"}, {ORTH: "'m", NORM: "dem"}],
|
||||
"ich's": [{ORTH: "ich"}, {ORTH: "'s", NORM: "es"}],
|
||||
"ihr's": [{ORTH: "ihr"}, {ORTH: "'s", NORM: "es"}],
|
||||
"sie's": [{ORTH: "sie"}, {ORTH: "'s", NORM: "es"}],
|
||||
"unter'm": [{ORTH: "unter"}, {ORTH: "'m", NORM: "dem"}],
|
||||
"vor'm": [{ORTH: "vor"}, {ORTH: "'m", NORM: "dem"}],
|
||||
"wir's": [{ORTH: "wir"}, {ORTH: "'s", NORM: "es"}],
|
||||
"über'm": [{ORTH: "über"}, {ORTH: "'m", NORM: "dem"}],
|
||||
}
|
||||
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "'S", LEMMA: PRON_LEMMA, NORM: "'s", TAG: "PPER"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, NORM: "'s", TAG: "PPER"},
|
||||
{ORTH: "S'", LEMMA: PRON_LEMMA, NORM: "'s", TAG: "PPER"},
|
||||
{ORTH: "s'", LEMMA: PRON_LEMMA, NORM: "'s", TAG: "PPER"},
|
||||
{ORTH: "'n", LEMMA: "ein", NORM: "ein"},
|
||||
{ORTH: "'ne", LEMMA: "eine", NORM: "eine"},
|
||||
{ORTH: "'nen", LEMMA: "ein", NORM: "einen"},
|
||||
{ORTH: "'nem", LEMMA: "ein", NORM: "einem"},
|
||||
{ORTH: "Abb.", LEMMA: "Abbildung", NORM: "Abbildung"},
|
||||
{ORTH: "Abk.", LEMMA: "Abkürzung", NORM: "Abkürzung"},
|
||||
{ORTH: "Abt.", LEMMA: "Abteilung", NORM: "Abteilung"},
|
||||
{ORTH: "Apr.", LEMMA: "April", NORM: "April"},
|
||||
{ORTH: "Aug.", LEMMA: "August", NORM: "August"},
|
||||
{ORTH: "Bd.", LEMMA: "Band", NORM: "Band"},
|
||||
{ORTH: "Betr.", LEMMA: "Betreff", NORM: "Betreff"},
|
||||
{ORTH: "Bf.", LEMMA: "Bahnhof", NORM: "Bahnhof"},
|
||||
{ORTH: "Bhf.", LEMMA: "Bahnhof", NORM: "Bahnhof"},
|
||||
{ORTH: "Bsp.", LEMMA: "Beispiel", NORM: "Beispiel"},
|
||||
{ORTH: "Dez.", LEMMA: "Dezember", NORM: "Dezember"},
|
||||
{ORTH: "Di.", LEMMA: "Dienstag", NORM: "Dienstag"},
|
||||
{ORTH: "Do.", LEMMA: "Donnerstag", NORM: "Donnerstag"},
|
||||
{ORTH: "Fa.", LEMMA: "Firma", NORM: "Firma"},
|
||||
{ORTH: "Fam.", LEMMA: "Familie", NORM: "Familie"},
|
||||
{ORTH: "Feb.", LEMMA: "Februar", NORM: "Februar"},
|
||||
{ORTH: "Fr.", LEMMA: "Frau", NORM: "Frau"},
|
||||
{ORTH: "Frl.", LEMMA: "Fräulein", NORM: "Fräulein"},
|
||||
{ORTH: "Hbf.", LEMMA: "Hauptbahnhof", NORM: "Hauptbahnhof"},
|
||||
{ORTH: "Hr.", LEMMA: "Herr", NORM: "Herr"},
|
||||
{ORTH: "Hrn.", LEMMA: "Herr", NORM: "Herrn"},
|
||||
{ORTH: "Jan.", LEMMA: "Januar", NORM: "Januar"},
|
||||
{ORTH: "Jh.", LEMMA: "Jahrhundert", NORM: "Jahrhundert"},
|
||||
{ORTH: "Jhd.", LEMMA: "Jahrhundert", NORM: "Jahrhundert"},
|
||||
{ORTH: "Jul.", LEMMA: "Juli", NORM: "Juli"},
|
||||
{ORTH: "Jun.", LEMMA: "Juni", NORM: "Juni"},
|
||||
{ORTH: "Mi.", LEMMA: "Mittwoch", NORM: "Mittwoch"},
|
||||
{ORTH: "Mio.", LEMMA: "Million", NORM: "Million"},
|
||||
{ORTH: "Mo.", LEMMA: "Montag", NORM: "Montag"},
|
||||
{ORTH: "Mrd.", LEMMA: "Milliarde", NORM: "Milliarde"},
|
||||
{ORTH: "Mrz.", LEMMA: "März", NORM: "März"},
|
||||
{ORTH: "MwSt.", LEMMA: "Mehrwertsteuer", NORM: "Mehrwertsteuer"},
|
||||
{ORTH: "Mär.", LEMMA: "März", NORM: "März"},
|
||||
{ORTH: "Nov.", LEMMA: "November", NORM: "November"},
|
||||
{ORTH: "Nr.", LEMMA: "Nummer", NORM: "Nummer"},
|
||||
{ORTH: "Okt.", LEMMA: "Oktober", NORM: "Oktober"},
|
||||
{ORTH: "Orig.", LEMMA: "Original", NORM: "Original"},
|
||||
{ORTH: "Pkt.", LEMMA: "Punkt", NORM: "Punkt"},
|
||||
{ORTH: "Prof.", LEMMA: "Professor", NORM: "Professor"},
|
||||
{ORTH: "Red.", LEMMA: "Redaktion", NORM: "Redaktion"},
|
||||
{ORTH: "Sa.", LEMMA: "Samstag", NORM: "Samstag"},
|
||||
{ORTH: "Sep.", LEMMA: "September", NORM: "September"},
|
||||
{ORTH: "Sept.", LEMMA: "September", NORM: "September"},
|
||||
{ORTH: "So.", LEMMA: "Sonntag", NORM: "Sonntag"},
|
||||
{ORTH: "Std.", LEMMA: "Stunde", NORM: "Stunde"},
|
||||
{ORTH: "Str.", LEMMA: "Straße", NORM: "Straße"},
|
||||
{ORTH: "Tel.", LEMMA: "Telefon", NORM: "Telefon"},
|
||||
{ORTH: "Tsd.", LEMMA: "Tausend", NORM: "Tausend"},
|
||||
{ORTH: "Univ.", LEMMA: "Universität", NORM: "Universität"},
|
||||
{ORTH: "abzgl.", LEMMA: "abzüglich", NORM: "abzüglich"},
|
||||
{ORTH: "allg.", LEMMA: "allgemein", NORM: "allgemein"},
|
||||
{ORTH: "bspw.", LEMMA: "beispielsweise", NORM: "beispielsweise"},
|
||||
{ORTH: "bzgl.", LEMMA: "bezüglich", NORM: "bezüglich"},
|
||||
{ORTH: "bzw.", LEMMA: "beziehungsweise", NORM: "beziehungsweise"},
|
||||
{ORTH: "d.h.", LEMMA: "das heißt"},
|
||||
{ORTH: "dgl.", LEMMA: "dergleichen", NORM: "dergleichen"},
|
||||
{ORTH: "ebd.", LEMMA: "ebenda", NORM: "ebenda"},
|
||||
{ORTH: "eigtl.", LEMMA: "eigentlich", NORM: "eigentlich"},
|
||||
{ORTH: "engl.", LEMMA: "englisch", NORM: "englisch"},
|
||||
{ORTH: "evtl.", LEMMA: "eventuell", NORM: "eventuell"},
|
||||
{ORTH: "frz.", LEMMA: "französisch", NORM: "französisch"},
|
||||
{ORTH: "gegr.", LEMMA: "gegründet", NORM: "gegründet"},
|
||||
{ORTH: "ggf.", LEMMA: "gegebenenfalls", NORM: "gegebenenfalls"},
|
||||
{ORTH: "ggfs.", LEMMA: "gegebenenfalls", NORM: "gegebenenfalls"},
|
||||
{ORTH: "ggü.", LEMMA: "gegenüber", NORM: "gegenüber"},
|
||||
{ORTH: "i.O.", LEMMA: "in Ordnung"},
|
||||
{ORTH: "i.d.R.", LEMMA: "in der Regel"},
|
||||
{ORTH: "incl.", LEMMA: "inklusive", NORM: "inklusive"},
|
||||
{ORTH: "inkl.", LEMMA: "inklusive", NORM: "inklusive"},
|
||||
{ORTH: "insb.", LEMMA: "insbesondere", NORM: "insbesondere"},
|
||||
{ORTH: "kath.", LEMMA: "katholisch", NORM: "katholisch"},
|
||||
{ORTH: "lt.", LEMMA: "laut", NORM: "laut"},
|
||||
{ORTH: "max.", LEMMA: "maximal", NORM: "maximal"},
|
||||
{ORTH: "min.", LEMMA: "minimal", NORM: "minimal"},
|
||||
{ORTH: "mind.", LEMMA: "mindestens", NORM: "mindestens"},
|
||||
{ORTH: "mtl.", LEMMA: "monatlich", NORM: "monatlich"},
|
||||
{ORTH: "n.Chr.", LEMMA: "nach Christus"},
|
||||
{ORTH: "orig.", LEMMA: "original", NORM: "original"},
|
||||
{ORTH: "röm.", LEMMA: "römisch", NORM: "römisch"},
|
||||
{ORTH: "s.o.", LEMMA: "siehe oben"},
|
||||
{ORTH: "sog.", LEMMA: "so genannt"},
|
||||
{ORTH: "stellv.", LEMMA: "stellvertretend"},
|
||||
{ORTH: "tägl.", LEMMA: "täglich", NORM: "täglich"},
|
||||
{ORTH: "u.U.", LEMMA: "unter Umständen"},
|
||||
{ORTH: "u.s.w.", LEMMA: "und so weiter"},
|
||||
{ORTH: "u.v.m.", LEMMA: "und vieles mehr"},
|
||||
{ORTH: "usf.", LEMMA: "und so fort"},
|
||||
{ORTH: "usw.", LEMMA: "und so weiter"},
|
||||
{ORTH: "uvm.", LEMMA: "und vieles mehr"},
|
||||
{ORTH: "v.Chr.", LEMMA: "vor Christus"},
|
||||
{ORTH: "v.a.", LEMMA: "vor allem"},
|
||||
{ORTH: "v.l.n.r.", LEMMA: "von links nach rechts"},
|
||||
{ORTH: "vgl.", LEMMA: "vergleiche", NORM: "vergleiche"},
|
||||
{ORTH: "vllt.", LEMMA: "vielleicht", NORM: "vielleicht"},
|
||||
{ORTH: "vlt.", LEMMA: "vielleicht", NORM: "vielleicht"},
|
||||
{ORTH: "z.B.", LEMMA: "zum Beispiel"},
|
||||
{ORTH: "z.Bsp.", LEMMA: "zum Beispiel"},
|
||||
{ORTH: "z.T.", LEMMA: "zum Teil"},
|
||||
{ORTH: "z.Z.", LEMMA: "zur Zeit"},
|
||||
{ORTH: "z.Zt.", LEMMA: "zur Zeit"},
|
||||
{ORTH: "z.b.", LEMMA: "zum Beispiel"},
|
||||
{ORTH: "zzgl.", LEMMA: "zuzüglich"},
|
||||
{ORTH: "österr.", LEMMA: "österreichisch", NORM: "österreichisch"},
|
||||
{ORTH: "'S", NORM: "'s"},
|
||||
{ORTH: "'s", NORM: "'s"},
|
||||
{ORTH: "S'", NORM: "'s"},
|
||||
{ORTH: "s'", NORM: "'s"},
|
||||
{ORTH: "'n", NORM: "ein"},
|
||||
{ORTH: "'ne", NORM: "eine"},
|
||||
{ORTH: "'nen", NORM: "einen"},
|
||||
{ORTH: "'nem", NORM: "einem"},
|
||||
{ORTH: "Abb.", NORM: "Abbildung"},
|
||||
{ORTH: "Abk.", NORM: "Abkürzung"},
|
||||
{ORTH: "Abt.", NORM: "Abteilung"},
|
||||
{ORTH: "Apr.", NORM: "April"},
|
||||
{ORTH: "Aug.", NORM: "August"},
|
||||
{ORTH: "Bd.", NORM: "Band"},
|
||||
{ORTH: "Betr.", NORM: "Betreff"},
|
||||
{ORTH: "Bf.", NORM: "Bahnhof"},
|
||||
{ORTH: "Bhf.", NORM: "Bahnhof"},
|
||||
{ORTH: "Bsp.", NORM: "Beispiel"},
|
||||
{ORTH: "Dez.", NORM: "Dezember"},
|
||||
{ORTH: "Di.", NORM: "Dienstag"},
|
||||
{ORTH: "Do.", NORM: "Donnerstag"},
|
||||
{ORTH: "Fa.", NORM: "Firma"},
|
||||
{ORTH: "Fam.", NORM: "Familie"},
|
||||
{ORTH: "Feb.", NORM: "Februar"},
|
||||
{ORTH: "Fr.", NORM: "Frau"},
|
||||
{ORTH: "Frl.", NORM: "Fräulein"},
|
||||
{ORTH: "Hbf.", NORM: "Hauptbahnhof"},
|
||||
{ORTH: "Hr.", NORM: "Herr"},
|
||||
{ORTH: "Hrn.", NORM: "Herrn"},
|
||||
{ORTH: "Jan.", NORM: "Januar"},
|
||||
{ORTH: "Jh.", NORM: "Jahrhundert"},
|
||||
{ORTH: "Jhd.", NORM: "Jahrhundert"},
|
||||
{ORTH: "Jul.", NORM: "Juli"},
|
||||
{ORTH: "Jun.", NORM: "Juni"},
|
||||
{ORTH: "Mi.", NORM: "Mittwoch"},
|
||||
{ORTH: "Mio.", NORM: "Million"},
|
||||
{ORTH: "Mo.", NORM: "Montag"},
|
||||
{ORTH: "Mrd.", NORM: "Milliarde"},
|
||||
{ORTH: "Mrz.", NORM: "März"},
|
||||
{ORTH: "MwSt.", NORM: "Mehrwertsteuer"},
|
||||
{ORTH: "Mär.", NORM: "März"},
|
||||
{ORTH: "Nov.", NORM: "November"},
|
||||
{ORTH: "Nr.", NORM: "Nummer"},
|
||||
{ORTH: "Okt.", NORM: "Oktober"},
|
||||
{ORTH: "Orig.", NORM: "Original"},
|
||||
{ORTH: "Pkt.", NORM: "Punkt"},
|
||||
{ORTH: "Prof.", NORM: "Professor"},
|
||||
{ORTH: "Red.", NORM: "Redaktion"},
|
||||
{ORTH: "Sa.", NORM: "Samstag"},
|
||||
{ORTH: "Sep.", NORM: "September"},
|
||||
{ORTH: "Sept.", NORM: "September"},
|
||||
{ORTH: "So.", NORM: "Sonntag"},
|
||||
{ORTH: "Std.", NORM: "Stunde"},
|
||||
{ORTH: "Str.", NORM: "Straße"},
|
||||
{ORTH: "Tel.", NORM: "Telefon"},
|
||||
{ORTH: "Tsd.", NORM: "Tausend"},
|
||||
{ORTH: "Univ.", NORM: "Universität"},
|
||||
{ORTH: "abzgl.", NORM: "abzüglich"},
|
||||
{ORTH: "allg.", NORM: "allgemein"},
|
||||
{ORTH: "bspw.", NORM: "beispielsweise"},
|
||||
{ORTH: "bzgl.", NORM: "bezüglich"},
|
||||
{ORTH: "bzw.", NORM: "beziehungsweise"},
|
||||
{ORTH: "d.h."},
|
||||
{ORTH: "dgl.", NORM: "dergleichen"},
|
||||
{ORTH: "ebd.", NORM: "ebenda"},
|
||||
{ORTH: "eigtl.", NORM: "eigentlich"},
|
||||
{ORTH: "engl.", NORM: "englisch"},
|
||||
{ORTH: "evtl.", NORM: "eventuell"},
|
||||
{ORTH: "frz.", NORM: "französisch"},
|
||||
{ORTH: "gegr.", NORM: "gegründet"},
|
||||
{ORTH: "ggf.", NORM: "gegebenenfalls"},
|
||||
{ORTH: "ggfs.", NORM: "gegebenenfalls"},
|
||||
{ORTH: "ggü.", NORM: "gegenüber"},
|
||||
{ORTH: "i.O."},
|
||||
{ORTH: "i.d.R."},
|
||||
{ORTH: "incl.", NORM: "inklusive"},
|
||||
{ORTH: "inkl.", NORM: "inklusive"},
|
||||
{ORTH: "insb.", NORM: "insbesondere"},
|
||||
{ORTH: "kath.", NORM: "katholisch"},
|
||||
{ORTH: "lt.", NORM: "laut"},
|
||||
{ORTH: "max.", NORM: "maximal"},
|
||||
{ORTH: "min.", NORM: "minimal"},
|
||||
{ORTH: "mind.", NORM: "mindestens"},
|
||||
{ORTH: "mtl.", NORM: "monatlich"},
|
||||
{ORTH: "n.Chr."},
|
||||
{ORTH: "orig.", NORM: "original"},
|
||||
{ORTH: "röm.", NORM: "römisch"},
|
||||
{ORTH: "s.o."},
|
||||
{ORTH: "sog."},
|
||||
{ORTH: "stellv."},
|
||||
{ORTH: "tägl.", NORM: "täglich"},
|
||||
{ORTH: "u.U."},
|
||||
{ORTH: "u.s.w."},
|
||||
{ORTH: "u.v.m."},
|
||||
{ORTH: "usf."},
|
||||
{ORTH: "usw."},
|
||||
{ORTH: "uvm."},
|
||||
{ORTH: "v.Chr."},
|
||||
{ORTH: "v.a."},
|
||||
{ORTH: "v.l.n.r."},
|
||||
{ORTH: "vgl.", NORM: "vergleiche"},
|
||||
{ORTH: "vllt.", NORM: "vielleicht"},
|
||||
{ORTH: "vlt.", NORM: "vielleicht"},
|
||||
{ORTH: "z.B."},
|
||||
{ORTH: "z.Bsp."},
|
||||
{ORTH: "z.T."},
|
||||
{ORTH: "z.Z."},
|
||||
{ORTH: "z.Zt."},
|
||||
{ORTH: "z.b."},
|
||||
{ORTH: "zzgl."},
|
||||
{ORTH: "österr.", NORM: "österreichisch"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@ -254,4 +232,4 @@ for orth in [
|
|||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from typing import Callable
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
|
@ -7,53 +7,44 @@ from .lex_attrs import LEX_ATTRS
|
|||
from .lemmatizer import GreekLemmatizer
|
||||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...lookups import load_lookups
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "el"
|
||||
stop_words = {"@language_data": "spacy.el.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.el.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.GreekLemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
@lemmatizers = "spacy.el.GreekLemmatizer"
|
||||
"""
|
||||
|
||||
|
||||
@registry.lemmatizers("spacy.GreekLemmatizer.v1")
|
||||
def create_greek_lemmatizer(data_paths: dict = {}) -> GreekLemmatizer:
|
||||
return GreekLemmatizer(data_paths=data_paths)
|
||||
@registry.lemmatizers("spacy.el.GreekLemmatizer")
|
||||
def create_lemmatizer() -> Callable[[Language], GreekLemmatizer]:
|
||||
tables = ["lemma_index", "lemma_exc", "lemma_rules"]
|
||||
|
||||
def lemmatizer_factory(nlp: Language) -> GreekLemmatizer:
|
||||
lookups = load_lookups(lang=nlp.lang, tables=tables)
|
||||
return GreekLemmatizer(lookups=lookups)
|
||||
|
||||
@registry.language_data("spacy.el.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.el.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
return lemmatizer_factory
|
||||
|
||||
|
||||
class GreekDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
config = Config().from_str(DEFAULT_CONFIG)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
|
||||
|
||||
class Greek(Language):
|
||||
lang = "el"
|
||||
Defaults = GreekDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Greek"]
|
||||
|
|
|
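The Greek module above replaces the data_paths-based lemmatizer entry point with a registered factory that receives the nlp object and loads its own lookup tables. A hedged sketch of the same registration pattern for a custom lemmatizer; the registry name and class are hypothetical, and load_lookups() needs the corresponding spacy-lookups-data tables for the language.

```python
from typing import Callable

from spacy.language import Language
from spacy.lookups import Lookups, load_lookups
from spacy.util import registry


class MyLemmatizer:
    # Minimal stand-in; the real GreekLemmatizer wraps rule/exception tables.
    def __init__(self, lookups: Lookups) -> None:
        self.lookups = lookups


@registry.lemmatizers("my.CustomLemmatizer")  # hypothetical registry name
def create_lemmatizer() -> Callable[[Language], MyLemmatizer]:
    tables = ["lemma_index", "lemma_exc", "lemma_rules"]

    def lemmatizer_factory(nlp: Language) -> MyLemmatizer:
        lookups = load_lookups(lang=nlp.lang, tables=tables)
        return MyLemmatizer(lookups=lookups)

    return lemmatizer_factory
```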
@ -1,21 +1,20 @@
|
|||
from typing import Union, Iterator
|
||||
|
||||
from ...symbols import NOUN, PROPN, PRON
|
||||
from ...errors import Errors
|
||||
from ...tokens import Doc, Span
|
||||
|
||||
|
||||
def noun_chunks(doclike):
|
||||
"""
|
||||
Detect base noun phrases. Works on both Doc and Span.
|
||||
"""
|
||||
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
|
||||
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
|
||||
# It follows the logic of the noun chunks finder of English language,
|
||||
# adjusted to some Greek language special characteristics.
|
||||
# obj tag corrects some DEP tagger mistakes.
|
||||
# Further improvement of the models will eliminate the need for this tag.
|
||||
labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"]
|
||||
doc = doclike.doc # Ensure works on both Doc and Span.
|
||||
|
||||
if not doc.is_parsed:
|
||||
raise ValueError(Errors.E029)
|
||||
|
||||
np_deps = [doc.vocab.strings.add(label) for label in labels]
|
||||
conj = doc.vocab.strings.add("conj")
|
||||
nmod = doc.vocab.strings.add("nmod")
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,129 +1,128 @@
|
|||
from ...symbols import ORTH, LEMMA, NORM
|
||||
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
_exc = {}
|
||||
|
||||
for token in ["Απ'", "ΑΠ'", "αφ'", "Αφ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "από", NORM: "από"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "από"}]
|
||||
|
||||
for token in ["Αλλ'", "αλλ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "αλλά", NORM: "αλλά"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "αλλά"}]
|
||||
|
||||
for token in ["παρ'", "Παρ'", "ΠΑΡ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "παρά", NORM: "παρά"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "παρά"}]
|
||||
|
||||
for token in ["καθ'", "Καθ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "κάθε", NORM: "κάθε"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "κάθε"}]
|
||||
|
||||
for token in ["κατ'", "Κατ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "κατά", NORM: "κατά"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "κατά"}]
|
||||
|
||||
for token in ["'ΣΟΥΝ", "'ναι", "'ταν", "'τανε", "'μαστε", "'μουνα", "'μουν"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "είμαι", NORM: "είμαι"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "είμαι"}]
|
||||
|
||||
for token in ["Επ'", "επ'", "εφ'", "Εφ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "επί", NORM: "επί"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "επί"}]
|
||||
|
||||
for token in ["Δι'", "δι'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "δια", NORM: "δια"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "δια"}]
|
||||
|
||||
for token in ["'χουν", "'χουμε", "'χαμε", "'χα", "'χε", "'χεις", "'χει"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "έχω", NORM: "έχω"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "έχω"}]
|
||||
|
||||
for token in ["υπ'", "Υπ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "υπό", NORM: "υπό"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "υπό"}]
|
||||
|
||||
for token in ["Μετ'", "ΜΕΤ'", "'μετ"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "μετά", NORM: "μετά"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "μετά"}]
|
||||
|
||||
for token in ["Μ'", "μ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "με", NORM: "με"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "με"}]
|
||||
|
||||
for token in ["Γι'", "ΓΙ'", "γι'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "για", NORM: "για"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "για"}]
|
||||
|
||||
for token in ["Σ'", "σ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "σε", NORM: "σε"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "σε"}]
|
||||
|
||||
for token in ["Θ'", "θ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "θα", NORM: "θα"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "θα"}]
|
||||
|
||||
for token in ["Ν'", "ν'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "να", NORM: "να"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "να"}]
|
||||
|
||||
for token in ["Τ'", "τ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "να", NORM: "να"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "να"}]
|
||||
|
||||
for token in ["'γω", "'σένα", "'μεις"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "εγώ", NORM: "εγώ"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "εγώ"}]
|
||||
|
||||
for token in ["Τ'", "τ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "το", NORM: "το"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "το"}]
|
||||
|
||||
for token in ["Φέρ'", "Φερ'", "φέρ'", "φερ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "φέρνω", NORM: "φέρνω"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "φέρνω"}]
|
||||
|
||||
for token in ["'ρθούνε", "'ρθουν", "'ρθει", "'ρθεί", "'ρθε", "'ρχεται"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "έρχομαι", NORM: "έρχομαι"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "έρχομαι"}]
|
||||
|
||||
for token in ["'πανε", "'λεγε", "'λεγαν", "'πε", "'λεγα"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "λέγω", NORM: "λέγω"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "λέγω"}]
|
||||
|
||||
for token in ["Πάρ'", "πάρ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "παίρνω", NORM: "παίρνω"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "παίρνω"}]
|
||||
|
||||
for token in ["μέσ'", "Μέσ'", "μεσ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "μέσα", NORM: "μέσα"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "μέσα"}]
|
||||
|
||||
for token in ["Δέσ'", "Δεσ'", "δεσ'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "δένω", NORM: "δένω"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "δένω"}]
|
||||
|
||||
for token in ["'κανε", "Κάν'"]:
|
||||
_exc[token] = [{ORTH: token, LEMMA: "κάνω", NORM: "κάνω"}]
|
||||
_exc[token] = [{ORTH: token, NORM: "κάνω"}]
|
||||
|
||||
_other_exc = {
|
||||
"κι": [{ORTH: "κι", LEMMA: "και", NORM: "και"}],
|
||||
"Παίξ'": [{ORTH: "Παίξ'", LEMMA: "παίζω", NORM: "παίζω"}],
|
||||
"Αντ'": [{ORTH: "Αντ'", LEMMA: "αντί", NORM: "αντί"}],
|
||||
"ολ'": [{ORTH: "ολ'", LEMMA: "όλος", NORM: "όλος"}],
|
||||
"ύστερ'": [{ORTH: "ύστερ'", LEMMA: "ύστερα", NORM: "ύστερα"}],
|
||||
"'πρεπε": [{ORTH: "'πρεπε", LEMMA: "πρέπει", NORM: "πρέπει"}],
|
||||
"Δύσκολ'": [{ORTH: "Δύσκολ'", LEMMA: "δύσκολος", NORM: "δύσκολος"}],
|
||||
"'θελα": [{ORTH: "'θελα", LEMMA: "θέλω", NORM: "θέλω"}],
|
||||
"'γραφα": [{ORTH: "'γραφα", LEMMA: "γράφω", NORM: "γράφω"}],
|
||||
"'παιρνα": [{ORTH: "'παιρνα", LEMMA: "παίρνω", NORM: "παίρνω"}],
|
||||
"'δειξε": [{ORTH: "'δειξε", LEMMA: "δείχνω", NORM: "δείχνω"}],
|
||||
"όμουρφ'": [{ORTH: "όμουρφ'", LEMMA: "όμορφος", NORM: "όμορφος"}],
|
||||
"κ'τσή": [{ORTH: "κ'τσή", LEMMA: "κουτσός", NORM: "κουτσός"}],
|
||||
"μηδ'": [{ORTH: "μηδ'", LEMMA: "μήδε", NORM: "μήδε"}],
|
||||
"'ξομολογήθηκε": [
|
||||
{ORTH: "'ξομολογήθηκε", LEMMA: "εξομολογούμαι", NORM: "εξομολογούμαι"}
|
||||
],
|
||||
"'μας": [{ORTH: "'μας", LEMMA: "εμάς", NORM: "εμάς"}],
|
||||
"'ξερες": [{ORTH: "'ξερες", LEMMA: "ξέρω", NORM: "ξέρω"}],
|
||||
"έφθασ'": [{ORTH: "έφθασ'", LEMMA: "φθάνω", NORM: "φθάνω"}],
|
||||
"εξ'": [{ORTH: "εξ'", LEMMA: "εκ", NORM: "εκ"}],
|
||||
"δώσ'": [{ORTH: "δώσ'", LEMMA: "δίνω", NORM: "δίνω"}],
|
||||
"τίποτ'": [{ORTH: "τίποτ'", LEMMA: "τίποτα", NORM: "τίποτα"}],
|
||||
"Λήξ'": [{ORTH: "Λήξ'", LEMMA: "λήγω", NORM: "λήγω"}],
|
||||
"άσ'": [{ORTH: "άσ'", LEMMA: "αφήνω", NORM: "αφήνω"}],
|
||||
"Στ'": [{ORTH: "Στ'", LEMMA: "στο", NORM: "στο"}],
|
||||
"Δωσ'": [{ORTH: "Δωσ'", LEMMA: "δίνω", NORM: "δίνω"}],
|
||||
"Βάψ'": [{ORTH: "Βάψ'", LEMMA: "βάφω", NORM: "βάφω"}],
|
||||
"Αλλ'": [{ORTH: "Αλλ'", LEMMA: "αλλά", NORM: "αλλά"}],
|
||||
"Αμ'": [{ORTH: "Αμ'", LEMMA: "άμα", NORM: "άμα"}],
|
||||
"Αγόρασ'": [{ORTH: "Αγόρασ'", LEMMA: "αγοράζω", NORM: "αγοράζω"}],
|
||||
"'φύγε": [{ORTH: "'φύγε", LEMMA: "φεύγω", NORM: "φεύγω"}],
|
||||
"'φερε": [{ORTH: "'φερε", LEMMA: "φέρνω", NORM: "φέρνω"}],
|
||||
"'φαγε": [{ORTH: "'φαγε", LEMMA: "τρώω", NORM: "τρώω"}],
|
||||
"'σπαγαν": [{ORTH: "'σπαγαν", LEMMA: "σπάω", NORM: "σπάω"}],
|
||||
"'σκασε": [{ORTH: "'σκασε", LEMMA: "σκάω", NORM: "σκάω"}],
|
||||
"'σβηνε": [{ORTH: "'σβηνε", LEMMA: "σβήνω", NORM: "σβήνω"}],
|
||||
"'ριξε": [{ORTH: "'ριξε", LEMMA: "ρίχνω", NORM: "ρίχνω"}],
|
||||
"'κλεβε": [{ORTH: "'κλεβε", LEMMA: "κλέβω", NORM: "κλέβω"}],
|
||||
"'κει": [{ORTH: "'κει", LEMMA: "εκεί", NORM: "εκεί"}],
|
||||
"'βλεπε": [{ORTH: "'βλεπε", LEMMA: "βλέπω", NORM: "βλέπω"}],
|
||||
"'βγαινε": [{ORTH: "'βγαινε", LEMMA: "βγαίνω", NORM: "βγαίνω"}],
|
||||
"κι": [{ORTH: "κι", NORM: "και"}],
|
||||
"Παίξ'": [{ORTH: "Παίξ'", NORM: "παίζω"}],
|
||||
"Αντ'": [{ORTH: "Αντ'", NORM: "αντί"}],
|
||||
"ολ'": [{ORTH: "ολ'", NORM: "όλος"}],
|
||||
"ύστερ'": [{ORTH: "ύστερ'", NORM: "ύστερα"}],
|
||||
"'πρεπε": [{ORTH: "'πρεπε", NORM: "πρέπει"}],
|
||||
"Δύσκολ'": [{ORTH: "Δύσκολ'", NORM: "δύσκολος"}],
|
||||
"'θελα": [{ORTH: "'θελα", NORM: "θέλω"}],
|
||||
"'γραφα": [{ORTH: "'γραφα", NORM: "γράφω"}],
|
||||
"'παιρνα": [{ORTH: "'παιρνα", NORM: "παίρνω"}],
|
||||
"'δειξε": [{ORTH: "'δειξε", NORM: "δείχνω"}],
|
||||
"όμουρφ'": [{ORTH: "όμουρφ'", NORM: "όμορφος"}],
|
||||
"κ'τσή": [{ORTH: "κ'τσή", NORM: "κουτσός"}],
|
||||
"μηδ'": [{ORTH: "μηδ'", NORM: "μήδε"}],
|
||||
"'ξομολογήθηκε": [{ORTH: "'ξομολογήθηκε", NORM: "εξομολογούμαι"}],
|
||||
"'μας": [{ORTH: "'μας", NORM: "εμάς"}],
|
||||
"'ξερες": [{ORTH: "'ξερες", NORM: "ξέρω"}],
|
||||
"έφθασ'": [{ORTH: "έφθασ'", NORM: "φθάνω"}],
|
||||
"εξ'": [{ORTH: "εξ'", NORM: "εκ"}],
|
||||
"δώσ'": [{ORTH: "δώσ'", NORM: "δίνω"}],
|
||||
"τίποτ'": [{ORTH: "τίποτ'", NORM: "τίποτα"}],
|
||||
"Λήξ'": [{ORTH: "Λήξ'", NORM: "λήγω"}],
|
||||
"άσ'": [{ORTH: "άσ'", NORM: "αφήνω"}],
|
||||
"Στ'": [{ORTH: "Στ'", NORM: "στο"}],
|
||||
"Δωσ'": [{ORTH: "Δωσ'", NORM: "δίνω"}],
|
||||
"Βάψ'": [{ORTH: "Βάψ'", NORM: "βάφω"}],
|
||||
"Αλλ'": [{ORTH: "Αλλ'", NORM: "αλλά"}],
|
||||
"Αμ'": [{ORTH: "Αμ'", NORM: "άμα"}],
|
||||
"Αγόρασ'": [{ORTH: "Αγόρασ'", NORM: "αγοράζω"}],
|
||||
"'φύγε": [{ORTH: "'φύγε", NORM: "φεύγω"}],
|
||||
"'φερε": [{ORTH: "'φερε", NORM: "φέρνω"}],
|
||||
"'φαγε": [{ORTH: "'φαγε", NORM: "τρώω"}],
|
||||
"'σπαγαν": [{ORTH: "'σπαγαν", NORM: "σπάω"}],
|
||||
"'σκασε": [{ORTH: "'σκασε", NORM: "σκάω"}],
|
||||
"'σβηνε": [{ORTH: "'σβηνε", NORM: "σβήνω"}],
|
||||
"'ριξε": [{ORTH: "'ριξε", NORM: "ρίχνω"}],
|
||||
"'κλεβε": [{ORTH: "'κλεβε", NORM: "κλέβω"}],
|
||||
"'κει": [{ORTH: "'κει", NORM: "εκεί"}],
|
||||
"'βλεπε": [{ORTH: "'βλεπε", NORM: "βλέπω"}],
|
||||
"'βγαινε": [{ORTH: "'βγαινε", NORM: "βγαίνω"}],
|
||||
}
|
||||
|
||||
_exc.update(_other_exc)
|
||||
|
@@ -133,35 +132,35 @@ for h in range(1, 12 + 1):
|
|||
for period in ["π.μ.", "πμ"]:
|
||||
_exc[f"{h}{period}"] = [
|
||||
{ORTH: f"{h}"},
|
||||
{ORTH: period, LEMMA: "π.μ.", NORM: "π.μ."},
|
||||
{ORTH: period, NORM: "π.μ."},
|
||||
]
|
||||
|
||||
for period in ["μ.μ.", "μμ"]:
|
||||
_exc[f"{h}{period}"] = [
|
||||
{ORTH: f"{h}"},
|
||||
{ORTH: period, LEMMA: "μ.μ.", NORM: "μ.μ."},
|
||||
{ORTH: period, NORM: "μ.μ."},
|
||||
]
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "ΑΓΡ.", LEMMA: "Αγροτικός", NORM: "Αγροτικός"},
|
||||
{ORTH: "Αγ. Γρ.", LEMMA: "Αγία Γραφή", NORM: "Αγία Γραφή"},
|
||||
{ORTH: "Αθ.", LEMMA: "Αθανάσιος", NORM: "Αθανάσιος"},
|
||||
{ORTH: "Αλεξ.", LEMMA: "Αλέξανδρος", NORM: "Αλέξανδρος"},
|
||||
{ORTH: "Απρ.", LEMMA: "Απρίλιος", NORM: "Απρίλιος"},
|
||||
{ORTH: "Αύγ.", LEMMA: "Αύγουστος", NORM: "Αύγουστος"},
|
||||
{ORTH: "Δεκ.", LEMMA: "Δεκέμβριος", NORM: "Δεκέμβριος"},
|
||||
{ORTH: "Δημ.", LEMMA: "Δήμος", NORM: "Δήμος"},
|
||||
{ORTH: "Ιαν.", LEMMA: "Ιανουάριος", NORM: "Ιανουάριος"},
|
||||
{ORTH: "Ιούλ.", LEMMA: "Ιούλιος", NORM: "Ιούλιος"},
|
||||
{ORTH: "Ιούν.", LEMMA: "Ιούνιος", NORM: "Ιούνιος"},
|
||||
{ORTH: "Ιωαν.", LEMMA: "Ιωάννης", NORM: "Ιωάννης"},
|
||||
{ORTH: "Μ. Ασία", LEMMA: "Μικρά Ασία", NORM: "Μικρά Ασία"},
|
||||
{ORTH: "Μάρτ.", LEMMA: "Μάρτιος", NORM: "Μάρτιος"},
|
||||
{ORTH: "Μάρτ'", LEMMA: "Μάρτιος", NORM: "Μάρτιος"},
|
||||
{ORTH: "Νοέμβρ.", LEMMA: "Νοέμβριος", NORM: "Νοέμβριος"},
|
||||
{ORTH: "Οκτ.", LEMMA: "Οκτώβριος", NORM: "Οκτώβριος"},
|
||||
{ORTH: "Σεπτ.", LEMMA: "Σεπτέμβριος", NORM: "Σεπτέμβριος"},
|
||||
{ORTH: "Φεβρ.", LEMMA: "Φεβρουάριος", NORM: "Φεβρουάριος"},
|
||||
{ORTH: "ΑΓΡ.", NORM: "Αγροτικός"},
|
||||
{ORTH: "Αγ. Γρ.", NORM: "Αγία Γραφή"},
|
||||
{ORTH: "Αθ.", NORM: "Αθανάσιος"},
|
||||
{ORTH: "Αλεξ.", NORM: "Αλέξανδρος"},
|
||||
{ORTH: "Απρ.", NORM: "Απρίλιος"},
|
||||
{ORTH: "Αύγ.", NORM: "Αύγουστος"},
|
||||
{ORTH: "Δεκ.", NORM: "Δεκέμβριος"},
|
||||
{ORTH: "Δημ.", NORM: "Δήμος"},
|
||||
{ORTH: "Ιαν.", NORM: "Ιανουάριος"},
|
||||
{ORTH: "Ιούλ.", NORM: "Ιούλιος"},
|
||||
{ORTH: "Ιούν.", NORM: "Ιούνιος"},
|
||||
{ORTH: "Ιωαν.", NORM: "Ιωάννης"},
|
||||
{ORTH: "Μ. Ασία", NORM: "Μικρά Ασία"},
|
||||
{ORTH: "Μάρτ.", NORM: "Μάρτιος"},
|
||||
{ORTH: "Μάρτ'", NORM: "Μάρτιος"},
|
||||
{ORTH: "Νοέμβρ.", NORM: "Νοέμβριος"},
|
||||
{ORTH: "Οκτ.", NORM: "Οκτώβριος"},
|
||||
{ORTH: "Σεπτ.", NORM: "Σεπτέμβριος"},
|
||||
{ORTH: "Φεβρ.", NORM: "Φεβρουάριος"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@@ -392,4 +391,4 @@ for orth in [
|
|||
]:
|
||||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@@ -1,4 +1,4 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from typing import Callable
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
|
@@ -7,60 +7,43 @@ from .lex_attrs import LEX_ATTRS
|
|||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
from .lemmatizer import is_base_form
|
||||
from .punctuation import TOKENIZER_INFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizer import Lemmatizer
|
||||
from ...util import update_exc, registry
|
||||
from ...lookups import load_lookups
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "en"
|
||||
stop_words = {"@language_data": "spacy.en.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.en.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.EnglishLemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
@lemmatizers = "spacy.en.EnglishLemmatizer"
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.en.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
@registry.lemmatizers("spacy.en.EnglishLemmatizer")
|
||||
def create_lemmatizer() -> Callable[[Language], Lemmatizer]:
|
||||
tables = ["lemma_lookup", "lemma_rules", "lemma_exc", "lemma_index"]
|
||||
|
||||
def lemmatizer_factory(nlp: Language) -> Lemmatizer:
|
||||
lookups = load_lookups(lang=nlp.lang, tables=tables)
|
||||
return Lemmatizer(lookups=lookups, is_base_form=is_base_form)
|
||||
|
||||
@registry.language_data("spacy.en.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
@registry.lemmatizers("spacy.EnglishLemmatizer.v1")
|
||||
def create_lemmatizer(data_paths: dict = {}) -> "Lemmatizer":
|
||||
return Lemmatizer(data_paths=data_paths, is_base_form=is_base_form)
|
||||
return lemmatizer_factory
|
||||
|
||||
|
||||
class EnglishDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
config = Config().from_str(DEFAULT_CONFIG)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
single_orth_variants = [
|
||||
{"tags": ["NFP"], "variants": ["…", "..."]},
|
||||
{"tags": [":"], "variants": ["-", "—", "–", "--", "---", "——"]},
|
||||
]
|
||||
paired_orth_variants = [
|
||||
{"tags": ["``", "''"], "variants": [("'", "'"), ("‘", "’")]},
|
||||
{"tags": ["``", "''"], "variants": [('"', '"'), ("“", "”")]},
|
||||
]
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class English(Language):
|
||||
lang = "en"
|
||||
Defaults = EnglishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["English"]
|
||||
|
|
|
@@ -1,5 +1,5 @@
|
|||
from ..char_classes import LIST_ELLIPSES, LIST_ICONS, HYPHENS
|
||||
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT
|
||||
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
|
||||
|
||||
_infixes = (
|
||||
LIST_ELLIPSES
|
||||
|
|
|
@@ -1,27 +1,18 @@
|
|||
from typing import Union, Iterator
|
||||
|
||||
from ...symbols import NOUN, PROPN, PRON
|
||||
from ...errors import Errors
|
||||
from ...tokens import Doc, Span
|
||||
|
||||
|
||||
def noun_chunks(doclike):
|
||||
"""
|
||||
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
|
||||
"""
|
||||
labels = [
|
||||
"nsubj",
|
||||
"dobj",
|
||||
"nsubjpass",
|
||||
"pcomp",
|
||||
"pobj",
|
||||
"dative",
|
||||
"appos",
|
||||
"attr",
|
||||
"ROOT",
|
||||
]
|
||||
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
|
||||
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
|
||||
# fmt: off
|
||||
labels = ["nsubj", "dobj", "nsubjpass", "pcomp", "pobj", "dative", "appos", "attr", "ROOT"]
|
||||
# fmt: on
|
||||
doc = doclike.doc # Ensure works on both Doc and Span.
|
||||
|
||||
if not doc.is_parsed:
|
||||
raise ValueError(Errors.E029)
|
||||
|
||||
np_deps = [doc.vocab.strings.add(label) for label in labels]
|
||||
conj = doc.vocab.strings.add("conj")
|
||||
np_label = doc.vocab.strings.add("NP")
|
||||
|
|
|
@@ -1,4 +1,6 @@
|
|||
from ...symbols import ORTH, LEMMA, TAG, NORM, PRON_LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
@@ -26,110 +28,110 @@ _exclude = [
|
|||
for pron in ["i"]:
|
||||
for orth in [pron, pron.title()]:
|
||||
_exc[orth + "'m"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "'m", LEMMA: "be", NORM: "am", TAG: "VBP"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'m", NORM: "am"},
|
||||
]
|
||||
|
||||
_exc[orth + "m"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "m", LEMMA: "be", TAG: "VBP", "tenspect": 1, "number": 1},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "m", "tenspect": 1, "number": 1},
|
||||
]
|
||||
|
||||
_exc[orth + "'ma"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "'m", LEMMA: "be", NORM: "am"},
|
||||
{ORTH: "a", LEMMA: "going to", NORM: "gonna"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'m", NORM: "am"},
|
||||
{ORTH: "a", NORM: "gonna"},
|
||||
]
|
||||
|
||||
_exc[orth + "ma"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "m", LEMMA: "be", NORM: "am"},
|
||||
{ORTH: "a", LEMMA: "going to", NORM: "gonna"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "m", NORM: "am"},
|
||||
{ORTH: "a", NORM: "gonna"},
|
||||
]
|
||||
|
||||
|
||||
for pron in ["i", "you", "he", "she", "it", "we", "they"]:
|
||||
for orth in [pron, pron.title()]:
|
||||
_exc[orth + "'ll"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'ll", NORM: "will"},
|
||||
]
|
||||
|
||||
_exc[orth + "ll"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "ll", NORM: "will"},
|
||||
]
|
||||
|
||||
_exc[orth + "'ll've"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'ll", NORM: "will"},
|
||||
{ORTH: "'ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "llve"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "ll", NORM: "will"},
|
||||
{ORTH: "ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "'d"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'d", NORM: "'d"},
|
||||
]
|
||||
|
||||
_exc[orth + "d"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "d", NORM: "'d"},
|
||||
]
|
||||
|
||||
_exc[orth + "'d've"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "'d", LEMMA: "would", NORM: "would", TAG: "MD"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'d", NORM: "would"},
|
||||
{ORTH: "'ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "dve"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "d", LEMMA: "would", NORM: "would", TAG: "MD"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "d", NORM: "would"},
|
||||
{ORTH: "ve", NORM: "have"},
|
||||
]
|
||||
|
||||
|
||||
for pron in ["i", "you", "we", "they"]:
|
||||
for orth in [pron, pron.title()]:
|
||||
_exc[orth + "'ve"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "ve"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "ve", NORM: "have"},
|
||||
]
|
||||
|
||||
|
||||
for pron in ["you", "we", "they"]:
|
||||
for orth in [pron, pron.title()]:
|
||||
_exc[orth + "'re"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "'re", LEMMA: "be", NORM: "are"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'re", NORM: "are"},
|
||||
]
|
||||
|
||||
_exc[orth + "re"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: "re", LEMMA: "be", NORM: "are", TAG: "VBZ"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "re", NORM: "are"},
|
||||
]
|
||||
|
||||
|
||||
for pron in ["he", "she", "it"]:
|
||||
for orth in [pron, pron.title()]:
|
||||
_exc[orth + "'s"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "'s", NORM: "'s"},
|
||||
]
|
||||
|
||||
_exc[orth + "s"] = [
|
||||
{ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
|
||||
{ORTH: orth, NORM: pron},
|
||||
{ORTH: "s"},
|
||||
]
|
||||
|
||||
|
@@ -151,145 +153,145 @@ for word in [
|
|||
]:
|
||||
for orth in [word, word.title()]:
|
||||
_exc[orth + "'s"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "'s", NORM: "'s"},
|
||||
]
|
||||
|
||||
_exc[orth + "s"] = [{ORTH: orth, LEMMA: word, NORM: word}, {ORTH: "s"}]
|
||||
_exc[orth + "s"] = [{ORTH: orth, NORM: word}, {ORTH: "s"}]
|
||||
|
||||
_exc[orth + "'ll"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "'ll", NORM: "will"},
|
||||
]
|
||||
|
||||
_exc[orth + "ll"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "ll", NORM: "will"},
|
||||
]
|
||||
|
||||
_exc[orth + "'ll've"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "'ll", NORM: "will"},
|
||||
{ORTH: "'ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "llve"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "ll", NORM: "will"},
|
||||
{ORTH: "ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "'re"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "'re", LEMMA: "be", NORM: "are"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "'re", NORM: "are"},
|
||||
]
|
||||
|
||||
_exc[orth + "re"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "re", LEMMA: "be", NORM: "are"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "re", NORM: "are"},
|
||||
]
|
||||
|
||||
_exc[orth + "'ve"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "'ve", LEMMA: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "'ve"},
|
||||
]
|
||||
|
||||
_exc[orth + "ve"] = [
|
||||
{ORTH: orth, LEMMA: word},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth},
|
||||
{ORTH: "ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "'d"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "'d", NORM: "'d"},
|
||||
]
|
||||
|
||||
_exc[orth + "d"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "d", NORM: "'d"},
|
||||
]
|
||||
|
||||
_exc[orth + "'d've"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "'d", LEMMA: "would", NORM: "would", TAG: "MD"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "'d", NORM: "would"},
|
||||
{ORTH: "'ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[orth + "dve"] = [
|
||||
{ORTH: orth, LEMMA: word, NORM: word},
|
||||
{ORTH: "d", LEMMA: "would", NORM: "would", TAG: "MD"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: orth, NORM: word},
|
||||
{ORTH: "d", NORM: "would"},
|
||||
{ORTH: "ve", NORM: "have"},
|
||||
]
|
||||
|
||||
|
||||
# Verbs
|
||||
|
||||
for verb_data in [
|
||||
{ORTH: "ca", LEMMA: "can", NORM: "can", TAG: "MD"},
|
||||
{ORTH: "could", NORM: "could", TAG: "MD"},
|
||||
{ORTH: "do", LEMMA: "do", NORM: "do"},
|
||||
{ORTH: "does", LEMMA: "do", NORM: "does"},
|
||||
{ORTH: "did", LEMMA: "do", NORM: "do", TAG: "VBD"},
|
||||
{ORTH: "had", LEMMA: "have", NORM: "have", TAG: "VBD"},
|
||||
{ORTH: "may", NORM: "may", TAG: "MD"},
|
||||
{ORTH: "might", NORM: "might", TAG: "MD"},
|
||||
{ORTH: "must", NORM: "must", TAG: "MD"},
|
||||
{ORTH: "ca", NORM: "can"},
|
||||
{ORTH: "could", NORM: "could"},
|
||||
{ORTH: "do", NORM: "do"},
|
||||
{ORTH: "does", NORM: "does"},
|
||||
{ORTH: "did", NORM: "do"},
|
||||
{ORTH: "had", NORM: "have"},
|
||||
{ORTH: "may", NORM: "may"},
|
||||
{ORTH: "might", NORM: "might"},
|
||||
{ORTH: "must", NORM: "must"},
|
||||
{ORTH: "need", NORM: "need"},
|
||||
{ORTH: "ought", NORM: "ought", TAG: "MD"},
|
||||
{ORTH: "sha", LEMMA: "shall", NORM: "shall", TAG: "MD"},
|
||||
{ORTH: "should", NORM: "should", TAG: "MD"},
|
||||
{ORTH: "wo", LEMMA: "will", NORM: "will", TAG: "MD"},
|
||||
{ORTH: "would", NORM: "would", TAG: "MD"},
|
||||
{ORTH: "ought", NORM: "ought"},
|
||||
{ORTH: "sha", NORM: "shall"},
|
||||
{ORTH: "should", NORM: "should"},
|
||||
{ORTH: "wo", NORM: "will"},
|
||||
{ORTH: "would", NORM: "would"},
|
||||
]:
|
||||
verb_data_tc = dict(verb_data)
|
||||
verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
|
||||
for data in [verb_data, verb_data_tc]:
|
||||
_exc[data[ORTH] + "n't"] = [
|
||||
dict(data),
|
||||
{ORTH: "n't", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "n't", NORM: "not"},
|
||||
]
|
||||
|
||||
_exc[data[ORTH] + "nt"] = [
|
||||
dict(data),
|
||||
{ORTH: "nt", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "nt", NORM: "not"},
|
||||
]
|
||||
|
||||
_exc[data[ORTH] + "n't've"] = [
|
||||
dict(data),
|
||||
{ORTH: "n't", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: "n't", NORM: "not"},
|
||||
{ORTH: "'ve", NORM: "have"},
|
||||
]
|
||||
|
||||
_exc[data[ORTH] + "ntve"] = [
|
||||
dict(data),
|
||||
{ORTH: "nt", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
{ORTH: "nt", NORM: "not"},
|
||||
{ORTH: "ve", NORM: "have"},
|
||||
]
|
||||
|
||||
|
||||
for verb_data in [
|
||||
{ORTH: "could", NORM: "could", TAG: "MD"},
|
||||
{ORTH: "might", NORM: "might", TAG: "MD"},
|
||||
{ORTH: "must", NORM: "must", TAG: "MD"},
|
||||
{ORTH: "should", NORM: "should", TAG: "MD"},
|
||||
{ORTH: "would", NORM: "would", TAG: "MD"},
|
||||
{ORTH: "could", NORM: "could"},
|
||||
{ORTH: "might", NORM: "might"},
|
||||
{ORTH: "must", NORM: "must"},
|
||||
{ORTH: "should", NORM: "should"},
|
||||
{ORTH: "would", NORM: "would"},
|
||||
]:
|
||||
verb_data_tc = dict(verb_data)
|
||||
verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
|
||||
for data in [verb_data, verb_data_tc]:
|
||||
_exc[data[ORTH] + "'ve"] = [dict(data), {ORTH: "'ve", LEMMA: "have", TAG: "VB"}]
|
||||
_exc[data[ORTH] + "'ve"] = [dict(data), {ORTH: "'ve"}]
|
||||
|
||||
_exc[data[ORTH] + "ve"] = [dict(data), {ORTH: "ve", LEMMA: "have", TAG: "VB"}]
|
||||
_exc[data[ORTH] + "ve"] = [dict(data), {ORTH: "ve"}]
|
||||
|
||||
|
||||
for verb_data in [
|
||||
{ORTH: "ai", LEMMA: "be", TAG: "VBP", "number": 2},
|
||||
{ORTH: "are", LEMMA: "be", NORM: "are", TAG: "VBP", "number": 2},
|
||||
{ORTH: "is", LEMMA: "be", NORM: "is", TAG: "VBZ"},
|
||||
{ORTH: "was", LEMMA: "be", NORM: "was"},
|
||||
{ORTH: "were", LEMMA: "be", NORM: "were"},
|
||||
{ORTH: "ai", "number": 2},
|
||||
{ORTH: "are", NORM: "are", "number": 2},
|
||||
{ORTH: "is", NORM: "is"},
|
||||
{ORTH: "was", NORM: "was"},
|
||||
{ORTH: "were", NORM: "were"},
|
||||
{ORTH: "have", NORM: "have"},
|
||||
{ORTH: "has", LEMMA: "have", NORM: "has"},
|
||||
{ORTH: "has", NORM: "has"},
|
||||
{ORTH: "dare", NORM: "dare"},
|
||||
]:
|
||||
verb_data_tc = dict(verb_data)
|
||||
|
@@ -297,24 +299,24 @@ for verb_data in [
|
|||
for data in [verb_data, verb_data_tc]:
|
||||
_exc[data[ORTH] + "n't"] = [
|
||||
dict(data),
|
||||
{ORTH: "n't", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "n't", NORM: "not"},
|
||||
]
|
||||
|
||||
_exc[data[ORTH] + "nt"] = [
|
||||
dict(data),
|
||||
{ORTH: "nt", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "nt", NORM: "not"},
|
||||
]
|
||||
|
||||
|
||||
# Other contractions with trailing apostrophe
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "doin", LEMMA: "do", NORM: "doing"},
|
||||
{ORTH: "goin", LEMMA: "go", NORM: "going"},
|
||||
{ORTH: "nothin", LEMMA: "nothing", NORM: "nothing"},
|
||||
{ORTH: "nuthin", LEMMA: "nothing", NORM: "nothing"},
|
||||
{ORTH: "ol", LEMMA: "old", NORM: "old"},
|
||||
{ORTH: "somethin", LEMMA: "something", NORM: "something"},
|
||||
{ORTH: "doin", NORM: "doing"},
|
||||
{ORTH: "goin", NORM: "going"},
|
||||
{ORTH: "nothin", NORM: "nothing"},
|
||||
{ORTH: "nuthin", NORM: "nothing"},
|
||||
{ORTH: "ol", NORM: "old"},
|
||||
{ORTH: "somethin", NORM: "something"},
|
||||
]:
|
||||
exc_data_tc = dict(exc_data)
|
||||
exc_data_tc[ORTH] = exc_data_tc[ORTH].title()
|
||||
|
@@ -329,9 +331,9 @@ for exc_data in [
|
|||
|
||||
for exc_data in [
|
||||
{ORTH: "cause", NORM: "because"},
|
||||
{ORTH: "em", LEMMA: PRON_LEMMA, NORM: "them"},
|
||||
{ORTH: "ll", LEMMA: "will", NORM: "will"},
|
||||
{ORTH: "nuff", LEMMA: "enough", NORM: "enough"},
|
||||
{ORTH: "em", NORM: "them"},
|
||||
{ORTH: "ll", NORM: "will"},
|
||||
{ORTH: "nuff", NORM: "enough"},
|
||||
]:
|
||||
exc_data_apos = dict(exc_data)
|
||||
exc_data_apos[ORTH] = "'" + exc_data_apos[ORTH]
|
||||
|
@@ -345,166 +347,131 @@ for h in range(1, 12 + 1):
|
|||
for period in ["a.m.", "am"]:
|
||||
_exc[f"{h}{period}"] = [
|
||||
{ORTH: f"{h}"},
|
||||
{ORTH: period, LEMMA: "a.m.", NORM: "a.m."},
|
||||
{ORTH: period, NORM: "a.m."},
|
||||
]
|
||||
for period in ["p.m.", "pm"]:
|
||||
_exc[f"{h}{period}"] = [
|
||||
{ORTH: f"{h}"},
|
||||
{ORTH: period, LEMMA: "p.m.", NORM: "p.m."},
|
||||
{ORTH: period, NORM: "p.m."},
|
||||
]
|
||||
|
||||
|
||||
# Rest
|
||||
|
||||
_other_exc = {
|
||||
"y'all": [{ORTH: "y'", LEMMA: PRON_LEMMA, NORM: "you"}, {ORTH: "all"}],
|
||||
"yall": [{ORTH: "y", LEMMA: PRON_LEMMA, NORM: "you"}, {ORTH: "all"}],
|
||||
"how'd'y": [
|
||||
{ORTH: "how", LEMMA: "how"},
|
||||
{ORTH: "'d", LEMMA: "do"},
|
||||
{ORTH: "'y", LEMMA: PRON_LEMMA, NORM: "you"},
|
||||
],
|
||||
"How'd'y": [
|
||||
{ORTH: "How", LEMMA: "how", NORM: "how"},
|
||||
{ORTH: "'d", LEMMA: "do"},
|
||||
{ORTH: "'y", LEMMA: PRON_LEMMA, NORM: "you"},
|
||||
],
|
||||
"not've": [
|
||||
{ORTH: "not", LEMMA: "not", TAG: "RB"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
],
|
||||
"notve": [
|
||||
{ORTH: "not", LEMMA: "not", TAG: "RB"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
],
|
||||
"Not've": [
|
||||
{ORTH: "Not", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
],
|
||||
"Notve": [
|
||||
{ORTH: "Not", LEMMA: "not", NORM: "not", TAG: "RB"},
|
||||
{ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
|
||||
],
|
||||
"cannot": [
|
||||
{ORTH: "can", LEMMA: "can", TAG: "MD"},
|
||||
{ORTH: "not", LEMMA: "not", TAG: "RB"},
|
||||
],
|
||||
"Cannot": [
|
||||
{ORTH: "Can", LEMMA: "can", NORM: "can", TAG: "MD"},
|
||||
{ORTH: "not", LEMMA: "not", TAG: "RB"},
|
||||
],
|
||||
"gonna": [
|
||||
{ORTH: "gon", LEMMA: "go", NORM: "going"},
|
||||
{ORTH: "na", LEMMA: "to", NORM: "to"},
|
||||
],
|
||||
"Gonna": [
|
||||
{ORTH: "Gon", LEMMA: "go", NORM: "going"},
|
||||
{ORTH: "na", LEMMA: "to", NORM: "to"},
|
||||
],
|
||||
"gotta": [{ORTH: "got"}, {ORTH: "ta", LEMMA: "to", NORM: "to"}],
|
||||
"Gotta": [{ORTH: "Got", NORM: "got"}, {ORTH: "ta", LEMMA: "to", NORM: "to"}],
|
||||
"let's": [{ORTH: "let"}, {ORTH: "'s", LEMMA: PRON_LEMMA, NORM: "us"}],
|
||||
"Let's": [
|
||||
{ORTH: "Let", LEMMA: "let", NORM: "let"},
|
||||
{ORTH: "'s", LEMMA: PRON_LEMMA, NORM: "us"},
|
||||
],
|
||||
"c'mon": [{ORTH: "c'm", NORM: "come", LEMMA: "come"}, {ORTH: "on"}],
|
||||
"C'mon": [{ORTH: "C'm", NORM: "come", LEMMA: "come"}, {ORTH: "on"}],
|
||||
"y'all": [{ORTH: "y'", NORM: "you"}, {ORTH: "all"}],
|
||||
"yall": [{ORTH: "y", NORM: "you"}, {ORTH: "all"}],
|
||||
"how'd'y": [{ORTH: "how"}, {ORTH: "'d"}, {ORTH: "'y", NORM: "you"}],
|
||||
"How'd'y": [{ORTH: "How", NORM: "how"}, {ORTH: "'d"}, {ORTH: "'y", NORM: "you"}],
|
||||
"not've": [{ORTH: "not"}, {ORTH: "'ve", NORM: "have"}],
|
||||
"notve": [{ORTH: "not"}, {ORTH: "ve", NORM: "have"}],
|
||||
"Not've": [{ORTH: "Not", NORM: "not"}, {ORTH: "'ve", NORM: "have"}],
|
||||
"Notve": [{ORTH: "Not", NORM: "not"}, {ORTH: "ve", NORM: "have"}],
|
||||
"cannot": [{ORTH: "can"}, {ORTH: "not"}],
|
||||
"Cannot": [{ORTH: "Can", NORM: "can"}, {ORTH: "not"}],
|
||||
"gonna": [{ORTH: "gon", NORM: "going"}, {ORTH: "na", NORM: "to"}],
|
||||
"Gonna": [{ORTH: "Gon", NORM: "going"}, {ORTH: "na", NORM: "to"}],
|
||||
"gotta": [{ORTH: "got"}, {ORTH: "ta", NORM: "to"}],
|
||||
"Gotta": [{ORTH: "Got", NORM: "got"}, {ORTH: "ta", NORM: "to"}],
|
||||
"let's": [{ORTH: "let"}, {ORTH: "'s", NORM: "us"}],
|
||||
"Let's": [{ORTH: "Let", NORM: "let"}, {ORTH: "'s", NORM: "us"}],
|
||||
"c'mon": [{ORTH: "c'm", NORM: "come"}, {ORTH: "on"}],
|
||||
"C'mon": [{ORTH: "C'm", NORM: "come"}, {ORTH: "on"}],
|
||||
}
|
||||
|
||||
_exc.update(_other_exc)
|
||||
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "'S", LEMMA: "'s", NORM: "'s"},
|
||||
{ORTH: "'s", LEMMA: "'s", NORM: "'s"},
|
||||
{ORTH: "\u2018S", LEMMA: "'s", NORM: "'s"},
|
||||
{ORTH: "\u2018s", LEMMA: "'s", NORM: "'s"},
|
||||
{ORTH: "and/or", LEMMA: "and/or", NORM: "and/or", TAG: "CC"},
|
||||
{ORTH: "w/o", LEMMA: "without", NORM: "without"},
|
||||
{ORTH: "'re", LEMMA: "be", NORM: "are"},
|
||||
{ORTH: "'Cause", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'cause", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'cos", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'Cos", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'coz", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'Coz", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'cuz", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'Cuz", LEMMA: "because", NORM: "because"},
|
||||
{ORTH: "'bout", LEMMA: "about", NORM: "about"},
|
||||
{ORTH: "ma'am", LEMMA: "madam", NORM: "madam"},
|
||||
{ORTH: "Ma'am", LEMMA: "madam", NORM: "madam"},
|
||||
{ORTH: "o'clock", LEMMA: "o'clock", NORM: "o'clock"},
|
||||
{ORTH: "O'clock", LEMMA: "o'clock", NORM: "o'clock"},
|
||||
{ORTH: "lovin'", LEMMA: "love", NORM: "loving"},
|
||||
{ORTH: "Lovin'", LEMMA: "love", NORM: "loving"},
|
||||
{ORTH: "lovin", LEMMA: "love", NORM: "loving"},
|
||||
{ORTH: "Lovin", LEMMA: "love", NORM: "loving"},
|
||||
{ORTH: "havin'", LEMMA: "have", NORM: "having"},
|
||||
{ORTH: "Havin'", LEMMA: "have", NORM: "having"},
|
||||
{ORTH: "havin", LEMMA: "have", NORM: "having"},
|
||||
{ORTH: "Havin", LEMMA: "have", NORM: "having"},
|
||||
{ORTH: "doin'", LEMMA: "do", NORM: "doing"},
|
||||
{ORTH: "Doin'", LEMMA: "do", NORM: "doing"},
|
||||
{ORTH: "doin", LEMMA: "do", NORM: "doing"},
|
||||
{ORTH: "Doin", LEMMA: "do", NORM: "doing"},
|
||||
{ORTH: "goin'", LEMMA: "go", NORM: "going"},
|
||||
{ORTH: "Goin'", LEMMA: "go", NORM: "going"},
|
||||
{ORTH: "goin", LEMMA: "go", NORM: "going"},
|
||||
{ORTH: "Goin", LEMMA: "go", NORM: "going"},
|
||||
{ORTH: "Mt.", LEMMA: "Mount", NORM: "Mount"},
|
||||
{ORTH: "Ak.", LEMMA: "Alaska", NORM: "Alaska"},
|
||||
{ORTH: "Ala.", LEMMA: "Alabama", NORM: "Alabama"},
|
||||
{ORTH: "Apr.", LEMMA: "April", NORM: "April"},
|
||||
{ORTH: "Ariz.", LEMMA: "Arizona", NORM: "Arizona"},
|
||||
{ORTH: "Ark.", LEMMA: "Arkansas", NORM: "Arkansas"},
|
||||
{ORTH: "Aug.", LEMMA: "August", NORM: "August"},
|
||||
{ORTH: "Calif.", LEMMA: "California", NORM: "California"},
|
||||
{ORTH: "Colo.", LEMMA: "Colorado", NORM: "Colorado"},
|
||||
{ORTH: "Conn.", LEMMA: "Connecticut", NORM: "Connecticut"},
|
||||
{ORTH: "Dec.", LEMMA: "December", NORM: "December"},
|
||||
{ORTH: "Del.", LEMMA: "Delaware", NORM: "Delaware"},
|
||||
{ORTH: "Feb.", LEMMA: "February", NORM: "February"},
|
||||
{ORTH: "Fla.", LEMMA: "Florida", NORM: "Florida"},
|
||||
{ORTH: "Ga.", LEMMA: "Georgia", NORM: "Georgia"},
|
||||
{ORTH: "Ia.", LEMMA: "Iowa", NORM: "Iowa"},
|
||||
{ORTH: "Id.", LEMMA: "Idaho", NORM: "Idaho"},
|
||||
{ORTH: "Ill.", LEMMA: "Illinois", NORM: "Illinois"},
|
||||
{ORTH: "Ind.", LEMMA: "Indiana", NORM: "Indiana"},
|
||||
{ORTH: "Jan.", LEMMA: "January", NORM: "January"},
|
||||
{ORTH: "Jul.", LEMMA: "July", NORM: "July"},
|
||||
{ORTH: "Jun.", LEMMA: "June", NORM: "June"},
|
||||
{ORTH: "Kan.", LEMMA: "Kansas", NORM: "Kansas"},
|
||||
{ORTH: "Kans.", LEMMA: "Kansas", NORM: "Kansas"},
|
||||
{ORTH: "Ky.", LEMMA: "Kentucky", NORM: "Kentucky"},
|
||||
{ORTH: "La.", LEMMA: "Louisiana", NORM: "Louisiana"},
|
||||
{ORTH: "Mar.", LEMMA: "March", NORM: "March"},
|
||||
{ORTH: "Mass.", LEMMA: "Massachusetts", NORM: "Massachusetts"},
|
||||
{ORTH: "May.", LEMMA: "May", NORM: "May"},
|
||||
{ORTH: "Mich.", LEMMA: "Michigan", NORM: "Michigan"},
|
||||
{ORTH: "Minn.", LEMMA: "Minnesota", NORM: "Minnesota"},
|
||||
{ORTH: "Miss.", LEMMA: "Mississippi", NORM: "Mississippi"},
|
||||
{ORTH: "N.C.", LEMMA: "North Carolina", NORM: "North Carolina"},
|
||||
{ORTH: "N.D.", LEMMA: "North Dakota", NORM: "North Dakota"},
|
||||
{ORTH: "N.H.", LEMMA: "New Hampshire", NORM: "New Hampshire"},
|
||||
{ORTH: "N.J.", LEMMA: "New Jersey", NORM: "New Jersey"},
|
||||
{ORTH: "N.M.", LEMMA: "New Mexico", NORM: "New Mexico"},
|
||||
{ORTH: "N.Y.", LEMMA: "New York", NORM: "New York"},
|
||||
{ORTH: "Neb.", LEMMA: "Nebraska", NORM: "Nebraska"},
|
||||
{ORTH: "Nebr.", LEMMA: "Nebraska", NORM: "Nebraska"},
|
||||
{ORTH: "Nev.", LEMMA: "Nevada", NORM: "Nevada"},
|
||||
{ORTH: "Nov.", LEMMA: "November", NORM: "November"},
|
||||
{ORTH: "Oct.", LEMMA: "October", NORM: "October"},
|
||||
{ORTH: "Okla.", LEMMA: "Oklahoma", NORM: "Oklahoma"},
|
||||
{ORTH: "Ore.", LEMMA: "Oregon", NORM: "Oregon"},
|
||||
{ORTH: "Pa.", LEMMA: "Pennsylvania", NORM: "Pennsylvania"},
|
||||
{ORTH: "S.C.", LEMMA: "South Carolina", NORM: "South Carolina"},
|
||||
{ORTH: "Sep.", LEMMA: "September", NORM: "September"},
|
||||
{ORTH: "Sept.", LEMMA: "September", NORM: "September"},
|
||||
{ORTH: "Tenn.", LEMMA: "Tennessee", NORM: "Tennessee"},
|
||||
{ORTH: "Va.", LEMMA: "Virginia", NORM: "Virginia"},
|
||||
{ORTH: "Wash.", LEMMA: "Washington", NORM: "Washington"},
|
||||
{ORTH: "Wis.", LEMMA: "Wisconsin", NORM: "Wisconsin"},
|
||||
{ORTH: "'S", NORM: "'s"},
|
||||
{ORTH: "'s", NORM: "'s"},
|
||||
{ORTH: "\u2018S", NORM: "'s"},
|
||||
{ORTH: "\u2018s", NORM: "'s"},
|
||||
{ORTH: "and/or", NORM: "and/or"},
|
||||
{ORTH: "w/o", NORM: "without"},
|
||||
{ORTH: "'re", NORM: "are"},
|
||||
{ORTH: "'Cause", NORM: "because"},
|
||||
{ORTH: "'cause", NORM: "because"},
|
||||
{ORTH: "'cos", NORM: "because"},
|
||||
{ORTH: "'Cos", NORM: "because"},
|
||||
{ORTH: "'coz", NORM: "because"},
|
||||
{ORTH: "'Coz", NORM: "because"},
|
||||
{ORTH: "'cuz", NORM: "because"},
|
||||
{ORTH: "'Cuz", NORM: "because"},
|
||||
{ORTH: "'bout", NORM: "about"},
|
||||
{ORTH: "ma'am", NORM: "madam"},
|
||||
{ORTH: "Ma'am", NORM: "madam"},
|
||||
{ORTH: "o'clock", NORM: "o'clock"},
|
||||
{ORTH: "O'clock", NORM: "o'clock"},
|
||||
{ORTH: "lovin'", NORM: "loving"},
|
||||
{ORTH: "Lovin'", NORM: "loving"},
|
||||
{ORTH: "lovin", NORM: "loving"},
|
||||
{ORTH: "Lovin", NORM: "loving"},
|
||||
{ORTH: "havin'", NORM: "having"},
|
||||
{ORTH: "Havin'", NORM: "having"},
|
||||
{ORTH: "havin", NORM: "having"},
|
||||
{ORTH: "Havin", NORM: "having"},
|
||||
{ORTH: "doin'", NORM: "doing"},
|
||||
{ORTH: "Doin'", NORM: "doing"},
|
||||
{ORTH: "doin", NORM: "doing"},
|
||||
{ORTH: "Doin", NORM: "doing"},
|
||||
{ORTH: "goin'", NORM: "going"},
|
||||
{ORTH: "Goin'", NORM: "going"},
|
||||
{ORTH: "goin", NORM: "going"},
|
||||
{ORTH: "Goin", NORM: "going"},
|
||||
{ORTH: "Mt.", NORM: "Mount"},
|
||||
{ORTH: "Ak.", NORM: "Alaska"},
|
||||
{ORTH: "Ala.", NORM: "Alabama"},
|
||||
{ORTH: "Apr.", NORM: "April"},
|
||||
{ORTH: "Ariz.", NORM: "Arizona"},
|
||||
{ORTH: "Ark.", NORM: "Arkansas"},
|
||||
{ORTH: "Aug.", NORM: "August"},
|
||||
{ORTH: "Calif.", NORM: "California"},
|
||||
{ORTH: "Colo.", NORM: "Colorado"},
|
||||
{ORTH: "Conn.", NORM: "Connecticut"},
|
||||
{ORTH: "Dec.", NORM: "December"},
|
||||
{ORTH: "Del.", NORM: "Delaware"},
|
||||
{ORTH: "Feb.", NORM: "February"},
|
||||
{ORTH: "Fla.", NORM: "Florida"},
|
||||
{ORTH: "Ga.", NORM: "Georgia"},
|
||||
{ORTH: "Ia.", NORM: "Iowa"},
|
||||
{ORTH: "Id.", NORM: "Idaho"},
|
||||
{ORTH: "Ill.", NORM: "Illinois"},
|
||||
{ORTH: "Ind.", NORM: "Indiana"},
|
||||
{ORTH: "Jan.", NORM: "January"},
|
||||
{ORTH: "Jul.", NORM: "July"},
|
||||
{ORTH: "Jun.", NORM: "June"},
|
||||
{ORTH: "Kan.", NORM: "Kansas"},
|
||||
{ORTH: "Kans.", NORM: "Kansas"},
|
||||
{ORTH: "Ky.", NORM: "Kentucky"},
|
||||
{ORTH: "La.", NORM: "Louisiana"},
|
||||
{ORTH: "Mar.", NORM: "March"},
|
||||
{ORTH: "Mass.", NORM: "Massachusetts"},
|
||||
{ORTH: "May.", NORM: "May"},
|
||||
{ORTH: "Mich.", NORM: "Michigan"},
|
||||
{ORTH: "Minn.", NORM: "Minnesota"},
|
||||
{ORTH: "Miss.", NORM: "Mississippi"},
|
||||
{ORTH: "N.C.", NORM: "North Carolina"},
|
||||
{ORTH: "N.D.", NORM: "North Dakota"},
|
||||
{ORTH: "N.H.", NORM: "New Hampshire"},
|
||||
{ORTH: "N.J.", NORM: "New Jersey"},
|
||||
{ORTH: "N.M.", NORM: "New Mexico"},
|
||||
{ORTH: "N.Y.", NORM: "New York"},
|
||||
{ORTH: "Neb.", NORM: "Nebraska"},
|
||||
{ORTH: "Nebr.", NORM: "Nebraska"},
|
||||
{ORTH: "Nev.", NORM: "Nevada"},
|
||||
{ORTH: "Nov.", NORM: "November"},
|
||||
{ORTH: "Oct.", NORM: "October"},
|
||||
{ORTH: "Okla.", NORM: "Oklahoma"},
|
||||
{ORTH: "Ore.", NORM: "Oregon"},
|
||||
{ORTH: "Pa.", NORM: "Pennsylvania"},
|
||||
{ORTH: "S.C.", NORM: "South Carolina"},
|
||||
{ORTH: "Sep.", NORM: "September"},
|
||||
{ORTH: "Sept.", NORM: "September"},
|
||||
{ORTH: "Tenn.", NORM: "Tennessee"},
|
||||
{ORTH: "Va.", NORM: "Virginia"},
|
||||
{ORTH: "Wash.", NORM: "Washington"},
|
||||
{ORTH: "Wis.", NORM: "Wisconsin"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@@ -555,4 +522,4 @@ for string in _exclude:
|
|||
_exc.pop(string)
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@@ -1,52 +1,23 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.config import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "es"
|
||||
stop_words = {"@language_data": "spacy.es.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.es.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.es.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.es.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class SpanishDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Spanish(Language):
|
||||
lang = "es"
|
||||
Defaults = SpanishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Spanish"]
|
||||
|
|
|
@@ -1,13 +1,15 @@
|
|||
from typing import Union, Iterator, Optional, List, Tuple
|
||||
|
||||
from ...symbols import NOUN, PROPN, PRON, VERB, AUX
|
||||
from ...errors import Errors
|
||||
from ...tokens import Doc, Span, Token
|
||||
|
||||
|
||||
def noun_chunks(doclike):
|
||||
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
|
||||
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
|
||||
doc = doclike.doc
|
||||
|
||||
if not doc.is_parsed:
|
||||
raise ValueError(Errors.E029)
|
||||
|
||||
if not len(doc):
|
||||
return
|
||||
np_label = doc.vocab.strings.add("NP")
|
||||
|
@@ -28,18 +30,24 @@ def noun_chunks(doclike):
|
|||
token = next_token(token)
|
||||
|
||||
|
||||
def is_verb_token(token):
|
||||
def is_verb_token(token: Token) -> bool:
|
||||
return token.pos in [VERB, AUX]
|
||||
|
||||
|
||||
def next_token(token):
|
||||
def next_token(token: Token) -> Optional[Token]:
|
||||
try:
|
||||
return token.nbor()
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
|
||||
def noun_bounds(doc, root, np_left_deps, np_right_deps, stop_deps):
|
||||
def noun_bounds(
|
||||
doc: Doc,
|
||||
root: Token,
|
||||
np_left_deps: List[str],
|
||||
np_right_deps: List[str],
|
||||
stop_deps: List[str],
|
||||
) -> Tuple[Token, Token]:
|
||||
left_bound = root
|
||||
for token in reversed(list(root.lefts)):
|
||||
if token.dep in np_left_deps:
|
||||
|
@@ -50,12 +58,8 @@ def noun_bounds(doc, root, np_left_deps, np_right_deps, stop_deps):
|
|||
left, right = noun_bounds(
|
||||
doc, token, np_left_deps, np_right_deps, stop_deps
|
||||
)
|
||||
if list(
|
||||
filter(
|
||||
lambda t: is_verb_token(t) or t.dep in stop_deps,
|
||||
doc[left_bound.i : right.i],
|
||||
)
|
||||
):
|
||||
filter_func = lambda t: is_verb_token(t) or t.dep in stop_deps
|
||||
if list(filter(filter_func, doc[left_bound.i : right.i],)):
|
||||
break
|
||||
else:
|
||||
right_bound = right
|
||||
|
|
|
@@ -1,25 +1,27 @@
|
|||
from ...symbols import ORTH, LEMMA, NORM, PRON_LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {
|
||||
"pal": [{ORTH: "pa", LEMMA: "para"}, {ORTH: "l", LEMMA: "el", NORM: "el"}],
|
||||
"pal": [{ORTH: "pa"}, {ORTH: "l", NORM: "el"}],
|
||||
}
|
||||
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "n°", LEMMA: "número"},
|
||||
{ORTH: "°C", LEMMA: "grados Celcius"},
|
||||
{ORTH: "aprox.", LEMMA: "aproximadamente"},
|
||||
{ORTH: "dna.", LEMMA: "docena"},
|
||||
{ORTH: "dpto.", LEMMA: "departamento"},
|
||||
{ORTH: "ej.", LEMMA: "ejemplo"},
|
||||
{ORTH: "esq.", LEMMA: "esquina"},
|
||||
{ORTH: "pág.", LEMMA: "página"},
|
||||
{ORTH: "p.ej.", LEMMA: "por ejemplo"},
|
||||
{ORTH: "Ud.", LEMMA: PRON_LEMMA, NORM: "usted"},
|
||||
{ORTH: "Vd.", LEMMA: PRON_LEMMA, NORM: "usted"},
|
||||
{ORTH: "Uds.", LEMMA: PRON_LEMMA, NORM: "ustedes"},
|
||||
{ORTH: "Vds.", LEMMA: PRON_LEMMA, NORM: "ustedes"},
|
||||
{ORTH: "n°"},
|
||||
{ORTH: "°C"},
|
||||
{ORTH: "aprox."},
|
||||
{ORTH: "dna."},
|
||||
{ORTH: "dpto."},
|
||||
{ORTH: "ej."},
|
||||
{ORTH: "esq."},
|
||||
{ORTH: "pág."},
|
||||
{ORTH: "p.ej."},
|
||||
{ORTH: "Ud.", NORM: "usted"},
|
||||
{ORTH: "Vd.", NORM: "usted"},
|
||||
{ORTH: "Uds.", NORM: "ustedes"},
|
||||
{ORTH: "Vds.", NORM: "ustedes"},
|
||||
{ORTH: "vol.", NORM: "volúmen"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
@@ -27,14 +29,14 @@ for exc_data in [
|
|||
|
||||
# Times
|
||||
|
||||
_exc["12m."] = [{ORTH: "12"}, {ORTH: "m.", LEMMA: "p.m."}]
|
||||
_exc["12m."] = [{ORTH: "12"}, {ORTH: "m."}]
|
||||
|
||||
|
||||
for h in range(1, 12 + 1):
|
||||
for period in ["a.m.", "am"]:
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, LEMMA: "a.m."}]
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period}]
|
||||
for period in ["p.m.", "pm"]:
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, LEMMA: "p.m."}]
|
||||
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period}]
|
||||
|
||||
|
||||
for orth in [
|
||||
|
@@ -73,4 +75,4 @@ for orth in [
|
|||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "et"
|
||||
stop_words = {"@language_data": "spacy.et.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.et.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class EstonianDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Estonian(Language):
|
||||
lang = "et"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = EstonianDefaults
|
||||
|
||||
|
||||
__all__ = ["Estonian"]
|
||||
|
|
|
@@ -1,41 +1,18 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .punctuation import TOKENIZER_SUFFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "eu"
|
||||
stop_words = {"@language_data": "spacy.eu.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.eu.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.eu.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.eu.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class BasqueDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = BASE_EXCEPTIONS
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
stop_words = STOP_WORDS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
|
||||
|
||||
class Basque(Language):
|
||||
lang = "eu"
|
||||
Defaults = BasqueDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Basque"]
|
||||
|
|
|
@@ -1,55 +1,23 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_SUFFIXES
|
||||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "fa"
|
||||
stop_words = {"@language_data": "spacy.fa.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.fa.lex_attr_getters"}
|
||||
|
||||
[nlp.writing_system]
|
||||
direction = "rtl"
|
||||
has_case = false
|
||||
has_letters = true
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.fa.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.fa.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
from ...language import Language
|
||||
|
||||
|
||||
class PersianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
stop_words = STOP_WORDS
|
||||
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
|
||||
|
||||
|
||||
class Persian(Language):
|
||||
lang = "fa"
|
||||
Defaults = PersianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Persian"]
|
||||
|
|
File diff suppressed because it is too large
@@ -1,43 +1,21 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "fi"
|
||||
stop_words = {"@language_data": "spacy.fi.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.fi.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.fi.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.fi.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class FinnishDefaults(Language.Defaults):
|
||||
infixes = TOKENIZER_INFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Finnish(Language):
|
||||
lang = "fi"
|
||||
Defaults = FinnishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Finnish"]
|
||||
|
|
|
@@ -1,4 +1,6 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
@@ -6,76 +8,76 @@ _exc = {}
|
|||
|
||||
# Source https://www.cs.tut.fi/~jkorpela/kielenopas/5.5.html
|
||||
for exc_data in [
|
||||
{ORTH: "aik.", LEMMA: "aikaisempi"},
|
||||
{ORTH: "alk.", LEMMA: "alkaen"},
|
||||
{ORTH: "alv.", LEMMA: "arvonlisävero"},
|
||||
{ORTH: "ark.", LEMMA: "arkisin"},
|
||||
{ORTH: "as.", LEMMA: "asunto"},
|
||||
{ORTH: "eaa.", LEMMA: "ennen ajanlaskun alkua"},
|
||||
{ORTH: "ed.", LEMMA: "edellinen"},
|
||||
{ORTH: "esim.", LEMMA: "esimerkki"},
|
||||
{ORTH: "huom.", LEMMA: "huomautus"},
|
||||
{ORTH: "jne.", LEMMA: "ja niin edelleen"},
|
||||
{ORTH: "joht.", LEMMA: "johtaja"},
|
||||
{ORTH: "k.", LEMMA: "kuollut"},
|
||||
{ORTH: "ks.", LEMMA: "katso"},
|
||||
{ORTH: "lk.", LEMMA: "luokka"},
|
||||
{ORTH: "lkm.", LEMMA: "lukumäärä"},
|
||||
{ORTH: "lyh.", LEMMA: "lyhenne"},
|
||||
{ORTH: "läh.", LEMMA: "lähettäjä"},
|
||||
{ORTH: "miel.", LEMMA: "mieluummin"},
|
||||
{ORTH: "milj.", LEMMA: "miljoona"},
|
||||
{ORTH: "Mm.", LEMMA: "muun muassa"},
|
||||
{ORTH: "mm.", LEMMA: "muun muassa"},
|
||||
{ORTH: "myöh.", LEMMA: "myöhempi"},
|
||||
{ORTH: "n.", LEMMA: "noin"},
|
||||
{ORTH: "nimim.", LEMMA: "nimimerkki"},
|
||||
{ORTH: "n:o", LEMMA: "numero"},
|
||||
{ORTH: "N:o", LEMMA: "numero"},
|
||||
{ORTH: "nro", LEMMA: "numero"},
|
||||
{ORTH: "ns.", LEMMA: "niin sanottu"},
|
||||
{ORTH: "nyk.", LEMMA: "nykyinen"},
|
||||
{ORTH: "oik.", LEMMA: "oikealla"},
|
||||
{ORTH: "os.", LEMMA: "osoite"},
|
||||
{ORTH: "p.", LEMMA: "päivä"},
|
||||
{ORTH: "par.", LEMMA: "paremmin"},
|
||||
{ORTH: "per.", LEMMA: "perustettu"},
|
||||
{ORTH: "pj.", LEMMA: "puheenjohtaja"},
|
||||
{ORTH: "puh.joht.", LEMMA: "puheenjohtaja"},
|
||||
{ORTH: "prof.", LEMMA: "professori"},
|
||||
{ORTH: "puh.", LEMMA: "puhelin"},
|
||||
{ORTH: "pvm.", LEMMA: "päivämäärä"},
|
||||
{ORTH: "rak.", LEMMA: "rakennettu"},
|
||||
{ORTH: "ry.", LEMMA: "rekisteröity yhdistys"},
|
||||
{ORTH: "s.", LEMMA: "sivu"},
|
||||
{ORTH: "siht.", LEMMA: "sihteeri"},
|
||||
{ORTH: "synt.", LEMMA: "syntynyt"},
|
||||
{ORTH: "t.", LEMMA: "toivoo"},
|
||||
{ORTH: "tark.", LEMMA: "tarkastanut"},
|
||||
{ORTH: "til.", LEMMA: "tilattu"},
|
||||
{ORTH: "tms.", LEMMA: "tai muuta sellaista"},
|
||||
{ORTH: "toim.", LEMMA: "toimittanut"},
|
||||
{ORTH: "v.", LEMMA: "vuosi"},
|
||||
{ORTH: "vas.", LEMMA: "vasen"},
|
||||
{ORTH: "vast.", LEMMA: "vastaus"},
|
||||
{ORTH: "vrt.", LEMMA: "vertaa"},
|
||||
{ORTH: "yht.", LEMMA: "yhteensä"},
|
||||
{ORTH: "yl.", LEMMA: "yleinen"},
|
||||
{ORTH: "ym.", LEMMA: "ynnä muuta"},
|
||||
{ORTH: "yms.", LEMMA: "ynnä muuta sellaista"},
|
||||
{ORTH: "yo.", LEMMA: "ylioppilas"},
|
||||
{ORTH: "yliopp.", LEMMA: "ylioppilas"},
|
||||
{ORTH: "ao.", LEMMA: "asianomainen"},
|
||||
{ORTH: "em.", LEMMA: "edellä mainittu"},
|
||||
{ORTH: "ko.", LEMMA: "kyseessä oleva"},
|
||||
{ORTH: "ml.", LEMMA: "mukaan luettuna"},
|
||||
{ORTH: "po.", LEMMA: "puheena oleva"},
|
||||
{ORTH: "so.", LEMMA: "se on"},
|
||||
{ORTH: "ts.", LEMMA: "toisin sanoen"},
|
||||
{ORTH: "vm.", LEMMA: "viimeksi mainittu"},
|
||||
{ORTH: "srk.", LEMMA: "seurakunta"},
|
||||
{ORTH: "aik."},
|
||||
{ORTH: "alk."},
|
||||
{ORTH: "alv."},
|
||||
{ORTH: "ark."},
|
||||
{ORTH: "as."},
|
||||
{ORTH: "eaa."},
|
||||
{ORTH: "ed."},
|
||||
{ORTH: "esim."},
|
||||
{ORTH: "huom."},
|
||||
{ORTH: "jne."},
|
||||
{ORTH: "joht."},
|
||||
{ORTH: "k."},
|
||||
{ORTH: "ks."},
|
||||
{ORTH: "lk."},
|
||||
{ORTH: "lkm."},
|
||||
{ORTH: "lyh."},
|
||||
{ORTH: "läh."},
|
||||
{ORTH: "miel."},
|
||||
{ORTH: "milj."},
|
||||
{ORTH: "Mm."},
|
||||
{ORTH: "mm."},
|
||||
{ORTH: "myöh."},
|
||||
{ORTH: "n."},
|
||||
{ORTH: "nimim."},
|
||||
{ORTH: "n:o"},
|
||||
{ORTH: "N:o"},
|
||||
{ORTH: "nro"},
|
||||
{ORTH: "ns."},
|
||||
{ORTH: "nyk."},
|
||||
{ORTH: "oik."},
|
||||
{ORTH: "os."},
|
||||
{ORTH: "p."},
|
||||
{ORTH: "par."},
|
||||
{ORTH: "per."},
|
||||
{ORTH: "pj."},
|
||||
{ORTH: "puh.joht."},
|
||||
{ORTH: "prof."},
|
||||
{ORTH: "puh."},
|
||||
{ORTH: "pvm."},
|
||||
{ORTH: "rak."},
|
||||
{ORTH: "ry."},
|
||||
{ORTH: "s."},
|
||||
{ORTH: "siht."},
|
||||
{ORTH: "synt."},
|
||||
{ORTH: "t."},
|
||||
{ORTH: "tark."},
|
||||
{ORTH: "til."},
|
||||
{ORTH: "tms."},
|
||||
{ORTH: "toim."},
|
||||
{ORTH: "v."},
|
||||
{ORTH: "vas."},
|
||||
{ORTH: "vast."},
|
||||
{ORTH: "vrt."},
|
||||
{ORTH: "yht."},
|
||||
{ORTH: "yl."},
|
||||
{ORTH: "ym."},
|
||||
{ORTH: "yms."},
|
||||
{ORTH: "yo."},
|
||||
{ORTH: "yliopp."},
|
||||
{ORTH: "ao."},
|
||||
{ORTH: "em."},
|
||||
{ORTH: "ko."},
|
||||
{ORTH: "ml."},
|
||||
{ORTH: "po."},
|
||||
{ORTH: "so."},
|
||||
{ORTH: "ts."},
|
||||
{ORTH: "vm."},
|
||||
{ORTH: "srk."},
|
||||
]:
    _exc[exc_data[ORTH]] = [exc_data]


TOKENIZER_EXCEPTIONS = _exc
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)

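The entries above drop LEMMA and keep ORTH-only special cases, which update_exc then merges with the shared BASE_EXCEPTIONS. A minimal sketch of what such an entry does at runtime, using the public add_special_case API on a blank pipeline (the blank English pipeline and the sample text are assumptions, not part of the diff):

# Sketch: an ORTH-only special case keeps the abbreviation as a single token.
import spacy
from spacy.symbols import ORTH

nlp = spacy.blank("en")
nlp.tokenizer.add_special_case("esim.", [{ORTH: "esim."}])
doc = nlp("esim. spaCy")
assert [t.text for t in doc] == ["esim.", "spaCy"]
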
@@ -1,4 +1,4 @@
from typing import Set, Dict, Callable, Any
from typing import Callable
from thinc.api import Config

from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH

@@ -6,56 +6,47 @@ from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import FrenchLemmatizer, is_base_form
from .syntax_iterators import SYNTAX_ITERATORS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .lemmatizer import FrenchLemmatizer, is_base_form
from ...lookups import load_lookups
from ...language import Language
from ...util import update_exc, registry
from ...util import registry


DEFAULT_CONFIG = """
[nlp]
lang = "fr"
stop_words = {"@language_data": "spacy.fr.stop_words"}
lex_attr_getters = {"@language_data": "spacy.fr.lex_attr_getters"}

[nlp.lemmatizer]
@lemmatizers = "spacy.FrenchLemmatizer.v1"

[nlp.lemmatizer.data_paths]
@language_data = "spacy-lookups-data"
lang = ${nlp:lang}
@lemmatizers = "spacy.fr.FrenchLemmatizer"
"""


@registry.lemmatizers("spacy.FrenchLemmatizer.v1")
def create_french_lemmatizer(data_paths: dict = {}) -> FrenchLemmatizer:
    return FrenchLemmatizer(data_paths=data_paths, is_base_form=is_base_form)
@registry.lemmatizers("spacy.fr.FrenchLemmatizer")
def create_lemmatizer() -> Callable[[Language], FrenchLemmatizer]:
    tables = ["lemma_rules", "lemma_index", "lemma_exc", "lemma_lookup"]

    def lemmatizer_factory(nlp: Language) -> FrenchLemmatizer:
        lookups = load_lookups(lang=nlp.lang, tables=tables)
        return FrenchLemmatizer(lookups=lookups, is_base_form=is_base_form)

@registry.language_data("spacy.fr.stop_words")
def stop_words() -> Set[str]:
    return STOP_WORDS


@registry.language_data("spacy.fr.lex_attr_getters")
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
    return LEX_ATTRS
    return lemmatizer_factory


class FrenchDefaults(Language.Defaults):
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    config = Config().from_str(DEFAULT_CONFIG)
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    prefixes = TOKENIZER_PREFIXES
    infixes = TOKENIZER_INFIXES
    suffixes = TOKENIZER_SUFFIXES
    token_match = TOKEN_MATCH
    lex_attr_getters = LEX_ATTRS
    syntax_iterators = SYNTAX_ITERATORS
    stop_words = STOP_WORDS


class French(Language):
    lang = "fr"
    Defaults = FrenchDefaults
    default_config = Config().from_str(DEFAULT_CONFIG)


__all__ = ["French"]

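The new [nlp.lemmatizer] block points at a function registered under @lemmatizers, which returns a factory that receives the nlp object and builds the lemmatizer from lookups tables. A hedged sketch of the same registration pattern with a made-up name and a dummy lemmatizer; only the decorator and the .get() lookup mirror the diff, everything else is illustrative:

from spacy.util import registry


@registry.lemmatizers("my.DummyLemmatizer")
def create_dummy_lemmatizer():
    def lemmatizer_factory(nlp):
        # A real factory would load lookups for nlp.lang here, as
        # create_lemmatizer does above, and return a Lemmatizer.
        return lambda string, *args, **kwargs: [string.lower()]

    return lemmatizer_factory


# Resolving by name, as the config system does when it sees @lemmatizers.
factory = registry.lemmatizers.get("my.DummyLemmatizer")()
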
@@ -1,26 +1,18 @@
from typing import Union, Iterator

from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span


def noun_chunks(doclike):
    """
    Detect base noun phrases from a dependency parse. Works on both Doc and Span.
    """
    labels = [
        "nsubj",
        "nsubj:pass",
        "obj",
        "iobj",
        "ROOT",
        "appos",
        "nmod",
        "nmod:poss",
    ]
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
    """Detect base noun phrases from a dependency parse. Works on Doc and Span."""
    # fmt: off
    labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
    # fmt: on
    doc = doclike.doc  # Ensure works on both Doc and Span.

    if not doc.is_parsed:
        raise ValueError(Errors.E029)

    np_deps = [doc.vocab.strings[label] for label in labels]
    conj = doc.vocab.strings.add("conj")
    np_label = doc.vocab.strings.add("NP")

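The rewritten iterator is what Doc.noun_chunks calls once it is wired up as the French syntax iterator. A short usage sketch; the trained pipeline name fr_core_news_sm is an assumption here, and a parser must have run for is_parsed to be set:

import spacy

nlp = spacy.load("fr_core_news_sm")
doc = nlp("Le chat noir dort sur le canapé.")
for chunk in doc.noun_chunks:
    print(chunk.text, chunk.root.dep_)
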
@ -1,8 +1,11 @@
|
|||
import re
|
||||
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from .punctuation import ELISION, HYPHENS
|
||||
from ..char_classes import ALPHA_LOWER, ALPHA
|
||||
from ...symbols import ORTH, LEMMA
|
||||
from ...symbols import ORTH
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
# not using the large _tokenizer_exceptions_list by default as it slows down the tokenizer
|
||||
# from ._tokenizer_exceptions_list import FR_BASE_EXCEPTIONS
|
||||
|
@ -25,29 +28,29 @@ def lower_first_letter(text):
|
|||
return text[0].lower() + text[1:]
|
||||
|
||||
|
||||
_exc = {"J.-C.": [{LEMMA: "Jésus", ORTH: "J."}, {LEMMA: "Christ", ORTH: "-C."}]}
|
||||
_exc = {"J.-C.": [{ORTH: "J."}, {ORTH: "-C."}]}
|
||||
|
||||
|
||||
for exc_data in [
|
||||
{LEMMA: "avant", ORTH: "av."},
|
||||
{LEMMA: "janvier", ORTH: "janv."},
|
||||
{LEMMA: "février", ORTH: "févr."},
|
||||
{LEMMA: "avril", ORTH: "avr."},
|
||||
{LEMMA: "juillet", ORTH: "juill."},
|
||||
{LEMMA: "septembre", ORTH: "sept."},
|
||||
{LEMMA: "octobre", ORTH: "oct."},
|
||||
{LEMMA: "novembre", ORTH: "nov."},
|
||||
{LEMMA: "décembre", ORTH: "déc."},
|
||||
{LEMMA: "après", ORTH: "apr."},
|
||||
{LEMMA: "docteur", ORTH: "Dr."},
|
||||
{LEMMA: "monsieur", ORTH: "M."},
|
||||
{LEMMA: "monsieur", ORTH: "Mr."},
|
||||
{LEMMA: "madame", ORTH: "Mme."},
|
||||
{LEMMA: "mademoiselle", ORTH: "Mlle."},
|
||||
{LEMMA: "numéro", ORTH: "n°"},
|
||||
{LEMMA: "degrés", ORTH: "d°"},
|
||||
{LEMMA: "saint", ORTH: "St."},
|
||||
{LEMMA: "sainte", ORTH: "Ste."},
|
||||
{ORTH: "av."},
|
||||
{ORTH: "janv."},
|
||||
{ORTH: "févr."},
|
||||
{ORTH: "avr."},
|
||||
{ORTH: "juill."},
|
||||
{ORTH: "sept."},
|
||||
{ORTH: "oct."},
|
||||
{ORTH: "nov."},
|
||||
{ORTH: "déc."},
|
||||
{ORTH: "apr."},
|
||||
{ORTH: "Dr."},
|
||||
{ORTH: "M."},
|
||||
{ORTH: "Mr."},
|
||||
{ORTH: "Mme."},
|
||||
{ORTH: "Mlle."},
|
||||
{ORTH: "n°"},
|
||||
{ORTH: "d°"},
|
||||
{ORTH: "St."},
|
||||
{ORTH: "Ste."},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@ -77,55 +80,37 @@ for orth in [
|
|||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
|
||||
for verb, verb_lemma in [
|
||||
("a", "avoir"),
|
||||
("est", "être"),
|
||||
("semble", "sembler"),
|
||||
("indique", "indiquer"),
|
||||
("moque", "moquer"),
|
||||
("passe", "passer"),
|
||||
for verb in [
|
||||
"a",
|
||||
"est" "semble",
|
||||
"indique",
|
||||
"moque",
|
||||
"passe",
|
||||
]:
|
||||
for orth in [verb, verb.title()]:
|
||||
for pronoun in ["elle", "il", "on"]:
|
||||
token = f"{orth}-t-{pronoun}"
|
||||
_exc[token] = [
|
||||
{LEMMA: verb_lemma, ORTH: orth}, # , TAG: "VERB"},
|
||||
{LEMMA: "t", ORTH: "-t"},
|
||||
{LEMMA: pronoun, ORTH: "-" + pronoun},
|
||||
]
|
||||
_exc[token] = [{ORTH: orth}, {ORTH: "-t"}, {ORTH: "-" + pronoun}]
|
||||
|
||||
for verb, verb_lemma in [("est", "être")]:
|
||||
for verb in ["est"]:
|
||||
for orth in [verb, verb.title()]:
|
||||
token = f"{orth}-ce"
|
||||
_exc[token] = [
|
||||
{LEMMA: verb_lemma, ORTH: orth}, # , TAG: "VERB"},
|
||||
{LEMMA: "ce", ORTH: "-ce"},
|
||||
]
|
||||
_exc[f"{orth}-ce"] = [{ORTH: orth}, {ORTH: "-ce"}]
|
||||
|
||||
|
||||
for pre, pre_lemma in [("qu'", "que"), ("n'", "ne")]:
|
||||
for pre in ["qu'", "n'"]:
|
||||
for orth in [pre, pre.title()]:
|
||||
_exc[f"{orth}est-ce"] = [
|
||||
{LEMMA: pre_lemma, ORTH: orth},
|
||||
{LEMMA: "être", ORTH: "est"},
|
||||
{LEMMA: "ce", ORTH: "-ce"},
|
||||
]
|
||||
_exc[f"{orth}est-ce"] = [{ORTH: orth}, {ORTH: "est"}, {ORTH: "-ce"}]
|
||||
|
||||
|
||||
for verb, pronoun in [("est", "il"), ("EST", "IL")]:
|
||||
token = "{}-{}".format(verb, pronoun)
|
||||
_exc[token] = [
|
||||
{LEMMA: "être", ORTH: verb},
|
||||
{LEMMA: pronoun, ORTH: "-" + pronoun},
|
||||
]
|
||||
_exc[f"{verb}-{pronoun}"] = [{ORTH: verb}, {ORTH: "-" + pronoun}]
|
||||
|
||||
|
||||
for s, verb, pronoun in [("s", "est", "il"), ("S", "EST", "IL")]:
|
||||
token = "{}'{}-{}".format(s, verb, pronoun)
|
||||
_exc[token] = [
|
||||
{LEMMA: "se", ORTH: s + "'"},
|
||||
{LEMMA: "être", ORTH: verb},
|
||||
{LEMMA: pronoun, ORTH: "-" + pronoun},
|
||||
_exc[f"{s}'{verb}-{pronoun}"] = [
|
||||
{ORTH: s + "'"},
|
||||
{ORTH: verb},
|
||||
{ORTH: "-" + pronoun},
|
||||
]
|
||||
|
||||
|
||||
|
@ -452,7 +437,7 @@ _regular_exp += [
|
|||
]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
TOKEN_MATCH = re.compile(
|
||||
"(?iu)" + "|".join("(?:{})".format(m) for m in _regular_exp)
|
||||
).match
|
||||
|
|
|
@ -1,33 +1,16 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .stop_words import STOP_WORDS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ga"
|
||||
stop_words = {"@language_data": "spacy.ga.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ga.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class IrishDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Irish(Language):
|
||||
lang = "ga"
|
||||
Defaults = IrishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Irish"]
@ -1,79 +1,65 @@
|
|||
from ...symbols import POS, DET, ADP, CCONJ, ADV, NOUN, X, AUX
|
||||
from ...symbols import ORTH, LEMMA, NORM
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {
|
||||
"'acha'n": [
|
||||
{ORTH: "'ach", LEMMA: "gach", NORM: "gach", POS: DET},
|
||||
{ORTH: "a'n", LEMMA: "aon", NORM: "aon", POS: DET},
|
||||
],
|
||||
"dem'": [
|
||||
{ORTH: "de", LEMMA: "de", NORM: "de", POS: ADP},
|
||||
{ORTH: "m'", LEMMA: "mo", NORM: "mo", POS: DET},
|
||||
],
|
||||
"ded'": [
|
||||
{ORTH: "de", LEMMA: "de", NORM: "de", POS: ADP},
|
||||
{ORTH: "d'", LEMMA: "do", NORM: "do", POS: DET},
|
||||
],
|
||||
"lem'": [
|
||||
{ORTH: "le", LEMMA: "le", NORM: "le", POS: ADP},
|
||||
{ORTH: "m'", LEMMA: "mo", NORM: "mo", POS: DET},
|
||||
],
|
||||
"led'": [
|
||||
{ORTH: "le", LEMMA: "le", NORM: "le", POS: ADP},
|
||||
{ORTH: "d'", LEMMA: "mo", NORM: "do", POS: DET},
|
||||
],
|
||||
"'acha'n": [{ORTH: "'ach", NORM: "gach"}, {ORTH: "a'n", NORM: "aon"}],
|
||||
"dem'": [{ORTH: "de", NORM: "de"}, {ORTH: "m'", NORM: "mo"}],
|
||||
"ded'": [{ORTH: "de", NORM: "de"}, {ORTH: "d'", NORM: "do"}],
|
||||
"lem'": [{ORTH: "le", NORM: "le"}, {ORTH: "m'", NORM: "mo"}],
|
||||
"led'": [{ORTH: "le", NORM: "le"}, {ORTH: "d'", NORM: "do"}],
|
||||
}
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "'gus", LEMMA: "agus", NORM: "agus", POS: CCONJ},
|
||||
{ORTH: "'ach", LEMMA: "gach", NORM: "gach", POS: DET},
|
||||
{ORTH: "ao'", LEMMA: "aon", NORM: "aon"},
|
||||
{ORTH: "'niar", LEMMA: "aniar", NORM: "aniar", POS: ADV},
|
||||
{ORTH: "'níos", LEMMA: "aníos", NORM: "aníos", POS: ADV},
|
||||
{ORTH: "'ndiu", LEMMA: "inniu", NORM: "inniu", POS: ADV},
|
||||
{ORTH: "'nocht", LEMMA: "anocht", NORM: "anocht", POS: ADV},
|
||||
{ORTH: "m'", LEMMA: "mo", POS: DET},
|
||||
{ORTH: "Aib.", LEMMA: "Aibreán", POS: NOUN},
|
||||
{ORTH: "Ath.", LEMMA: "athair", POS: NOUN},
|
||||
{ORTH: "Beal.", LEMMA: "Bealtaine", POS: NOUN},
|
||||
{ORTH: "a.C.n.", LEMMA: "ante Christum natum", POS: X},
|
||||
{ORTH: "m.sh.", LEMMA: "mar shampla", POS: ADV},
|
||||
{ORTH: "M.F.", LEMMA: "Meán Fómhair", POS: NOUN},
|
||||
{ORTH: "M.Fómh.", LEMMA: "Meán Fómhair", POS: NOUN},
|
||||
{ORTH: "D.F.", LEMMA: "Deireadh Fómhair", POS: NOUN},
|
||||
{ORTH: "D.Fómh.", LEMMA: "Deireadh Fómhair", POS: NOUN},
|
||||
{ORTH: "r.C.", LEMMA: "roimh Chríost", POS: ADV},
|
||||
{ORTH: "R.C.", LEMMA: "roimh Chríost", POS: ADV},
|
||||
{ORTH: "r.Ch.", LEMMA: "roimh Chríost", POS: ADV},
|
||||
{ORTH: "r.Chr.", LEMMA: "roimh Chríost", POS: ADV},
|
||||
{ORTH: "R.Ch.", LEMMA: "roimh Chríost", POS: ADV},
|
||||
{ORTH: "R.Chr.", LEMMA: "roimh Chríost", POS: ADV},
|
||||
{ORTH: "⁊rl.", LEMMA: "agus araile", POS: ADV},
|
||||
{ORTH: "srl.", LEMMA: "agus araile", POS: ADV},
|
||||
{ORTH: "Co.", LEMMA: "contae", POS: NOUN},
|
||||
{ORTH: "Ean.", LEMMA: "Eanáir", POS: NOUN},
|
||||
{ORTH: "Feab.", LEMMA: "Feabhra", POS: NOUN},
|
||||
{ORTH: "gCo.", LEMMA: "contae", POS: NOUN},
|
||||
{ORTH: ".i.", LEMMA: "eadhon", POS: ADV},
|
||||
{ORTH: "B'", LEMMA: "ba", POS: AUX},
|
||||
{ORTH: "b'", LEMMA: "ba", POS: AUX},
|
||||
{ORTH: "lch.", LEMMA: "leathanach", POS: NOUN},
|
||||
{ORTH: "Lch.", LEMMA: "leathanach", POS: NOUN},
|
||||
{ORTH: "lgh.", LEMMA: "leathanach", POS: NOUN},
|
||||
{ORTH: "Lgh.", LEMMA: "leathanach", POS: NOUN},
|
||||
{ORTH: "Lún.", LEMMA: "Lúnasa", POS: NOUN},
|
||||
{ORTH: "Már.", LEMMA: "Márta", POS: NOUN},
|
||||
{ORTH: "Meith.", LEMMA: "Meitheamh", POS: NOUN},
|
||||
{ORTH: "Noll.", LEMMA: "Nollaig", POS: NOUN},
|
||||
{ORTH: "Samh.", LEMMA: "Samhain", POS: NOUN},
|
||||
{ORTH: "tAth.", LEMMA: "athair", POS: NOUN},
|
||||
{ORTH: "tUas.", LEMMA: "Uasal", POS: NOUN},
|
||||
{ORTH: "teo.", LEMMA: "teoranta", POS: NOUN},
|
||||
{ORTH: "Teo.", LEMMA: "teoranta", POS: NOUN},
|
||||
{ORTH: "Uas.", LEMMA: "Uasal", POS: NOUN},
|
||||
{ORTH: "uimh.", LEMMA: "uimhir", POS: NOUN},
|
||||
{ORTH: "Uimh.", LEMMA: "uimhir", POS: NOUN},
|
||||
{ORTH: "'gus", NORM: "agus"},
|
||||
{ORTH: "'ach", NORM: "gach"},
|
||||
{ORTH: "ao'", NORM: "aon"},
|
||||
{ORTH: "'niar", NORM: "aniar"},
|
||||
{ORTH: "'níos", NORM: "aníos"},
|
||||
{ORTH: "'ndiu", NORM: "inniu"},
|
||||
{ORTH: "'nocht", NORM: "anocht"},
|
||||
{ORTH: "m'"},
|
||||
{ORTH: "Aib."},
|
||||
{ORTH: "Ath."},
|
||||
{ORTH: "Beal."},
|
||||
{ORTH: "a.C.n."},
|
||||
{ORTH: "m.sh."},
|
||||
{ORTH: "M.F."},
|
||||
{ORTH: "M.Fómh."},
|
||||
{ORTH: "D.F."},
|
||||
{ORTH: "D.Fómh."},
|
||||
{ORTH: "r.C."},
|
||||
{ORTH: "R.C."},
|
||||
{ORTH: "r.Ch."},
|
||||
{ORTH: "r.Chr."},
|
||||
{ORTH: "R.Ch."},
|
||||
{ORTH: "R.Chr."},
|
||||
{ORTH: "⁊rl."},
|
||||
{ORTH: "srl."},
|
||||
{ORTH: "Co."},
|
||||
{ORTH: "Ean."},
|
||||
{ORTH: "Feab."},
|
||||
{ORTH: "gCo."},
|
||||
{ORTH: ".i."},
|
||||
{ORTH: "B'"},
|
||||
{ORTH: "b'"},
|
||||
{ORTH: "lch."},
|
||||
{ORTH: "Lch."},
|
||||
{ORTH: "lgh."},
|
||||
{ORTH: "Lgh."},
|
||||
{ORTH: "Lún."},
|
||||
{ORTH: "Már."},
|
||||
{ORTH: "Meith."},
|
||||
{ORTH: "Noll."},
|
||||
{ORTH: "Samh."},
|
||||
{ORTH: "tAth."},
|
||||
{ORTH: "tUas."},
|
||||
{ORTH: "teo."},
|
||||
{ORTH: "Teo."},
|
||||
{ORTH: "Uas."},
|
||||
{ORTH: "uimh."},
|
||||
{ORTH: "Uimh."},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@ -81,4 +67,4 @@ for orth in ["d'", "D'"]:
|
|||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "gu"
|
||||
stop_words = {"@language_data": "spacy.gu.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.gu.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class GujaratiDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Gujarati(Language):
|
||||
lang = "gu"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = GujaratiDefaults
|
||||
|
||||
|
||||
__all__ = ["Gujarati"]
|
||||
|
|
|
@@ -1,37 +1,15 @@
from typing import Set
from thinc.api import Config

from .stop_words import STOP_WORDS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...language import Language
from ...util import update_exc, registry


DEFAULT_CONFIG = """
[nlp]
lang = "he"
stop_words = {"@language_data": "spacy.he.stop_words"}

[nlp.writing_system]
direction = "rtl"
has_case = false
has_letters = true
"""


@registry.language_data("spacy.he.stop_words")
def stop_words() -> Set[str]:
    return STOP_WORDS


class HebrewDefaults(Language.Defaults):
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS)
    stop_words = STOP_WORDS
    writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}


class Hebrew(Language):
    lang = "he"
    Defaults = HebrewDefaults
    default_config = Config().from_str(DEFAULT_CONFIG)


__all__ = ["Hebrew"]

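The writing-system metadata moves out of the config string and onto HebrewDefaults. A small sketch of reading it from the class; the instantiation is illustrative only:

from spacy.lang.he import Hebrew

assert Hebrew.Defaults.writing_system["direction"] == "rtl"
nlp = Hebrew()
print(nlp.Defaults.writing_system)
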
@ -1,33 +1,16 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "hi"
|
||||
stop_words = {"@language_data": "spacy.hi.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.hi.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.hi.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.hi.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
class HindiDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
|
||||
|
||||
class Hindi(Language):
|
||||
lang = "hi"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = HindiDefaults
|
||||
|
||||
|
||||
__all__ = ["Hindi"]
|
||||
|
|
|
@ -1,39 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "hr"
|
||||
stop_words = {"@language_data": "spacy.hr.stop_words"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.hr.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class CroatianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS)
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Croatian(Language):
|
||||
lang = "hr"
|
||||
Defaults = CroatianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Croatian"]
|
||||
|
|
|
@ -1,45 +1,21 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
|
||||
from .stop_words import STOP_WORDS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "hu"
|
||||
stop_words = {"@language_data": "spacy.hu.stop_words"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.hu.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class HungarianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
token_match = TOKEN_MATCH
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Hungarian(Language):
|
||||
lang = "hu"
|
||||
Defaults = HungarianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Hungarian"]
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
import re
|
||||
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ..punctuation import ALPHA_LOWER, CURRENCY
|
||||
from ...symbols import ORTH
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
@ -644,5 +646,5 @@ _nums = r"(({ne})|({t})|({on})|({c}))({s})?".format(
|
|||
)
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
TOKEN_MATCH = re.compile(r"^{n}$".format(n=_nums)).match
|
||||
|
|
|
@ -1,33 +1,16 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "hy"
|
||||
stop_words = {"@language_data": "spacy.hy.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.hy.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.hy.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.hy.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
class ArmenianDefaults(Language.Defaults):
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Armenian(Language):
|
||||
lang = "hy"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = ArmenianDefaults
|
||||
|
||||
|
||||
__all__ = ["Armenian"]
|
||||
|
|
|
@ -1,53 +1,24 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.config import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES, TOKENIZER_INFIXES
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "id"
|
||||
stop_words = {"@language_data": "spacy.id.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.id.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.id.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.id.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class IndonesianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Indonesian(Language):
|
||||
lang = "id"
|
||||
Defaults = IndonesianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Indonesian"]
|
||||
|
|
|
@ -1,26 +1,20 @@
|
|||
from typing import Union, Iterator
|
||||
|
||||
from ...symbols import NOUN, PROPN, PRON
|
||||
from ...errors import Errors
|
||||
from ...tokens import Doc, Span
|
||||
|
||||
|
||||
def noun_chunks(doclike):
|
||||
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
|
||||
"""
|
||||
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
|
||||
"""
|
||||
labels = [
|
||||
"nsubj",
|
||||
"nsubj:pass",
|
||||
"obj",
|
||||
"iobj",
|
||||
"ROOT",
|
||||
"appos",
|
||||
"nmod",
|
||||
"nmod:poss",
|
||||
]
|
||||
# fmt: off
|
||||
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
|
||||
# fmt: on
|
||||
doc = doclike.doc # Ensure works on both Doc and Span.
|
||||
|
||||
if not doc.is_parsed:
|
||||
raise ValueError(Errors.E029)
|
||||
|
||||
np_deps = [doc.vocab.strings[label] for label in labels]
|
||||
conj = doc.vocab.strings.add("conj")
|
||||
np_label = doc.vocab.strings.add("NP")
|
||||
|
|
|
@ -1,5 +1,8 @@
|
|||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ._tokenizer_exceptions_list import ID_BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, LEMMA, NORM
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
# Daftar singkatan dan Akronim dari:
|
||||
# https://id.wiktionary.org/wiki/Wiktionary:Daftar_singkatan_dan_akronim_bahasa_Indonesia#A
|
||||
|
@ -8,53 +11,47 @@ _exc = {}
|
|||
|
||||
for orth in ID_BASE_EXCEPTIONS:
|
||||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
orth_title = orth.title()
|
||||
_exc[orth_title] = [{ORTH: orth_title}]
|
||||
|
||||
orth_caps = orth.upper()
|
||||
_exc[orth_caps] = [{ORTH: orth_caps}]
|
||||
|
||||
orth_lower = orth.lower()
|
||||
_exc[orth_lower] = [{ORTH: orth_lower}]
|
||||
|
||||
orth_first_upper = orth[0].upper() + orth[1:]
|
||||
_exc[orth_first_upper] = [{ORTH: orth_first_upper}]
|
||||
|
||||
if "-" in orth:
|
||||
orth_title = "-".join([part.title() for part in orth.split("-")])
|
||||
_exc[orth_title] = [{ORTH: orth_title}]
|
||||
|
||||
orth_caps = "-".join([part.upper() for part in orth.split("-")])
|
||||
_exc[orth_caps] = [{ORTH: orth_caps}]
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "Jan.", LEMMA: "Januari", NORM: "Januari"},
|
||||
{ORTH: "Feb.", LEMMA: "Februari", NORM: "Februari"},
|
||||
{ORTH: "Mar.", LEMMA: "Maret", NORM: "Maret"},
|
||||
{ORTH: "Apr.", LEMMA: "April", NORM: "April"},
|
||||
{ORTH: "Jun.", LEMMA: "Juni", NORM: "Juni"},
|
||||
{ORTH: "Jul.", LEMMA: "Juli", NORM: "Juli"},
|
||||
{ORTH: "Agu.", LEMMA: "Agustus", NORM: "Agustus"},
|
||||
{ORTH: "Ags.", LEMMA: "Agustus", NORM: "Agustus"},
|
||||
{ORTH: "Sep.", LEMMA: "September", NORM: "September"},
|
||||
{ORTH: "Okt.", LEMMA: "Oktober", NORM: "Oktober"},
|
||||
{ORTH: "Nov.", LEMMA: "November", NORM: "November"},
|
||||
{ORTH: "Des.", LEMMA: "Desember", NORM: "Desember"},
|
||||
{ORTH: "Jan.", NORM: "Januari"},
|
||||
{ORTH: "Feb.", NORM: "Februari"},
|
||||
{ORTH: "Mar.", NORM: "Maret"},
|
||||
{ORTH: "Apr.", NORM: "April"},
|
||||
{ORTH: "Jun.", NORM: "Juni"},
|
||||
{ORTH: "Jul.", NORM: "Juli"},
|
||||
{ORTH: "Agu.", NORM: "Agustus"},
|
||||
{ORTH: "Ags.", NORM: "Agustus"},
|
||||
{ORTH: "Sep.", NORM: "September"},
|
||||
{ORTH: "Okt.", NORM: "Oktober"},
|
||||
{ORTH: "Nov.", NORM: "November"},
|
||||
{ORTH: "Des.", NORM: "Desember"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
_other_exc = {
|
||||
"do'a": [{ORTH: "do'a", LEMMA: "doa", NORM: "doa"}],
|
||||
"jum'at": [{ORTH: "jum'at", LEMMA: "Jumat", NORM: "Jumat"}],
|
||||
"Jum'at": [{ORTH: "Jum'at", LEMMA: "Jumat", NORM: "Jumat"}],
|
||||
"la'nat": [{ORTH: "la'nat", LEMMA: "laknat", NORM: "laknat"}],
|
||||
"ma'af": [{ORTH: "ma'af", LEMMA: "maaf", NORM: "maaf"}],
|
||||
"mu'jizat": [{ORTH: "mu'jizat", LEMMA: "mukjizat", NORM: "mukjizat"}],
|
||||
"Mu'jizat": [{ORTH: "Mu'jizat", LEMMA: "mukjizat", NORM: "mukjizat"}],
|
||||
"ni'mat": [{ORTH: "ni'mat", LEMMA: "nikmat", NORM: "nikmat"}],
|
||||
"raka'at": [{ORTH: "raka'at", LEMMA: "rakaat", NORM: "rakaat"}],
|
||||
"ta'at": [{ORTH: "ta'at", LEMMA: "taat", NORM: "taat"}],
|
||||
"do'a": [{ORTH: "do'a", NORM: "doa"}],
|
||||
"jum'at": [{ORTH: "jum'at", NORM: "Jumat"}],
|
||||
"Jum'at": [{ORTH: "Jum'at", NORM: "Jumat"}],
|
||||
"la'nat": [{ORTH: "la'nat", NORM: "laknat"}],
|
||||
"ma'af": [{ORTH: "ma'af", NORM: "maaf"}],
|
||||
"mu'jizat": [{ORTH: "mu'jizat", NORM: "mukjizat"}],
|
||||
"Mu'jizat": [{ORTH: "Mu'jizat", NORM: "mukjizat"}],
|
||||
"ni'mat": [{ORTH: "ni'mat", NORM: "nikmat"}],
|
||||
"raka'at": [{ORTH: "raka'at", NORM: "rakaat"}],
|
||||
"ta'at": [{ORTH: "ta'at", NORM: "taat"}],
|
||||
}
|
||||
|
||||
_exc.update(_other_exc)
|
||||
|
@ -221,4 +218,4 @@ for orth in [
|
|||
]:
|
||||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "is"
|
||||
stop_words = {"@language_data": "spacy.is.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.is.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class IcelandicDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Icelandic(Language):
|
||||
lang = "is"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = IcelandicDefaults
|
||||
|
||||
|
||||
__all__ = ["Icelandic"]
|
||||
|
|
|
@ -1,35 +1,11 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "it"
|
||||
stop_words = {"@language_data": "spacy.it.stop_words"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.it.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class ItalianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
stop_words = STOP_WORDS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
|
@ -38,7 +14,6 @@ class ItalianDefaults(Language.Defaults):
|
|||
class Italian(Language):
|
||||
lang = "it"
|
||||
Defaults = ItalianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Italian"]
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {
|
||||
"all'art.": [{ORTH: "all'"}, {ORTH: "art."}],
|
||||
|
@ -7,7 +10,7 @@ _exc = {
|
|||
"L'art.": [{ORTH: "L'"}, {ORTH: "art."}],
|
||||
"l'art.": [{ORTH: "l'"}, {ORTH: "art."}],
|
||||
"nell'art.": [{ORTH: "nell'"}, {ORTH: "art."}],
|
||||
"po'": [{ORTH: "po'", LEMMA: "poco"}],
|
||||
"po'": [{ORTH: "po'"}],
|
||||
"sett..": [{ORTH: "sett."}, {ORTH: "."}],
|
||||
}
|
||||
|
||||
|
@ -52,4 +55,4 @@ for orth in [
|
|||
]:
|
||||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@@ -1,4 +1,4 @@
from typing import Optional, Union, Dict, Any, Set
from typing import Optional, Union, Dict, Any
from pathlib import Path
import srsly
from collections import namedtuple

@@ -20,27 +20,15 @@ from ... import util

DEFAULT_CONFIG = """
[nlp]
lang = "ja"
stop_words = {"@language_data": "spacy.ja.stop_words"}

[nlp.tokenizer]
@tokenizers = "spacy.JapaneseTokenizer.v1"
@tokenizers = "spacy.ja.JapaneseTokenizer"
split_mode = null

[nlp.writing_system]
direction = "ltr"
has_case = false
has_letters = false
"""


@registry.language_data("spacy.ja.stop_words")
def stop_words() -> Set[str]:
    return STOP_WORDS


@registry.tokenizers("spacy.JapaneseTokenizer.v1")
def create_japanese_tokenizer(split_mode: Optional[str] = None):
@registry.tokenizers("spacy.ja.JapaneseTokenizer")
def create_tokenizer(split_mode: Optional[str] = None):
    def japanese_tokenizer_factory(nlp):
        return JapaneseTokenizer(nlp, split_mode=split_mode)

@@ -50,6 +38,8 @@ def create_japanese_tokenizer(split_mode: Optional[str] = None):
class JapaneseTokenizer(DummyTokenizer):
    def __init__(self, nlp: Language, split_mode: Optional[str] = None) -> None:
        self.vocab = nlp.vocab
        # TODO: is this the right way to do it?
        self.vocab.morphology.load_tag_map(TAG_MAP)
        self.split_mode = split_mode
        self.tokenizer = try_sudachi_import(self.split_mode)

@@ -172,14 +162,15 @@ class JapaneseTokenizer(DummyTokenizer):

class JapaneseDefaults(Language.Defaults):
    tag_map = TAG_MAP
    config = Config().from_str(DEFAULT_CONFIG)
    stop_words = STOP_WORDS
    syntax_iterators = SYNTAX_ITERATORS
    writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}


class Japanese(Language):
    lang = "ja"
    Defaults = JapaneseDefaults
    default_config = Config().from_str(DEFAULT_CONFIG)


# Hold the attributes we need with convenient names

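The Japanese tokenizer is now registered as "spacy.ja.JapaneseTokenizer" and reads split_mode from the [nlp.tokenizer] block. A hedged sketch of overriding that setting via a config string; the value "B" assumes the Sudachi split modes (A/B/C) that try_sudachi_import passes through:

from thinc.api import Config

config_str = """
[nlp.tokenizer]
@tokenizers = "spacy.ja.JapaneseTokenizer"
split_mode = "B"
"""
config = Config().from_str(config_str)
print(config["nlp"]["tokenizer"])
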
@ -1,33 +1,23 @@
|
|||
from typing import Union, Iterator
|
||||
|
||||
from ...symbols import NOUN, PROPN, PRON, VERB
|
||||
|
||||
# XXX this can probably be pruned a bit
|
||||
labels = [
|
||||
"nsubj",
|
||||
"nmod",
|
||||
"dobj",
|
||||
"nsubjpass",
|
||||
"pcomp",
|
||||
"pobj",
|
||||
"obj",
|
||||
"obl",
|
||||
"dative",
|
||||
"appos",
|
||||
"attr",
|
||||
"ROOT",
|
||||
]
|
||||
from ...tokens import Doc, Span
|
||||
|
||||
|
||||
def noun_chunks(obj):
|
||||
"""
|
||||
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
|
||||
"""
|
||||
# TODO: this can probably be pruned a bit
|
||||
# fmt: off
|
||||
labels = ["nsubj", "nmod", "ddoclike", "nsubjpass", "pcomp", "pdoclike", "doclike", "obl", "dative", "appos", "attr", "ROOT"]
|
||||
# fmt: on
|
||||
|
||||
doc = obj.doc # Ensure works on both Doc and Span.
|
||||
|
||||
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
|
||||
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
|
||||
doc = doclike.doc # Ensure works on both Doc and Span.
|
||||
np_deps = [doc.vocab.strings.add(label) for label in labels]
|
||||
doc.vocab.strings.add("conj")
|
||||
np_label = doc.vocab.strings.add("NP")
|
||||
seen = set()
|
||||
for i, word in enumerate(obj):
|
||||
for i, word in enumerate(doclike):
|
||||
if word.pos not in (NOUN, PROPN, PRON):
|
||||
continue
|
||||
# Prevent nested chunks from being produced
|
||||
|
@ -37,12 +27,10 @@ def noun_chunks(obj):
|
|||
unseen = [w.i for w in word.subtree if w.i not in seen]
|
||||
if not unseen:
|
||||
continue
|
||||
|
||||
# this takes care of particles etc.
|
||||
seen.update(j.i for j in word.subtree)
|
||||
# This avoids duplicating embedded clauses
|
||||
seen.update(range(word.i + 1))
|
||||
|
||||
# if the head of this is a verb, mark that and rights seen
|
||||
# Don't do the subtree as that can hide other phrases
|
||||
if word.head.pos == VERB:
|
||||
|
|
|
@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "kn"
|
||||
stop_words = {"@language_data": "spacy.kn.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.kn.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class KannadaDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Kannada(Language):
|
||||
lang = "kn"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = KannadaDefaults
|
||||
|
||||
|
||||
__all__ = ["Kannada"]
|
||||
|
|
|
@@ -1,8 +1,9 @@
from typing import Set, Optional, Any, Dict
from typing import Optional, Any, Dict
from thinc.api import Config

from .stop_words import STOP_WORDS
from .tag_map import TAG_MAP
from .lex_attrs import LEX_ATTRS
from ...language import Language
from ...tokens import Doc
from ...compat import copy_reg

@@ -11,26 +12,14 @@ from ...util import DummyTokenizer, registry

DEFAULT_CONFIG = """
[nlp]
lang = "ko"
stop_words = {"@language_data": "spacy.ko.stop_words"}

[nlp.tokenizer]
@tokenizers = "spacy.KoreanTokenizer.v1"

[nlp.writing_system]
direction = "ltr"
has_case = false
has_letters = false
@tokenizers = "spacy.ko.KoreanTokenizer"
"""


@registry.language_data("spacy.ko.stop_words")
def stop_words() -> Set[str]:
    return STOP_WORDS


@registry.tokenizers("spacy.KoreanTokenizer.v1")
def create_korean_tokenizer():
@registry.tokenizers("spacy.ko.KoreanTokenizer")
def create_tokenizer():
    def korean_tokenizer_factory(nlp):
        return KoreanTokenizer(nlp)

@@ -40,6 +29,8 @@ def create_korean_tokenizer():
class KoreanTokenizer(DummyTokenizer):
    def __init__(self, nlp: Optional[Language] = None):
        self.vocab = nlp.vocab
        # TODO: is this the right way to do it?
        self.vocab.morphology.load_tag_map(TAG_MAP)
        MeCab = try_mecab_import()
        self.mecab_tokenizer = MeCab("-F%f[0],%f[7]")

@@ -73,13 +64,15 @@ class KoreanTokenizer(DummyTokenizer):

class KoreanDefaults(Language.Defaults):
    tag_map = TAG_MAP
    config = Config().from_str(DEFAULT_CONFIG)
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS
    writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}


class Korean(Language):
    lang = "ko"
    Defaults = KoreanDefaults
    default_config = Config().from_str(DEFAULT_CONFIG)


def try_mecab_import() -> None:

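Both ja and ko now follow the same shape: a language-prefixed name in @tokenizers and a registered function that returns a factory taking nlp. A sketch of that pattern with a toy whitespace tokenizer; the name "my.WhitespaceTokenizer" and the class are made up for illustration:

from spacy.tokens import Doc
from spacy.util import registry


class WhitespaceTokenizer:
    def __init__(self, vocab):
        self.vocab = vocab

    def __call__(self, text):
        # Split on single spaces only; enough to show the factory wiring.
        words = text.split(" ")
        return Doc(self.vocab, words=words)


@registry.tokenizers("my.WhitespaceTokenizer")
def create_whitespace_tokenizer():
    def tokenizer_factory(nlp):
        return WhitespaceTokenizer(nlp.vocab)

    return tokenizer_factory
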
@ -1,49 +1,20 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_INFIXES
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .stop_words import STOP_WORDS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "lb"
|
||||
stop_words = {"@language_data": "spacy.lb.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.lb.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.lb.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.lb.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class LuxembourgishDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Luxembourgish(Language):
|
||||
lang = "lb"
|
||||
Defaults = LuxembourgishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Luxembourgish"]
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
from ...symbols import ORTH, LEMMA, NORM
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
# TODO
|
||||
# treat other apostrophes within words as part of the word: [op d'mannst], [fir d'éischt] (= exceptions)
|
||||
|
@ -7,19 +10,19 @@ _exc = {}
|
|||
|
||||
# translate / delete what is not necessary
|
||||
for exc_data in [
|
||||
{ORTH: "’t", LEMMA: "et", NORM: "et"},
|
||||
{ORTH: "’T", LEMMA: "et", NORM: "et"},
|
||||
{ORTH: "'t", LEMMA: "et", NORM: "et"},
|
||||
{ORTH: "'T", LEMMA: "et", NORM: "et"},
|
||||
{ORTH: "wgl.", LEMMA: "wannechgelift", NORM: "wannechgelift"},
|
||||
{ORTH: "M.", LEMMA: "Monsieur", NORM: "Monsieur"},
|
||||
{ORTH: "Mme.", LEMMA: "Madame", NORM: "Madame"},
|
||||
{ORTH: "Dr.", LEMMA: "Dokter", NORM: "Dokter"},
|
||||
{ORTH: "Tel.", LEMMA: "Telefon", NORM: "Telefon"},
|
||||
{ORTH: "asw.", LEMMA: "an sou weider", NORM: "an sou weider"},
|
||||
{ORTH: "etc.", LEMMA: "et cetera", NORM: "et cetera"},
|
||||
{ORTH: "bzw.", LEMMA: "bezéiungsweis", NORM: "bezéiungsweis"},
|
||||
{ORTH: "Jan.", LEMMA: "Januar", NORM: "Januar"},
|
||||
{ORTH: "’t", NORM: "et"},
|
||||
{ORTH: "’T", NORM: "et"},
|
||||
{ORTH: "'t", NORM: "et"},
|
||||
{ORTH: "'T", NORM: "et"},
|
||||
{ORTH: "wgl.", NORM: "wannechgelift"},
|
||||
{ORTH: "M.", NORM: "Monsieur"},
|
||||
{ORTH: "Mme.", NORM: "Madame"},
|
||||
{ORTH: "Dr.", NORM: "Dokter"},
|
||||
{ORTH: "Tel.", NORM: "Telefon"},
|
||||
{ORTH: "asw.", NORM: "an sou weider"},
|
||||
{ORTH: "etc.", NORM: "et cetera"},
|
||||
{ORTH: "bzw.", NORM: "bezéiungsweis"},
|
||||
{ORTH: "Jan.", NORM: "Januar"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@ -47,4 +50,4 @@ for orth in [
|
|||
]:
|
||||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@ -1,35 +1,18 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_INFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "lij"
|
||||
stop_words = {"@language_data": "spacy.lij.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.lij.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class LigurianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Ligurian(Language):
|
||||
lang = "lij"
|
||||
Defaults = LigurianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Ligurian"]
|
||||
|
|
|
@ -1,50 +1,50 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
||||
for raw, lemma in [
|
||||
("a-a", "a-o"),
|
||||
("a-e", "a-o"),
|
||||
("a-o", "a-o"),
|
||||
("a-i", "a-o"),
|
||||
("co-a", "co-o"),
|
||||
("co-e", "co-o"),
|
||||
("co-i", "co-o"),
|
||||
("co-o", "co-o"),
|
||||
("da-a", "da-o"),
|
||||
("da-e", "da-o"),
|
||||
("da-i", "da-o"),
|
||||
("da-o", "da-o"),
|
||||
("pe-a", "pe-o"),
|
||||
("pe-e", "pe-o"),
|
||||
("pe-i", "pe-o"),
|
||||
("pe-o", "pe-o"),
|
||||
for raw in [
|
||||
"a-e",
|
||||
"a-o",
|
||||
"a-i",
|
||||
"a-a",
|
||||
"co-a",
|
||||
"co-e",
|
||||
"co-i",
|
||||
"co-o",
|
||||
"da-a",
|
||||
"da-e",
|
||||
"da-i",
|
||||
"da-o",
|
||||
"pe-a",
|
||||
"pe-e",
|
||||
"pe-i",
|
||||
"pe-o",
|
||||
]:
|
||||
for orth in [raw, raw.capitalize()]:
|
||||
_exc[orth] = [{ORTH: orth, LEMMA: lemma}]
|
||||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
# Prefix + prepositions with à (e.g. "sott'a-o")
|
||||
|
||||
for prep, prep_lemma in [
|
||||
("a-a", "a-o"),
|
||||
("a-e", "a-o"),
|
||||
("a-o", "a-o"),
|
||||
("a-i", "a-o"),
|
||||
for prep in [
|
||||
"a-a",
|
||||
"a-e",
|
||||
"a-o",
|
||||
"a-i",
|
||||
]:
|
||||
for prefix, prefix_lemma in [
|
||||
("sott'", "sotta"),
|
||||
("sott’", "sotta"),
|
||||
("contr'", "contra"),
|
||||
("contr’", "contra"),
|
||||
("ch'", "che"),
|
||||
("ch’", "che"),
|
||||
("s'", "se"),
|
||||
("s’", "se"),
|
||||
for prefix in [
|
||||
"sott'",
|
||||
"sott’",
|
||||
"contr'",
|
||||
"contr’",
|
||||
"ch'",
|
||||
"ch’",
|
||||
"s'",
|
||||
"s’",
|
||||
]:
|
||||
for prefix_orth in [prefix, prefix.capitalize()]:
|
||||
_exc[prefix_orth + prep] = [
|
||||
{ORTH: prefix_orth, LEMMA: prefix_lemma},
|
||||
{ORTH: prep, LEMMA: prep_lemma},
|
||||
]
|
||||
_exc[prefix_orth + prep] = [{ORTH: prefix_orth}, {ORTH: prep}]
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@@ -1,54 +1,21 @@
from typing import Set, Dict, Callable, Any
from thinc.api import Config

from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...language import Language
from ...util import update_exc, registry


DEFAULT_CONFIG = """
[nlp]
lang = "lt"
stop_words = {"@language_data": "spacy.lt.stop_words"}
lex_attr_getters = {"@language_data": "spacy.lt.lex_attr_getters"}

[nlp.lemmatizer]
@lemmatizers = "spacy.Lemmatizer.v1"

[nlp.lemmatizer.data_paths]
@language_data = "spacy-lookups-data"
lang = ${nlp:lang}
"""


@registry.language_data("spacy.lt.stop_words")
def stop_words() -> Set[str]:
    return STOP_WORDS


@registry.language_data("spacy.lt.lex_attr_getters")
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
    return LEX_ATTRS


class LithuanianDefaults(Language.Defaults):
    infixes = TOKENIZER_INFIXES
    suffixes = TOKENIZER_SUFFIXES
    mod_base_exceptions = {
        exc: val for exc, val in BASE_EXCEPTIONS.items() if not exc.endswith(".")
    }
    del mod_base_exceptions["8)"]
    tokenizer_exceptions = update_exc(mod_base_exceptions, TOKENIZER_EXCEPTIONS)
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    stop_words = STOP_WORDS
    lex_attr_getters = LEX_ATTRS


class Lithuanian(Language):
    lang = "lt"
    Defaults = LithuanianDefaults
    default_config = Config().from_str(DEFAULT_CONFIG)


__all__ = ["Lithuanian"]

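The base-exception pruning that used to live on LithuanianDefaults (drop entries ending in "." plus the "8)" emoticon) now happens in lt/tokenizer_exceptions.py before update_exc merges in the language-specific entries, as the hunk further below shows. A sketch of that merge in isolation; the one-entry stand-in dict is illustrative:

from spacy.lang.tokenizer_exceptions import BASE_EXCEPTIONS
from spacy.symbols import ORTH
from spacy.util import update_exc

mod_base_exceptions = {
    exc: val for exc, val in BASE_EXCEPTIONS.items() if not exc.endswith(".")
}
del mod_base_exceptions["8)"]
lt_exc = {"n-tosios": [{ORTH: "n-tosios"}]}  # stand-in for the real _exc dict
TOKENIZER_EXCEPTIONS = update_exc(mod_base_exceptions, lt_exc)
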
@ -1,267 +1,15 @@
|
|||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
||||
for orth in [
|
||||
"n-tosios",
|
||||
"?!",
|
||||
# "G.",
|
||||
# "J. E.",
|
||||
# "J. Em.",
|
||||
# "J.E.",
|
||||
# "J.Em.",
|
||||
# "K.",
|
||||
# "N.",
|
||||
# "V.",
|
||||
# "Vt.",
|
||||
# "a.",
|
||||
# "a.k.",
|
||||
# "a.s.",
|
||||
# "adv.",
|
||||
# "akad.",
|
||||
# "aklg.",
|
||||
# "akt.",
|
||||
# "al.",
|
||||
# "ang.",
|
||||
# "angl.",
|
||||
# "aps.",
|
||||
# "apskr.",
|
||||
# "apyg.",
|
||||
# "arbat.",
|
||||
# "asist.",
|
||||
# "asm.",
|
||||
# "asm.k.",
|
||||
# "asmv.",
|
||||
# "atk.",
|
||||
# "atsak.",
|
||||
# "atsisk.",
|
||||
# "atsisk.sąsk.",
|
||||
# "atv.",
|
||||
# "aut.",
|
||||
# "avd.",
|
||||
# "b.k.",
|
||||
# "baud.",
|
||||
# "biol.",
|
||||
# "bkl.",
|
||||
# "bot.",
|
||||
# "bt.",
|
||||
# "buv.",
|
||||
# "ch.",
|
||||
# "chem.",
|
||||
# "corp.",
|
||||
# "d.",
|
||||
# "dab.",
|
||||
# "dail.",
|
||||
# "dek.",
|
||||
# "deš.",
|
||||
# "dir.",
|
||||
# "dirig.",
|
||||
# "doc.",
|
||||
# "dol.",
|
||||
# "dr.",
|
||||
# "drp.",
|
||||
# "dvit.",
|
||||
# "dėst.",
|
||||
# "dš.",
|
||||
# "dž.",
|
||||
# "e.b.",
|
||||
# "e.bankas",
|
||||
# "e.p.",
|
||||
# "e.parašas",
|
||||
# "e.paštas",
|
||||
# "e.v.",
|
||||
# "e.valdžia",
|
||||
# "egz.",
|
||||
# "eil.",
|
||||
# "ekon.",
|
||||
# "el.",
|
||||
# "el.bankas",
|
||||
# "el.p.",
|
||||
# "el.parašas",
|
||||
# "el.paštas",
|
||||
# "el.valdžia",
|
||||
# "etc.",
|
||||
# "ež.",
|
||||
# "fak.",
|
||||
# "faks.",
|
||||
# "feat.",
|
||||
# "filol.",
|
||||
# "filos.",
|
||||
# "g.",
|
||||
# "gen.",
|
||||
# "geol.",
|
||||
# "gerb.",
|
||||
# "gim.",
|
||||
# "gr.",
|
||||
# "gv.",
|
||||
# "gyd.",
|
||||
# "gyv.",
|
||||
# "habil.",
|
||||
# "inc.",
|
||||
# "insp.",
|
||||
# "inž.",
|
||||
# "ir pan.",
|
||||
# "ir t. t.",
|
||||
# "isp.",
|
||||
# "istor.",
|
||||
# "it.",
|
||||
# "just.",
|
||||
# "k.",
|
||||
# "k. a.",
|
||||
# "k.a.",
|
||||
# "kab.",
|
||||
# "kand.",
|
||||
# "kart.",
|
||||
# "kat.",
|
||||
# "ketv.",
|
||||
# "kh.",
|
||||
# "kl.",
|
||||
# "kln.",
|
||||
# "km.",
|
||||
# "kn.",
|
||||
# "koresp.",
|
||||
# "kpt.",
|
||||
# "kr.",
|
||||
# "kt.",
|
||||
# "kub.",
|
||||
# "kun.",
|
||||
# "kv.",
|
||||
# "kyš.",
|
||||
# "l. e. p.",
|
||||
# "l.e.p.",
|
||||
# "lenk.",
|
||||
# "liet.",
|
||||
# "lot.",
|
||||
# "lt.",
|
||||
# "ltd.",
|
||||
# "ltn.",
|
||||
# "m.",
|
||||
# "m.e..",
|
||||
# "m.m.",
|
||||
# "mat.",
|
||||
# "med.",
|
||||
# "mgnt.",
|
||||
# "mgr.",
|
||||
# "min.",
|
||||
# "mjr.",
|
||||
# "ml.",
|
||||
# "mln.",
|
||||
# "mlrd.",
|
||||
# "mob.",
|
||||
# "mok.",
|
||||
# "moksl.",
|
||||
# "mokyt.",
|
||||
# "mot.",
|
||||
# "mr.",
|
||||
# "mst.",
|
||||
# "mstl.",
|
||||
# "mėn.",
|
||||
# "nkt.",
|
||||
# "no.",
|
||||
# "nr.",
|
||||
# "ntk.",
|
||||
# "nuotr.",
|
||||
# "op.",
|
||||
# "org.",
|
||||
# "orig.",
|
||||
# "p.",
|
||||
# "p.d.",
|
||||
# "p.m.e.",
|
||||
# "p.s.",
|
||||
# "pab.",
|
||||
# "pan.",
|
||||
# "past.",
|
||||
# "pav.",
|
||||
# "pavad.",
|
||||
# "per.",
|
||||
# "perd.",
|
||||
# "pirm.",
|
||||
# "pl.",
|
||||
# "plg.",
|
||||
# "plk.",
|
||||
# "pr.",
|
||||
# "pr.Kr.",
|
||||
# "pranc.",
|
||||
# "proc.",
|
||||
# "prof.",
|
||||
# "prom.",
|
||||
# "prot.",
|
||||
# "psl.",
|
||||
# "pss.",
|
||||
# "pvz.",
|
||||
# "pšt.",
|
||||
# "r.",
|
||||
# "raj.",
|
||||
# "red.",
|
||||
# "rez.",
|
||||
# "rež.",
|
||||
# "rus.",
|
||||
# "rš.",
|
||||
# "s.",
|
||||
# "sav.",
|
||||
# "saviv.",
|
||||
# "sek.",
|
||||
# "sekr.",
|
||||
# "sen.",
|
||||
# "sh.",
|
||||
# "sk.",
|
||||
# "skg.",
|
||||
# "skv.",
|
||||
# "skyr.",
|
||||
# "sp.",
|
||||
# "spec.",
|
||||
# "sr.",
|
||||
# "st.",
|
||||
# "str.",
|
||||
# "stud.",
|
||||
# "sąs.",
|
||||
# "t.",
|
||||
# "t. p.",
|
||||
# "t. y.",
|
||||
# "t.p.",
|
||||
# "t.t.",
|
||||
# "t.y.",
|
||||
# "techn.",
|
||||
# "tel.",
|
||||
# "teol.",
|
||||
# "th.",
|
||||
# "tir.",
|
||||
# "trit.",
|
||||
# "trln.",
|
||||
# "tšk.",
|
||||
# "tūks.",
|
||||
# "tūkst.",
|
||||
# "up.",
|
||||
# "upl.",
|
||||
# "v.s.",
|
||||
# "vad.",
|
||||
# "val.",
|
||||
# "valg.",
|
||||
# "ved.",
|
||||
# "vert.",
|
||||
# "vet.",
|
||||
# "vid.",
|
||||
# "virš.",
|
||||
# "vlsč.",
|
||||
# "vnt.",
|
||||
# "vok.",
|
||||
# "vs.",
|
||||
# "vtv.",
|
||||
# "vv.",
|
||||
# "vyr.",
|
||||
# "vyresn.",
|
||||
# "zool.",
|
||||
# "Įn",
|
||||
# "įl.",
|
||||
# "š.m.",
|
||||
# "šnek.",
|
||||
# "šv.",
|
||||
# "švč.",
|
||||
# "ž.ū.",
|
||||
# "žin.",
|
||||
# "žml.",
|
||||
# "žr.",
|
||||
]:
|
||||
for orth in ["n-tosios", "?!"]:
|
||||
_exc[orth] = [{ORTH: orth}]
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
mod_base_exceptions = {
|
||||
exc: val for exc, val in BASE_EXCEPTIONS.items() if not exc.endswith(".")
|
||||
}
|
||||
del mod_base_exceptions["8)"]
|
||||
TOKENIZER_EXCEPTIONS = update_exc(mod_base_exceptions, _exc)
|
||||
|
|
|
@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "lv"
|
||||
stop_words = {"@language_data": "spacy.lv.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.lv.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class LatvianDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Latvian(Language):
|
||||
lang = "lv"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = LatvianDefaults
|
||||
|
||||
|
||||
__all__ = ["Latvian"]
|
||||
|
|
|
@ -1,26 +1,16 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ml"
|
||||
stop_words = {"@language_data": "spacy.ml.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ml.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class MalayalamDefaults(Language.Defaults):
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Malayalam(Language):
|
||||
lang = "ml"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = MalayalamDefaults
|
||||
|
||||
|
||||
__all__ = ["Malayalam"]
|
||||
|
|
|
@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "af"
|
||||
stop_words = {"@language_data": "spacy.mr.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.mr.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class MarathiDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Marathi(Language):
|
||||
lang = "mr"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = MarathiDefaults
|
||||
|
||||
|
||||
__all__ = ["Marathi"]
|
||||
|
|
|
@ -1,47 +1,23 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
|
||||
from .punctuation import TOKENIZER_SUFFIXES
|
||||
from .stop_words import STOP_WORDS
|
||||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "nb"
|
||||
stop_words = {"@language_data": "spacy.nb.stop_words"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.nb.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class NorwegianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Norwegian(Language):
|
||||
lang = "nb"
|
||||
Defaults = NorwegianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Norwegian"]
|
||||
|
|
|
@@ -1,26 +1,18 @@
from typing import Union, Iterator

from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span


def noun_chunks(doclike):
    """
    Detect base noun phrases from a dependency parse. Works on both Doc and Span.
    """
    labels = [
        "nsubj",
        "nsubj:pass",
        "obj",
        "iobj",
        "ROOT",
        "appos",
        "nmod",
        "nmod:poss",
    ]
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
    """Detect base noun phrases from a dependency parse. Works on Doc and Span."""
    # fmt: off
    labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
    # fmt: on
    doc = doclike.doc  # Ensure works on both Doc and Span.

    if not doc.is_parsed:
        raise ValueError(Errors.E029)

    np_deps = [doc.vocab.strings[label] for label in labels]
    conj = doc.vocab.strings.add("conj")
    np_label = doc.vocab.strings.add("NP")
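Usage-wise nothing changes for callers: the iterator is exposed through SYNTAX_ITERATORS and drives doc.noun_chunks once a parsed Doc is available. A rough sketch, where "nb_pipeline" stands in for whatever Norwegian pipeline with a parser is installed:

import spacy

nlp = spacy.load("nb_pipeline")  # hypothetical installed Norwegian pipeline
doc = nlp("Dette er en kort setning om en liten hund.")
for chunk in doc.noun_chunks:
    print(chunk.text, chunk.root.dep_)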
@@ -1,21 +1,23 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
||||
|
||||
for exc_data in [
|
||||
{ORTH: "jan.", LEMMA: "januar"},
|
||||
{ORTH: "feb.", LEMMA: "februar"},
|
||||
{ORTH: "mar.", LEMMA: "mars"},
|
||||
{ORTH: "apr.", LEMMA: "april"},
|
||||
{ORTH: "jun.", LEMMA: "juni"},
|
||||
{ORTH: "jul.", LEMMA: "juli"},
|
||||
{ORTH: "aug.", LEMMA: "august"},
|
||||
{ORTH: "sep.", LEMMA: "september"},
|
||||
{ORTH: "okt.", LEMMA: "oktober"},
|
||||
{ORTH: "nov.", LEMMA: "november"},
|
||||
{ORTH: "des.", LEMMA: "desember"},
|
||||
{ORTH: "jan.", NORM: "januar"},
|
||||
{ORTH: "feb.", NORM: "februar"},
|
||||
{ORTH: "mar.", NORM: "mars"},
|
||||
{ORTH: "apr.", NORM: "april"},
|
||||
{ORTH: "jun.", NORM: "juni"},
|
||||
{ORTH: "jul.", NORM: "juli"},
|
||||
{ORTH: "aug.", NORM: "august"},
|
||||
{ORTH: "sep.", NORM: "september"},
|
||||
{ORTH: "okt.", NORM: "oktober"},
|
||||
{ORTH: "nov.", NORM: "november"},
|
||||
{ORTH: "des.", NORM: "desember"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@@ -218,4 +220,4 @@ for orth in [
    _exc[orth] = [{ORTH: orth}]


TOKENIZER_EXCEPTIONS = _exc
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
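The effect of swapping LEMMA for NORM in these entries only shows up on the token attributes: the abbreviation stays a single token and the expansion is exposed via token.norm_ instead of a hard-coded lemma. Illustrative only; the example sentence is invented.

from spacy.lang.nb import Norwegian

nlp = Norwegian()  # blank pipeline; tokenizer exceptions apply without a parser
doc = nlp("Møtet er 3. jan. i Oslo.")
print([(t.text, t.norm_) for t in doc])  # "jan." stays one token, norm "januar"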
@@ -1,33 +1,16 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ne"
|
||||
stop_words = {"@language_data": "spacy.ne.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.ne.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ne.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.ne.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
class NepaliDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
|
||||
|
||||
class Nepali(Language):
|
||||
lang = "ne"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = NepaliDefaults
|
||||
|
||||
|
||||
__all__ = ["Nepali"]
|
||||
|
|
|
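The same replacement happens in each of these language modules: registry functions for stop words and lexical attributes give way to a plain Language.Defaults subclass. A sketch of the new shape with an invented language (the "demo" code and data are not from the diff):

from spacy.attrs import LIKE_NUM
from spacy.language import Language

STOP_WORDS = {"the", "a", "an"}  # toy data
LEX_ATTRS = {LIKE_NUM: lambda text: text.isdigit()}


class DemoDefaults(Language.Defaults):
    stop_words = STOP_WORDS
    lex_attr_getters = LEX_ATTRS


class Demo(Language):
    lang = "demo"
    Defaults = DemoDefaults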
@@ -1,7 +1,3 @@
# coding: utf8
from __future__ import unicode_literals

"""
Example sentences to test spaCy and its language models.
@@ -1,6 +1,3 @@
# coding: utf8
from __future__ import unicode_literals

from ..norm_exceptions import BASE_NORMS
from ...attrs import NORM, LIKE_NUM
@@ -1,4 +1,4 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from typing import Callable
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
|
@@ -7,52 +7,43 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
|||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
|
||||
from .punctuation import TOKENIZER_SUFFIXES
|
||||
from .lemmatizer import DutchLemmatizer
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...lookups import load_lookups
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "nl"
|
||||
stop_words = {"@language_data": "spacy.nl.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.nl.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.DutchLemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
@lemmatizers = "spacy.nl.DutchLemmatizer"
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.nl.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
@registry.lemmatizers("spacy.nl.DutchLemmatizer")
|
||||
def create_lemmatizer() -> Callable[[Language], DutchLemmatizer]:
|
||||
tables = ["lemma_rules", "lemma_index", "lemma_exc", "lemma_lookup"]
|
||||
|
||||
def lemmatizer_factory(nlp: Language) -> DutchLemmatizer:
|
||||
lookups = load_lookups(lang=nlp.lang, tables=tables)
|
||||
return DutchLemmatizer(lookups=lookups)
|
||||
|
||||
@registry.language_data("spacy.nl.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
@registry.lemmatizers("spacy.DutchLemmatizer.v1")
|
||||
def create_dutch_lemmatizer(data_paths: dict = {}) -> DutchLemmatizer:
|
||||
return DutchLemmatizer(data_paths=data_paths)
|
||||
return lemmatizer_factory
|
||||
|
||||
|
||||
class DutchDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
config = Config().from_str(DEFAULT_CONFIG)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Dutch(Language):
|
||||
lang = "nl"
|
||||
Defaults = DutchDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Dutch"]
|
||||
|
|
|
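The Dutch block above is the template for the new lemmatizer wiring: the registry entry returns a factory that receives the nlp object, so lookups can be loaded lazily with the right language code. A hedged sketch with invented names ("my_lang.MyLemmatizer"); the real entries such as "spacy.nl.DutchLemmatizer" follow the same shape and are referenced from the [nlp.lemmatizer] config block via @lemmatizers.

from typing import Callable

from spacy.language import Language
from spacy.util import registry


class MyLemmatizer:
    """Stand-in for a language-specific lemmatizer class."""

    def __init__(self, lookups=None):
        self.lookups = lookups


@registry.lemmatizers("my_lang.MyLemmatizer")
def create_lemmatizer() -> Callable[[Language], MyLemmatizer]:
    def lemmatizer_factory(nlp: Language) -> MyLemmatizer:
        return MyLemmatizer()

    return lemmatizer_factory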
@@ -1,4 +1,7 @@
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH
from ...util import update_exc


# Extensive list of both common and uncommon dutch abbreviations copied from
# github.com/diasks2/pragmatic_segmenter, a Ruby library for rule-based

@@ -1602,4 +1605,4 @@ for orth in abbrevs:
        _exc[i] = [{ORTH: i}]


TOKENIZER_EXCEPTIONS = _exc
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
@@ -1,4 +1,4 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from typing import Callable
|
||||
from thinc.api import Config
|
||||
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
|
||||
|
@@ -7,54 +7,53 @@ from .stop_words import STOP_WORDS
|
|||
from .lex_attrs import LEX_ATTRS
|
||||
from .lemmatizer import PolishLemmatizer
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...lookups import load_lookups
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "pl"
|
||||
stop_words = {"@language_data": "spacy.pl.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.pl.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.PolishLemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
@lemmatizers = "spacy.pl.PolishLemmatizer"
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.pl.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
TOKENIZER_EXCEPTIONS = {
|
||||
exc: val for exc, val in BASE_EXCEPTIONS.items() if not exc.endswith(".")
|
||||
}
|
||||
|
||||
|
||||
@registry.language_data("spacy.pl.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
@registry.lemmatizers("spacy.pl.PolishLemmatizer")
|
||||
def create_lemmatizer() -> Callable[[Language], PolishLemmatizer]:
|
||||
# fmt: off
|
||||
tables = [
|
||||
"lemma_lookup_adj", "lemma_lookup_adp", "lemma_lookup_adv",
|
||||
"lemma_lookup_aux", "lemma_lookup_noun", "lemma_lookup_num",
|
||||
"lemma_lookup_part", "lemma_lookup_pron", "lemma_lookup_verb"
|
||||
]
|
||||
# fmt: on
|
||||
|
||||
def lemmatizer_factory(nlp: Language) -> PolishLemmatizer:
|
||||
lookups = load_lookups(lang=nlp.lang, tables=tables)
|
||||
return PolishLemmatizer(lookups=lookups)
|
||||
|
||||
@registry.lemmatizers("spacy.PolishLemmatizer.v1")
|
||||
def create_polish_lemmatizer(data_paths: dict = {}) -> PolishLemmatizer:
|
||||
return PolishLemmatizer(data_paths=data_paths)
|
||||
return lemmatizer_factory
|
||||
|
||||
|
||||
class PolishDefaults(Language.Defaults):
|
||||
mod_base_exceptions = {
|
||||
exc: val for exc, val in BASE_EXCEPTIONS.items() if not exc.endswith(".")
|
||||
}
|
||||
tokenizer_exceptions = mod_base_exceptions
|
||||
config = Config().from_str(DEFAULT_CONFIG)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Polish(Language):
|
||||
lang = "pl"
|
||||
Defaults = PolishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Polish"]
|
||||
|
|
|
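The table loading used by the Dutch and Polish factories above is the new load_lookups helper rather than per-lemmatizer data paths. A small sketch; it assumes the spacy-lookups-data package is installed, and "lemma_lookup" is one of the standard table names.

from spacy.lang.nl import Dutch
from spacy.lookups import load_lookups

nlp = Dutch()
lookups = load_lookups(lang=nlp.lang, tables=["lemma_lookup"])
table = lookups.get_table("lemma_lookup")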
@@ -1,50 +1,21 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "pt"
|
||||
stop_words = {"@language_data": "spacy.pt.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.pt.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.pt.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.pt.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class PortugueseDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Portuguese(Language):
|
||||
lang = "pt"
|
||||
Defaults = PortugueseDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Portuguese"]
|
||||
|
|
|
@@ -1,4 +1,6 @@
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH
from ...util import update_exc


_exc = {}

@@ -50,4 +52,4 @@ for orth in [
    _exc[orth] = [{ORTH: orth}]


TOKENIZER_EXCEPTIONS = _exc
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
@@ -3,7 +3,7 @@ from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS
from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT


_prefixes = (
TOKENIZER_PREFIXES = (
    ["§", "%", "=", "—", "–", r"\+(?![0-9])"]
    + LIST_PUNCT
    + LIST_ELLIPSES

@@ -13,7 +13,7 @@ _prefixes = (
)


_suffixes = (
TOKENIZER_SUFFIXES = (
    LIST_PUNCT
    + LIST_ELLIPSES
    + LIST_QUOTES

@@ -31,7 +31,7 @@ _suffixes = (
    ]
)

_infixes = (
TOKENIZER_INFIXES = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [

@@ -44,7 +44,3 @@ _infixes = (
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
    ]
)

TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes
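Renaming _prefixes/_suffixes/_infixes to the exported TOKENIZER_* names removes the re-export block at the bottom of the file; consumers compile the lists into regexes exactly as before. A standalone sketch with toy rule lists (not the ones from the diff):

from spacy.util import compile_infix_regex, compile_prefix_regex, compile_suffix_regex

TOKENIZER_PREFIXES = ["§", "%", r"\("]  # toy values
TOKENIZER_SUFFIXES = [r"\)", "%", r"\."]
TOKENIZER_INFIXES = [r"(?<=[0-9])[+\-\*^](?=[0-9-])"]

prefix_search = compile_prefix_regex(TOKENIZER_PREFIXES).search
suffix_search = compile_suffix_regex(TOKENIZER_SUFFIXES).search
infix_finditer = compile_infix_regex(TOKENIZER_INFIXES).finditer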
@@ -1,49 +1,27 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .stop_words import STOP_WORDS
|
||||
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
|
||||
from .punctuation import TOKENIZER_SUFFIXES
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
# Lemma data note:
|
||||
# Original pairs downloaded from http://www.lexiconista.com/datasets/lemmatization/
|
||||
# Replaced characters using cedillas with the correct ones (ș and ț)
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ro"
|
||||
stop_words = {"@language_data": "spacy.ro.stop_words"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ro.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
class RomanianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
prefixes = TOKENIZER_PREFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
infixes = TOKENIZER_INFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Romanian(Language):
|
||||
lang = "ro"
|
||||
Defaults = RomanianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Romanian"]
|
||||
|
|
|
@@ -1,4 +1,6 @@
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH
from ...util import update_exc
from .punctuation import _make_ro_variants


@@ -91,4 +93,4 @@ for orth in [
        _exc[variant] = [{ORTH: variant}]


TOKENIZER_EXCEPTIONS = _exc
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
@@ -1,49 +1,40 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from typing import Callable
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from .lemmatizer import RussianLemmatizer
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...util import update_exc, registry
|
||||
from ...util import registry
|
||||
from ...language import Language
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ru"
|
||||
stop_words = {"@language_data": "spacy.ru.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.ru.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.RussianLemmatizer.v1"
|
||||
@lemmatizers = "spacy.ru.RussianLemmatizer"
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ru.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
@registry.lemmatizers("spacy.ru.RussianLemmatizer")
|
||||
def create_lemmatizer() -> Callable[[Language], RussianLemmatizer]:
|
||||
def lemmatizer_factory(nlp: Language) -> RussianLemmatizer:
|
||||
return RussianLemmatizer()
|
||||
|
||||
|
||||
@registry.language_data("spacy.ru.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
@registry.lemmatizers("spacy.RussianLemmatizer.v1")
|
||||
def create_russian_lemmatizer() -> RussianLemmatizer:
|
||||
return RussianLemmatizer()
|
||||
return lemmatizer_factory
|
||||
|
||||
|
||||
class RussianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
config = Config().from_str(DEFAULT_CONFIG)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Russian(Language):
|
||||
lang = "ru"
|
||||
Defaults = RussianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Russian"]
|
||||
|
|
|
@@ -1,66 +1,66 @@
|
|||
from ...symbols import ORTH, LEMMA, NORM
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
||||
_abbrev_exc = [
|
||||
# Weekdays abbreviations
|
||||
{ORTH: "пн", LEMMA: "понедельник", NORM: "понедельник"},
|
||||
{ORTH: "вт", LEMMA: "вторник", NORM: "вторник"},
|
||||
{ORTH: "ср", LEMMA: "среда", NORM: "среда"},
|
||||
{ORTH: "чт", LEMMA: "четверг", NORM: "четверг"},
|
||||
{ORTH: "чтв", LEMMA: "четверг", NORM: "четверг"},
|
||||
{ORTH: "пт", LEMMA: "пятница", NORM: "пятница"},
|
||||
{ORTH: "сб", LEMMA: "суббота", NORM: "суббота"},
|
||||
{ORTH: "сбт", LEMMA: "суббота", NORM: "суббота"},
|
||||
{ORTH: "вс", LEMMA: "воскресенье", NORM: "воскресенье"},
|
||||
{ORTH: "вскр", LEMMA: "воскресенье", NORM: "воскресенье"},
|
||||
{ORTH: "воскр", LEMMA: "воскресенье", NORM: "воскресенье"},
|
||||
{ORTH: "пн", NORM: "понедельник"},
|
||||
{ORTH: "вт", NORM: "вторник"},
|
||||
{ORTH: "ср", NORM: "среда"},
|
||||
{ORTH: "чт", NORM: "четверг"},
|
||||
{ORTH: "чтв", NORM: "четверг"},
|
||||
{ORTH: "пт", NORM: "пятница"},
|
||||
{ORTH: "сб", NORM: "суббота"},
|
||||
{ORTH: "сбт", NORM: "суббота"},
|
||||
{ORTH: "вс", NORM: "воскресенье"},
|
||||
{ORTH: "вскр", NORM: "воскресенье"},
|
||||
{ORTH: "воскр", NORM: "воскресенье"},
|
||||
# Months abbreviations
|
||||
{ORTH: "янв", LEMMA: "январь", NORM: "январь"},
|
||||
{ORTH: "фев", LEMMA: "февраль", NORM: "февраль"},
|
||||
{ORTH: "февр", LEMMA: "февраль", NORM: "февраль"},
|
||||
{ORTH: "мар", LEMMA: "март", NORM: "март"},
|
||||
# {ORTH: "март", LEMMA: "март", NORM: "март"},
|
||||
{ORTH: "мрт", LEMMA: "март", NORM: "март"},
|
||||
{ORTH: "апр", LEMMA: "апрель", NORM: "апрель"},
|
||||
# {ORTH: "май", LEMMA: "май", NORM: "май"},
|
||||
{ORTH: "июн", LEMMA: "июнь", NORM: "июнь"},
|
||||
# {ORTH: "июнь", LEMMA: "июнь", NORM: "июнь"},
|
||||
{ORTH: "июл", LEMMA: "июль", NORM: "июль"},
|
||||
# {ORTH: "июль", LEMMA: "июль", NORM: "июль"},
|
||||
{ORTH: "авг", LEMMA: "август", NORM: "август"},
|
||||
{ORTH: "сен", LEMMA: "сентябрь", NORM: "сентябрь"},
|
||||
{ORTH: "сент", LEMMA: "сентябрь", NORM: "сентябрь"},
|
||||
{ORTH: "окт", LEMMA: "октябрь", NORM: "октябрь"},
|
||||
{ORTH: "октб", LEMMA: "октябрь", NORM: "октябрь"},
|
||||
{ORTH: "ноя", LEMMA: "ноябрь", NORM: "ноябрь"},
|
||||
{ORTH: "нояб", LEMMA: "ноябрь", NORM: "ноябрь"},
|
||||
{ORTH: "нбр", LEMMA: "ноябрь", NORM: "ноябрь"},
|
||||
{ORTH: "дек", LEMMA: "декабрь", NORM: "декабрь"},
|
||||
{ORTH: "янв", NORM: "январь"},
|
||||
{ORTH: "фев", NORM: "февраль"},
|
||||
{ORTH: "февр", NORM: "февраль"},
|
||||
{ORTH: "мар", NORM: "март"},
|
||||
# {ORTH: "март", NORM: "март"},
|
||||
{ORTH: "мрт", NORM: "март"},
|
||||
{ORTH: "апр", NORM: "апрель"},
|
||||
# {ORTH: "май", NORM: "май"},
|
||||
{ORTH: "июн", NORM: "июнь"},
|
||||
# {ORTH: "июнь", NORM: "июнь"},
|
||||
{ORTH: "июл", NORM: "июль"},
|
||||
# {ORTH: "июль", NORM: "июль"},
|
||||
{ORTH: "авг", NORM: "август"},
|
||||
{ORTH: "сен", NORM: "сентябрь"},
|
||||
{ORTH: "сент", NORM: "сентябрь"},
|
||||
{ORTH: "окт", NORM: "октябрь"},
|
||||
{ORTH: "октб", NORM: "октябрь"},
|
||||
{ORTH: "ноя", NORM: "ноябрь"},
|
||||
{ORTH: "нояб", NORM: "ноябрь"},
|
||||
{ORTH: "нбр", NORM: "ноябрь"},
|
||||
{ORTH: "дек", NORM: "декабрь"},
|
||||
]
|
||||
|
||||
|
||||
for abbrev_desc in _abbrev_exc:
|
||||
abbrev = abbrev_desc[ORTH]
|
||||
for orth in (abbrev, abbrev.capitalize(), abbrev.upper()):
|
||||
_exc[orth] = [{ORTH: orth, LEMMA: abbrev_desc[LEMMA], NORM: abbrev_desc[NORM]}]
|
||||
_exc[orth + "."] = [
|
||||
{ORTH: orth + ".", LEMMA: abbrev_desc[LEMMA], NORM: abbrev_desc[NORM]}
|
||||
]
|
||||
_exc[orth] = [{ORTH: orth, NORM: abbrev_desc[NORM]}]
|
||||
_exc[orth + "."] = [{ORTH: orth + ".", NORM: abbrev_desc[NORM]}]
|
||||
|
||||
|
||||
_slang_exc = [
|
||||
{ORTH: "2к15", LEMMA: "2015", NORM: "2015"},
|
||||
{ORTH: "2к16", LEMMA: "2016", NORM: "2016"},
|
||||
{ORTH: "2к17", LEMMA: "2017", NORM: "2017"},
|
||||
{ORTH: "2к18", LEMMA: "2018", NORM: "2018"},
|
||||
{ORTH: "2к19", LEMMA: "2019", NORM: "2019"},
|
||||
{ORTH: "2к20", LEMMA: "2020", NORM: "2020"},
|
||||
{ORTH: "2к15", NORM: "2015"},
|
||||
{ORTH: "2к16", NORM: "2016"},
|
||||
{ORTH: "2к17", NORM: "2017"},
|
||||
{ORTH: "2к18", NORM: "2018"},
|
||||
{ORTH: "2к19", NORM: "2019"},
|
||||
{ORTH: "2к20", NORM: "2020"},
|
||||
]
|
||||
|
||||
for slang_desc in _slang_exc:
|
||||
_exc[slang_desc[ORTH]] = [slang_desc]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@@ -1,33 +1,16 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "si"
|
||||
stop_words = {"@language_data": "spacy.si.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.si.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.si.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.si.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
class SinhalaDefaults(Language.Defaults):
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Sinhala(Language):
|
||||
lang = "si"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = SinhalaDefaults
|
||||
|
||||
|
||||
__all__ = ["Sinhala"]
|
||||
|
|
|
@@ -1,33 +1,16 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "sk"
|
||||
stop_words = {"@language_data": "spacy.sk.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.sk.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.sk.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.sk.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
class SlovakDefaults(Language.Defaults):
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Slovak(Language):
|
||||
lang = "sk"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = SlovakDefaults
|
||||
|
||||
|
||||
__all__ = ["Slovak"]
|
||||
|
|
|
@@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "sl"
|
||||
stop_words = {"@language_data": "spacy.sl.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.sl.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class SlovenianDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Slovenian(Language):
|
||||
lang = "sl"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = SlovenianDefaults
|
||||
|
||||
|
||||
__all__ = ["Slovenian"]
|
||||
|
|
|
@@ -1,26 +1,14 @@
|
|||
from typing import Set
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "sq"
|
||||
stop_words = {"@language_data": "spacy.sq.stop_words"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.sq.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
class AlbanianDefaults(Language.Defaults):
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Albanian(Language):
|
||||
lang = "sq"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = AlbanianDefaults
|
||||
|
||||
|
||||
__all__ = ["Albanian"]
|
||||
|
|
|
@@ -1,47 +1,18 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "sr"
|
||||
stop_words = {"@language_data": "spacy.sr.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.sr.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.sr.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.sr.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class SerbianDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Serbian(Language):
|
||||
lang = "sr"
|
||||
Defaults = SerbianDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Serbian"]
|
||||
|
|
|
@@ -1,93 +1,93 @@
|
|||
from ...symbols import ORTH, LEMMA, NORM
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import ORTH, NORM
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
||||
_abbrev_exc = [
|
||||
# Weekdays abbreviations
|
||||
{ORTH: "пoн", LEMMA: "понедељак", NORM: "понедељак"},
|
||||
{ORTH: "уто", LEMMA: "уторак", NORM: "уторак"},
|
||||
{ORTH: "сре", LEMMA: "среда", NORM: "среда"},
|
||||
{ORTH: "чет", LEMMA: "четвртак", NORM: "четвртак"},
|
||||
{ORTH: "пет", LEMMA: "петак", NORM: "петак"},
|
||||
{ORTH: "суб", LEMMA: "субота", NORM: "субота"},
|
||||
{ORTH: "нед", LEMMA: "недеља", NORM: "недеља"},
|
||||
{ORTH: "пoн", NORM: "понедељак"},
|
||||
{ORTH: "уто", NORM: "уторак"},
|
||||
{ORTH: "сре", NORM: "среда"},
|
||||
{ORTH: "чет", NORM: "четвртак"},
|
||||
{ORTH: "пет", NORM: "петак"},
|
||||
{ORTH: "суб", NORM: "субота"},
|
||||
{ORTH: "нед", NORM: "недеља"},
|
||||
# Months abbreviations
|
||||
{ORTH: "јан", LEMMA: "јануар", NORM: "јануар"},
|
||||
{ORTH: "феб", LEMMA: "фебруар", NORM: "фебруар"},
|
||||
{ORTH: "мар", LEMMA: "март", NORM: "март"},
|
||||
{ORTH: "апр", LEMMA: "април", NORM: "април"},
|
||||
{ORTH: "јуни", LEMMA: "јун", NORM: "јун"},
|
||||
{ORTH: "јули", LEMMA: "јул", NORM: "јул"},
|
||||
{ORTH: "авг", LEMMA: "август", NORM: "август"},
|
||||
{ORTH: "сеп", LEMMA: "септембар", NORM: "септембар"},
|
||||
{ORTH: "септ", LEMMA: "септембар", NORM: "септембар"},
|
||||
{ORTH: "окт", LEMMA: "октобар", NORM: "октобар"},
|
||||
{ORTH: "нов", LEMMA: "новембар", NORM: "новембар"},
|
||||
{ORTH: "дец", LEMMA: "децембар", NORM: "децембар"},
|
||||
{ORTH: "јан", NORM: "јануар"},
|
||||
{ORTH: "феб", NORM: "фебруар"},
|
||||
{ORTH: "мар", NORM: "март"},
|
||||
{ORTH: "апр", NORM: "април"},
|
||||
{ORTH: "јуни", NORM: "јун"},
|
||||
{ORTH: "јули", NORM: "јул"},
|
||||
{ORTH: "авг", NORM: "август"},
|
||||
{ORTH: "сеп", NORM: "септембар"},
|
||||
{ORTH: "септ", NORM: "септембар"},
|
||||
{ORTH: "окт", NORM: "октобар"},
|
||||
{ORTH: "нов", NORM: "новембар"},
|
||||
{ORTH: "дец", NORM: "децембар"},
|
||||
]
|
||||
|
||||
|
||||
for abbrev_desc in _abbrev_exc:
|
||||
abbrev = abbrev_desc[ORTH]
|
||||
for orth in (abbrev, abbrev.capitalize(), abbrev.upper()):
|
||||
_exc[orth] = [{ORTH: orth, LEMMA: abbrev_desc[LEMMA], NORM: abbrev_desc[NORM]}]
|
||||
_exc[orth + "."] = [
|
||||
{ORTH: orth + ".", LEMMA: abbrev_desc[LEMMA], NORM: abbrev_desc[NORM]}
|
||||
]
|
||||
_exc[orth] = [{ORTH: orth, NORM: abbrev_desc[NORM]}]
|
||||
_exc[orth + "."] = [{ORTH: orth + ".", NORM: abbrev_desc[NORM]}]
|
||||
|
||||
|
||||
# common abbreviations
|
||||
_slang_exc = [
|
||||
# without dot
|
||||
{ORTH: "др", LEMMA: "доктор", NORM: "доктор"},
|
||||
{ORTH: "гдин", LEMMA: "господин", NORM: "господин"},
|
||||
{ORTH: "гђа", LEMMA: "госпођа", NORM: "госпођа"},
|
||||
{ORTH: "гђица", LEMMA: "госпођица", NORM: "госпођица"},
|
||||
{ORTH: "мр", LEMMA: "магистар", NORM: "магистар"},
|
||||
{ORTH: "Бгд", LEMMA: "Београд", NORM: "београд"},
|
||||
{ORTH: "цм", LEMMA: "центиметар", NORM: "центиметар"},
|
||||
{ORTH: "м", LEMMA: "метар", NORM: "метар"},
|
||||
{ORTH: "км", LEMMA: "километар", NORM: "километар"},
|
||||
{ORTH: "мг", LEMMA: "милиграм", NORM: "милиграм"},
|
||||
{ORTH: "кг", LEMMA: "килограм", NORM: "килограм"},
|
||||
{ORTH: "дл", LEMMA: "децилитар", NORM: "децилитар"},
|
||||
{ORTH: "хл", LEMMA: "хектолитар", NORM: "хектолитар"},
|
||||
{ORTH: "др", NORM: "доктор"},
|
||||
{ORTH: "гдин", NORM: "господин"},
|
||||
{ORTH: "гђа", NORM: "госпођа"},
|
||||
{ORTH: "гђица", NORM: "госпођица"},
|
||||
{ORTH: "мр", NORM: "магистар"},
|
||||
{ORTH: "Бгд", NORM: "београд"},
|
||||
{ORTH: "цм", NORM: "центиметар"},
|
||||
{ORTH: "м", NORM: "метар"},
|
||||
{ORTH: "км", NORM: "километар"},
|
||||
{ORTH: "мг", NORM: "милиграм"},
|
||||
{ORTH: "кг", NORM: "килограм"},
|
||||
{ORTH: "дл", NORM: "децилитар"},
|
||||
{ORTH: "хл", NORM: "хектолитар"},
|
||||
# with dot
|
||||
{ORTH: "ул.", LEMMA: "улица", NORM: "улица"},
|
||||
{ORTH: "бр.", LEMMA: "број", NORM: "број"},
|
||||
{ORTH: "нпр.", LEMMA: "на пример", NORM: "на пример"},
|
||||
{ORTH: "тзв.", LEMMA: "такозван", NORM: "такозван"},
|
||||
{ORTH: "проф.", LEMMA: "професор", NORM: "професор"},
|
||||
{ORTH: "стр.", LEMMA: "страна", NORM: "страна"},
|
||||
{ORTH: "једн.", LEMMA: "једнина", NORM: "једнина"},
|
||||
{ORTH: "мн.", LEMMA: "множина", NORM: "множина"},
|
||||
{ORTH: "уч.", LEMMA: "ученик", NORM: "ученик"},
|
||||
{ORTH: "разр.", LEMMA: "разред", NORM: "разред"},
|
||||
{ORTH: "инж.", LEMMA: "инжењер", NORM: "инжењер"},
|
||||
{ORTH: "гимн.", LEMMA: "гимназија", NORM: "гимназија"},
|
||||
{ORTH: "год.", LEMMA: "година", NORM: "година"},
|
||||
{ORTH: "мед.", LEMMA: "медицина", NORM: "медицина"},
|
||||
{ORTH: "гимн.", LEMMA: "гимназија", NORM: "гимназија"},
|
||||
{ORTH: "акад.", LEMMA: "академик", NORM: "академик"},
|
||||
{ORTH: "доц.", LEMMA: "доцент", NORM: "доцент"},
|
||||
{ORTH: "итд.", LEMMA: "и тако даље", NORM: "и тако даље"},
|
||||
{ORTH: "и сл.", LEMMA: "и слично", NORM: "и слично"},
|
||||
{ORTH: "н.е.", LEMMA: "нова ера", NORM: "нове ере"},
|
||||
{ORTH: "о.г.", LEMMA: "ова година", NORM: "ове године"},
|
||||
{ORTH: "л.к.", LEMMA: "лична карта", NORM: "лична карта"},
|
||||
{ORTH: "в.д.", LEMMA: "вршилац дужности", NORM: "вршилац дужности"},
|
||||
{ORTH: "стр.", LEMMA: "страна", NORM: "страна"},
|
||||
{ORTH: "ул.", NORM: "улица"},
|
||||
{ORTH: "бр.", NORM: "број"},
|
||||
{ORTH: "нпр.", NORM: "на пример"},
|
||||
{ORTH: "тзв.", NORM: "такозван"},
|
||||
{ORTH: "проф.", NORM: "професор"},
|
||||
{ORTH: "стр.", NORM: "страна"},
|
||||
{ORTH: "једн.", NORM: "једнина"},
|
||||
{ORTH: "мн.", NORM: "множина"},
|
||||
{ORTH: "уч.", NORM: "ученик"},
|
||||
{ORTH: "разр.", NORM: "разред"},
|
||||
{ORTH: "инж.", NORM: "инжењер"},
|
||||
{ORTH: "гимн.", NORM: "гимназија"},
|
||||
{ORTH: "год.", NORM: "година"},
|
||||
{ORTH: "мед.", NORM: "медицина"},
|
||||
{ORTH: "гимн.", NORM: "гимназија"},
|
||||
{ORTH: "акад.", NORM: "академик"},
|
||||
{ORTH: "доц.", NORM: "доцент"},
|
||||
{ORTH: "итд.", NORM: "и тако даље"},
|
||||
{ORTH: "и сл.", NORM: "и слично"},
|
||||
{ORTH: "н.е.", NORM: "нове ере"},
|
||||
{ORTH: "о.г.", NORM: "ове године"},
|
||||
{ORTH: "л.к.", NORM: "лична карта"},
|
||||
{ORTH: "в.д.", NORM: "вршилац дужности"},
|
||||
{ORTH: "стр.", NORM: "страна"},
|
||||
# with qoute
|
||||
{ORTH: "ал'", LEMMA: "али", NORM: "али"},
|
||||
{ORTH: "ил'", LEMMA: "или", NORM: "или"},
|
||||
{ORTH: "је л'", LEMMA: "је ли", NORM: "је ли"},
|
||||
{ORTH: "да л'", LEMMA: "да ли", NORM: "да ли"},
|
||||
{ORTH: "држ'те", LEMMA: "држати", NORM: "држите"},
|
||||
{ORTH: "ал'", NORM: "али"},
|
||||
{ORTH: "ил'", NORM: "или"},
|
||||
{ORTH: "је л'", NORM: "је ли"},
|
||||
{ORTH: "да л'", NORM: "да ли"},
|
||||
{ORTH: "држ'те", NORM: "држите"},
|
||||
]
|
||||
|
||||
for slang_desc in _slang_exc:
|
||||
_exc[slang_desc[ORTH]] = [slang_desc]
|
||||
|
||||
|
||||
TOKENIZER_EXCEPTIONS = _exc
|
||||
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
||||
|
|
|
@@ -1,54 +1,25 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...util import update_exc, registry
|
||||
from .syntax_iterators import SYNTAX_ITERATORS
|
||||
from ...language import Language
|
||||
|
||||
# Punctuation stolen from Danish
|
||||
from ..da.punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "sv"
|
||||
stop_words = {"@language_data": "spacy.sv.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.sv.lex_attr_getters"}
|
||||
|
||||
[nlp.lemmatizer]
|
||||
@lemmatizers = "spacy.Lemmatizer.v1"
|
||||
|
||||
[nlp.lemmatizer.data_paths]
|
||||
@language_data = "spacy-lookups-data"
|
||||
lang = ${nlp:lang}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.sv.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.sv.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
class SwedishDefaults(Language.Defaults):
|
||||
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
|
||||
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
|
||||
infixes = TOKENIZER_INFIXES
|
||||
suffixes = TOKENIZER_SUFFIXES
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
syntax_iterators = SYNTAX_ITERATORS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Swedish(Language):
|
||||
lang = "sv"
|
||||
Defaults = SwedishDefaults
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
|
||||
|
||||
__all__ = ["Swedish"]
|
||||
|
|
|
@@ -1,27 +1,18 @@
|
|||
from typing import Union, Iterator
|
||||
|
||||
from ...symbols import NOUN, PROPN, PRON
|
||||
from ...errors import Errors
|
||||
from ...tokens import Doc, Span
|
||||
|
||||
|
||||
def noun_chunks(doclike):
|
||||
"""
|
||||
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
|
||||
"""
|
||||
labels = [
|
||||
"nsubj",
|
||||
"nsubj:pass",
|
||||
"dobj",
|
||||
"obj",
|
||||
"iobj",
|
||||
"ROOT",
|
||||
"appos",
|
||||
"nmod",
|
||||
"nmod:poss",
|
||||
]
|
||||
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
|
||||
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
|
||||
# fmt: off
|
||||
labels = ["nsubj", "nsubj:pass", "dobj", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
|
||||
# fmt: on
|
||||
doc = doclike.doc # Ensure works on both Doc and Span.
|
||||
|
||||
if not doc.is_parsed:
|
||||
raise ValueError(Errors.E029)
|
||||
|
||||
np_deps = [doc.vocab.strings[label] for label in labels]
|
||||
conj = doc.vocab.strings.add("conj")
|
||||
np_label = doc.vocab.strings.add("NP")
|
||||
|
|
|
@@ -1,4 +1,6 @@
|
|||
from ...symbols import LEMMA, NORM, ORTH, PRON_LEMMA
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...symbols import NORM, ORTH
|
||||
from ...util import update_exc
|
||||
|
||||
_exc = {}
|
||||
|
||||
|
@@ -8,61 +10,58 @@ _exc = {}
|
|||
for verb_data in [
|
||||
{ORTH: "driver"},
|
||||
{ORTH: "kör"},
|
||||
{ORTH: "hörr", LEMMA: "hör"},
|
||||
{ORTH: "hörr"},
|
||||
{ORTH: "fattar"},
|
||||
{ORTH: "hajar", LEMMA: "förstår"},
|
||||
{ORTH: "hajar"},
|
||||
{ORTH: "lever"},
|
||||
{ORTH: "serr", LEMMA: "ser"},
|
||||
{ORTH: "serr"},
|
||||
{ORTH: "fixar"},
|
||||
]:
|
||||
verb_data_tc = dict(verb_data)
|
||||
verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
|
||||
for data in [verb_data, verb_data_tc]:
|
||||
_exc[data[ORTH] + "u"] = [
|
||||
dict(data),
|
||||
{ORTH: "u", LEMMA: PRON_LEMMA, NORM: "du"},
|
||||
]
|
||||
_exc[data[ORTH] + "u"] = [data, {ORTH: "u", NORM: "du"}]
|
||||
|
||||
# Abbreviations for weekdays "sön." (for "söndag" / "söner")
|
||||
# are left out because they are ambiguous. The same is the case
|
||||
# for abbreviations "jul." and "Jul." ("juli" / "jul").
|
||||
for exc_data in [
|
||||
{ORTH: "jan.", LEMMA: "januari"},
|
||||
{ORTH: "febr.", LEMMA: "februari"},
|
||||
{ORTH: "feb.", LEMMA: "februari"},
|
||||
{ORTH: "apr.", LEMMA: "april"},
|
||||
{ORTH: "jun.", LEMMA: "juni"},
|
||||
{ORTH: "aug.", LEMMA: "augusti"},
|
||||
{ORTH: "sept.", LEMMA: "september"},
|
||||
{ORTH: "sep.", LEMMA: "september"},
|
||||
{ORTH: "okt.", LEMMA: "oktober"},
|
||||
{ORTH: "nov.", LEMMA: "november"},
|
||||
{ORTH: "dec.", LEMMA: "december"},
|
||||
{ORTH: "mån.", LEMMA: "måndag"},
|
||||
{ORTH: "tis.", LEMMA: "tisdag"},
|
||||
{ORTH: "ons.", LEMMA: "onsdag"},
|
||||
{ORTH: "tors.", LEMMA: "torsdag"},
|
||||
{ORTH: "fre.", LEMMA: "fredag"},
|
||||
{ORTH: "lör.", LEMMA: "lördag"},
|
||||
{ORTH: "Jan.", LEMMA: "Januari"},
|
||||
{ORTH: "Febr.", LEMMA: "Februari"},
|
||||
{ORTH: "Feb.", LEMMA: "Februari"},
|
||||
{ORTH: "Apr.", LEMMA: "April"},
|
||||
{ORTH: "Jun.", LEMMA: "Juni"},
|
||||
{ORTH: "Aug.", LEMMA: "Augusti"},
|
||||
{ORTH: "Sept.", LEMMA: "September"},
|
||||
{ORTH: "Sep.", LEMMA: "September"},
|
||||
{ORTH: "Okt.", LEMMA: "Oktober"},
|
||||
{ORTH: "Nov.", LEMMA: "November"},
|
||||
{ORTH: "Dec.", LEMMA: "December"},
|
||||
{ORTH: "Mån.", LEMMA: "Måndag"},
|
||||
{ORTH: "Tis.", LEMMA: "Tisdag"},
|
||||
{ORTH: "Ons.", LEMMA: "Onsdag"},
|
||||
{ORTH: "Tors.", LEMMA: "Torsdag"},
|
||||
{ORTH: "Fre.", LEMMA: "Fredag"},
|
||||
{ORTH: "Lör.", LEMMA: "Lördag"},
|
||||
{ORTH: "sthlm", LEMMA: "Stockholm"},
|
||||
{ORTH: "gbg", LEMMA: "Göteborg"},
|
||||
{ORTH: "jan.", NORM: "januari"},
|
||||
{ORTH: "febr.", NORM: "februari"},
|
||||
{ORTH: "feb.", NORM: "februari"},
|
||||
{ORTH: "apr.", NORM: "april"},
|
||||
{ORTH: "jun.", NORM: "juni"},
|
||||
{ORTH: "aug.", NORM: "augusti"},
|
||||
{ORTH: "sept.", NORM: "september"},
|
||||
{ORTH: "sep.", NORM: "september"},
|
||||
{ORTH: "okt.", NORM: "oktober"},
|
||||
{ORTH: "nov.", NORM: "november"},
|
||||
{ORTH: "dec.", NORM: "december"},
|
||||
{ORTH: "mån.", NORM: "måndag"},
|
||||
{ORTH: "tis.", NORM: "tisdag"},
|
||||
{ORTH: "ons.", NORM: "onsdag"},
|
||||
{ORTH: "tors.", NORM: "torsdag"},
|
||||
{ORTH: "fre.", NORM: "fredag"},
|
||||
{ORTH: "lör.", NORM: "lördag"},
|
||||
{ORTH: "Jan.", NORM: "Januari"},
|
||||
{ORTH: "Febr.", NORM: "Februari"},
|
||||
{ORTH: "Feb.", NORM: "Februari"},
|
||||
{ORTH: "Apr.", NORM: "April"},
|
||||
{ORTH: "Jun.", NORM: "Juni"},
|
||||
{ORTH: "Aug.", NORM: "Augusti"},
|
||||
{ORTH: "Sept.", NORM: "September"},
|
||||
{ORTH: "Sep.", NORM: "September"},
|
||||
{ORTH: "Okt.", NORM: "Oktober"},
|
||||
{ORTH: "Nov.", NORM: "November"},
|
||||
{ORTH: "Dec.", NORM: "December"},
|
||||
{ORTH: "Mån.", NORM: "Måndag"},
|
||||
{ORTH: "Tis.", NORM: "Tisdag"},
|
||||
{ORTH: "Ons.", NORM: "Onsdag"},
|
||||
{ORTH: "Tors.", NORM: "Torsdag"},
|
||||
{ORTH: "Fre.", NORM: "Fredag"},
|
||||
{ORTH: "Lör.", NORM: "Lördag"},
|
||||
{ORTH: "sthlm", NORM: "Stockholm"},
|
||||
{ORTH: "gbg", NORM: "Göteborg"},
|
||||
]:
|
||||
_exc[exc_data[ORTH]] = [exc_data]
|
||||
|
||||
|
@@ -152,6 +151,6 @@ for orth in ABBREVIATIONS:
# Sentences ending in "i." (as in "... peka i."), "m." (as in "...än 2000 m."),
# should be tokenized as two separate tokens.
for orth in ["i", "m"]:
    _exc[orth + "."] = [{ORTH: orth, LEMMA: orth, NORM: orth}, {ORTH: "."}]
    _exc[orth + "."] = [{ORTH: orth, NORM: orth}, {ORTH: "."}]

TOKENIZER_EXCEPTIONS = _exc
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
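The multi-token entries above ("serru" split into "serr" plus "u" with norm "du") can also be added at runtime through the tokenizer API; the snippet below only mirrors that shape and is not part of the diff.

from spacy.lang.sv import Swedish
from spacy.symbols import NORM, ORTH

nlp = Swedish()
nlp.tokenizer.add_special_case("serru", [{ORTH: "serr"}, {ORTH: "u", NORM: "du"}])
print([(t.text, t.norm_) for t in nlp("serru vad bra")])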
@@ -1,33 +1,16 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "ta"
|
||||
stop_words = {"@language_data": "spacy.ta.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.ta.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.ta.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.ta.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
class TamilDefaults(Language.Defaults):
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Tamil(Language):
|
||||
lang = "ta"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = TamilDefaults
|
||||
|
||||
|
||||
__all__ = ["Tamil"]
|
||||
|
|
|
@@ -1,25 +0,0 @@
from ..symbols import POS, ADV, NOUN, ADP, PRON, SCONJ, PROPN, DET, SYM, INTJ
from ..symbols import PUNCT, NUM, AUX, X, CONJ, ADJ, VERB, PART, SPACE, CCONJ


TAG_MAP = {
    "ADV": {POS: ADV},
    "NOUN": {POS: NOUN},
    "ADP": {POS: ADP},
    "PRON": {POS: PRON},
    "SCONJ": {POS: SCONJ},
    "PROPN": {POS: PROPN},
    "DET": {POS: DET},
    "SYM": {POS: SYM},
    "INTJ": {POS: INTJ},
    "PUNCT": {POS: PUNCT},
    "NUM": {POS: NUM},
    "AUX": {POS: AUX},
    "X": {POS: X},
    "CONJ": {POS: CONJ},
    "CCONJ": {POS: CCONJ},
    "ADJ": {POS: ADJ},
    "VERB": {POS: VERB},
    "PART": {POS: PART},
    "_SP": {POS: SPACE},
}
@@ -1,33 +1,16 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
from .lex_attrs import LEX_ATTRS
|
||||
from ...language import Language
|
||||
from ...util import registry
|
||||
|
||||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "te"
|
||||
stop_words = {"@language_data": "spacy.te.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.te.lex_attr_getters"}
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.te.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.te.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
class TeluguDefaults(Language.Defaults):
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Telugu(Language):
|
||||
lang = "te"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = TeluguDefaults
|
||||
|
||||
|
||||
__all__ = ["Telugu"]
|
||||
|
|
|
@@ -1,4 +1,3 @@
|
|||
from typing import Set, Dict, Callable, Any
|
||||
from thinc.api import Config
|
||||
|
||||
from .stop_words import STOP_WORDS
|
||||
|
@@ -10,26 +9,13 @@ from ...util import DummyTokenizer, registry
|
|||
|
||||
DEFAULT_CONFIG = """
|
||||
[nlp]
|
||||
lang = "th"
|
||||
stop_words = {"@language_data": "spacy.th.stop_words"}
|
||||
lex_attr_getters = {"@language_data": "spacy.th.lex_attr_getters"}
|
||||
|
||||
[nlp.tokenizer]
|
||||
@tokenizers = "spacy.ThaiTokenizer.v1"
|
||||
@tokenizers = "spacy.th.ThaiTokenizer"
|
||||
"""
|
||||
|
||||
|
||||
@registry.language_data("spacy.th.stop_words")
|
||||
def stop_words() -> Set[str]:
|
||||
return STOP_WORDS
|
||||
|
||||
|
||||
@registry.language_data("spacy.th.lex_attr_getters")
|
||||
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
|
||||
return LEX_ATTRS
|
||||
|
||||
|
||||
@registry.tokenizers("spacy.ThaiTokenizer.v1")
|
||||
@registry.tokenizers("spacy.th.ThaiTokenizer")
|
||||
def create_thai_tokenizer():
|
||||
def thai_tokenizer_factory(nlp):
|
||||
return ThaiTokenizer(nlp)
|
||||
|
@@ -55,9 +41,15 @@ class ThaiTokenizer(DummyTokenizer):
|
|||
return Doc(self.vocab, words=words, spaces=spaces)
|
||||
|
||||
|
||||
class ThaiDefaults(Language.Defaults):
|
||||
config = Config().from_str(DEFAULT_CONFIG)
|
||||
lex_attr_getters = LEX_ATTRS
|
||||
stop_words = STOP_WORDS
|
||||
|
||||
|
||||
class Thai(Language):
|
||||
lang = "th"
|
||||
default_config = Config().from_str(DEFAULT_CONFIG)
|
||||
Defaults = ThaiDefaults
|
||||
|
||||
|
||||
__all__ = ["Thai"]
|
||||
|
|
|
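The Thai module above shows the new custom-tokenizer wiring: a registered function returns a factory that takes nlp and builds the tokenizer, and the config's [nlp.tokenizer] block points at the registry name. A hedged sketch with an invented name ("my_lang.WhitespaceTokenizer") and a trivial whitespace tokenizer; it would be selected with @tokenizers = "my_lang.WhitespaceTokenizer" under [nlp.tokenizer].

from spacy.tokens import Doc
from spacy.util import registry


class WhitespaceTokenizer:
    def __init__(self, vocab):
        self.vocab = vocab

    def __call__(self, text):
        # naive split; real tokenizers handle punctuation, affixes, exceptions
        words = text.split(" ")
        return Doc(self.vocab, words=words)


@registry.tokenizers("my_lang.WhitespaceTokenizer")
def create_whitespace_tokenizer():
    def tokenizer_factory(nlp):
        return WhitespaceTokenizer(nlp.vocab)

    return tokenizer_factory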
@@ -1,469 +1,438 @@
|
|||
from ...symbols import ORTH, LEMMA
|
||||
from ...symbols import ORTH
|
||||
|
||||
|
||||
_exc = {
|
||||
# หน่วยงานรัฐ / government agency
|
||||
"กกต.": [{ORTH: "กกต.", LEMMA: "คณะกรรมการการเลือกตั้ง"}],
|
||||
"กทท.": [{ORTH: "กทท.", LEMMA: "การท่าเรือแห่งประเทศไทย"}],
|
||||
"กทพ.": [{ORTH: "กทพ.", LEMMA: "การทางพิเศษแห่งประเทศไทย"}],
|
||||
"กบข.": [{ORTH: "กบข.", LEMMA: "กองทุนบำเหน็จบำนาญข้าราชการพลเรือน"}],
|
||||
"กบว.": [{ORTH: "กบว.", LEMMA: "คณะกรรมการบริหารวิทยุกระจายเสียงและวิทยุโทรทัศน์"}],
|
||||
"กปน.": [{ORTH: "กปน.", LEMMA: "การประปานครหลวง"}],
|
||||
"กปภ.": [{ORTH: "กปภ.", LEMMA: "การประปาส่วนภูมิภาค"}],
|
||||
"กปส.": [{ORTH: "กปส.", LEMMA: "กรมประชาสัมพันธ์"}],
|
||||
"กผม.": [{ORTH: "กผม.", LEMMA: "กองผังเมือง"}],
|
||||
"กฟน.": [{ORTH: "กฟน.", LEMMA: "การไฟฟ้านครหลวง"}],
|
||||
"กฟผ.": [{ORTH: "กฟผ.", LEMMA: "การไฟฟ้าฝ่ายผลิตแห่งประเทศไทย"}],
|
||||
"กฟภ.": [{ORTH: "กฟภ.", LEMMA: "การไฟฟ้าส่วนภูมิภาค"}],
|
||||
"ก.ช.น.": [{ORTH: "ก.ช.น.", LEMMA: "คณะกรรมการช่วยเหลือชาวนาชาวไร่"}],
|
||||
"กยศ.": [{ORTH: "กยศ.", LEMMA: "กองทุนเงินให้กู้ยืมเพื่อการศึกษา"}],
|
||||
"ก.ล.ต.": [{ORTH: "ก.ล.ต.", LEMMA: "คณะกรรมการกำกับหลักทรัพย์และตลาดหลักทรัพย์"}],
|
||||
"กศ.บ.": [{ORTH: "กศ.บ.", LEMMA: "การศึกษาบัณฑิต"}],
|
||||
"กศน.": [{ORTH: "กศน.", LEMMA: "กรมการศึกษานอกโรงเรียน"}],
|
||||
"กสท.": [{ORTH: "กสท.", LEMMA: "การสื่อสารแห่งประเทศไทย"}],
|
||||
"กอ.รมน.": [{ORTH: "กอ.รมน.", LEMMA: "กองอำนวยการรักษาความมั่นคงภายใน"}],
|
||||
"กร.": [{ORTH: "กร.", LEMMA: "กองเรือยุทธการ"}],
|
||||
"ขสมก.": [{ORTH: "ขสมก.", LEMMA: "องค์การขนส่งมวลชนกรุงเทพ"}],
|
||||
"คตง.": [{ORTH: "คตง.", LEMMA: "คณะกรรมการตรวจเงินแผ่นดิน"}],
|
||||
"ครม.": [{ORTH: "ครม.", LEMMA: "คณะรัฐมนตรี"}],
|
||||
"คมช.": [{ORTH: "คมช.", LEMMA: "คณะมนตรีความมั่นคงแห่งชาติ"}],
|
||||
"ตชด.": [{ORTH: "ตชด.", LEMMA: "ตำรวจตะเวนชายเดน"}],
|
||||
"ตม.": [{ORTH: "ตม.", LEMMA: "กองตรวจคนเข้าเมือง"}],
|
||||
"ตร.": [{ORTH: "ตร.", LEMMA: "ตำรวจ"}],
|
||||
"ททท.": [{ORTH: "ททท.", LEMMA: "การท่องเที่ยวแห่งประเทศไทย"}],
|
||||
"ททบ.": [{ORTH: "ททบ.", LEMMA: "สถานีวิทยุโทรทัศน์กองทัพบก"}],
|
||||
"ทบ.": [{ORTH: "ทบ.", LEMMA: "กองทัพบก"}],
|
||||
"ทร.": [{ORTH: "ทร.", LEMMA: "กองทัพเรือ"}],
|
||||
"ทอ.": [{ORTH: "ทอ.", LEMMA: "กองทัพอากาศ"}],
|
||||
"ทอท.": [{ORTH: "ทอท.", LEMMA: "การท่าอากาศยานแห่งประเทศไทย"}],
|
||||
"ธ.ก.ส.": [{ORTH: "ธ.ก.ส.", LEMMA: "ธนาคารเพื่อการเกษตรและสหกรณ์การเกษตร"}],
|
||||
"ธปท.": [{ORTH: "ธปท.", LEMMA: "ธนาคารแห่งประเทศไทย"}],
|
||||
"ธอส.": [{ORTH: "ธอส.", LEMMA: "ธนาคารอาคารสงเคราะห์"}],
|
||||
"นย.": [{ORTH: "นย.", LEMMA: "นาวิกโยธิน"}],
|
||||
"ปตท.": [{ORTH: "ปตท.", LEMMA: "การปิโตรเลียมแห่งประเทศไทย"}],
|
||||
"ป.ป.ช.": [
|
||||
{
|
||||
ORTH: "ป.ป.ช.",
|
||||
LEMMA: "คณะกรรมการป้องกันและปราบปรามการทุจริตและประพฤติมิชอบในวงราชการ",
|
||||
}
|
||||
],
|
||||
"ป.ป.ส.": [{ORTH: "ป.ป.ส.", LEMMA: "คณะกรรมการป้องกันและปราบปรามยาเสพติด"}],
|
||||
"บพร.": [{ORTH: "บพร.", LEMMA: "กรมการบินพลเรือน"}],
|
||||
"บย.": [{ORTH: "บย.", LEMMA: "กองบินยุทธการ"}],
|
||||
"พสวท.": [
|
||||
{
|
||||
ORTH: "พสวท.",
|
||||
LEMMA: "โครงการพัฒนาและส่งเสริมผู้มีความรู้ความสามารถพิเศษทางวิทยาศาสตร์และเทคโนโลยี",
|
||||
}
|
||||
],
|
||||
"มอก.": [{ORTH: "มอก.", LEMMA: "สำนักงานมาตรฐานผลิตภัณฑ์อุตสาหกรรม"}],
|
||||
"ยธ.": [{ORTH: "ยธ.", LEMMA: "กรมโยธาธิการ"}],
|
||||
"รพช.": [{ORTH: "รพช.", LEMMA: "สำนักงานเร่งรัดพัฒนาชนบท"}],
|
||||
"รฟท.": [{ORTH: "รฟท.", LEMMA: "การรถไฟแห่งประเทศไทย"}],
|
||||
"รฟม.": [{ORTH: "รฟม.", LEMMA: "การรถไฟฟ้าขนส่งมวลชนแห่งประเทศไทย"}],
|
||||
"ศธ.": [{ORTH: "ศธ.", LEMMA: "กระทรวงศึกษาธิการ"}],
|
||||
"ศนธ.": [{ORTH: "ศนธ.", LEMMA: "ศูนย์กลางนิสิตนักศึกษาแห่งประเทศไทย"}],
|
||||
"สกจ.": [{ORTH: "สกจ.", LEMMA: "สหกรณ์จังหวัด"}],
|
||||
"สกท.": [{ORTH: "สกท.", LEMMA: "สำนักงานคณะกรรมการส่งเสริมการลงทุน"}],
|
||||
"สกว.": [{ORTH: "สกว.", LEMMA: "สำนักงานกองทุนสนับสนุนการวิจัย"}],
|
||||
"สคบ.": [{ORTH: "สคบ.", LEMMA: "สำนักงานคณะกรรมการคุ้มครองผู้บริโภค"}],
|
||||
"สจร.": [{ORTH: "สจร.", LEMMA: "สำนักงานคณะกรรมการจัดระบบการจราจรทางบก"}],
|
||||
"สตง.": [{ORTH: "สตง.", LEMMA: "สำนักงานตรวจเงินแผ่นดิน"}],
|
||||
"สทท.": [{ORTH: "สทท.", LEMMA: "สถานีวิทยุโทรทัศน์แห่งประเทศไทย"}],
|
||||
"สทร.": [{ORTH: "สทร.", LEMMA: "สำนักงานกลางทะเบียนราษฎร์"}],
|
||||
"สธ": [{ORTH: "สธ", LEMMA: "กระทรวงสาธารณสุข"}],
|
||||
"สนช.": [{ORTH: "สนช.", LEMMA: "สภานิติบัญญัติแห่งชาติ,สำนักงานนวัตกรรมแห่งชาติ"}],
|
||||
"สนนท.": [{ORTH: "สนนท.", LEMMA: "สหพันธ์นิสิตนักศึกษาแห่งประเทศไทย"}],
|
||||
"สปก.": [{ORTH: "สปก.", LEMMA: "สำนักงานการปฏิรูปที่ดินเพื่อเกษตรกรรม"}],
|
||||
"สปช.": [{ORTH: "สปช.", LEMMA: "สำนักงานคณะกรรมการการประถมศึกษาแห่งชาติ"}],
|
||||
"สปอ.": [{ORTH: "สปอ.", LEMMA: "สำนักงานการประถมศึกษาอำเภอ"}],
|
||||
"สพช.": [{ORTH: "สพช.", LEMMA: "สำนักงานคณะกรรมการนโยบายพลังงานแห่งชาติ"}],
|
||||
"สยช.": [
|
||||
{ORTH: "สยช.", LEMMA: "สำนักงานคณะกรรมการส่งเสริมและประสานงานเยาวชนแห่งชาติ"}
|
||||
],
|
||||
"สวช.": [{ORTH: "สวช.", LEMMA: "สำนักงานคณะกรรมการวัฒนธรรมแห่งชาติ"}],
|
||||
"สวท.": [{ORTH: "สวท.", LEMMA: "สถานีวิทยุกระจายเสียงแห่งประเทศไทย"}],
|
||||
"สวทช.": [{ORTH: "สวทช.", LEMMA: "สำนักงานพัฒนาวิทยาศาสตร์และเทคโนโลยีแห่งชาติ"}],
|
||||
"สคช.": [
|
||||
{ORTH: "สคช.", LEMMA: "สำนักงานคณะกรรมการพัฒนาการเศรษฐกิจและสังคมแห่งชาติ"}
|
||||
],
|
||||
"สสว.": [{ORTH: "สสว.", LEMMA: "สำนักงานส่งเสริมวิสาหกิจขนาดกลางและขนาดย่อม"}],
|
||||
"สสส.": [{ORTH: "สสส.", LEMMA: "สำนักงานกองทุนสนับสนุนการสร้างเสริมสุขภาพ"}],
|
||||
"สสวท.": [{ORTH: "สสวท.", LEMMA: "สถาบันส่งเสริมการสอนวิทยาศาสตร์และเทคโนโลยี"}],
|
||||
"อตก.": [{ORTH: "อตก.", LEMMA: "องค์การตลาดเพื่อเกษตรกร"}],
|
||||
"อบจ.": [{ORTH: "อบจ.", LEMMA: "องค์การบริหารส่วนจังหวัด"}],
|
||||
"อบต.": [{ORTH: "อบต.", LEMMA: "องค์การบริหารส่วนตำบล"}],
|
||||
"อปพร.": [{ORTH: "อปพร.", LEMMA: "อาสาสมัครป้องกันภัยฝ่ายพลเรือน"}],
|
||||
"อย.": [{ORTH: "อย.", LEMMA: "สำนักงานคณะกรรมการอาหารและยา"}],
|
||||
"อ.ส.ม.ท.": [{ORTH: "อ.ส.ม.ท.", LEMMA: "องค์การสื่อสารมวลชนแห่งประเทศไทย"}],
|
||||
"กกต.": [{ORTH: "กกต."}],
|
||||
"กทท.": [{ORTH: "กทท."}],
|
||||
"กทพ.": [{ORTH: "กทพ."}],
|
||||
"กบข.": [{ORTH: "กบข."}],
|
||||
"กบว.": [{ORTH: "กบว."}],
|
||||
"กปน.": [{ORTH: "กปน."}],
|
||||
"กปภ.": [{ORTH: "กปภ."}],
|
||||
"กปส.": [{ORTH: "กปส."}],
|
||||
"กผม.": [{ORTH: "กผม."}],
|
||||
"กฟน.": [{ORTH: "กฟน."}],
|
||||
"กฟผ.": [{ORTH: "กฟผ."}],
|
||||
"กฟภ.": [{ORTH: "กฟภ."}],
|
||||
"ก.ช.น.": [{ORTH: "ก.ช.น."}],
|
||||
"กยศ.": [{ORTH: "กยศ."}],
|
||||
"ก.ล.ต.": [{ORTH: "ก.ล.ต."}],
|
||||
"กศ.บ.": [{ORTH: "กศ.บ."}],
|
||||
"กศน.": [{ORTH: "กศน."}],
|
||||
"กสท.": [{ORTH: "กสท."}],
|
||||
"กอ.รมน.": [{ORTH: "กอ.รมน."}],
|
||||
"กร.": [{ORTH: "กร."}],
|
||||
"ขสมก.": [{ORTH: "ขสมก."}],
|
||||
"คตง.": [{ORTH: "คตง."}],
|
||||
"ครม.": [{ORTH: "ครม."}],
|
||||
"คมช.": [{ORTH: "คมช."}],
|
||||
"ตชด.": [{ORTH: "ตชด."}],
|
||||
"ตม.": [{ORTH: "ตม."}],
|
||||
"ตร.": [{ORTH: "ตร."}],
|
||||
"ททท.": [{ORTH: "ททท."}],
|
||||
"ททบ.": [{ORTH: "ททบ."}],
|
||||
"ทบ.": [{ORTH: "ทบ."}],
|
||||
"ทร.": [{ORTH: "ทร."}],
|
||||
"ทอ.": [{ORTH: "ทอ."}],
|
||||
"ทอท.": [{ORTH: "ทอท."}],
|
||||
"ธ.ก.ส.": [{ORTH: "ธ.ก.ส."}],
|
||||
"ธปท.": [{ORTH: "ธปท."}],
|
||||
"ธอส.": [{ORTH: "ธอส."}],
|
||||
"นย.": [{ORTH: "นย."}],
|
||||
"ปตท.": [{ORTH: "ปตท."}],
|
||||
"ป.ป.ช.": [{ORTH: "ป.ป.ช."}],
|
||||
"ป.ป.ส.": [{ORTH: "ป.ป.ส."}],
|
||||
"บพร.": [{ORTH: "บพร."}],
|
||||
"บย.": [{ORTH: "บย."}],
|
||||
"พสวท.": [{ORTH: "พสวท."}],
|
||||
"มอก.": [{ORTH: "มอก."}],
|
||||
"ยธ.": [{ORTH: "ยธ."}],
|
||||
"รพช.": [{ORTH: "รพช."}],
|
||||
"รฟท.": [{ORTH: "รฟท."}],
|
||||
"รฟม.": [{ORTH: "รฟม."}],
|
||||
"ศธ.": [{ORTH: "ศธ."}],
|
||||
"ศนธ.": [{ORTH: "ศนธ."}],
|
||||
"สกจ.": [{ORTH: "สกจ."}],
|
||||
"สกท.": [{ORTH: "สกท."}],
|
||||
"สกว.": [{ORTH: "สกว."}],
|
||||
"สคบ.": [{ORTH: "สคบ."}],
|
||||
"สจร.": [{ORTH: "สจร."}],
|
||||
"สตง.": [{ORTH: "สตง."}],
|
||||
"สทท.": [{ORTH: "สทท."}],
|
||||
"สทร.": [{ORTH: "สทร."}],
|
||||
"สธ": [{ORTH: "สธ"}],
|
||||
"สนช.": [{ORTH: "สนช."}],
|
||||
"สนนท.": [{ORTH: "สนนท."}],
|
||||
"สปก.": [{ORTH: "สปก."}],
|
||||
"สปช.": [{ORTH: "สปช."}],
|
||||
"สปอ.": [{ORTH: "สปอ."}],
|
||||
"สพช.": [{ORTH: "สพช."}],
|
||||
"สยช.": [{ORTH: "สยช."}],
|
||||
"สวช.": [{ORTH: "สวช."}],
|
||||
"สวท.": [{ORTH: "สวท."}],
|
||||
"สวทช.": [{ORTH: "สวทช."}],
|
||||
"สคช.": [{ORTH: "สคช."}],
|
||||
"สสว.": [{ORTH: "สสว."}],
|
||||
"สสส.": [{ORTH: "สสส."}],
|
||||
"สสวท.": [{ORTH: "สสวท."}],
|
||||
"อตก.": [{ORTH: "อตก."}],
|
||||
"อบจ.": [{ORTH: "อบจ."}],
|
||||
"อบต.": [{ORTH: "อบต."}],
|
||||
"อปพร.": [{ORTH: "อปพร."}],
|
||||
"อย.": [{ORTH: "อย."}],
|
||||
"อ.ส.ม.ท.": [{ORTH: "อ.ส.ม.ท."}],
|
||||
# มหาวิทยาลัย / สถานศึกษา / university / college
|
||||
"มทส.": [{ORTH: "มทส.", LEMMA: "มหาวิทยาลัยเทคโนโลยีสุรนารี"}],
|
||||
"มธ.": [{ORTH: "มธ.", LEMMA: "มหาวิทยาลัยธรรมศาสตร์"}],
|
||||
"ม.อ.": [{ORTH: "ม.อ.", LEMMA: "มหาวิทยาลัยสงขลานครินทร์"}],
|
||||
"มทร.": [{ORTH: "มทร.", LEMMA: "มหาวิทยาลัยเทคโนโลยีราชมงคล"}],
|
||||
"มมส.": [{ORTH: "มมส.", LEMMA: "มหาวิทยาลัยมหาสารคาม"}],
|
||||
"วท.": [{ORTH: "วท.", LEMMA: "วิทยาลัยเทคนิค"}],
|
||||
"สตม.": [{ORTH: "สตม.", LEMMA: "สำนักงานตรวจคนเข้าเมือง (ตำรวจ)"}],
|
||||
"มทส.": [{ORTH: "มทส."}],
|
||||
"มธ.": [{ORTH: "มธ."}],
|
||||
"ม.อ.": [{ORTH: "ม.อ."}],
|
||||
"มทร.": [{ORTH: "มทร."}],
|
||||
"มมส.": [{ORTH: "มมส."}],
|
||||
"วท.": [{ORTH: "วท."}],
|
||||
"สตม.": [{ORTH: "สตม."}],
|
||||
# ยศ / rank
"ดร.": [{ORTH: "ดร.", LEMMA: "ดอกเตอร์"}],
"ด.ต.": [{ORTH: "ด.ต.", LEMMA: "ดาบตำรวจ"}],
"จ.ต.": [{ORTH: "จ.ต.", LEMMA: "จ่าตรี"}],
"จ.ท.": [{ORTH: "จ.ท.", LEMMA: "จ่าโท"}],
"จ.ส.ต.": [{ORTH: "จ.ส.ต.", LEMMA: "จ่าสิบตรี (ทหารบก)"}],
"จสต.": [{ORTH: "จสต.", LEMMA: "จ่าสิบตำรวจ"}],
"จ.ส.ท.": [{ORTH: "จ.ส.ท.", LEMMA: "จ่าสิบโท"}],
"จ.ส.อ.": [{ORTH: "จ.ส.อ.", LEMMA: "จ่าสิบเอก"}],
"จ.อ.": [{ORTH: "จ.อ.", LEMMA: "จ่าเอก"}],
"ทพญ.": [{ORTH: "ทพญ.", LEMMA: "ทันตแพทย์หญิง"}],
"ทนพ.": [{ORTH: "ทนพ.", LEMMA: "เทคนิคการแพทย์"}],
"นจอ.": [{ORTH: "นจอ.", LEMMA: "นักเรียนจ่าอากาศ"}],
"น.ช.": [{ORTH: "น.ช.", LEMMA: "นักโทษชาย"}],
"น.ญ.": [{ORTH: "น.ญ.", LEMMA: "นักโทษหญิง"}],
"น.ต.": [{ORTH: "น.ต.", LEMMA: "นาวาตรี"}],
"น.ท.": [{ORTH: "น.ท.", LEMMA: "นาวาโท"}],
"นตท.": [{ORTH: "นตท.", LEMMA: "นักเรียนเตรียมทหาร"}],
"นนส.": [{ORTH: "นนส.", LEMMA: "นักเรียนนายสิบทหารบก"}],
"นนร.": [{ORTH: "นนร.", LEMMA: "นักเรียนนายร้อย"}],
"นนอ.": [{ORTH: "นนอ.", LEMMA: "นักเรียนนายเรืออากาศ"}],
"นพ.": [{ORTH: "นพ.", LEMMA: "นายแพทย์"}],
"นพท.": [{ORTH: "นพท.", LEMMA: "นายแพทย์ทหาร"}],
"นรจ.": [{ORTH: "นรจ.", LEMMA: "นักเรียนจ่าทหารเรือ"}],
"นรต.": [{ORTH: "นรต.", LEMMA: "นักเรียนนายร้อยตำรวจ"}],
"นศพ.": [{ORTH: "นศพ.", LEMMA: "นักศึกษาแพทย์"}],
"นศท.": [{ORTH: "นศท.", LEMMA: "นักศึกษาวิชาทหาร"}],
"น.สพ.": [{ORTH: "น.สพ.", LEMMA: "นายสัตวแพทย์ (พ.ร.บ.วิชาชีพการสัตวแพทย์)"}],
"น.อ.": [{ORTH: "น.อ.", LEMMA: "นาวาเอก"}],
"บช.ก.": [{ORTH: "บช.ก.", LEMMA: "กองบัญชาการตำรวจสอบสวนกลาง"}],
"บช.น.": [{ORTH: "บช.น.", LEMMA: "กองบัญชาการตำรวจนครบาล"}],
"ผกก.": [{ORTH: "ผกก.", LEMMA: "ผู้กำกับการ"}],
"ผกก.ภ.": [{ORTH: "ผกก.ภ.", LEMMA: "ผู้กำกับการตำรวจภูธร"}],
"ผจก.": [{ORTH: "ผจก.", LEMMA: "ผู้จัดการ"}],
"ผช.": [{ORTH: "ผช.", LEMMA: "ผู้ช่วย"}],
"ผชก.": [{ORTH: "ผชก.", LEMMA: "ผู้ชำนาญการ"}],
"ผช.ผอ.": [{ORTH: "ผช.ผอ.", LEMMA: "ผู้ช่วยผู้อำนวยการ"}],
"ผญบ.": [{ORTH: "ผญบ.", LEMMA: "ผู้ใหญ่บ้าน"}],
"ผบ.": [{ORTH: "ผบ.", LEMMA: "ผู้บังคับบัญชา"}],
"ผบก.": [{ORTH: "ผบก.", LEMMA: "ผู้บังคับบัญชาการ (ตำรวจ)"}],
"ผบก.น.": [{ORTH: "ผบก.น.", LEMMA: "ผู้บังคับการตำรวจนครบาล"}],
"ผบก.ป.": [{ORTH: "ผบก.ป.", LEMMA: "ผู้บังคับการตำรวจกองปราบปราม"}],
"ผบก.ปค.": [
    {
        ORTH: "ผบก.ปค.",
        LEMMA: "ผู้บังคับการ กองบังคับการปกครอง (โรงเรียนนายร้อยตำรวจ)",
    }
],
"ผบก.ปม.": [{ORTH: "ผบก.ปม.", LEMMA: "ผู้บังคับการตำรวจป่าไม้"}],
"ผบก.ภ.": [{ORTH: "ผบก.ภ.", LEMMA: "ผู้บังคับการตำรวจภูธร"}],
"ผบช.": [{ORTH: "ผบช.", LEMMA: "ผู้บัญชาการ (ตำรวจ)"}],
"ผบช.ก.": [{ORTH: "ผบช.ก.", LEMMA: "ผู้บัญชาการตำรวจสอบสวนกลาง"}],
"ผบช.ตชด.": [{ORTH: "ผบช.ตชด.", LEMMA: "ผู้บัญชาการตำรวจตระเวนชายแดน"}],
"ผบช.น.": [{ORTH: "ผบช.น.", LEMMA: "ผู้บัญชาการตำรวจนครบาล"}],
"ผบช.ภ.": [{ORTH: "ผบช.ภ.", LEMMA: "ผู้บัญชาการตำรวจภูธร"}],
"ผบ.ทบ.": [{ORTH: "ผบ.ทบ.", LEMMA: "ผู้บัญชาการทหารบก"}],
"ผบ.ตร.": [{ORTH: "ผบ.ตร.", LEMMA: "ผู้บัญชาการตำรวจแห่งชาติ"}],
"ผบ.ทร.": [{ORTH: "ผบ.ทร.", LEMMA: "ผู้บัญชาการทหารเรือ"}],
"ผบ.ทอ.": [{ORTH: "ผบ.ทอ.", LEMMA: "ผู้บัญชาการทหารอากาศ"}],
"ผบ.ทสส.": [{ORTH: "ผบ.ทสส.", LEMMA: "ผู้บัญชาการทหารสูงสุด"}],
"ผวจ.": [{ORTH: "ผวจ.", LEMMA: "ผู้ว่าราชการจังหวัด"}],
"ผู้ว่าฯ": [{ORTH: "ผู้ว่าฯ", LEMMA: "ผู้ว่าราชการจังหวัด"}],
"พ.จ.ต.": [{ORTH: "พ.จ.ต.", LEMMA: "พันจ่าตรี"}],
"พ.จ.ท.": [{ORTH: "พ.จ.ท.", LEMMA: "พันจ่าโท"}],
"พ.จ.อ.": [{ORTH: "พ.จ.อ.", LEMMA: "พันจ่าเอก"}],
"พญ.": [{ORTH: "พญ.", LEMMA: "แพทย์หญิง"}],
"ฯพณฯ": [{ORTH: "ฯพณฯ", LEMMA: "พณท่าน"}],
"พ.ต.": [{ORTH: "พ.ต.", LEMMA: "พันตรี"}],
"พ.ท.": [{ORTH: "พ.ท.", LEMMA: "พันโท"}],
"พ.อ.": [{ORTH: "พ.อ.", LEMMA: "พันเอก"}],
"พ.ต.อ.พิเศษ": [{ORTH: "พ.ต.อ.พิเศษ", LEMMA: "พันตำรวจเอกพิเศษ"}],
"พลฯ": [{ORTH: "พลฯ", LEMMA: "พลทหาร"}],
"พล.๑ รอ.": [{ORTH: "พล.๑ รอ.", LEMMA: "กองพลที่ ๑ รักษาพระองค์ กองทัพบก"}],
"พล.ต.": [{ORTH: "พล.ต.", LEMMA: "พลตรี"}],
"พล.ต.ต.": [{ORTH: "พล.ต.ต.", LEMMA: "พลตำรวจตรี"}],
"พล.ต.ท.": [{ORTH: "พล.ต.ท.", LEMMA: "พลตำรวจโท"}],
"พล.ต.อ.": [{ORTH: "พล.ต.อ.", LEMMA: "พลตำรวจเอก"}],
"พล.ท.": [{ORTH: "พล.ท.", LEMMA: "พลโท"}],
"พล.ปตอ.": [{ORTH: "พล.ปตอ.", LEMMA: "กองพลทหารปืนใหญ่ต่อสู่อากาศยาน"}],
"พล.ม.": [{ORTH: "พล.ม.", LEMMA: "กองพลทหารม้า"}],
"พล.ม.๒": [{ORTH: "พล.ม.๒", LEMMA: "กองพลทหารม้าที่ ๒"}],
"พล.ร.ต.": [{ORTH: "พล.ร.ต.", LEMMA: "พลเรือตรี"}],
"พล.ร.ท.": [{ORTH: "พล.ร.ท.", LEMMA: "พลเรือโท"}],
"พล.ร.อ.": [{ORTH: "พล.ร.อ.", LEMMA: "พลเรือเอก"}],
"พล.อ.": [{ORTH: "พล.อ.", LEMMA: "พลเอก"}],
"พล.อ.ต.": [{ORTH: "พล.อ.ต.", LEMMA: "พลอากาศตรี"}],
"พล.อ.ท.": [{ORTH: "พล.อ.ท.", LEMMA: "พลอากาศโท"}],
"พล.อ.อ.": [{ORTH: "พล.อ.อ.", LEMMA: "พลอากาศเอก"}],
"พ.อ.พิเศษ": [{ORTH: "พ.อ.พิเศษ", LEMMA: "พันเอกพิเศษ"}],
"พ.อ.ต.": [{ORTH: "พ.อ.ต.", LEMMA: "พันจ่าอากาศตรี"}],
"พ.อ.ท.": [{ORTH: "พ.อ.ท.", LEMMA: "พันจ่าอากาศโท"}],
"พ.อ.อ.": [{ORTH: "พ.อ.อ.", LEMMA: "พันจ่าอากาศเอก"}],
"ภกญ.": [{ORTH: "ภกญ.", LEMMA: "เภสัชกรหญิง"}],
"ม.จ.": [{ORTH: "ม.จ.", LEMMA: "หม่อมเจ้า"}],
"มท1": [{ORTH: "มท1", LEMMA: "รัฐมนตรีว่าการกระทรวงมหาดไทย"}],
"ม.ร.ว.": [{ORTH: "ม.ร.ว.", LEMMA: "หม่อมราชวงศ์"}],
"มล.": [{ORTH: "มล.", LEMMA: "หม่อมหลวง"}],
"ร.ต.": [{ORTH: "ร.ต.", LEMMA: "ร้อยตรี,เรือตรี,เรืออากาศตรี"}],
"ร.ต.ต.": [{ORTH: "ร.ต.ต.", LEMMA: "ร้อยตำรวจตรี"}],
"ร.ต.ท.": [{ORTH: "ร.ต.ท.", LEMMA: "ร้อยตำรวจโท"}],
"ร.ต.อ.": [{ORTH: "ร.ต.อ.", LEMMA: "ร้อยตำรวจเอก"}],
"ร.ท.": [{ORTH: "ร.ท.", LEMMA: "ร้อยโท,เรือโท,เรืออากาศโท"}],
"รมช.": [{ORTH: "รมช.", LEMMA: "รัฐมนตรีช่วยว่าการกระทรวง"}],
"รมต.": [{ORTH: "รมต.", LEMMA: "รัฐมนตรี"}],
"รมว.": [{ORTH: "รมว.", LEMMA: "รัฐมนตรีว่าการกระทรวง"}],
"รศ.": [{ORTH: "รศ.", LEMMA: "รองศาสตราจารย์"}],
"ร.อ.": [{ORTH: "ร.อ.", LEMMA: "ร้อยเอก,เรือเอก,เรืออากาศเอก"}],
"ศ.": [{ORTH: "ศ.", LEMMA: "ศาสตราจารย์"}],
"ส.ต.": [{ORTH: "ส.ต.", LEMMA: "สิบตรี"}],
"ส.ต.ต.": [{ORTH: "ส.ต.ต.", LEMMA: "สิบตำรวจตรี"}],
"ส.ต.ท.": [{ORTH: "ส.ต.ท.", LEMMA: "สิบตำรวจโท"}],
"ส.ต.อ.": [{ORTH: "ส.ต.อ.", LEMMA: "สิบตำรวจเอก"}],
"ส.ท.": [{ORTH: "ส.ท.", LEMMA: "สิบโท"}],
"สพ.": [{ORTH: "สพ.", LEMMA: "สัตวแพทย์"}],
"สพ.ญ.": [{ORTH: "สพ.ญ.", LEMMA: "สัตวแพทย์หญิง"}],
"สพ.ช.": [{ORTH: "สพ.ช.", LEMMA: "สัตวแพทย์ชาย"}],
"ส.อ.": [{ORTH: "ส.อ.", LEMMA: "สิบเอก"}],
"อจ.": [{ORTH: "อจ.", LEMMA: "อาจารย์"}],
"อจญ.": [{ORTH: "อจญ.", LEMMA: "อาจารย์ใหญ่"}],
"ดร.": [{ORTH: "ดร."}],
|
||||
"ด.ต.": [{ORTH: "ด.ต."}],
|
||||
"จ.ต.": [{ORTH: "จ.ต."}],
|
||||
"จ.ท.": [{ORTH: "จ.ท."}],
|
||||
"จ.ส.ต.": [{ORTH: "จ.ส.ต."}],
|
||||
"จสต.": [{ORTH: "จสต."}],
|
||||
"จ.ส.ท.": [{ORTH: "จ.ส.ท."}],
|
||||
"จ.ส.อ.": [{ORTH: "จ.ส.อ."}],
|
||||
"จ.อ.": [{ORTH: "จ.อ."}],
|
||||
"ทพญ.": [{ORTH: "ทพญ."}],
|
||||
"ทนพ.": [{ORTH: "ทนพ."}],
|
||||
"นจอ.": [{ORTH: "นจอ."}],
|
||||
"น.ช.": [{ORTH: "น.ช."}],
|
||||
"น.ญ.": [{ORTH: "น.ญ."}],
|
||||
"น.ต.": [{ORTH: "น.ต."}],
|
||||
"น.ท.": [{ORTH: "น.ท."}],
|
||||
"นตท.": [{ORTH: "นตท."}],
|
||||
"นนส.": [{ORTH: "นนส."}],
|
||||
"นนร.": [{ORTH: "นนร."}],
|
||||
"นนอ.": [{ORTH: "นนอ."}],
|
||||
"นพ.": [{ORTH: "นพ."}],
|
||||
"นพท.": [{ORTH: "นพท."}],
|
||||
"นรจ.": [{ORTH: "นรจ."}],
|
||||
"นรต.": [{ORTH: "นรต."}],
|
||||
"นศพ.": [{ORTH: "นศพ."}],
|
||||
"นศท.": [{ORTH: "นศท."}],
|
||||
"น.สพ.": [{ORTH: "น.สพ."}],
|
||||
"น.อ.": [{ORTH: "น.อ."}],
|
||||
"บช.ก.": [{ORTH: "บช.ก."}],
|
||||
"บช.น.": [{ORTH: "บช.น."}],
|
||||
"ผกก.": [{ORTH: "ผกก."}],
|
||||
"ผกก.ภ.": [{ORTH: "ผกก.ภ."}],
|
||||
"ผจก.": [{ORTH: "ผจก."}],
|
||||
"ผช.": [{ORTH: "ผช."}],
|
||||
"ผชก.": [{ORTH: "ผชก."}],
|
||||
"ผช.ผอ.": [{ORTH: "ผช.ผอ."}],
|
||||
"ผญบ.": [{ORTH: "ผญบ."}],
|
||||
"ผบ.": [{ORTH: "ผบ."}],
|
||||
"ผบก.": [{ORTH: "ผบก."}],
|
||||
"ผบก.น.": [{ORTH: "ผบก.น."}],
|
||||
"ผบก.ป.": [{ORTH: "ผบก.ป."}],
|
||||
"ผบก.ปค.": [{ORTH: "ผบก.ปค."}],
|
||||
"ผบก.ปม.": [{ORTH: "ผบก.ปม."}],
|
||||
"ผบก.ภ.": [{ORTH: "ผบก.ภ."}],
|
||||
"ผบช.": [{ORTH: "ผบช."}],
|
||||
"ผบช.ก.": [{ORTH: "ผบช.ก."}],
|
||||
"ผบช.ตชด.": [{ORTH: "ผบช.ตชด."}],
|
||||
"ผบช.น.": [{ORTH: "ผบช.น."}],
|
||||
"ผบช.ภ.": [{ORTH: "ผบช.ภ."}],
|
||||
"ผบ.ทบ.": [{ORTH: "ผบ.ทบ."}],
|
||||
"ผบ.ตร.": [{ORTH: "ผบ.ตร."}],
|
||||
"ผบ.ทร.": [{ORTH: "ผบ.ทร."}],
|
||||
"ผบ.ทอ.": [{ORTH: "ผบ.ทอ."}],
|
||||
"ผบ.ทสส.": [{ORTH: "ผบ.ทสส."}],
|
||||
"ผวจ.": [{ORTH: "ผวจ."}],
|
||||
"ผู้ว่าฯ": [{ORTH: "ผู้ว่าฯ"}],
|
||||
"พ.จ.ต.": [{ORTH: "พ.จ.ต."}],
|
||||
"พ.จ.ท.": [{ORTH: "พ.จ.ท."}],
|
||||
"พ.จ.อ.": [{ORTH: "พ.จ.อ."}],
|
||||
"พญ.": [{ORTH: "พญ."}],
|
||||
"ฯพณฯ": [{ORTH: "ฯพณฯ"}],
|
||||
"พ.ต.": [{ORTH: "พ.ต."}],
|
||||
"พ.ท.": [{ORTH: "พ.ท."}],
|
||||
"พ.อ.": [{ORTH: "พ.อ."}],
|
||||
"พ.ต.อ.พิเศษ": [{ORTH: "พ.ต.อ.พิเศษ"}],
|
||||
"พลฯ": [{ORTH: "พลฯ"}],
|
||||
"พล.๑ รอ.": [{ORTH: "พล.๑ รอ."}],
|
||||
"พล.ต.": [{ORTH: "พล.ต."}],
|
||||
"พล.ต.ต.": [{ORTH: "พล.ต.ต."}],
|
||||
"พล.ต.ท.": [{ORTH: "พล.ต.ท."}],
|
||||
"พล.ต.อ.": [{ORTH: "พล.ต.อ."}],
|
||||
"พล.ท.": [{ORTH: "พล.ท."}],
|
||||
"พล.ปตอ.": [{ORTH: "พล.ปตอ."}],
|
||||
"พล.ม.": [{ORTH: "พล.ม."}],
|
||||
"พล.ม.๒": [{ORTH: "พล.ม.๒"}],
|
||||
"พล.ร.ต.": [{ORTH: "พล.ร.ต."}],
|
||||
"พล.ร.ท.": [{ORTH: "พล.ร.ท."}],
|
||||
"พล.ร.อ.": [{ORTH: "พล.ร.อ."}],
|
||||
"พล.อ.": [{ORTH: "พล.อ."}],
|
||||
"พล.อ.ต.": [{ORTH: "พล.อ.ต."}],
|
||||
"พล.อ.ท.": [{ORTH: "พล.อ.ท."}],
|
||||
"พล.อ.อ.": [{ORTH: "พล.อ.อ."}],
|
||||
"พ.อ.พิเศษ": [{ORTH: "พ.อ.พิเศษ"}],
|
||||
"พ.อ.ต.": [{ORTH: "พ.อ.ต."}],
|
||||
"พ.อ.ท.": [{ORTH: "พ.อ.ท."}],
|
||||
"พ.อ.อ.": [{ORTH: "พ.อ.อ."}],
|
||||
"ภกญ.": [{ORTH: "ภกญ."}],
|
||||
"ม.จ.": [{ORTH: "ม.จ."}],
|
||||
"มท1": [{ORTH: "มท1"}],
|
||||
"ม.ร.ว.": [{ORTH: "ม.ร.ว."}],
|
||||
"มล.": [{ORTH: "มล."}],
|
||||
"ร.ต.": [{ORTH: "ร.ต."}],
|
||||
"ร.ต.ต.": [{ORTH: "ร.ต.ต."}],
|
||||
"ร.ต.ท.": [{ORTH: "ร.ต.ท."}],
|
||||
"ร.ต.อ.": [{ORTH: "ร.ต.อ."}],
|
||||
"ร.ท.": [{ORTH: "ร.ท."}],
|
||||
"รมช.": [{ORTH: "รมช."}],
|
||||
"รมต.": [{ORTH: "รมต."}],
|
||||
"รมว.": [{ORTH: "รมว."}],
|
||||
"รศ.": [{ORTH: "รศ."}],
|
||||
"ร.อ.": [{ORTH: "ร.อ."}],
|
||||
"ศ.": [{ORTH: "ศ."}],
|
||||
"ส.ต.": [{ORTH: "ส.ต."}],
|
||||
"ส.ต.ต.": [{ORTH: "ส.ต.ต."}],
|
||||
"ส.ต.ท.": [{ORTH: "ส.ต.ท."}],
|
||||
"ส.ต.อ.": [{ORTH: "ส.ต.อ."}],
|
||||
"ส.ท.": [{ORTH: "ส.ท."}],
|
||||
"สพ.": [{ORTH: "สพ."}],
|
||||
"สพ.ญ.": [{ORTH: "สพ.ญ."}],
|
||||
"สพ.ช.": [{ORTH: "สพ.ช."}],
|
||||
"ส.อ.": [{ORTH: "ส.อ."}],
|
||||
"อจ.": [{ORTH: "อจ."}],
|
||||
"อจญ.": [{ORTH: "อจญ."}],
|
||||
# วุฒิ / bachelor degree
"ป.": [{ORTH: "ป.", LEMMA: "ประถมศึกษา"}],
"ป.กศ.": [{ORTH: "ป.กศ.", LEMMA: "ประกาศนียบัตรวิชาการศึกษา"}],
"ป.กศ.สูง": [{ORTH: "ป.กศ.สูง", LEMMA: "ประกาศนียบัตรวิชาการศึกษาชั้นสูง"}],
"ปวช.": [{ORTH: "ปวช.", LEMMA: "ประกาศนียบัตรวิชาชีพ"}],
"ปวท.": [{ORTH: "ปวท.", LEMMA: "ประกาศนียบัตรวิชาชีพเทคนิค"}],
"ปวส.": [{ORTH: "ปวส.", LEMMA: "ประกาศนียบัตรวิชาชีพชั้นสูง"}],
"ปทส.": [{ORTH: "ปทส.", LEMMA: "ประกาศนียบัตรครูเทคนิคชั้นสูง"}],
"กษ.บ.": [{ORTH: "กษ.บ.", LEMMA: "เกษตรศาสตรบัณฑิต"}],
"กษ.ม.": [{ORTH: "กษ.ม.", LEMMA: "เกษตรศาสตรมหาบัณฑิต"}],
"กษ.ด.": [{ORTH: "กษ.ด.", LEMMA: "เกษตรศาสตรดุษฎีบัณฑิต"}],
"ค.บ.": [{ORTH: "ค.บ.", LEMMA: "ครุศาสตรบัณฑิต"}],
"คศ.บ.": [{ORTH: "คศ.บ.", LEMMA: "คหกรรมศาสตรบัณฑิต"}],
"คศ.ม.": [{ORTH: "คศ.ม.", LEMMA: "คหกรรมศาสตรมหาบัณฑิต"}],
"คศ.ด.": [{ORTH: "คศ.ด.", LEMMA: "คหกรรมศาสตรดุษฎีบัณฑิต"}],
"ค.อ.บ.": [{ORTH: "ค.อ.บ.", LEMMA: "ครุศาสตรอุตสาหกรรมบัณฑิต"}],
"ค.อ.ม.": [{ORTH: "ค.อ.ม.", LEMMA: "ครุศาสตรอุตสาหกรรมมหาบัณฑิต"}],
"ค.อ.ด.": [{ORTH: "ค.อ.ด.", LEMMA: "ครุศาสตรอุตสาหกรรมดุษฎีบัณฑิต"}],
"ทก.บ.": [{ORTH: "ทก.บ.", LEMMA: "เทคโนโลยีการเกษตรบัณฑิต"}],
"ทก.ม.": [{ORTH: "ทก.ม.", LEMMA: "เทคโนโลยีการเกษตรมหาบัณฑิต"}],
"ทก.ด.": [{ORTH: "ทก.ด.", LEMMA: "เทคโนโลยีการเกษตรดุษฎีบัณฑิต"}],
"ท.บ.": [{ORTH: "ท.บ.", LEMMA: "ทันตแพทยศาสตรบัณฑิต"}],
"ท.ม.": [{ORTH: "ท.ม.", LEMMA: "ทันตแพทยศาสตรมหาบัณฑิต"}],
"ท.ด.": [{ORTH: "ท.ด.", LEMMA: "ทันตแพทยศาสตรดุษฎีบัณฑิต"}],
"น.บ.": [{ORTH: "น.บ.", LEMMA: "นิติศาสตรบัณฑิต"}],
"น.ม.": [{ORTH: "น.ม.", LEMMA: "นิติศาสตรมหาบัณฑิต"}],
"น.ด.": [{ORTH: "น.ด.", LEMMA: "นิติศาสตรดุษฎีบัณฑิต"}],
"นศ.บ.": [{ORTH: "นศ.บ.", LEMMA: "นิเทศศาสตรบัณฑิต"}],
"นศ.ม.": [{ORTH: "นศ.ม.", LEMMA: "นิเทศศาสตรมหาบัณฑิต"}],
"นศ.ด.": [{ORTH: "นศ.ด.", LEMMA: "นิเทศศาสตรดุษฎีบัณฑิต"}],
"บช.บ.": [{ORTH: "บช.บ.", LEMMA: "บัญชีบัณฑิต"}],
"บช.ม.": [{ORTH: "บช.ม.", LEMMA: "บัญชีมหาบัณฑิต"}],
"บช.ด.": [{ORTH: "บช.ด.", LEMMA: "บัญชีดุษฎีบัณฑิต"}],
"บธ.บ.": [{ORTH: "บธ.บ.", LEMMA: "บริหารธุรกิจบัณฑิต"}],
"บธ.ม.": [{ORTH: "บธ.ม.", LEMMA: "บริหารธุรกิจมหาบัณฑิต"}],
"บธ.ด.": [{ORTH: "บธ.ด.", LEMMA: "บริหารธุรกิจดุษฎีบัณฑิต"}],
"พณ.บ.": [{ORTH: "พณ.บ.", LEMMA: "พาณิชยศาสตรบัณฑิต"}],
"พณ.ม.": [{ORTH: "พณ.ม.", LEMMA: "พาณิชยศาสตรมหาบัณฑิต"}],
"พณ.ด.": [{ORTH: "พณ.ด.", LEMMA: "พาณิชยศาสตรดุษฎีบัณฑิต"}],
"พ.บ.": [{ORTH: "พ.บ.", LEMMA: "แพทยศาสตรบัณฑิต"}],
"พ.ม.": [{ORTH: "พ.ม.", LEMMA: "แพทยศาสตรมหาบัณฑิต"}],
"พ.ด.": [{ORTH: "พ.ด.", LEMMA: "แพทยศาสตรดุษฎีบัณฑิต"}],
"พธ.บ.": [{ORTH: "พธ.บ.", LEMMA: "พุทธศาสตรบัณฑิต"}],
"พธ.ม.": [{ORTH: "พธ.ม.", LEMMA: "พุทธศาสตรมหาบัณฑิต"}],
"พธ.ด.": [{ORTH: "พธ.ด.", LEMMA: "พุทธศาสตรดุษฎีบัณฑิต"}],
"พบ.บ.": [{ORTH: "พบ.บ.", LEMMA: "พัฒนบริหารศาสตรบัณฑิต"}],
"พบ.ม.": [{ORTH: "พบ.ม.", LEMMA: "พัฒนบริหารศาสตรมหาบัณฑิต"}],
"พบ.ด.": [{ORTH: "พบ.ด.", LEMMA: "พัฒนบริหารศาสตรดุษฎีบัณฑิต"}],
"พย.บ.": [{ORTH: "พย.บ.", LEMMA: "พยาบาลศาสตรดุษฎีบัณฑิต"}],
"พย.ม.": [{ORTH: "พย.ม.", LEMMA: "พยาบาลศาสตรมหาบัณฑิต"}],
"พย.ด.": [{ORTH: "พย.ด.", LEMMA: "พยาบาลศาสตรดุษฎีบัณฑิต"}],
"พศ.บ.": [{ORTH: "พศ.บ.", LEMMA: "พาณิชยศาสตรบัณฑิต"}],
"พศ.ม.": [{ORTH: "พศ.ม.", LEMMA: "พาณิชยศาสตรมหาบัณฑิต"}],
"พศ.ด.": [{ORTH: "พศ.ด.", LEMMA: "พาณิชยศาสตรดุษฎีบัณฑิต"}],
"ภ.บ.": [{ORTH: "ภ.บ.", LEMMA: "เภสัชศาสตรบัณฑิต"}],
"ภ.ม.": [{ORTH: "ภ.ม.", LEMMA: "เภสัชศาสตรมหาบัณฑิต"}],
"ภ.ด.": [{ORTH: "ภ.ด.", LEMMA: "เภสัชศาสตรดุษฎีบัณฑิต"}],
"ภ.สถ.บ.": [{ORTH: "ภ.สถ.บ.", LEMMA: "ภูมิสถาปัตยกรรมศาสตรบัณฑิต"}],
"รป.บ.": [{ORTH: "รป.บ.", LEMMA: "รัฐประศาสนศาสตร์บัณฑิต"}],
"รป.ม.": [{ORTH: "รป.ม.", LEMMA: "รัฐประศาสนศาสตร์มหาบัณฑิต"}],
"วท.บ.": [{ORTH: "วท.บ.", LEMMA: "วิทยาศาสตรบัณฑิต"}],
"วท.ม.": [{ORTH: "วท.ม.", LEMMA: "วิทยาศาสตรมหาบัณฑิต"}],
"วท.ด.": [{ORTH: "วท.ด.", LEMMA: "วิทยาศาสตรดุษฎีบัณฑิต"}],
"ศ.บ.": [{ORTH: "ศ.บ.", LEMMA: "ศิลปบัณฑิต"}],
"ศศ.บ.": [{ORTH: "ศศ.บ.", LEMMA: "ศิลปศาสตรบัณฑิต"}],
"ศษ.บ.": [{ORTH: "ศษ.บ.", LEMMA: "ศึกษาศาสตรบัณฑิต"}],
"ศส.บ.": [{ORTH: "ศส.บ.", LEMMA: "เศรษฐศาสตรบัณฑิต"}],
"สถ.บ.": [{ORTH: "สถ.บ.", LEMMA: "สถาปัตยกรรมศาสตรบัณฑิต"}],
"สถ.ม.": [{ORTH: "สถ.ม.", LEMMA: "สถาปัตยกรรมศาสตรมหาบัณฑิต"}],
"สถ.ด.": [{ORTH: "สถ.ด.", LEMMA: "สถาปัตยกรรมศาสตรดุษฎีบัณฑิต"}],
"สพ.บ.": [{ORTH: "สพ.บ.", LEMMA: "สัตวแพทยศาสตรบัณฑิต"}],
"อ.บ.": [{ORTH: "อ.บ.", LEMMA: "อักษรศาสตรบัณฑิต"}],
"อ.ม.": [{ORTH: "อ.ม.", LEMMA: "อักษรศาสตรมหาบัณฑิต"}],
"อ.ด.": [{ORTH: "อ.ด.", LEMMA: "อักษรศาสตรดุษฎีบัณฑิต"}],
"ป.": [{ORTH: "ป."}],
|
||||
"ป.กศ.": [{ORTH: "ป.กศ."}],
|
||||
"ป.กศ.สูง": [{ORTH: "ป.กศ.สูง"}],
|
||||
"ปวช.": [{ORTH: "ปวช."}],
|
||||
"ปวท.": [{ORTH: "ปวท."}],
|
||||
"ปวส.": [{ORTH: "ปวส."}],
|
||||
"ปทส.": [{ORTH: "ปทส."}],
|
||||
"กษ.บ.": [{ORTH: "กษ.บ."}],
|
||||
"กษ.ม.": [{ORTH: "กษ.ม."}],
|
||||
"กษ.ด.": [{ORTH: "กษ.ด."}],
|
||||
"ค.บ.": [{ORTH: "ค.บ."}],
|
||||
"คศ.บ.": [{ORTH: "คศ.บ."}],
|
||||
"คศ.ม.": [{ORTH: "คศ.ม."}],
|
||||
"คศ.ด.": [{ORTH: "คศ.ด."}],
|
||||
"ค.อ.บ.": [{ORTH: "ค.อ.บ."}],
|
||||
"ค.อ.ม.": [{ORTH: "ค.อ.ม."}],
|
||||
"ค.อ.ด.": [{ORTH: "ค.อ.ด."}],
|
||||
"ทก.บ.": [{ORTH: "ทก.บ."}],
|
||||
"ทก.ม.": [{ORTH: "ทก.ม."}],
|
||||
"ทก.ด.": [{ORTH: "ทก.ด."}],
|
||||
"ท.บ.": [{ORTH: "ท.บ."}],
|
||||
"ท.ม.": [{ORTH: "ท.ม."}],
|
||||
"ท.ด.": [{ORTH: "ท.ด."}],
|
||||
"น.บ.": [{ORTH: "น.บ."}],
|
||||
"น.ม.": [{ORTH: "น.ม."}],
|
||||
"น.ด.": [{ORTH: "น.ด."}],
|
||||
"นศ.บ.": [{ORTH: "นศ.บ."}],
|
||||
"นศ.ม.": [{ORTH: "นศ.ม."}],
|
||||
"นศ.ด.": [{ORTH: "นศ.ด."}],
|
||||
"บช.บ.": [{ORTH: "บช.บ."}],
|
||||
"บช.ม.": [{ORTH: "บช.ม."}],
|
||||
"บช.ด.": [{ORTH: "บช.ด."}],
|
||||
"บธ.บ.": [{ORTH: "บธ.บ."}],
|
||||
"บธ.ม.": [{ORTH: "บธ.ม."}],
|
||||
"บธ.ด.": [{ORTH: "บธ.ด."}],
|
||||
"พณ.บ.": [{ORTH: "พณ.บ."}],
|
||||
"พณ.ม.": [{ORTH: "พณ.ม."}],
|
||||
"พณ.ด.": [{ORTH: "พณ.ด."}],
|
||||
"พ.บ.": [{ORTH: "พ.บ."}],
|
||||
"พ.ม.": [{ORTH: "พ.ม."}],
|
||||
"พ.ด.": [{ORTH: "พ.ด."}],
|
||||
"พธ.บ.": [{ORTH: "พธ.บ."}],
|
||||
"พธ.ม.": [{ORTH: "พธ.ม."}],
|
||||
"พธ.ด.": [{ORTH: "พธ.ด."}],
|
||||
"พบ.บ.": [{ORTH: "พบ.บ."}],
|
||||
"พบ.ม.": [{ORTH: "พบ.ม."}],
|
||||
"พบ.ด.": [{ORTH: "พบ.ด."}],
|
||||
"พย.บ.": [{ORTH: "พย.บ."}],
|
||||
"พย.ม.": [{ORTH: "พย.ม."}],
|
||||
"พย.ด.": [{ORTH: "พย.ด."}],
|
||||
"พศ.บ.": [{ORTH: "พศ.บ."}],
|
||||
"พศ.ม.": [{ORTH: "พศ.ม."}],
|
||||
"พศ.ด.": [{ORTH: "พศ.ด."}],
|
||||
"ภ.บ.": [{ORTH: "ภ.บ."}],
|
||||
"ภ.ม.": [{ORTH: "ภ.ม."}],
|
||||
"ภ.ด.": [{ORTH: "ภ.ด."}],
|
||||
"ภ.สถ.บ.": [{ORTH: "ภ.สถ.บ."}],
|
||||
"รป.บ.": [{ORTH: "รป.บ."}],
|
||||
"รป.ม.": [{ORTH: "รป.ม."}],
|
||||
"วท.บ.": [{ORTH: "วท.บ."}],
|
||||
"วท.ม.": [{ORTH: "วท.ม."}],
|
||||
"วท.ด.": [{ORTH: "วท.ด."}],
|
||||
"ศ.บ.": [{ORTH: "ศ.บ."}],
|
||||
"ศศ.บ.": [{ORTH: "ศศ.บ."}],
|
||||
"ศษ.บ.": [{ORTH: "ศษ.บ."}],
|
||||
"ศส.บ.": [{ORTH: "ศส.บ."}],
|
||||
"สถ.บ.": [{ORTH: "สถ.บ."}],
|
||||
"สถ.ม.": [{ORTH: "สถ.ม."}],
|
||||
"สถ.ด.": [{ORTH: "สถ.ด."}],
|
||||
"สพ.บ.": [{ORTH: "สพ.บ."}],
|
||||
"อ.บ.": [{ORTH: "อ.บ."}],
|
||||
"อ.ม.": [{ORTH: "อ.ม."}],
|
||||
"อ.ด.": [{ORTH: "อ.ด."}],
|
||||
# ปี / เวลา / year / time
"ชม.": [{ORTH: "ชม.", LEMMA: "ชั่วโมง"}],
"จ.ศ.": [{ORTH: "จ.ศ.", LEMMA: "จุลศักราช"}],
"ค.ศ.": [{ORTH: "ค.ศ.", LEMMA: "คริสต์ศักราช"}],
"ฮ.ศ.": [{ORTH: "ฮ.ศ.", LEMMA: "ฮิจเราะห์ศักราช"}],
"ว.ด.ป.": [{ORTH: "ว.ด.ป.", LEMMA: "วัน เดือน ปี"}],
"ชม.": [{ORTH: "ชม."}],
"จ.ศ.": [{ORTH: "จ.ศ."}],
"ค.ศ.": [{ORTH: "ค.ศ."}],
"ฮ.ศ.": [{ORTH: "ฮ.ศ."}],
"ว.ด.ป.": [{ORTH: "ว.ด.ป."}],
# ระยะทาง / distance
"ฮม.": [{ORTH: "ฮม.", LEMMA: "เฮกโตเมตร"}],
"ดคม.": [{ORTH: "ดคม.", LEMMA: "เดคาเมตร"}],
"ดม.": [{ORTH: "ดม.", LEMMA: "เดซิเมตร"}],
"มม.": [{ORTH: "มม.", LEMMA: "มิลลิเมตร"}],
"ซม.": [{ORTH: "ซม.", LEMMA: "เซนติเมตร"}],
"กม.": [{ORTH: "กม.", LEMMA: "กิโลเมตร"}],
"ฮม.": [{ORTH: "ฮม."}],
"ดคม.": [{ORTH: "ดคม."}],
"ดม.": [{ORTH: "ดม."}],
"มม.": [{ORTH: "มม."}],
"ซม.": [{ORTH: "ซม."}],
"กม.": [{ORTH: "กม."}],
# น้ำหนัก / weight
"น.น.": [{ORTH: "น.น.", LEMMA: "น้ำหนัก"}],
"ฮก.": [{ORTH: "ฮก.", LEMMA: "เฮกโตกรัม"}],
"ดคก.": [{ORTH: "ดคก.", LEMMA: "เดคากรัม"}],
"ดก.": [{ORTH: "ดก.", LEMMA: "เดซิกรัม"}],
"ซก.": [{ORTH: "ซก.", LEMMA: "เซนติกรัม"}],
"มก.": [{ORTH: "มก.", LEMMA: "มิลลิกรัม"}],
"ก.": [{ORTH: "ก.", LEMMA: "กรัม"}],
"กก.": [{ORTH: "กก.", LEMMA: "กิโลกรัม"}],
"น.น.": [{ORTH: "น.น."}],
"ฮก.": [{ORTH: "ฮก."}],
"ดคก.": [{ORTH: "ดคก."}],
"ดก.": [{ORTH: "ดก."}],
"ซก.": [{ORTH: "ซก."}],
"มก.": [{ORTH: "มก."}],
"ก.": [{ORTH: "ก."}],
"กก.": [{ORTH: "กก."}],
# ปริมาตร / volume
"ฮล.": [{ORTH: "ฮล.", LEMMA: "เฮกโตลิตร"}],
"ดคล.": [{ORTH: "ดคล.", LEMMA: "เดคาลิตร"}],
"ดล.": [{ORTH: "ดล.", LEMMA: "เดซิลิตร"}],
"ซล.": [{ORTH: "ซล.", LEMMA: "เซนติลิตร"}],
"ล.": [{ORTH: "ล.", LEMMA: "ลิตร"}],
"กล.": [{ORTH: "กล.", LEMMA: "กิโลลิตร"}],
"ลบ.": [{ORTH: "ลบ.", LEMMA: "ลูกบาศก์"}],
"ฮล.": [{ORTH: "ฮล."}],
"ดคล.": [{ORTH: "ดคล."}],
"ดล.": [{ORTH: "ดล."}],
"ซล.": [{ORTH: "ซล."}],
"ล.": [{ORTH: "ล."}],
"กล.": [{ORTH: "กล."}],
"ลบ.": [{ORTH: "ลบ."}],
# พื้นที่ / area
"ตร.ซม.": [{ORTH: "ตร.ซม.", LEMMA: "ตารางเซนติเมตร"}],
"ตร.ม.": [{ORTH: "ตร.ม.", LEMMA: "ตารางเมตร"}],
"ตร.ว.": [{ORTH: "ตร.ว.", LEMMA: "ตารางวา"}],
"ตร.กม.": [{ORTH: "ตร.กม.", LEMMA: "ตารางกิโลเมตร"}],
"ตร.ซม.": [{ORTH: "ตร.ซม."}],
"ตร.ม.": [{ORTH: "ตร.ม."}],
"ตร.ว.": [{ORTH: "ตร.ว."}],
"ตร.กม.": [{ORTH: "ตร.กม."}],
# เดือน / month
"ม.ค.": [{ORTH: "ม.ค.", LEMMA: "มกราคม"}],
"ก.พ.": [{ORTH: "ก.พ.", LEMMA: "กุมภาพันธ์"}],
"มี.ค.": [{ORTH: "มี.ค.", LEMMA: "มีนาคม"}],
"เม.ย.": [{ORTH: "เม.ย.", LEMMA: "เมษายน"}],
"พ.ค.": [{ORTH: "พ.ค.", LEMMA: "พฤษภาคม"}],
"มิ.ย.": [{ORTH: "มิ.ย.", LEMMA: "มิถุนายน"}],
"ก.ค.": [{ORTH: "ก.ค.", LEMMA: "กรกฎาคม"}],
"ส.ค.": [{ORTH: "ส.ค.", LEMMA: "สิงหาคม"}],
"ก.ย.": [{ORTH: "ก.ย.", LEMMA: "กันยายน"}],
"ต.ค.": [{ORTH: "ต.ค.", LEMMA: "ตุลาคม"}],
"พ.ย.": [{ORTH: "พ.ย.", LEMMA: "พฤศจิกายน"}],
"ธ.ค.": [{ORTH: "ธ.ค.", LEMMA: "ธันวาคม"}],
"ม.ค.": [{ORTH: "ม.ค."}],
"ก.พ.": [{ORTH: "ก.พ."}],
"มี.ค.": [{ORTH: "มี.ค."}],
"เม.ย.": [{ORTH: "เม.ย."}],
"พ.ค.": [{ORTH: "พ.ค."}],
"มิ.ย.": [{ORTH: "มิ.ย."}],
"ก.ค.": [{ORTH: "ก.ค."}],
"ส.ค.": [{ORTH: "ส.ค."}],
"ก.ย.": [{ORTH: "ก.ย."}],
"ต.ค.": [{ORTH: "ต.ค."}],
"พ.ย.": [{ORTH: "พ.ย."}],
"ธ.ค.": [{ORTH: "ธ.ค."}],
# เพศ / gender
"ช.": [{ORTH: "ช.", LEMMA: "ชาย"}],
"ญ.": [{ORTH: "ญ.", LEMMA: "หญิง"}],
"ด.ช.": [{ORTH: "ด.ช.", LEMMA: "เด็กชาย"}],
"ด.ญ.": [{ORTH: "ด.ญ.", LEMMA: "เด็กหญิง"}],
"ช.": [{ORTH: "ช."}],
"ญ.": [{ORTH: "ญ."}],
"ด.ช.": [{ORTH: "ด.ช."}],
"ด.ญ.": [{ORTH: "ด.ญ."}],
# ที่อยู่ / address
"ถ.": [{ORTH: "ถ.", LEMMA: "ถนน"}],
"ต.": [{ORTH: "ต.", LEMMA: "ตำบล"}],
"อ.": [{ORTH: "อ.", LEMMA: "อำเภอ"}],
"จ.": [{ORTH: "จ.", LEMMA: "จังหวัด"}],
"ถ.": [{ORTH: "ถ."}],
"ต.": [{ORTH: "ต."}],
"อ.": [{ORTH: "อ."}],
"จ.": [{ORTH: "จ."}],
# สรรพนาม / pronoun
"ข้าฯ": [{ORTH: "ข้าฯ", LEMMA: "ข้าพระพุทธเจ้า"}],
"ทูลเกล้าฯ": [{ORTH: "ทูลเกล้าฯ", LEMMA: "ทูลเกล้าทูลกระหม่อม"}],
"น้อมเกล้าฯ": [{ORTH: "น้อมเกล้าฯ", LEMMA: "น้อมเกล้าน้อมกระหม่อม"}],
"โปรดเกล้าฯ": [{ORTH: "โปรดเกล้าฯ", LEMMA: "โปรดเกล้าโปรดกระหม่อม"}],
"ข้าฯ": [{ORTH: "ข้าฯ"}],
"ทูลเกล้าฯ": [{ORTH: "ทูลเกล้าฯ"}],
"น้อมเกล้าฯ": [{ORTH: "น้อมเกล้าฯ"}],
"โปรดเกล้าฯ": [{ORTH: "โปรดเกล้าฯ"}],
# การเมือง / politic
"ขจก.": [{ORTH: "ขจก.", LEMMA: "ขบวนการโจรก่อการร้าย"}],
"ขบด.": [{ORTH: "ขบด.", LEMMA: "ขบวนการแบ่งแยกดินแดน"}],
"นปช.": [{ORTH: "นปช.", LEMMA: "แนวร่วมประชาธิปไตยขับไล่เผด็จการ"}],
"ปชป.": [{ORTH: "ปชป.", LEMMA: "พรรคประชาธิปัตย์"}],
"ผกค.": [{ORTH: "ผกค.", LEMMA: "ผู้ก่อการร้ายคอมมิวนิสต์"}],
"พท.": [{ORTH: "พท.", LEMMA: "พรรคเพื่อไทย"}],
"พ.ร.ก.": [{ORTH: "พ.ร.ก.", LEMMA: "พระราชกำหนด"}],
"พ.ร.ฎ.": [{ORTH: "พ.ร.ฎ.", LEMMA: "พระราชกฤษฎีกา"}],
"พ.ร.บ.": [{ORTH: "พ.ร.บ.", LEMMA: "พระราชบัญญัติ"}],
"รธน.": [{ORTH: "รธน.", LEMMA: "รัฐธรรมนูญ"}],
"รบ.": [{ORTH: "รบ.", LEMMA: "รัฐบาล"}],
"รสช.": [{ORTH: "รสช.", LEMMA: "คณะรักษาความสงบเรียบร้อยแห่งชาติ"}],
"ส.ก.": [{ORTH: "ส.ก.", LEMMA: "สมาชิกสภากรุงเทพมหานคร"}],
"สจ.": [{ORTH: "สจ.", LEMMA: "สมาชิกสภาจังหวัด"}],
"สว.": [{ORTH: "สว.", LEMMA: "สมาชิกวุฒิสภา"}],
"ส.ส.": [{ORTH: "ส.ส.", LEMMA: "สมาชิกสภาผู้แทนราษฎร"}],
"ขจก.": [{ORTH: "ขจก."}],
"ขบด.": [{ORTH: "ขบด."}],
"นปช.": [{ORTH: "นปช."}],
"ปชป.": [{ORTH: "ปชป."}],
"ผกค.": [{ORTH: "ผกค."}],
"พท.": [{ORTH: "พท."}],
"พ.ร.ก.": [{ORTH: "พ.ร.ก."}],
"พ.ร.ฎ.": [{ORTH: "พ.ร.ฎ."}],
"พ.ร.บ.": [{ORTH: "พ.ร.บ."}],
"รธน.": [{ORTH: "รธน."}],
"รบ.": [{ORTH: "รบ."}],
"รสช.": [{ORTH: "รสช."}],
"ส.ก.": [{ORTH: "ส.ก."}],
"สจ.": [{ORTH: "สจ."}],
"สว.": [{ORTH: "สว."}],
"ส.ส.": [{ORTH: "ส.ส."}],
# ทั่วไป / general
"ก.ข.ค.": [{ORTH: "ก.ข.ค.", LEMMA: "ก้างขวางคอ"}],
"กทม.": [{ORTH: "กทม.", LEMMA: "กรุงเทพมหานคร"}],
"กรุงเทพฯ": [{ORTH: "กรุงเทพฯ", LEMMA: "กรุงเทพมหานคร"}],
"ขรก.": [{ORTH: "ขรก.", LEMMA: "ข้าราชการ"}],
"ขส": [{ORTH: "ขส.", LEMMA: "ขนส่ง"}],
"ค.ร.น.": [{ORTH: "ค.ร.น.", LEMMA: "คูณร่วมน้อย"}],
"ค.ร.ม.": [{ORTH: "ค.ร.ม.", LEMMA: "คูณร่วมมาก"}],
"ง.ด.": [{ORTH: "ง.ด.", LEMMA: "เงินเดือน"}],
"งป.": [{ORTH: "งป.", LEMMA: "งบประมาณ"}],
"จก.": [{ORTH: "จก.", LEMMA: "จำกัด"}],
"จขกท.": [{ORTH: "จขกท.", LEMMA: "เจ้าของกระทู้"}],
"จนท.": [{ORTH: "จนท.", LEMMA: "เจ้าหน้าที่"}],
"จ.ป.ร.": [
    {
        ORTH: "จ.ป.ร.",
        LEMMA: "มหาจุฬาลงกรณ ปรมราชาธิราช (พระปรมาภิไธยในพระบาทสมเด็จพระจุลจอมเกล้าเจ้าอยู่หัว)",
    }
],
"จ.ม.": [{ORTH: "จ.ม.", LEMMA: "จดหมาย"}],
"จย.": [{ORTH: "จย.", LEMMA: "จักรยาน"}],
"จยย.": [{ORTH: "จยย.", LEMMA: "จักรยานยนต์"}],
"ตจว.": [{ORTH: "ตจว.", LEMMA: "ต่างจังหวัด"}],
"โทร.": [{ORTH: "โทร.", LEMMA: "โทรศัพท์"}],
"ธ.": [{ORTH: "ธ.", LEMMA: "ธนาคาร"}],
"น.ร.": [{ORTH: "น.ร.", LEMMA: "นักเรียน"}],
"น.ศ.": [{ORTH: "น.ศ.", LEMMA: "นักศึกษา"}],
"น.ส.": [{ORTH: "น.ส.", LEMMA: "นางสาว"}],
"น.ส.๓": [{ORTH: "น.ส.๓", LEMMA: "หนังสือรับรองการทำประโยชน์ในที่ดิน"}],
"น.ส.๓ ก.": [
    {ORTH: "น.ส.๓ ก", LEMMA: "หนังสือแสดงกรรมสิทธิ์ในที่ดิน (มีระวางกำหนด)"}
],
"นสพ.": [{ORTH: "นสพ.", LEMMA: "หนังสือพิมพ์"}],
"บ.ก.": [{ORTH: "บ.ก.", LEMMA: "บรรณาธิการ"}],
"บจก.": [{ORTH: "บจก.", LEMMA: "บริษัทจำกัด"}],
"บงล.": [{ORTH: "บงล.", LEMMA: "บริษัทเงินทุนและหลักทรัพย์จำกัด"}],
"บบส.": [{ORTH: "บบส.", LEMMA: "บรรษัทบริหารสินทรัพย์สถาบันการเงิน"}],
"บมจ.": [{ORTH: "บมจ.", LEMMA: "บริษัทมหาชนจำกัด"}],
"บลจ.": [{ORTH: "บลจ.", LEMMA: "บริษัทหลักทรัพย์จัดการกองทุนรวมจำกัด"}],
"บ/ช": [{ORTH: "บ/ช", LEMMA: "บัญชี"}],
"บร.": [{ORTH: "บร.", LEMMA: "บรรณารักษ์"}],
"ปชช.": [{ORTH: "ปชช.", LEMMA: "ประชาชน"}],
"ปณ.": [{ORTH: "ปณ.", LEMMA: "ที่ทำการไปรษณีย์"}],
"ปณก.": [{ORTH: "ปณก.", LEMMA: "ที่ทำการไปรษณีย์กลาง"}],
"ปณส.": [{ORTH: "ปณส.", LEMMA: "ที่ทำการไปรษณีย์สาขา"}],
"ปธ.": [{ORTH: "ปธ.", LEMMA: "ประธาน"}],
"ปธน.": [{ORTH: "ปธน.", LEMMA: "ประธานาธิบดี"}],
"ปอ.": [{ORTH: "ปอ.", LEMMA: "รถยนต์โดยสารประจำทางปรับอากาศ"}],
"ปอ.พ.": [{ORTH: "ปอ.พ.", LEMMA: "รถยนต์โดยสารประจำทางปรับอากาศพิเศษ"}],
"พ.ก.ง.": [{ORTH: "พ.ก.ง.", LEMMA: "พัสดุเก็บเงินปลายทาง"}],
"พ.ก.ส.": [{ORTH: "พ.ก.ส.", LEMMA: "พนักงานเก็บค่าโดยสาร"}],
"พขร.": [{ORTH: "พขร.", LEMMA: "พนักงานขับรถ"}],
"ภ.ง.ด.": [{ORTH: "ภ.ง.ด.", LEMMA: "ภาษีเงินได้"}],
"ภ.ง.ด.๙": [{ORTH: "ภ.ง.ด.๙", LEMMA: "แบบแสดงรายการเสียภาษีเงินได้ของกรมสรรพากร"}],
"ภ.ป.ร.": [
    {
        ORTH: "ภ.ป.ร.",
        LEMMA: "ภูมิพลอดุยเดช ปรมราชาธิราช (พระปรมาภิไธยในพระบาทสมเด็จพระปรมินทรมหาภูมิพลอดุลยเดช)",
    }
],
"ภ.พ.": [{ORTH: "ภ.พ.", LEMMA: "ภาษีมูลค่าเพิ่ม"}],
"ร.": [{ORTH: "ร.", LEMMA: "รัชกาล"}],
"ร.ง.": [{ORTH: "ร.ง.", LEMMA: "โรงงาน"}],
"ร.ด.": [{ORTH: "ร.ด.", LEMMA: "รักษาดินแดน"}],
"รปภ.": [{ORTH: "รปภ.", LEMMA: "รักษาความปลอดภัย"}],
"รพ.": [{ORTH: "รพ.", LEMMA: "โรงพยาบาล"}],
"ร.พ.": [{ORTH: "ร.พ.", LEMMA: "โรงพิมพ์"}],
"รร.": [{ORTH: "รร.", LEMMA: "โรงเรียน,โรงแรม"}],
"รสก.": [{ORTH: "รสก.", LEMMA: "รัฐวิสาหกิจ"}],
"ส.ค.ส.": [{ORTH: "ส.ค.ส.", LEMMA: "ส่งความสุขปีใหม่"}],
"สต.": [{ORTH: "สต.", LEMMA: "สตางค์"}],
"สน.": [{ORTH: "สน.", LEMMA: "สถานีตำรวจ"}],
"สนข.": [{ORTH: "สนข.", LEMMA: "สำนักงานเขต"}],
"สนง.": [{ORTH: "สนง.", LEMMA: "สำนักงาน"}],
"สนญ.": [{ORTH: "สนญ.", LEMMA: "สำนักงานใหญ่"}],
"ส.ป.ช.": [{ORTH: "ส.ป.ช.", LEMMA: "สร้างเสริมประสบการณ์ชีวิต"}],
"สภ.": [{ORTH: "สภ.", LEMMA: "สถานีตำรวจภูธร"}],
"ส.ล.น.": [{ORTH: "ส.ล.น.", LEMMA: "สร้างเสริมลักษณะนิสัย"}],
"สวญ.": [{ORTH: "สวญ.", LEMMA: "สารวัตรใหญ่"}],
"สวป.": [{ORTH: "สวป.", LEMMA: "สารวัตรป้องกันปราบปราม"}],
"สว.สส.": [{ORTH: "สว.สส.", LEMMA: "สารวัตรสืบสวน"}],
"ส.ห.": [{ORTH: "ส.ห.", LEMMA: "สารวัตรทหาร"}],
"สอ.": [{ORTH: "สอ.", LEMMA: "สถานีอนามัย"}],
"สอท.": [{ORTH: "สอท.", LEMMA: "สถานเอกอัครราชทูต"}],
"เสธ.": [{ORTH: "เสธ.", LEMMA: "เสนาธิการ"}],
"หจก.": [{ORTH: "หจก.", LEMMA: "ห้างหุ้นส่วนจำกัด"}],
"ห.ร.ม.": [{ORTH: "ห.ร.ม.", LEMMA: "ตัวหารร่วมมาก"}],
"ก.ข.ค.": [{ORTH: "ก.ข.ค."}],
|
||||
"กทม.": [{ORTH: "กทม."}],
|
||||
"กรุงเทพฯ": [{ORTH: "กรุงเทพฯ"}],
|
||||
"ขรก.": [{ORTH: "ขรก."}],
|
||||
"ขส": [{ORTH: "ขส."}],
|
||||
"ค.ร.น.": [{ORTH: "ค.ร.น."}],
|
||||
"ค.ร.ม.": [{ORTH: "ค.ร.ม."}],
|
||||
"ง.ด.": [{ORTH: "ง.ด."}],
|
||||
"งป.": [{ORTH: "งป."}],
|
||||
"จก.": [{ORTH: "จก."}],
|
||||
"จขกท.": [{ORTH: "จขกท."}],
|
||||
"จนท.": [{ORTH: "จนท."}],
|
||||
"จ.ป.ร.": [{ORTH: "จ.ป.ร."}],
|
||||
"จ.ม.": [{ORTH: "จ.ม."}],
|
||||
"จย.": [{ORTH: "จย."}],
|
||||
"จยย.": [{ORTH: "จยย."}],
|
||||
"ตจว.": [{ORTH: "ตจว."}],
|
||||
"โทร.": [{ORTH: "โทร."}],
|
||||
"ธ.": [{ORTH: "ธ."}],
|
||||
"น.ร.": [{ORTH: "น.ร."}],
|
||||
"น.ศ.": [{ORTH: "น.ศ."}],
|
||||
"น.ส.": [{ORTH: "น.ส."}],
|
||||
"น.ส.๓": [{ORTH: "น.ส.๓"}],
|
||||
"น.ส.๓ ก.": [{ORTH: "น.ส.๓ ก"}],
|
||||
"นสพ.": [{ORTH: "นสพ."}],
|
||||
"บ.ก.": [{ORTH: "บ.ก."}],
|
||||
"บจก.": [{ORTH: "บจก."}],
|
||||
"บงล.": [{ORTH: "บงล."}],
|
||||
"บบส.": [{ORTH: "บบส."}],
|
||||
"บมจ.": [{ORTH: "บมจ."}],
|
||||
"บลจ.": [{ORTH: "บลจ."}],
|
||||
"บ/ช": [{ORTH: "บ/ช"}],
|
||||
"บร.": [{ORTH: "บร."}],
|
||||
"ปชช.": [{ORTH: "ปชช."}],
|
||||
"ปณ.": [{ORTH: "ปณ."}],
|
||||
"ปณก.": [{ORTH: "ปณก."}],
|
||||
"ปณส.": [{ORTH: "ปณส."}],
|
||||
"ปธ.": [{ORTH: "ปธ."}],
|
||||
"ปธน.": [{ORTH: "ปธน."}],
|
||||
"ปอ.": [{ORTH: "ปอ."}],
|
||||
"ปอ.พ.": [{ORTH: "ปอ.พ."}],
|
||||
"พ.ก.ง.": [{ORTH: "พ.ก.ง."}],
|
||||
"พ.ก.ส.": [{ORTH: "พ.ก.ส."}],
|
||||
"พขร.": [{ORTH: "พขร."}],
|
||||
"ภ.ง.ด.": [{ORTH: "ภ.ง.ด."}],
|
||||
"ภ.ง.ด.๙": [{ORTH: "ภ.ง.ด.๙"}],
|
||||
"ภ.ป.ร.": [{ORTH: "ภ.ป.ร."}],
|
||||
"ภ.พ.": [{ORTH: "ภ.พ."}],
|
||||
"ร.": [{ORTH: "ร."}],
|
||||
"ร.ง.": [{ORTH: "ร.ง."}],
|
||||
"ร.ด.": [{ORTH: "ร.ด."}],
|
||||
"รปภ.": [{ORTH: "รปภ."}],
|
||||
"รพ.": [{ORTH: "รพ."}],
|
||||
"ร.พ.": [{ORTH: "ร.พ."}],
|
||||
"รร.": [{ORTH: "รร."}],
|
||||
"รสก.": [{ORTH: "รสก."}],
|
||||
"ส.ค.ส.": [{ORTH: "ส.ค.ส."}],
|
||||
"สต.": [{ORTH: "สต."}],
|
||||
"สน.": [{ORTH: "สน."}],
|
||||
"สนข.": [{ORTH: "สนข."}],
|
||||
"สนง.": [{ORTH: "สนง."}],
|
||||
"สนญ.": [{ORTH: "สนญ."}],
|
||||
"ส.ป.ช.": [{ORTH: "ส.ป.ช."}],
|
||||
"สภ.": [{ORTH: "สภ."}],
|
||||
"ส.ล.น.": [{ORTH: "ส.ล.น."}],
|
||||
"สวญ.": [{ORTH: "สวญ."}],
|
||||
"สวป.": [{ORTH: "สวป."}],
|
||||
"สว.สส.": [{ORTH: "สว.สส."}],
|
||||
"ส.ห.": [{ORTH: "ส.ห."}],
|
||||
"สอ.": [{ORTH: "สอ."}],
|
||||
"สอท.": [{ORTH: "สอท."}],
|
||||
"เสธ.": [{ORTH: "เสธ."}],
|
||||
"หจก.": [{ORTH: "หจก."}],
|
||||
"ห.ร.ม.": [{ORTH: "ห.ร.ม."}],
|
||||
}
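The diff above reduces every Thai exception entry to an ORTH-only form, since lemma data no longer lives in tokenizer exceptions after this change. The snippet below is a minimal sketch, not part of the diff, of what the new entry shape looks like in use. It registers one abbreviation as a special case on a standard rule-based Tokenizer; the English pipeline is used purely for illustration (the Thai pipeline itself relies on a PyThaiNLP-based word segmenter rather than this tokenizer), and the chosen abbreviation is just an example from the table.

# Minimal, illustrative sketch of the ORTH-only exception format (assumption:
# using the English rule-based tokenizer as a stand-in, since Thai uses a
# PyThaiNLP segmenter instead).
from spacy.symbols import ORTH
from spacy.lang.en import English

nlp = English()
special_case = [{ORTH: "กทม."}]  # one entry in the new, LEMMA-free shape
nlp.tokenizer.add_special_case("กทม.", special_case)
# The abbreviation is kept as a single token instead of being split at the period.
print([t.text for t in nlp.tokenizer("กทม.")])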