Remove unicode declarations and update language data

Ines Montani 2020-09-04 13:19:16 +02:00
parent ba600f91c5
commit df0b68f60e
13 changed files with 27 additions and 73 deletions

View File

@@ -1,7 +1,3 @@
-# coding: utf8
-from __future__ import unicode_literals
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.cs.examples import sentences
@@ -12,7 +8,7 @@ Example sentences to test spaCy and its language models.
sentences = [
"Máma mele maso.",
"Příliš žluťoučký kůň úpěl ďábelské ódy.",
"ArcGIS je geografický informační systém určený pro práci s prostorovými daty." ,
"ArcGIS je geografický informační systém určený pro práci s prostorovými daty.",
"Může data vytvářet a spravovat, ale především je dokáže analyzovat, najít v nich nové vztahy a vše přehledně vizualizovat.",
"Dnes je krásné počasí.",
"Nestihl autobus, protože pozdě vstal z postele.",
@@ -39,4 +35,4 @@ sentences = [
"Jaké PSČ má Praha 1?",
"PSČ Prahy 1 je 110 00.",
"Za 20 minut jede vlak.",
-]
+]
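As an aside on how these examples.py files are used: the sentences are meant for quickly smoke-testing a language's tokenizer and pipeline. A minimal sketch, assuming a blank Czech pipeline can be created with spacy.blank("cs"):

import spacy
from spacy.lang.cs.examples import sentences

# Blank Czech pipeline: just the tokenizer built from the language data above.
nlp = spacy.blank("cs")
for doc in nlp.pipe(sentences):
    # Print the first few tokens of each example sentence as a sanity check.
    print([token.text for token in doc[:5]])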

View File

@@ -1,6 +1,3 @@
-# coding: utf8
-from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = [
@@ -43,7 +40,7 @@ _num_words = [
"kvadrilion",
"kvadriliarda",
"kvintilion",
-]
+]
def like_num(text):

View File

@@ -1,6 +1,3 @@
-# coding: utf8
-from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = [
@@ -73,6 +70,7 @@ _ordinal_words = [
"עשירי",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
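The like_num code shown above follows the pattern shared across spaCy's lex_attrs modules: strip a leading sign, normalise separators, then check digits, fractions and a per-language word list. A minimal, self-contained sketch of that pattern (the _num_words list here is a placeholder, not this file's data):

from spacy.attrs import LIKE_NUM

# Placeholder list; the real modules enumerate the language's own number words.
_num_words = ["one", "two", "three"]


def like_num(text):
    # Drop a leading sign so "+5" or "-5" is treated like "5".
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    # Simple fractions such as "3/4" also count as number-like.
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    return text.lower() in _num_words


LEX_ATTRS = {LIKE_NUM: like_num}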

View File

@@ -1,7 +1,3 @@
-# coding: utf8
-from __future__ import unicode_literals
# Source: https://github.com/sanjaalcorps/NepaliStopWords/blob/master/NepaliStopWords.txt
STOP_WORDS = set(

View File

@@ -1,18 +1,10 @@
-# coding: utf8
-from __future__ import unicode_literals
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ...language import Language
-from ...attrs import LANG
class SanskritDefaults(Language.Defaults):
-lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
-lex_attr_getters.update(LEX_ATTRS)
-lex_attr_getters[LANG] = lambda text: "sa"
+lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
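The hunk above reflects the updated Defaults style: language data is assigned directly as class attributes rather than merged into a lex_attr_getters dict keyed by LANG. A minimal sketch of that shape, with placeholder values standing in for the module's real stop_words.py and lex_attrs.py data, and the enclosing Language subclass assumed from the standard spaCy layout rather than taken from this hunk:

from spacy.language import Language

# Placeholder data; the real values come from the stop_words and lex_attrs
# modules imported above.
STOP_WORDS = {"च", "न"}
LEX_ATTRS = {}


class SanskritDefaults(Language.Defaults):
    # Assigned directly as class attributes, as in the updated code above.
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS


class Sanskrit(Language):
    lang = "sa"
    Defaults = SanskritDefaults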

View File

@@ -1,7 +1,3 @@
-# coding: utf8
-from __future__ import unicode_literals
"""
Example sentences to test spaCy and its language models.

View File

@@ -1,9 +1,5 @@
-# coding: utf8
-from __future__ import unicode_literals
from ...attrs import LIKE_NUM
# reference 1: https://en.wikibooks.org/wiki/Sanskrit/Numbers
_num_words = [
@@ -106,7 +102,7 @@ _num_words = [
"सप्तनवतिः",
"अष्टनवतिः",
"एकोनशतम्",
"शतम्"
"शतम्",
]

View File

@@ -1,6 +1,3 @@
-# coding: utf8
-from __future__ import unicode_literals
# Source: https://gist.github.com/Akhilesh28/fe8b8e180f64b72e64751bc31cb6d323
STOP_WORDS = set(

View File

@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
import pytest

View File

@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
import pytest

View File

@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
import pytest

View File

@@ -1,15 +1,13 @@
-# coding: utf8
-from __future__ import unicode_literals
from spacy.lang.en import English
from spacy.tokens import Span
from spacy import displacy
-SAMPLE_TEXT = '''First line
+SAMPLE_TEXT = """First line
Second line, with ent
Third line
Fourth line
-'''
+"""
def test_issue5838():
@@ -18,8 +16,8 @@ def test_issue5838():
nlp = English()
doc = nlp(SAMPLE_TEXT)
-doc.ents = [Span(doc, 7, 8, label='test')]
+doc.ents = [Span(doc, 7, 8, label="test")]
-html = displacy.render(doc, style='ent')
-found = html.count('</br>')
+html = displacy.render(doc, style="ent")
+found = html.count("</br>")
assert found == 4
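The test above pins down how displacy handles newlines when rendering entities. As a small usage sketch beyond the regression itself (the "TEST" label, colour, and page option are illustrative choices, not taken from the commit):

from spacy import displacy
from spacy.lang.en import English
from spacy.tokens import Span

nlp = English()
doc = nlp("First line\nSecond line, with ent\nThird line\nFourth line\n")
doc.ents = [Span(doc, 7, 8, label="TEST")]

# page=True wraps the markup in a standalone HTML document; the colors option
# maps entity labels to background colours in the rendered output.
html = displacy.render(
    doc, style="ent", page=True, options={"colors": {"TEST": "#ffcc00"}}
)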

View File

@@ -1,6 +1,3 @@
-# coding: utf8
-from __future__ import unicode_literals
from spacy.lang.en import English
from spacy.pipeline import merge_entities, EntityRuler