chore: add 'concepCy' to spacy universe (#11255)

* chore: add 'concepCy' to spacy universe

* docs: add 'slogan' to concepCy
Jules Belveze authored 2022-08-04 08:42:38 +02:00, committed by GitHub
parent d993df41e5
commit cd09614ab2


@@ -1,5 +1,39 @@
{
"resources": [
{
"id": "concepcy",
"title": "concepCy",
"slogan": "A multilingual knowledge graph in spaCy",
"description": "A spaCy wrapper for ConceptNet, a freely-available semantic network designed to help computers understand the meaning of words.",
"github": "JulesBelveze/concepcy",
"pip": "concepcy",
"code_example": [
"import spacy",
"import concepcy",
"",
"nlp = spacy.load('en_core_web_sm')",
"# Using default concepCy configuration",
"nlp.add_pipe('concepcy')",
"",
"doc = nlp('WHO is a lovely company')",
"",
"# Access all the 'RelatedTo' relations from the Doc",
"for word, relations in doc._.relatedto.items():",
" print(f'Word: {word}\n{relations}')",
"",
"# Access the 'RelatedTo' relations word by word",
"for token in doc:",
" print(f'Word: {token}\n{token._.relatedto}')"
],
"category": ["pipeline"],
"image": "https://github.com/JulesBelveze/concepcy/blob/main/figures/concepcy.png",
"tags": ["semantic", "ConceptNet"],
"author": "Jules Belveze",
"author_links": {
"github": "JulesBelveze",
"website": "https://www.linkedin.com/in/jules-belveze/"
}
},
{
"id": "spacyfishing",
"title": "spaCy fishing",
@@ -2604,7 +2638,7 @@
" Add the courgette, garlic, red peppers and oregano and cook for 23 minutes.",
" Later, add some oranges and chickens.\"\"\"",
"",
"# use any model that has internal spacy embeddings",
"nlp = spacy.load('en_core_web_lg')",
"nlp.add_pipe(\"concise_concepts\", ",
" config={\"data\": data}",
@@ -2650,7 +2684,7 @@
" At that location, Nissin was founded.",
" Many students survived by eating these noodles, but they don't even know him.\"\"\"",
"",
"# use any model that has internal spacy embeddings",
"nlp = spacy.load('en_core_web_sm')",
"nlp.add_pipe(",
" \"xx_coref\", config={\"chunk_size\": 2500, \"chunk_overlap\": 2, \"device\": 0})",
@@ -2833,7 +2867,7 @@
"doc = nlp(\"AE died in Princeton in 1955.\")",
"",
"print(doc._.clauses)",
"# Output:",
"# <SV, AE, died, None, None, None, [in Princeton, in 1955]>",
"",
"propositions = doc._.clauses[0].to_propositions(as_text=True)",
@@ -3599,7 +3633,7 @@
"",
"#Lexico Semantic (LxSem) Features",
"TTRF = LingFeat.TTRF_() #Type Token Ratio Features",
"VarF = LingFeat.VarF_() #Noun/Verb/Adj/Adv Variation Features",
"PsyF = LingFeat.PsyF_() #Psycholinguistic Difficulty of Words (AoA Kuperman)",
"WoLF = LingFeat.WorF_() #Word Familiarity from Frequency Count (SubtlexUS)",
"",