Adjust GitHub embeds

ines 2017-10-27 12:30:59 +02:00
parent 4033e70c71
commit 52f1bf2729
7 changed files with 11 additions and 11 deletions

View File

@@ -181,7 +181,7 @@ mixin codepen(slug, height, default_tab)
     alt_file - [string] alternative file path used in footer and link button
     height - [integer] height of code preview in px
-mixin github(repo, file, alt_file, height, language)
+mixin github(repo, file, height, alt_file, language)
     - var branch = ALPHA ? "develop" : "master"
     - var height = height || 250
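
For context: the reorder moves height ahead of alt_file, so the frequent call
sites that only set a preview height no longer need a false placeholder for
alt_file. A minimal sketch of the updated mixin follows; the iframe markup and
embed URL are assumptions for illustration, not the actual template:

    //- GitHub embed (sketch)
        height   - [integer] height of code preview in px
        alt_file - [string] alternative file path used in footer and link button
    mixin github(repo, file, height, alt_file, language)
        - var branch = ALPHA ? "develop" : "master"
        - var height = height || 250
        //- hypothetical markup, standing in for the template's real output
        iframe(src="https://embed.example.com/" + repo + "/" + branch + "/" + file, style="height: " + height + "px")

A call such as +github("spacy", "examples/pipeline/custom_component_entities.py", false, 500)
therefore becomes +github("spacy", "examples/pipeline/custom_component_entities.py", 500).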

View File

@@ -234,7 +234,7 @@ p
     | when you customise spaCy's tokenization rules. When you call #[code nlp]
     | on a text, the custom pipeline component is applied to the #[code Doc]
-+github("spacy", "examples/pipeline/custom_component_entities.py", false, 500)
++github("spacy", "examples/pipeline/custom_component_entities.py", 500)
 p
     | Wrapping this functionality in a
@@ -255,7 +255,7 @@ p
     | #[code Token] for example, the capital, latitude/longitude coordinates
     | and even the country flag.
-+github("spacy", "examples/pipeline/custom_component_countries_api.py", false, 500)
++github("spacy", "examples/pipeline/custom_component_countries_api.py", 500)
 p
     | In this case, all data can be fetched on initialisation in one request.

View File

@@ -50,4 +50,4 @@ p
     | dataset and will be loaded automatically via Thinc's built-in dataset
     | loader.
-+github("spacy", "examples/pipeline/multi_processing.py")
++github("spacy", "examples/pipeline/multi_processing.py", 500)

View File

@@ -34,7 +34,7 @@ p
     | #[strong character offsets] and #[strong labels] of each entity contained
     | in the texts.
-+github("spacy", "examples/training/train_ner.py")
++github("spacy", "examples/training/train_ner.py", 500)
 +h(4) Step by step guide
@@ -88,7 +88,7 @@ p
     | recognizer over unlabelled sentences, and adding their annotations to the
     | training set.
-+github("spacy", "examples/training/train_new_entity_type.py")
++github("spacy", "examples/training/train_new_entity_type.py", 500)
 +h(4) Step by step guide

View File

@@ -8,7 +8,7 @@ p
     | #[strong training examples] and the respective #[strong heads] and
     | #[strong dependency label] for each token of the example texts.
-+github("spacy", "examples/training/train_parser.py")
++github("spacy", "examples/training/train_parser.py", 500)
 +h(4) Step by step guide
@@ -61,7 +61,7 @@ p
     | #[strong custom tags], as well as a dictionary mapping those tags to the
     | #[+a("http://universaldependencies.github.io/docs/u/pos/index.html") Universal Dependencies scheme].
-+github("spacy", "examples/training/train_tagger.py")
++github("spacy", "examples/training/train_tagger.py", 500)
 +h(4) Step by step guide
@@ -141,7 +141,7 @@ p
     | of relations: #[code ROOT], #[code PLACE], #[code QUALITY],
     | #[code ATTRIBUTE], #[code TIME] and #[code LOCATION].
-+github("spacy", "examples/training/train_intent_parser.py")
++github("spacy", "examples/training/train_intent_parser.py", 500)
 +h(4) Step by step guide

View File

@@ -11,7 +11,7 @@ p
     | loader. Predictions are available via
     | #[+api("doc#attributes") #[code Doc.cats]].
-+github("spacy", "examples/training/train_textcat.py")
++github("spacy", "examples/training/train_textcat.py", 500)
 +h(4) Step by step guide

View File

@@ -179,4 +179,4 @@ include ../_includes/_mixins
     | parameters, and was implemented using #[+a("https://keras.io") Keras]
     | and spaCy.
-+github("spacy", "examples/keras_parikh_entailment/__main__.py", "examples/keras_parikh_entailment")
++github("spacy", "examples/keras_parikh_entailment/__main__.py", false, "examples/keras_parikh_entailment")