Mirror of https://github.com/explosion/spaCy.git (synced 2025-10-30 23:47:31 +03:00)
proposal for doctests

commit ee521a52a8
parent 3b3547251c
.travis.yml
@@ -24,4 +24,4 @@ install:
 
 # run tests
 script:
-  - "py.test tests/ -x"
+  - "py.test tests/ website/tests/ -x"
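The commit itself does not show how py.test discovers the new website/tests/*.txt transcripts that the CI change above puts on the test path. A minimal, hypothetical sketch of driving them through the standard-library doctest runner (the file name test_doctests.py and the collection approach are assumptions, not part of this commit):

# website/tests/test_doctests.py (hypothetical, not part of this commit):
# run every .txt transcript in this directory through the stdlib doctest
# runner so that "py.test website/tests/ -x" exercises them.
import doctest
import glob
import os

import pytest

HERE = os.path.dirname(os.path.abspath(__file__))
TRANSCRIPTS = sorted(glob.glob(os.path.join(HERE, '*.txt')))


@pytest.mark.parametrize('path', TRANSCRIPTS)
def test_doctest_transcript(path):
    failures, _ = doctest.testfile(path, module_relative=False)
    assert failures == 0, '%d doctest failure(s) in %s' % (failures, path)

Collecting the transcripts explicitly keeps them runnable with a plain py.test invocation; pytest's --doctest-glob option would be an alternative way to pick up .txt doctest files.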
website/Makefile
@@ -1,9 +1,12 @@
-all: site
+all: dir site
 
+dir:
+	mkdir -p site
+
 site: site/index.html site/blog/ site/docs/ site/license/ site/blog/introducing-spacy/ site/blog/parsing-english-in-python/ site/blog/part-of-speech-POS-tagger-in-python/ site/tutorials/twitter-filter/ site/tutorials/syntax-search/ site/tutorials/mark-adverbs/ site/blog/writing-c-in-cython/ site/blog/how-spacy-works/
 
 site/index.html: src/jade/header.jade src/jade/*.jade
-	jade -P src/jade/home/index.jade --out site/
+	./run_jade src/jade/home/index.jade $@
 
 site/docs/: src/jade/docs/*.jade src/jade/header.jade
 	jade -P src/jade/docs/index.jade --out $@
							
								
								
									
website/run_jade (new executable file, 59 lines)
@@ -0,0 +1,59 @@
#!/usr/bin/env node
'use strict';

var fs = require('fs');
var jade = require('jade');

// returns all: code and return value (default)
jade.filters.doctest_all = function (html, _, use_rv) {
    use_rv = use_rv === undefined ? true : use_rv;

    var lines = html.trim().split(/\n/),
        block = [],
        res = '';

    lines.forEach(function (line) {
        if (line.indexOf('>>> ') === 0) {
            // we use ### to suppress lines
            if (line.indexOf("###") === -1) {
                block.push(line.replace(/^>>> /gm, ''));
            }
        } else if (block.length > 0) {
            res += '<pre class="language-python"><code>' + block.join('\n') + '</code></pre>';
            block = [];

            if (use_rv) {
                res += '<p>Which produces:</p>';
                res += '<pre class="language-python"><code>' + line + '</code></pre>';
            }
        }
    });

    if (block.length > 0) {
        res += '<pre class="language-python"><code>' + block.join('\n') + '</code></pre>';
    }

    return res;
};

// returns only code
jade.filters.doctest = function (html) {
    return jade.filters.doctest_all(html, null, false);
};

if (process.argv[0] === "node") {
    process.argv.shift();
}

var in_file = process.argv[1];
var out_file = process.argv[2];

var html = jade.renderFile(in_file, {
    pretty: true
});

fs.writeFile(out_file, html, function (err) {
    if (err) {
        throw err;
    }
});
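To make the filter's behavior easier to follow, here is a rough Python re-expression of doctest_all (illustrative only; the Node script above is what the site build actually runs). Lines starting with '>>> ' are grouped into one code block, lines carrying the ### marker are dropped, and when use_rv is set the first non-prompt line is rendered as the produced output:

# Rough Python mirror of the doctest_all Jade filter defined above
# (illustrative only).
def doctest_all(text, use_rv=True):
    block, out = [], []
    for line in text.strip().split('\n'):
        if line.startswith('>>> '):
            if '###' not in line:          # '###' suppresses a line
                block.append(line[4:])
        elif block:
            out.append('<pre class="language-python"><code>'
                       + '\n'.join(block) + '</code></pre>')
            block = []
            if use_rv:
                out.append('<p>Which produces:</p>')
                out.append('<pre class="language-python"><code>'
                           + line + '</code></pre>')
    if block:
        out.append('<pre class="language-python"><code>'
                   + '\n'.join(block) + '</code></pre>')
    return ''.join(out)


if __name__ == '__main__':
    transcript = '\n'.join([
        ">>> from spacy.en import English",
        ">>> nlp = English()",
        ">>> doc = nlp(u'Hello, world. Here are two sentences.')",
        ">>> print([s.string for s in doc.sents])",
        "[u'Hello, world. ', u'Here are two sentences.']",
    ])
    print(doctest_all(transcript))

On the test_load_resources_and_process_text.txt transcript this yields one code block with the four prompt lines, followed by a "Which produces:" block containing the printed sentence list.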
@@ -6,26 +6,13 @@ mixin example(name)
 
 
 +example("Load resources and process text")
-  pre.language-python: code
-    | from __future__ import unicode_literals, print_function
-    | from spacy.en import English
-    | nlp = English()
-    | doc = nlp('Hello, world. Here are two sentences.')
+  include:doctest_all ../../../tests/test_load_resources_and_process_text.txt
 
 +example("Get tokens and sentences")
-  pre.language-python: code
-    | token = doc[0]
-    | sentence = doc.sents.next()
-    | assert token is sentence[0]
-    | assert sentence.text == 'Hello, world.'
+  include:doctest ../../../tests/test_get_tokens_and_sentences.txt
 
 +example("Use integer IDs for any string")
-  pre.language-python: code
-    | hello_id = nlp.vocab.strings['Hello']
-    | hello_str = nlp.vocab.strings[hello_id]
-    |
-    | assert token.orth  == hello_id  == 469755
-    | assert token.orth_ == hello_str == 'Hello'
+  include:doctest ../../../tests/test_use_interger_ids_for_any_strings.txt
 
 +example("Get and set string views and flags")
   pre.language-python: code
							
								
								
									
website/tests/test_get_tokens_and_sentences.txt (new file, 8 lines)
@@ -0,0 +1,8 @@
>>> from spacy.en import English                        ###
>>> nlp = English()                                     ###
>>> doc = nlp(u'Hello, world. Here are two sentences.') ###
>>>
>>> token = doc[0]
>>> sentence = doc.sents.next()
>>> assert token is sentence[0]
>>> assert sentence.text == 'Hello, world.'
							
								
								
									
website/tests/test_load_resources_and_process_text.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
>>> from spacy.en import English
>>> nlp = English()
>>> doc = nlp(u'Hello, world. Here are two sentences.')
>>> print([s.string for s in doc.sents])
[u'Hello, world. ', u'Here are two sentences.']
							
								
								
									
website/tests/test_use_interger_ids_for_any_strings.txt (new file, 10 lines)
@@ -0,0 +1,10 @@
>>> from spacy.en import English                        ###
>>> nlp = English()                                     ###
>>> doc = nlp(u'Hello, world. Here are two sentences.') ###
>>> token = doc[0]                                      ###
>>>
>>> hello_id = nlp.vocab.strings['Hello']
>>> hello_str = nlp.vocab.strings[hello_id]
>>>
>>> assert token.orth  == hello_id  == 469755
>>> assert token.orth_ == hello_str == 'Hello'
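One detail worth noting about these transcripts: the ### markers that run_jade uses to hide setup lines from the website are ordinary trailing comments as far as Python is concerned, so the very same lines still execute when the file is run as a doctest. A tiny self-contained check of that property (using a trivial stand-in for English so it runs without spaCy installed):

import doctest

# Miniature transcript in the same style as the website/tests/*.txt files;
# the '###' suffix is just a trailing comment to the Python parser.
transcript = """
>>> nlp = lambda text: text                              ###
>>> doc = nlp(u'Hello, world. Here are two sentences.')  ###
>>> doc.startswith('Hello')
True
"""

test = doctest.DocTestParser().get_doctest(transcript, {}, 'demo', None, 0)
results = doctest.DocTestRunner(verbose=False).run(test)
assert results.failed == 0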