This commit is contained in:
Matthew Honnibal 2016-07-27 03:14:27 +02:00
commit 71ff4bf287
752 changed files with 4039372 additions and 0 deletions

103
.gitignore vendored Normal file

@@ -0,0 +1,103 @@
# Vim
*.swp
*.sw*
Profile.prof
tmp/
.dev
.denv
.pypyenv
.eggs
*.tgz
.sass-cache
MANIFEST
corpora/
models/
examples/
keys/
spacy/syntax/*.cpp
spacy/syntax/*.html
spacy/en/*.cpp
spacy/tokens/*.cpp
spacy/serialize/*.cpp
spacy/en/data/*
spacy/*.cpp
spacy/ner/*.cpp
spacy/orthography/*.cpp
ext/murmurhash.cpp
ext/sparsehash.cpp
data/en/pos
data/en/ner
data/en/lexemes
data/en/strings
_build/
.env/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
bin/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Website
website/www/
website/demos/displacy/
website/demos/sense2vec/
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
# Windows local helper files
*.bat
# Mac OS X
*.DS_Store
# Komodo project files
*.komodoproject

30
.travis.yml Normal file

@@ -0,0 +1,30 @@
language: python
sudo: required
dist: precise
group: edge
python:
- "2.7"
- "3.4"
os:
- linux
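# The install phase builds an English model from scratch: download WordNet,
# assemble the model directory with bin/init_model.py, then package and
# install the result with sputnik.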
install:
- "pip install -r requirements.txt"
- "pip install -e ."
- "mkdir -p corpora/en"
- "cd corpora/en"
- "wget --no-check-certificate http://wordnetcode.princeton.edu/3.0/WordNet-3.0.tar.gz"
- "tar -xzf WordNet-3.0.tar.gz"
- "mv WordNet-3.0 wordnet"
- "cd ../../"
- "python bin/init_model.py en lang_data/ corpora/ data"
- "cp package.json data"
- "sputnik build data en_default.sputnik"
- "sputnik --name spacy install en_default.sputnik"
script:
- "pip install pytest"
- "python -m pytest spacy"

22
LICENSE Normal file

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (C) 2015 Matthew Honnibal, 2016 spaCy GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

4
MANIFEST.in Normal file

@@ -0,0 +1,4 @@
recursive-include include *.h
include buildbot.json
include LICENSE
include README.rst

111
README.rst Normal file

@@ -0,0 +1,111 @@
.. image:: https://travis-ci.org/spacy-io/spaCy.svg?branch=master
:target: https://travis-ci.org/spacy-io/spaCy
==============================
spaCy: Industrial-strength NLP
==============================
spaCy is a library for advanced natural language processing in Python and Cython.
Documentation and details: https://spacy.io/
spaCy is built on the very latest research, but it isn't researchware. It was
designed from day 1 to be used in real products. It's commercial open-source
software, released under the MIT license.
Features
--------
* Labelled dependency parsing (91.8% accuracy on OntoNotes 5)
* Named entity recognition (82.6% accuracy on OntoNotes 5)
* Part-of-speech tagging (97.1% accuracy on OntoNotes 5)
* Easy to use word vectors
* All strings mapped to integer IDs
* Export to numpy data arrays
* Alignment maintained to original string, ensuring easy mark-up calculation
* Range of easy-to-use orthographic features.
* No pre-processing required. spaCy takes raw text as input, warts and newlines and all.
Top Performance
---------------
* Fastest in the world: <50ms per document. No faster system has ever been
announced.
* Accuracy within 1% of the current state of the art on all tasks performed
(parsing, named entity recognition, part-of-speech tagging). The only more
accurate systems are an order of magnitude slower or more.
Supports
--------
* CPython 2.6, 2.7, 3.3, 3.4, 3.5 (only 64 bit)
* OSX
* Linux
* Windows (Cygwin, MinGW, Visual Studio)
2016-05-10 v0.101.0: Fixed German model
----------------------------------------
* Fixed bug that prevented German parses from being deprojectivised.
* Bug fixes to sentence boundary detection.
* Add rich comparison methods to the Lexeme class.
* Add missing Doc.has_vector and Span.has_vector properties.
* Add missing Span.sent property.
2016-05-05 v0.100.7: German!
----------------------------
spaCy finally supports another language, in addition to English. We're lucky to have Wolfgang Seeker on the team, and the new German model is just the beginning.
Now that there are multiple languages, you should consider loading spaCy via the load() function. This function also makes it easier to load extra word vector data for English:
.. code:: python

    import spacy

    en_nlp = spacy.load('en', vectors='en_glove_cc_300_1m_vectors')
    de_nlp = spacy.load('de')
To support use of the load function, there are also two new helper functions: spacy.get_lang_class and spacy.set_lang_class.
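For example, registering a language class and looking it up again by ID (a sketch — the exact signatures may differ slightly):

.. code:: python

    import spacy
    from spacy.de import German

    spacy.set_lang_class('de', German)   # register a Language subclass for an ID
    DE = spacy.get_lang_class('de')      # ...and retrieve it again by the same ID
    nlp = DE()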
Once the German model is loaded, you can use it just like the English model:
.. code:: python

    doc = nlp(u'''Wikipedia ist ein Projekt zum Aufbau einer Enzyklopädie aus freien Inhalten, zu dem du mit deinem Wissen beitragen kannst. Seit Mai 2001 sind 1.936.257 Artikel in deutscher Sprache entstanden.''')

    for sent in doc.sents:
        print(sent.root.text, sent.root.n_lefts, sent.root.n_rights)

    # (u'ist', 1, 2)
    # (u'sind', 1, 3)
The German model provides tokenization, POS tagging, sentence boundary detection, syntactic dependency parsing, recognition of organisation, location and person entities, and word vector representations trained on a mix of open subtitles and Wikipedia data. It doesn't yet provide lemmatisation or morphological analysis, and it doesn't yet recognise numeric entities such as numbers and dates.
Bugfixes
--------
* spaCy < 0.100.7 had a bug in the semantics of the Token.__str__ and Token.__unicode__ built-ins: they included a trailing space.
* Improve handling of "infixed" hyphens. Previously the tokenizer struggled with multiple hyphens, such as "well-to-do".
* Improve handling of periods after mixed-case tokens.
* Improve lemmatization for English special-case tokens.
* Fix bug that allowed spaces to be treated as heads in the syntactic parse.
* Fix bug that led to inconsistent sentence boundaries before and after serialisation.
* Fix bug when deserialising untagged documents.

156
bin/cythonize.py Executable file

@@ -0,0 +1,156 @@
#!/usr/bin/env python
""" cythonize.py
Cythonize pyx files into C++ files as needed.
Usage: cythonize.py [root]
Checks pyx files to see if they have been changed relative to their
corresponding C++ files. If they have, then runs cython on these files to
recreate the C++ files.
Additionally, checks pxd files and setup.py if they have been changed. If
they have, rebuilds everything.
Change detection based on file hashes stored in JSON format.
For now, this script should be run by developers when changing Cython files
and the resulting C++ files checked in, so that end-users (and Python-only
developers) do not get the Cython dependencies.
Based upon:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
https://raw.githubusercontent.com/numpy/numpy/master/tools/cythonize.py
Note: this script does not check any of the dependent C++ libraries.
"""
from __future__ import print_function
import os
import sys
import json
import hashlib
import subprocess
import argparse
HASH_FILE = 'cythonize.json'
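# The hash DB is a flat JSON object mapping normalised file paths to MD5
# digests, e.g. {"spacy/syntax/parser.pyx": "d41d8cd98f00b204e9800998ecf8427e"}.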
def process_pyx(fromfile, tofile):
print('Processing %s' % fromfile)
try:
from Cython.Compiler.Version import version as cython_version
from distutils.version import LooseVersion
if LooseVersion(cython_version) < LooseVersion('0.19'):
raise Exception('Require Cython >= 0.19')
except ImportError:
pass
flags = ['--fast-fail']
if tofile.endswith('.cpp'):
flags += ['--cplus']
try:
try:
r = subprocess.call(['cython'] + flags + ['-o', tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
# There are ways of installing Cython that don't result in a cython
# executable on the path, see gh-2397.
r = subprocess.call([sys.executable, '-c',
'import sys; from Cython.Compiler.Main import '
'setuptools_main as main; sys.exit(main())'] + flags +
['-o', tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
raise OSError('Cython needs to be installed')
def preserve_cwd(path, func, *args):
orig_cwd = os.getcwd()
try:
os.chdir(path)
func(*args)
finally:
os.chdir(orig_cwd)
def load_hashes(filename):
try:
return json.load(open(filename))
except (ValueError, IOError):
return {}
def save_hashes(hash_db, filename):
with open(filename, 'w') as f:
f.write(json.dumps(hash_db))
def get_hash(path):
return hashlib.md5(open(path, 'rb').read()).hexdigest()
def hash_changed(base, path, db):
full_path = os.path.normpath(os.path.join(base, path))
return not get_hash(full_path) == db.get(full_path)
def hash_add(base, path, db):
full_path = os.path.normpath(os.path.join(base, path))
db[full_path] = get_hash(full_path)
def process(base, filename, db):
root, ext = os.path.splitext(filename)
if ext in ['.pyx', '.cpp']:
if hash_changed(base, filename, db) or not os.path.isfile(os.path.join(base, root + '.cpp')):
preserve_cwd(base, process_pyx, root + '.pyx', root + '.cpp')
hash_add(base, root + '.cpp', db)
hash_add(base, root + '.pyx', db)
def check_changes(root, db):
res = False
new_db = {}
setup_filename = 'setup.py'
hash_add('.', setup_filename, new_db)
if hash_changed('.', setup_filename, db):
res = True
for base, _, files in os.walk(root):
for filename in files:
if filename.endswith('.pxd'):
hash_add(base, filename, new_db)
if hash_changed(base, filename, db):
res = True
if res:
db.clear()
db.update(new_db)
return res
def run(root):
db = load_hashes(HASH_FILE)
try:
check_changes(root, db)
for base, _, files in os.walk(root):
for filename in files:
process(base, filename, db)
finally:
save_hashes(db, HASH_FILE)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Cythonize pyx files into C++ files as needed')
parser.add_argument('root', help='root directory')
args = parser.parse_args()
run(args.root)

93
bin/get_freqs.py Executable file

@@ -0,0 +1,93 @@
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import plac
import joblib
from os import path
import os
import bz2
import ujson
from preshed.counter import PreshCounter
from joblib import Parallel, delayed
import io
from spacy.en import English
from spacy.strings import StringStore
from spacy.attrs import ORTH
from spacy.tokenizer import Tokenizer
from spacy.vocab import Vocab
def iter_comments(loc):
with bz2.BZ2File(loc) as file_:
for line in file_:
yield ujson.loads(line)
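# Count token frequencies for one input file: tokenize the 'body' field of
# each JSON record and write one "<freq>\t<token>" line per word type.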
def count_freqs(input_loc, output_loc):
print(output_loc)
vocab = English.default_vocab(get_lex_attr=None)
tokenizer = Tokenizer.from_dir(vocab,
path.join(English.default_data_dir(), 'tokenizer'))
counts = PreshCounter()
for json_comment in iter_comments(input_loc):
doc = tokenizer(json_comment['body'])
doc.count_by(ORTH, counts=counts)
    with io.open(output_loc, 'w', encoding='utf8') as file_:
for orth, freq in counts:
string = tokenizer.vocab.strings[orth]
if not string.isspace():
file_.write('%d\t%s\n' % (freq, string))
def parallelize(func, iterator, n_jobs):
Parallel(n_jobs=n_jobs)(delayed(func)(*item) for item in iterator)
def merge_counts(locs, out_loc):
string_map = StringStore()
counts = PreshCounter()
for loc in locs:
with io.open(loc, 'r', encoding='utf8') as file_:
for line in file_:
freq, word = line.strip().split('\t', 1)
orth = string_map[word]
counts.inc(orth, int(freq))
with io.open(out_loc, 'w', encoding='utf8') as file_:
for orth, count in counts:
string = string_map[orth]
file_.write('%d\t%s\n' % (count, string))
@plac.annotations(
input_loc=("Location of input file list"),
freqs_dir=("Directory for frequency files"),
output_loc=("Location for output file"),
n_jobs=("Number of workers", "option", "n", int),
skip_existing=("Skip inputs where an output file exists", "flag", "s", bool),
)
def main(input_loc, freqs_dir, output_loc, n_jobs=2, skip_existing=False):
tasks = []
outputs = []
for input_path in open(input_loc):
input_path = input_path.strip()
if not input_path:
continue
filename = input_path.split('/')[-1]
output_path = path.join(freqs_dir, filename.replace('bz2', 'freq'))
outputs.append(output_path)
if not path.exists(output_path) or not skip_existing:
tasks.append((input_path, output_path))
if tasks:
parallelize(count_freqs, tasks, n_jobs)
print("Merge")
merge_counts(outputs, output_loc)
if __name__ == '__main__':
plac.call(main)

228
bin/init_model.py Normal file

@@ -0,0 +1,228 @@
"""Set up a model directory.
Requires:
lang_data --- Rules for the tokenizer
* prefix.txt
* suffix.txt
* infix.txt
* morphs.json
* specials.json
corpora --- Data files
* WordNet
* words.sgt.prob --- Smoothed unigram probabilities
* clusters.txt --- Output of hierarchical clustering, e.g. Brown clusters
* vectors.bz2 --- output of something like word2vec, compressed with bzip
"""
from __future__ import unicode_literals
from ast import literal_eval
import math
import gzip
import json
import plac
from pathlib import Path
from shutil import copyfile
from shutil import copytree
from collections import defaultdict
import io
from spacy.vocab import Vocab
from spacy.vocab import write_binary_vectors
from spacy.strings import hash_string
from preshed.counter import PreshCounter
from spacy.parts_of_speech import NOUN, VERB, ADJ
from spacy.util import get_lang_class
try:
unicode
except NameError:
unicode = str
def setup_tokenizer(lang_data_dir, tok_dir):
if not tok_dir.exists():
tok_dir.mkdir()
for filename in ('infix.txt', 'morphs.json', 'prefix.txt', 'specials.json',
'suffix.txt'):
src = lang_data_dir / filename
dst = tok_dir / filename
copyfile(str(src), str(dst))
def _read_clusters(loc):
if not loc.exists():
print("Warning: Clusters file not found")
return {}
clusters = {}
for line in io.open(str(loc), 'r', encoding='utf8'):
try:
cluster, word, freq = line.split()
except ValueError:
continue
# If the clusterer has only seen the word a few times, its cluster is
# unreliable.
if int(freq) >= 3:
clusters[word] = cluster
else:
clusters[word] = '0'
# Expand clusters with re-casing
for word, cluster in list(clusters.items()):
if word.lower() not in clusters:
clusters[word.lower()] = cluster
if word.title() not in clusters:
clusters[word.title()] = cluster
if word.upper() not in clusters:
clusters[word.upper()] = cluster
return clusters
def _read_probs(loc):
if not loc.exists():
print("Probabilities file not found. Trying freqs.")
return {}, 0.0
probs = {}
for i, line in enumerate(io.open(str(loc), 'r', encoding='utf8')):
prob, word = line.split()
prob = float(prob)
probs[word] = prob
return probs, probs['-OOV-']
def _read_freqs(loc, max_length=100, min_doc_freq=5, min_freq=200):
if not loc.exists():
print("Warning: Frequencies file not found")
return {}, 0.0
counts = PreshCounter()
total = 0
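    # Two passes over the file: the first accumulates counts so PreshCounter
    # can fit its smoother, the second converts each raw frequency into a
    # smoothed log-probability.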
if str(loc).endswith('gz'):
file_ = gzip.open(str(loc))
else:
file_ = loc.open()
for i, line in enumerate(file_):
freq, doc_freq, key = line.rstrip().split('\t', 2)
freq = int(freq)
counts.inc(i+1, freq)
total += freq
counts.smooth()
log_total = math.log(total)
if str(loc).endswith('gz'):
file_ = gzip.open(str(loc))
else:
file_ = loc.open()
probs = {}
for line in file_:
freq, doc_freq, key = line.rstrip().split('\t', 2)
doc_freq = int(doc_freq)
freq = int(freq)
if doc_freq >= min_doc_freq and freq >= min_freq and len(key) < max_length:
word = literal_eval(key)
smooth_count = counts.smoother(int(freq))
probs[word] = math.log(smooth_count) - log_total
oov_prob = math.log(counts.smoother(0)) - log_total
return probs, oov_prob
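# NOTE: _read_senses relies on codecs and spacy.senses, neither of which is
# imported in this script; it is kept here but never called from main().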
def _read_senses(loc):
lexicon = defaultdict(lambda: defaultdict(list))
if not loc.exists():
print("Warning: WordNet senses not found")
return lexicon
sense_names = dict((s, i) for i, s in enumerate(spacy.senses.STRINGS))
pos_ids = {'noun': NOUN, 'verb': VERB, 'adjective': ADJ}
for line in codecs.open(str(loc), 'r', 'utf8'):
sense_strings = line.split()
word = sense_strings.pop(0)
for sense in sense_strings:
pos, sense = sense[3:].split('.')
sense_name = '%s_%s' % (pos[0].upper(), sense.lower())
if sense_name != 'N_tops':
sense_id = sense_names[sense_name]
lexicon[word][pos_ids[pos]].append(sense_id)
return lexicon
def setup_vocab(get_lex_attr, tag_map, src_dir, dst_dir):
if not dst_dir.exists():
dst_dir.mkdir()
vectors_src = src_dir / 'vectors.bz2'
if vectors_src.exists():
write_binary_vectors(str(vectors_src), str(dst_dir / 'vec.bin'))
else:
print("Warning: Word vectors file not found")
vocab = Vocab(get_lex_attr=get_lex_attr, tag_map=tag_map)
clusters = _read_clusters(src_dir / 'clusters.txt')
probs, oov_prob = _read_probs(src_dir / 'words.sgt.prob')
if not probs:
probs, oov_prob = _read_freqs(src_dir / 'freqs.txt.gz')
if not probs:
oov_prob = -20
else:
oov_prob = min(probs.values())
for word in clusters:
if word not in probs:
probs[word] = oov_prob
lexicon = []
for word, prob in reversed(sorted(list(probs.items()), key=lambda item: item[1])):
# First encode the strings into the StringStore. This way, we can map
# the orth IDs to frequency ranks
orth = vocab.strings[word]
# Now actually load the vocab
for word, prob in reversed(sorted(list(probs.items()), key=lambda item: item[1])):
lexeme = vocab[word]
lexeme.prob = prob
lexeme.is_oov = False
# Decode as a little-endian string, so that we can do & 15 to get
# the first 4 bits. See _parse_features.pyx
if word in clusters:
lexeme.cluster = int(clusters[word][::-1], 2)
else:
lexeme.cluster = 0
vocab.dump(str(dst_dir / 'lexemes.bin'))
with (dst_dir / 'strings.json').open('w') as file_:
vocab.strings.dump(file_)
with (dst_dir / 'oov_prob').open('w') as file_:
file_.write('%f' % oov_prob)
def main(lang_id, lang_data_dir, corpora_dir, model_dir):
model_dir = Path(model_dir)
lang_data_dir = Path(lang_data_dir) / lang_id
corpora_dir = Path(corpora_dir) / lang_id
assert corpora_dir.exists()
assert lang_data_dir.exists()
if not model_dir.exists():
model_dir.mkdir()
tag_map = json.load((lang_data_dir / 'tag_map.json').open())
setup_tokenizer(lang_data_dir, model_dir / 'tokenizer')
setup_vocab(get_lang_class(lang_id).default_lex_attrs(), tag_map, corpora_dir,
model_dir / 'vocab')
if (lang_data_dir / 'gazetteer.json').exists():
copyfile(str(lang_data_dir / 'gazetteer.json'),
str(model_dir / 'vocab' / 'gazetteer.json'))
copyfile(str(lang_data_dir / 'tag_map.json'),
str(model_dir / 'vocab' / 'tag_map.json'))
if (lang_data_dir / 'lemma_rules.json').exists():
copyfile(str(lang_data_dir / 'lemma_rules.json'),
str(model_dir / 'vocab' / 'lemma_rules.json'))
if not (model_dir / 'wordnet').exists() and (corpora_dir / 'wordnet').exists():
copytree(str(corpora_dir / 'wordnet' / 'dict'), str(model_dir / 'wordnet'))
if __name__ == '__main__':
plac.call(main)

89
bin/munge_ewtb.py Executable file

@@ -0,0 +1,89 @@
#!/usr/bin/env python
from __future__ import unicode_literals
from xml.etree import cElementTree as ElementTree
import json
import re
import plac
from pathlib import Path
from os import path
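# Convert the English Web Treebank's .dep parses into spaCy's JSON training
# format, writing one output file per genre.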
escaped_tokens = {
'-LRB-': '(',
'-RRB-': ')',
'-LSB-': '[',
'-RSB-': ']',
'-LCB-': '{',
'-RCB-': '}',
}
def read_parses(parse_loc):
offset = 0
doc = []
for parse in open(str(parse_loc) + '.dep').read().strip().split('\n\n'):
parse = _adjust_token_ids(parse, offset)
offset += len(parse.split('\n'))
doc.append(parse)
return doc
def _adjust_token_ids(parse, offset):
output = []
for line in parse.split('\n'):
pieces = line.split()
pieces[0] = str(int(pieces[0]) + offset)
pieces[5] = str(int(pieces[5]) + offset) if pieces[5] != '0' else '0'
output.append('\t'.join(pieces))
return '\n'.join(output)
def _fmt_doc(filename, paras):
return {'id': filename, 'paragraphs': [_fmt_para(*para) for para in paras]}
def _fmt_para(raw, sents):
return {'raw': raw, 'sentences': [_fmt_sent(sent) for sent in sents]}
def _fmt_sent(sent):
return {
'tokens': [_fmt_token(*t.split()) for t in sent.strip().split('\n')],
'brackets': []}
def _fmt_token(id_, word, hyph, pos, ner, head, dep, blank1, blank2, blank3):
head = int(head) - 1
id_ = int(id_) - 1
head = (head - id_) if head != -1 else 0
return {'id': id_, 'orth': word, 'tag': pos, 'dep': dep, 'head': head}
tags_re = re.compile(r'<[\w\?/][^>]+>')
def main(out_dir, ewtb_dir='/usr/local/data/eng_web_tbk'):
ewtb_dir = Path(ewtb_dir)
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir()
for genre_dir in ewtb_dir.joinpath('data').iterdir():
#if 'answers' in str(genre_dir): continue
parse_dir = genre_dir.joinpath('penntree')
docs = []
for source_loc in genre_dir.joinpath('source').joinpath('source_original').iterdir():
filename = source_loc.parts[-1].replace('.sgm.sgm', '')
filename = filename.replace('.xml', '')
filename = filename.replace('.txt', '')
parse_loc = parse_dir.joinpath(filename + '.xml.tree')
parses = read_parses(parse_loc)
source = source_loc.open().read().strip()
if 'answers' in str(genre_dir):
source = tags_re.sub('', source).strip()
docs.append(_fmt_doc(filename, [[source, parses]]))
out_loc = out_dir.joinpath(genre_dir.parts[-1] + '.json')
with open(str(out_loc), 'w') as out_file:
out_file.write(json.dumps(docs, indent=4))
if __name__ == '__main__':
plac.call(main)

32
bin/ner_tag.py Normal file

@@ -0,0 +1,32 @@
import io
import plac
from spacy.en import English
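# Tag a text file with named entities: run the English pipeline over each
# paragraph and print the tokens with <LABEL> ... </LABEL> markers around
# each entity span.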
def main(text_loc):
with io.open(text_loc, 'r', encoding='utf8') as file_:
text = file_.read()
NLU = English()
for paragraph in text.split('\n\n'):
tokens = NLU(paragraph)
ent_starts = {}
ent_ends = {}
for span in tokens.ents:
ent_starts[span.start] = span.label_
ent_ends[span.end] = span.label_
output = []
for token in tokens:
if token.i in ent_starts:
output.append('<%s>' % ent_starts[token.i])
output.append(token.orth_)
if (token.i+1) in ent_ends:
output.append('</%s>' % ent_ends[token.i+1])
output.append('\n\n')
        print(' '.join(output))
if __name__ == '__main__':
plac.call(main)

130
bin/parser/conll_parse.py Normal file

@@ -0,0 +1,130 @@
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from os import path
import shutil
import codecs
import random
import time
import gzip
import plac
import cProfile
import pstats
import spacy.util
from spacy.en import English
from spacy.en.pos import POS_TEMPLATES, POS_TAGS, setup_model_dir
from spacy.syntax.parser import GreedyParser
from spacy.syntax.parser import OracleError
from spacy.syntax.util import Config
def is_punct_label(label):
return label == 'P' or label.lower() == 'punct'
def read_gold(file_):
"""Read a standard CoNLL/MALT-style format"""
sents = []
for sent_str in file_.read().strip().split('\n\n'):
ids = []
words = []
heads = []
labels = []
tags = []
for i, line in enumerate(sent_str.split('\n')):
id_, word, pos_string, head_idx, label = _parse_line(line)
words.append(word)
if head_idx == -1:
head_idx = i
ids.append(id_)
heads.append(head_idx)
labels.append(label)
tags.append(pos_string)
text = ' '.join(words)
sents.append((text, [words], ids, words, tags, heads, labels))
return sents
def _parse_line(line):
pieces = line.split()
id_ = int(pieces[0])
word = pieces[1]
pos = pieces[3]
head_idx = int(pieces[6])
label = pieces[7]
return id_, word, pos, head_idx, label
def iter_data(paragraphs, tokenizer, gold_preproc=False):
for raw, tokenized, ids, words, tags, heads, labels in paragraphs:
assert len(words) == len(heads)
for words in tokenized:
sent_ids = ids[:len(words)]
sent_tags = tags[:len(words)]
sent_heads = heads[:len(words)]
sent_labels = labels[:len(words)]
sent_heads = _map_indices_to_tokens(sent_ids, sent_heads)
tokens = tokenizer.tokens_from_list(words)
yield tokens, sent_tags, sent_heads, sent_labels
ids = ids[len(words):]
tags = tags[len(words):]
heads = heads[len(words):]
labels = labels[len(words):]
def _map_indices_to_tokens(ids, heads):
mapped = []
for head in heads:
if head not in ids:
mapped.append(None)
else:
mapped.append(ids.index(head))
return mapped
def evaluate(Language, dev_loc, model_dir):
global loss
nlp = Language()
n_corr = 0
pos_corr = 0
n_tokens = 0
total = 0
skipped = 0
loss = 0
with codecs.open(dev_loc, 'r', 'utf8') as file_:
paragraphs = read_gold(file_)
for tokens, tag_strs, heads, labels in iter_data(paragraphs, nlp.tokenizer):
assert len(tokens) == len(labels)
nlp.tagger.tag_from_strings(tokens, tag_strs)
nlp.parser(tokens)
for i, token in enumerate(tokens):
try:
pos_corr += token.tag_ == tag_strs[i]
except:
                print(i, token.orth_, token.tag)
raise
n_tokens += 1
if heads[i] is None:
skipped += 1
continue
if is_punct_label(labels[i]):
continue
n_corr += token.head.i == heads[i]
total += 1
    print(loss, skipped, (loss + skipped + total))
    print(pos_corr / n_tokens)
return float(n_corr) / (total + loss)
def main(dev_loc, model_dir):
    print(evaluate(English, dev_loc, model_dir))
if __name__ == '__main__':
plac.call(main)

157
bin/parser/conll_train.py Executable file

@@ -0,0 +1,157 @@
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import os
from os import path
import shutil
import io
import random
import time
import gzip
import plac
import cProfile
import pstats
import spacy.util
from spacy.en import English
from spacy.gold import GoldParse
from spacy.syntax.util import Config
from spacy.syntax.arc_eager import ArcEager
from spacy.syntax.parser import Parser
from spacy.scorer import Scorer
from spacy.tagger import Tagger
# Last updated for spaCy v0.97
def read_conll(file_):
"""Read a standard CoNLL/MALT-style format"""
sents = []
for sent_str in file_.read().strip().split('\n\n'):
ids = []
words = []
heads = []
labels = []
tags = []
for i, line in enumerate(sent_str.split('\n')):
word, pos_string, head_idx, label = _parse_line(line)
words.append(word)
if head_idx < 0:
head_idx = i
ids.append(i)
heads.append(head_idx)
labels.append(label)
tags.append(pos_string)
text = ' '.join(words)
annot = (ids, words, tags, heads, labels, ['O'] * len(ids))
sents.append((None, [(annot, [])]))
return sents
def _parse_line(line):
pieces = line.split()
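    # Three layouts are supported: a simple 4-column (word, tag, head, label)
    # format, a 15-column CoNLL-09-style format, and a 10-column
    # CoNLL-X/MALT-style fallback.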
if len(pieces) == 4:
word, pos, head_idx, label = pieces
head_idx = int(head_idx)
elif len(pieces) == 15:
id_ = int(pieces[0].split('_')[-1])
word = pieces[1]
pos = pieces[4]
head_idx = int(pieces[8])-1
label = pieces[10]
else:
id_ = int(pieces[0].split('_')[-1])
word = pieces[1]
pos = pieces[4]
head_idx = int(pieces[6])-1
label = pieces[7]
if head_idx == 0:
label = 'ROOT'
return word, pos, head_idx, label
def score_model(scorer, nlp, raw_text, annot_tuples, verbose=False):
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.parser(tokens)
gold = GoldParse(tokens, annot_tuples, make_projective=False)
scorer.score(tokens, gold, verbose=verbose, punct_labels=('--', 'p', 'punct'))
def train(Language, gold_tuples, model_dir, n_iter=15, feat_set=u'basic', seed=0,
gold_preproc=False, force_gold=False):
dep_model_dir = path.join(model_dir, 'deps')
pos_model_dir = path.join(model_dir, 'pos')
if path.exists(dep_model_dir):
shutil.rmtree(dep_model_dir)
if path.exists(pos_model_dir):
shutil.rmtree(pos_model_dir)
os.mkdir(dep_model_dir)
os.mkdir(pos_model_dir)
Config.write(dep_model_dir, 'config', features=feat_set, seed=seed,
labels=ArcEager.get_labels(gold_tuples))
nlp = Language(data_dir=model_dir, tagger=False, parser=False, entity=False)
nlp.tagger = Tagger.blank(nlp.vocab, Tagger.default_templates())
nlp.parser = Parser.from_dir(dep_model_dir, nlp.vocab.strings, ArcEager)
print("Itn.\tP.Loss\tUAS\tNER F.\tTag %\tToken %")
for itn in range(n_iter):
scorer = Scorer()
loss = 0
for _, sents in gold_tuples:
for annot_tuples, _ in sents:
if len(annot_tuples[1]) == 1:
continue
score_model(scorer, nlp, None, annot_tuples, verbose=False)
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
gold = GoldParse(tokens, annot_tuples, make_projective=True)
if not gold.is_projective:
raise Exception(
"Non-projective sentence in training, after we should "
"have enforced projectivity: %s" % annot_tuples
)
loss += nlp.parser.train(tokens, gold)
nlp.tagger.train(tokens, gold.tags)
random.shuffle(gold_tuples)
print('%d:\t%d\t%.3f\t%.3f\t%.3f' % (itn, loss, scorer.uas,
scorer.tags_acc, scorer.token_acc))
print('end training')
nlp.end_training(model_dir)
print('done')
@plac.annotations(
train_loc=("Location of CoNLL 09 formatted training file"),
dev_loc=("Location of CoNLL 09 formatted development file"),
model_dir=("Location of output model directory"),
eval_only=("Skip training, and only evaluate", "flag", "e", bool),
n_iter=("Number of training iterations", "option", "i", int),
)
def main(train_loc, dev_loc, model_dir, n_iter=15, eval_only=False):
with io.open(train_loc, 'r', encoding='utf8') as file_:
train_sents = read_conll(file_)
if not eval_only:
train(English, train_sents, model_dir, n_iter=n_iter)
nlp = English(data_dir=model_dir)
dev_sents = read_conll(io.open(dev_loc, 'r', encoding='utf8'))
scorer = Scorer()
for _, sents in dev_sents:
for annot_tuples, _ in sents:
score_model(scorer, nlp, None, annot_tuples)
print('TOK', 100-scorer.token_acc)
print('POS', scorer.tags_acc)
print('UAS', scorer.uas)
print('LAS', scorer.las)
if __name__ == '__main__':
plac.call(main)

261
bin/parser/nn_train.py Executable file

@@ -0,0 +1,261 @@
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from os import path
import shutil
import codecs
import random
import plac
import cProfile
import pstats
import re
import spacy.util
from spacy.en import English
from spacy.en.pos import POS_TEMPLATES, POS_TAGS, setup_model_dir
from spacy.syntax.util import Config
from spacy.gold import read_json_file
from spacy.gold import GoldParse
from spacy.scorer import Scorer
from spacy.syntax.parser import Parser, get_templates
from spacy._theano import TheanoModel
import theano
import theano.tensor as T
from theano.printing import Print
import numpy
from collections import OrderedDict, defaultdict
theano.config.profile = False
theano.config.floatX = 'float32'
floatX = theano.config.floatX
def L1(L1_reg, *weights):
return L1_reg * sum(abs(w).sum() for w in weights)
def L2(L2_reg, *weights):
return L2_reg * sum((w ** 2).sum() for w in weights)
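# RMSProp: divide each parameter's gradient by a running root-mean-square of
# its recent gradients, so the effective learning rate adapts per parameter.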
def rms_prop(loss, params, eta=1.0, rho=0.9, eps=1e-6):
updates = OrderedDict()
for param in params:
value = param.get_value(borrow=True)
        accu = theano.shared(numpy.zeros(value.shape, dtype=value.dtype),
                             broadcastable=param.broadcastable)
grad = T.grad(loss, param)
accu_new = rho * accu + (1 - rho) * grad ** 2
updates[accu] = accu_new
updates[param] = param - (eta * grad / T.sqrt(accu_new + eps))
return updates
def relu(x):
return x * (x > 0)
def feed_layer(activation, weights, bias, input_):
return activation(T.dot(input_, weights) + bias)
def init_weights(n_in, n_out):
rng = numpy.random.RandomState(1235)
weights = numpy.asarray(
rng.standard_normal(size=(n_in, n_out)) * numpy.sqrt(2.0 / n_in),
dtype=theano.config.floatX
)
bias = numpy.zeros((n_out,), dtype=theano.config.floatX)
    return [theano.shared(weights, name='W'), theano.shared(bias, name='b')]
def compile_model(n_classes, n_hidden, n_in, optimizer):
x = T.vector('x')
costs = T.ivector('costs')
loss = T.scalar('loss')
maxent_W, maxent_b = init_weights(n_hidden, n_classes)
hidden_W, hidden_b = init_weights(n_in, n_hidden)
# Feed the inputs forward through the network
p_y_given_x = feed_layer(
T.nnet.softmax,
maxent_W,
maxent_b,
feed_layer(
relu,
hidden_W,
hidden_b,
x))
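    # The parser is trained to put probability mass on the zero-cost
    # transitions: the loss is the negative log of the total probability
    # assigned to actions with cost 0 (the 1e-8 guards against log(0)).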
loss = -T.log(T.sum(p_y_given_x[0] * T.eq(costs, 0)) + 1e-8)
train_model = theano.function(
name='train_model',
inputs=[x, costs],
outputs=[p_y_given_x[0], T.grad(loss, x), loss],
updates=optimizer(loss, [maxent_W, maxent_b, hidden_W, hidden_b]),
on_unused_input='warn'
)
evaluate_model = theano.function(
name='evaluate_model',
inputs=[x],
outputs=[
feed_layer(
T.nnet.softmax,
maxent_W,
maxent_b,
feed_layer(
relu,
hidden_W,
hidden_b,
x
)
)[0]
]
)
return train_model, evaluate_model
def score_model(scorer, nlp, annot_tuples, verbose=False):
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.parser(tokens)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=verbose)
def train(Language, gold_tuples, model_dir, n_iter=15, feat_set=u'basic',
eta=0.01, mu=0.9, nv_hidden=100, nv_word=10, nv_tag=10, nv_label=10,
seed=0, n_sents=0, verbose=False):
dep_model_dir = path.join(model_dir, 'deps')
pos_model_dir = path.join(model_dir, 'pos')
if path.exists(dep_model_dir):
shutil.rmtree(dep_model_dir)
if path.exists(pos_model_dir):
shutil.rmtree(pos_model_dir)
os.mkdir(dep_model_dir)
os.mkdir(pos_model_dir)
setup_model_dir(sorted(POS_TAGS.keys()), POS_TAGS, POS_TEMPLATES, pos_model_dir)
Config.write(dep_model_dir, 'config',
seed=seed,
templates=tuple(),
labels=Language.ParserTransitionSystem.get_labels(gold_tuples),
vector_lengths=(nv_word, nv_tag, nv_label),
hidden_nodes=nv_hidden,
eta=eta,
mu=mu
)
# Bake-in hyper-parameters
    # rho and eps are not passed in, so fall back to the rms_prop defaults.
    optimizer = lambda loss, params: rms_prop(loss, params, eta=eta, rho=0.9, eps=1e-6)
nlp = Language(data_dir=model_dir)
n_classes = nlp.parser.model.n_classes
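    # NOTE: n_in, input_spec and model_loc are not defined anywhere in this
    # script; this experimental trainer will not run as-is without them.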
train, predict = compile_model(n_classes, nv_hidden, n_in, optimizer)
nlp.parser.model = TheanoModel(n_classes, input_spec, train,
predict, model_loc)
if n_sents > 0:
gold_tuples = gold_tuples[:n_sents]
print "Itn.\tP.Loss\tUAS\tTag %\tToken %"
log_loc = path.join(model_dir, 'job.log')
for itn in range(n_iter):
scorer = Scorer()
loss = 0
for _, sents in gold_tuples:
for annot_tuples, ctnt in sents:
if len(annot_tuples[1]) == 1:
continue
score_model(scorer, nlp, annot_tuples)
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
gold = GoldParse(tokens, annot_tuples, make_projective=True)
assert gold.is_projective
loss += nlp.parser.train(tokens, gold)
nlp.tagger.train(tokens, gold.tags)
random.shuffle(gold_tuples)
logline = '%d:\t%d\t%.3f\t%.3f\t%.3f' % (itn, loss, scorer.uas,
scorer.tags_acc,
scorer.token_acc)
        print(logline)
        with open(log_loc, 'a') as file_:
file_.write(logline + '\n')
nlp.parser.model.end_training()
nlp.tagger.model.end_training()
nlp.vocab.strings.dump(path.join(model_dir, 'vocab', 'strings.txt'))
return nlp
def evaluate(nlp, gold_tuples, gold_preproc=True):
scorer = Scorer()
for raw_text, sents in gold_tuples:
for annot_tuples, brackets in sents:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.parser(tokens)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold)
return scorer
@plac.annotations(
train_loc=("Location of training file or directory"),
dev_loc=("Location of development file or directory"),
model_dir=("Location of output model directory",),
eval_only=("Skip training, and only evaluate", "flag", "e", bool),
n_sents=("Number of training sentences", "option", "n", int),
n_iter=("Number of training iterations", "option", "i", int),
verbose=("Verbose error reporting", "flag", "v", bool),
nv_word=("Word vector length", "option", "W", int),
nv_tag=("Tag vector length", "option", "T", int),
nv_label=("Label vector length", "option", "L", int),
nv_hidden=("Hidden nodes length", "option", "H", int),
eta=("Learning rate", "option", "E", float),
mu=("Momentum", "option", "M", float),
)
def main(train_loc, dev_loc, model_dir, n_sents=0, n_iter=15, verbose=False,
nv_word=10, nv_tag=10, nv_label=10, nv_hidden=10,
eta=0.1, mu=0.9, eval_only=False):
gold_train = list(read_json_file(train_loc, lambda doc: 'wsj' in doc['id']))
nlp = train(English, gold_train, model_dir,
feat_set='embed',
eta=eta, mu=mu,
nv_word=nv_word, nv_tag=nv_tag, nv_label=nv_label, nv_hidden=nv_hidden,
n_sents=n_sents, n_iter=n_iter,
verbose=verbose)
scorer = evaluate(nlp, list(read_json_file(dev_loc)))
    print('TOK', 100 - scorer.token_acc)
    print('POS', scorer.tags_acc)
    print('UAS', scorer.uas)
    print('LAS', scorer.las)
    print('NER P', scorer.ents_p)
    print('NER R', scorer.ents_r)
    print('NER F', scorer.ents_f)
if __name__ == '__main__':
plac.call(main)

246
bin/parser/train.py Executable file

@@ -0,0 +1,246 @@
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from os import path
import shutil
import io
import random
import plac
import re
import spacy.util
from spacy.syntax.util import Config
from spacy.gold import read_json_file
from spacy.gold import GoldParse
from spacy.scorer import Scorer
from spacy.syntax.arc_eager import ArcEager
from spacy.syntax.ner import BiluoPushDown
from spacy.tagger import Tagger
from spacy.syntax.parser import Parser
from spacy.syntax.nonproj import PseudoProjectivity
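# Noise functions: with probability noise_level, corrupt a character by
# swapping spaces and newlines, deleting sentence-final punctuation, or
# lower-casing it, to make training more robust to messy input.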
def _corrupt(c, noise_level):
if random.random() >= noise_level:
return c
elif c == ' ':
return '\n'
elif c == '\n':
return ' '
elif c in ['.', "'", "!", "?"]:
return ''
else:
return c.lower()
def add_noise(orig, noise_level):
if random.random() >= noise_level:
return orig
elif type(orig) == list:
corrupted = [_corrupt(word, noise_level) for word in orig]
corrupted = [w for w in corrupted if w]
return corrupted
else:
return ''.join(_corrupt(c, noise_level) for c in orig)
def score_model(scorer, nlp, raw_text, annot_tuples, verbose=False):
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
else:
tokens = nlp.tokenizer(raw_text)
nlp.tagger(tokens)
nlp.entity(tokens)
nlp.parser(tokens)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=verbose)
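# Merge per-sentence annotations into a single document-level annotation,
# offsetting token IDs and head indices by the running token count.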
def _merge_sents(sents):
m_deps = [[], [], [], [], [], []]
m_brackets = []
i = 0
for (ids, words, tags, heads, labels, ner), brackets in sents:
m_deps[0].extend(id_ + i for id_ in ids)
m_deps[1].extend(words)
m_deps[2].extend(tags)
m_deps[3].extend(head + i for head in heads)
m_deps[4].extend(labels)
m_deps[5].extend(ner)
m_brackets.extend((b['first'] + i, b['last'] + i, b['label']) for b in brackets)
i += len(ids)
return [(m_deps, m_brackets)]
def train(Language, gold_tuples, model_dir, n_iter=15, feat_set=u'basic',
seed=0, gold_preproc=False, n_sents=0, corruption_level=0,
beam_width=1, verbose=False,
use_orig_arc_eager=False, pseudoprojective=False):
dep_model_dir = path.join(model_dir, 'deps')
ner_model_dir = path.join(model_dir, 'ner')
pos_model_dir = path.join(model_dir, 'pos')
if path.exists(dep_model_dir):
shutil.rmtree(dep_model_dir)
if path.exists(ner_model_dir):
shutil.rmtree(ner_model_dir)
if path.exists(pos_model_dir):
shutil.rmtree(pos_model_dir)
os.mkdir(dep_model_dir)
os.mkdir(ner_model_dir)
os.mkdir(pos_model_dir)
if pseudoprojective:
# preprocess training data here before ArcEager.get_labels() is called
gold_tuples = PseudoProjectivity.preprocess_training_data(gold_tuples)
Config.write(dep_model_dir, 'config', features=feat_set, seed=seed,
labels=ArcEager.get_labels(gold_tuples),
                 beam_width=beam_width, projectivize=pseudoprojective)
Config.write(ner_model_dir, 'config', features='ner', seed=seed,
labels=BiluoPushDown.get_labels(gold_tuples),
beam_width=0)
if n_sents > 0:
gold_tuples = gold_tuples[:n_sents]
nlp = Language(data_dir=model_dir, tagger=False, parser=False, entity=False)
nlp.tagger = Tagger.blank(nlp.vocab, Tagger.default_templates())
nlp.parser = Parser.from_dir(dep_model_dir, nlp.vocab.strings, ArcEager)
nlp.entity = Parser.from_dir(ner_model_dir, nlp.vocab.strings, BiluoPushDown)
print("Itn.\tP.Loss\tUAS\tNER F.\tTag %\tToken %")
for itn in range(n_iter):
scorer = Scorer()
loss = 0
for raw_text, sents in gold_tuples:
if gold_preproc:
raw_text = None
else:
sents = _merge_sents(sents)
for annot_tuples, ctnt in sents:
if len(annot_tuples[1]) == 1:
continue
score_model(scorer, nlp, raw_text, annot_tuples,
verbose=verbose if itn >= 2 else False)
if raw_text is None:
words = add_noise(annot_tuples[1], corruption_level)
tokens = nlp.tokenizer.tokens_from_list(words)
else:
raw_text = add_noise(raw_text, corruption_level)
tokens = nlp.tokenizer(raw_text)
nlp.tagger(tokens)
gold = GoldParse(tokens, annot_tuples)
if not gold.is_projective:
raise Exception("Non-projective sentence in training: %s" % annot_tuples[1])
loss += nlp.parser.train(tokens, gold)
nlp.entity.train(tokens, gold)
nlp.tagger.train(tokens, gold.tags)
random.shuffle(gold_tuples)
print('%d:\t%d\t%.3f\t%.3f\t%.3f\t%.3f' % (itn, loss, scorer.uas, scorer.ents_f,
scorer.tags_acc,
scorer.token_acc))
print('end training')
nlp.end_training(model_dir)
print('done')
def evaluate(Language, gold_tuples, model_dir, gold_preproc=False, verbose=False,
beam_width=None, cand_preproc=None):
nlp = Language(data_dir=model_dir)
if nlp.lang == 'de':
nlp.vocab.morphology.lemmatizer = lambda string,pos: set([string])
if beam_width is not None:
nlp.parser.cfg.beam_width = beam_width
scorer = Scorer()
for raw_text, sents in gold_tuples:
if gold_preproc:
raw_text = None
else:
sents = _merge_sents(sents)
for annot_tuples, brackets in sents:
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.parser(tokens)
nlp.entity(tokens)
else:
tokens = nlp(raw_text)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=verbose)
return scorer
def write_parses(Language, dev_loc, model_dir, out_loc):
nlp = Language(data_dir=model_dir)
gold_tuples = read_json_file(dev_loc)
scorer = Scorer()
    out_file = io.open(out_loc, 'w', encoding='utf8')
for raw_text, sents in gold_tuples:
sents = _merge_sents(sents)
for annot_tuples, brackets in sents:
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.entity(tokens)
nlp.parser(tokens)
else:
tokens = nlp(raw_text)
#gold = GoldParse(tokens, annot_tuples)
#scorer.score(tokens, gold, verbose=False)
for sent in tokens.sents:
for t in sent:
if not t.is_space:
out_file.write(
'%d\t%s\t%s\t%s\t%s\n' % (t.i, t.orth_, t.tag_, t.head.orth_, t.dep_)
)
out_file.write('\n')
@plac.annotations(
language=("The language to train", "positional", None, str, ['en','de', 'zh']),
train_loc=("Location of training file or directory"),
dev_loc=("Location of development file or directory"),
model_dir=("Location of output model directory",),
eval_only=("Skip training, and only evaluate", "flag", "e", bool),
corruption_level=("Amount of noise to add to training data", "option", "c", float),
gold_preproc=("Use gold-standard sentence boundaries in training?", "flag", "g", bool),
out_loc=("Out location", "option", "o", str),
n_sents=("Number of training sentences", "option", "n", int),
n_iter=("Number of training iterations", "option", "i", int),
verbose=("Verbose error reporting", "flag", "v", bool),
debug=("Debug mode", "flag", "d", bool),
pseudoprojective=("Use pseudo-projective parsing", "flag", "p", bool),
)
def main(language, train_loc, dev_loc, model_dir, n_sents=0, n_iter=15, out_loc="", verbose=False,
debug=False, corruption_level=0.0, gold_preproc=False, eval_only=False, pseudoprojective=False):
lang = spacy.util.get_lang_class(language)
if not eval_only:
gold_train = list(read_json_file(train_loc))
train(lang, gold_train, model_dir,
feat_set='basic' if not debug else 'debug',
gold_preproc=gold_preproc, n_sents=n_sents,
corruption_level=corruption_level, n_iter=n_iter,
          verbose=verbose, pseudoprojective=pseudoprojective)
if out_loc:
write_parses(lang, dev_loc, model_dir, out_loc)
scorer = evaluate(lang, list(read_json_file(dev_loc)),
model_dir, gold_preproc=gold_preproc, verbose=verbose)
print('TOK', scorer.token_acc)
print('POS', scorer.tags_acc)
print('UAS', scorer.uas)
print('LAS', scorer.las)
print('NER P', scorer.ents_p)
print('NER R', scorer.ents_r)
print('NER F', scorer.ents_f)
if __name__ == '__main__':
plac.call(main)

161
bin/parser/train_ud.py Normal file

@@ -0,0 +1,161 @@
import plac
import json
from os import path
import shutil
import os
import random
import io
from spacy.syntax.util import Config
from spacy.gold import GoldParse
from spacy.tokenizer import Tokenizer
from spacy.vocab import Vocab
from spacy.tagger import Tagger
from spacy.syntax.parser import Parser
from spacy.syntax.arc_eager import ArcEager
from spacy.syntax.parser import get_templates
from spacy.scorer import Scorer
import spacy.attrs
from spacy.language import Language
from spacy.tagger import W_orth
TAGGER_TEMPLATES = (
(W_orth,),
)
try:
from codecs import open
except ImportError:
pass
class TreebankParser(object):
@staticmethod
def setup_model_dir(model_dir, labels, templates, feat_set='basic', seed=0):
dep_model_dir = path.join(model_dir, 'deps')
pos_model_dir = path.join(model_dir, 'pos')
if path.exists(dep_model_dir):
shutil.rmtree(dep_model_dir)
if path.exists(pos_model_dir):
shutil.rmtree(pos_model_dir)
os.mkdir(dep_model_dir)
os.mkdir(pos_model_dir)
Config.write(dep_model_dir, 'config', features=feat_set, seed=seed,
labels=labels)
@classmethod
def from_dir(cls, tag_map, model_dir):
vocab = Vocab(tag_map=tag_map, get_lex_attr=Language.default_lex_attrs())
vocab.get_lex_attr[spacy.attrs.LANG] = lambda _: 0
tokenizer = Tokenizer(vocab, {}, None, None, None)
tagger = Tagger.blank(vocab, TAGGER_TEMPLATES)
cfg = Config.read(path.join(model_dir, 'deps'), 'config')
parser = Parser.from_dir(path.join(model_dir, 'deps'), vocab.strings, ArcEager)
return cls(vocab, tokenizer, tagger, parser)
def __init__(self, vocab, tokenizer, tagger, parser):
self.vocab = vocab
self.tokenizer = tokenizer
self.tagger = tagger
self.parser = parser
def train(self, words, tags, heads, deps):
tokens = self.tokenizer.tokens_from_list(list(words))
self.tagger.train(tokens, tags)
tokens = self.tokenizer.tokens_from_list(list(words))
ids = range(len(words))
ner = ['O'] * len(words)
gold = GoldParse(tokens, ((ids, words, tags, heads, deps, ner)),
make_projective=False)
self.tagger(tokens)
if gold.is_projective:
try:
self.parser.train(tokens, gold)
except:
for id_, word, head, dep in zip(ids, words, heads, deps):
print(id_, word, head, dep)
raise
def __call__(self, words, tags=None):
tokens = self.tokenizer.tokens_from_list(list(words))
if tags is None:
self.tagger(tokens)
else:
self.tagger.tag_from_strings(tokens, tags)
self.parser(tokens)
return tokens
def end_training(self, data_dir):
self.parser.model.end_training()
self.parser.model.dump(path.join(data_dir, 'deps', 'model'))
self.tagger.model.end_training()
self.tagger.model.dump(path.join(data_dir, 'pos', 'model'))
strings_loc = path.join(data_dir, 'vocab', 'strings.json')
with io.open(strings_loc, 'w', encoding='utf8') as file_:
self.vocab.strings.dump(file_)
self.vocab.dump(path.join(data_dir, 'vocab', 'lexemes.bin'))
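# Parse CoNLL-U formatted text: ten tab-separated columns per token; lines
# whose ID contains '-' are multi-word token ranges and are skipped.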
def read_conllx(loc):
with open(loc, 'r', 'utf8') as file_:
text = file_.read()
for sent in text.strip().split('\n\n'):
lines = sent.strip().split('\n')
if lines:
while lines[0].startswith('#'):
lines.pop(0)
tokens = []
for line in lines:
id_, word, lemma, pos, tag, morph, head, dep, _1, _2 = line.split()
if '-' in id_:
continue
id_ = int(id_) - 1
head = (int(head) - 1) if head != '0' else id_
dep = 'ROOT' if dep == 'root' else dep
tokens.append((id_, word, tag, head, dep, 'O'))
        tuples = list(zip(*tokens))
yield (None, [(tuples, [])])
def score_model(nlp, gold_docs, verbose=False):
scorer = Scorer()
for _, gold_doc in gold_docs:
for annot_tuples, _ in gold_doc:
tokens = nlp(list(annot_tuples[1]), tags=list(annot_tuples[2]))
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=verbose)
return scorer
def main(train_loc, dev_loc, model_dir, tag_map_loc):
with open(tag_map_loc) as file_:
tag_map = json.loads(file_.read())
train_sents = list(read_conllx(train_loc))
labels = ArcEager.get_labels(train_sents)
templates = get_templates('basic')
TreebankParser.setup_model_dir(model_dir, labels, templates)
nlp = TreebankParser.from_dir(tag_map, model_dir)
for itn in range(15):
for _, doc_sents in train_sents:
for (ids, words, tags, heads, deps, ner), _ in doc_sents:
nlp.train(words, tags, heads, deps)
random.shuffle(train_sents)
scorer = score_model(nlp, read_conllx(dev_loc))
print('%d:\t%.3f\t%.3f' % (itn, scorer.uas, scorer.tags_acc))
nlp.end_training(model_dir)
scorer = score_model(nlp, read_conllx(dev_loc))
print('%d:\t%.3f\t%.3f\t%.3f' % (itn, scorer.uas, scorer.las, scorer.tags_acc))
if __name__ == '__main__':
plac.call(main)

194
bin/prepare_treebank.py Normal file

@@ -0,0 +1,194 @@
"""Convert OntoNotes into a json format.
doc: {
id: string,
paragraphs: [{
raw: string,
sents: [int],
tokens: [{
start: int,
tag: string,
head: int,
dep: string}],
ner: [{
start: int,
end: int,
label: string}],
brackets: [{
start: int,
end: int,
label: string}]}]}
Consumes output of spacy/munge/align_raw.py
"""
from __future__ import unicode_literals
import plac
import json
from os import path
import os
import re
import io
from collections import defaultdict
from spacy.munge import read_ptb
from spacy.munge import read_conll
from spacy.munge import read_ner
def _iter_raw_files(raw_loc):
files = json.load(open(raw_loc))
for f in files:
yield f
def format_doc(file_id, raw_paras, ptb_text, dep_text, ner_text):
ptb_sents = read_ptb.split(ptb_text)
dep_sents = read_conll.split(dep_text)
if len(ptb_sents) != len(dep_sents):
return None
if ner_text is not None:
ner_sents = read_ner.split(ner_text)
else:
ner_sents = [None] * len(ptb_sents)
i = 0
doc = {'id': file_id}
if raw_paras is None:
doc['paragraphs'] = [format_para(None, ptb_sents, dep_sents, ner_sents)]
#for ptb_sent, dep_sent, ner_sent in zip(ptb_sents, dep_sents, ner_sents):
# doc['paragraphs'].append(format_para(None, [ptb_sent], [dep_sent], [ner_sent]))
else:
doc['paragraphs'] = []
for raw_sents in raw_paras:
para = format_para(
' '.join(raw_sents).replace('<SEP>', ''),
ptb_sents[i:i+len(raw_sents)],
dep_sents[i:i+len(raw_sents)],
ner_sents[i:i+len(raw_sents)])
if para['sentences']:
doc['paragraphs'].append(para)
i += len(raw_sents)
return doc
def format_para(raw_text, ptb_sents, dep_sents, ner_sents):
para = {'raw': raw_text, 'sentences': []}
offset = 0
assert len(ptb_sents) == len(dep_sents) == len(ner_sents)
for ptb_text, dep_text, ner_text in zip(ptb_sents, dep_sents, ner_sents):
_, deps = read_conll.parse(dep_text, strip_bad_periods=True)
if deps and 'VERB' in [t['tag'] for t in deps]:
continue
if ner_text is not None:
_, ner = read_ner.parse(ner_text, strip_bad_periods=True)
else:
ner = ['-' for _ in deps]
_, brackets = read_ptb.parse(ptb_text, strip_bad_periods=True)
# Necessary because the ClearNLP converter deletes EDITED words.
if len(ner) != len(deps):
ner = ['-' for _ in deps]
para['sentences'].append(format_sentence(deps, ner, brackets))
return para
def format_sentence(deps, ner, brackets):
sent = {'tokens': [], 'brackets': []}
for token_id, (token, token_ent) in enumerate(zip(deps, ner)):
sent['tokens'].append(format_token(token_id, token, token_ent))
for label, start, end in brackets:
if start != end:
sent['brackets'].append({
'label': label,
'first': start,
'last': (end-1)})
return sent
def format_token(token_id, token, ner):
assert token_id == token['id']
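    # Heads are stored as offsets relative to the token's own index; a head
    # of -1 marks the root, which becomes a self-offset of 0.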
head = (token['head'] - token_id) if token['head'] != -1 else 0
return {
'id': token_id,
'orth': token['word'],
'tag': token['tag'],
'head': head,
'dep': token['dep'],
'ner': ner}
def read_file(*pieces):
loc = path.join(*pieces)
if not path.exists(loc):
return None
else:
return io.open(loc, 'r', encoding='utf8').read().strip()
def get_file_names(section_dir, subsection):
filenames = []
for fn in os.listdir(path.join(section_dir, subsection)):
filenames.append(fn.rsplit('.', 1)[0])
return list(sorted(set(filenames)))
def read_wsj_with_source(onto_dir, raw_dir):
# Now do WSJ, with source alignment
onto_dir = path.join(onto_dir, 'data', 'english', 'annotations', 'nw', 'wsj')
docs = {}
for i in range(25):
section = str(i) if i >= 10 else ('0' + str(i))
raw_loc = path.join(raw_dir, 'wsj%s.json' % section)
for j, (filename, raw_paras) in enumerate(_iter_raw_files(raw_loc)):
if section == '00':
j += 1
if section == '04' and filename == '55':
continue
ptb = read_file(onto_dir, section, '%s.parse' % filename)
dep = read_file(onto_dir, section, '%s.parse.dep' % filename)
ner = read_file(onto_dir, section, '%s.name' % filename)
if ptb is not None and dep is not None:
docs[filename] = format_doc(filename, raw_paras, ptb, dep, ner)
return docs
def get_doc(onto_dir, file_path, wsj_docs):
filename = file_path.rsplit('/', 1)[1]
if filename in wsj_docs:
return wsj_docs[filename]
else:
ptb = read_file(onto_dir, file_path + '.parse')
dep = read_file(onto_dir, file_path + '.parse.dep')
ner = read_file(onto_dir, file_path + '.name')
if ptb is not None and dep is not None:
return format_doc(filename, None, ptb, dep, ner)
else:
return None
def read_ids(loc):
return open(loc).read().strip().split('\n')
def main(onto_dir, raw_dir, out_dir):
wsj_docs = read_wsj_with_source(onto_dir, raw_dir)
for partition in ('train', 'test', 'development'):
ids = read_ids(path.join(onto_dir, '%s.id' % partition))
docs_by_genre = defaultdict(list)
for file_path in ids:
doc = get_doc(onto_dir, file_path, wsj_docs)
if doc is not None:
genre = file_path.split('/')[3]
docs_by_genre[genre].append(doc)
part_dir = path.join(out_dir, partition)
if not path.exists(part_dir):
os.mkdir(part_dir)
for genre, docs in sorted(docs_by_genre.items()):
out_loc = path.join(part_dir, genre + '.json')
with open(out_loc, 'w') as file_:
json.dump(docs, file_, indent=4)
if __name__ == '__main__':
plac.call(main)

13
bin/prepare_vecs.py Normal file

@@ -0,0 +1,13 @@
"""Read a vector file, and prepare it as binary data, for easy consumption"""
import plac
from spacy.vocab import write_binary_vectors
def main(in_loc, out_loc):
write_binary_vectors(in_loc, out_loc)
if __name__ == '__main__':
plac.call(main)

175
bin/tagger/train.py Executable file

@@ -0,0 +1,175 @@
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from os import path
import shutil
import codecs
import random
import plac
import re
import spacy.util
from spacy.en import English
from spacy.tagger import Tagger
from spacy.syntax.util import Config
from spacy.gold import read_json_file
from spacy.gold import GoldParse
from spacy.scorer import Scorer
def score_model(scorer, nlp, raw_text, annot_tuples):
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
else:
tokens = nlp.tokenizer(raw_text)
nlp.tagger(tokens)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold)
def _merge_sents(sents):
m_deps = [[], [], [], [], [], []]
m_brackets = []
i = 0
for (ids, words, tags, heads, labels, ner), brackets in sents:
m_deps[0].extend(id_ + i for id_ in ids)
m_deps[1].extend(words)
m_deps[2].extend(tags)
m_deps[3].extend(head + i for head in heads)
m_deps[4].extend(labels)
m_deps[5].extend(ner)
m_brackets.extend((b['first'] + i, b['last'] + i, b['label']) for b in brackets)
i += len(ids)
return [(m_deps, m_brackets)]
def train(Language, gold_tuples, model_dir, n_iter=15, feat_set=u'basic',
seed=0, gold_preproc=False, n_sents=0, corruption_level=0,
beam_width=1, verbose=False,
use_orig_arc_eager=False):
if n_sents > 0:
gold_tuples = gold_tuples[:n_sents]
templates = Tagger.default_templates()
nlp = Language(data_dir=model_dir, tagger=False)
nlp.tagger = Tagger.blank(nlp.vocab, templates)
print("Itn.\tP.Loss\tUAS\tNER F.\tTag %\tToken %")
for itn in range(n_iter):
scorer = Scorer()
loss = 0
for raw_text, sents in gold_tuples:
if gold_preproc:
raw_text = None
else:
sents = _merge_sents(sents)
for annot_tuples, ctnt in sents:
words = annot_tuples[1]
gold_tags = annot_tuples[2]
score_model(scorer, nlp, raw_text, annot_tuples)
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(words)
else:
tokens = nlp.tokenizer(raw_text)
loss += nlp.tagger.train(tokens, gold_tags)
random.shuffle(gold_tuples)
print('%d:\t%d\t%.3f\t%.3f\t%.3f\t%.3f' % (itn, loss, scorer.uas, scorer.ents_f,
scorer.tags_acc,
scorer.token_acc))
nlp.end_training(model_dir)
def evaluate(Language, gold_tuples, model_dir, gold_preproc=False, verbose=False,
beam_width=None):
nlp = Language(data_dir=model_dir)
if beam_width is not None:
nlp.parser.cfg.beam_width = beam_width
scorer = Scorer()
for raw_text, sents in gold_tuples:
if gold_preproc:
raw_text = None
else:
sents = _merge_sents(sents)
for annot_tuples, brackets in sents:
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.entity(tokens)
nlp.parser(tokens)
else:
tokens = nlp(raw_text, merge_mwes=False)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=verbose)
return scorer
def write_parses(Language, dev_loc, model_dir, out_loc, beam_width=None):
nlp = Language(data_dir=model_dir)
if beam_width is not None:
nlp.parser.cfg.beam_width = beam_width
gold_tuples = read_json_file(dev_loc)
scorer = Scorer()
out_file = codecs.open(out_loc, 'w', 'utf8')
for raw_text, sents in gold_tuples:
sents = _merge_sents(sents)
for annot_tuples, brackets in sents:
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.entity(tokens)
nlp.parser(tokens)
else:
tokens = nlp(raw_text, merge_mwes=False)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=False)
for t in tokens:
out_file.write(
'%s\t%s\t%s\t%s\n' % (t.orth_, t.tag_, t.head.orth_, t.dep_)
)
return scorer
@plac.annotations(
train_loc=("Location of training file or directory"),
dev_loc=("Location of development file or directory"),
model_dir=("Location of output model directory",),
eval_only=("Skip training, and only evaluate", "flag", "e", bool),
corruption_level=("Amount of noise to add to training data", "option", "c", float),
gold_preproc=("Use gold-standard sentence boundaries in training?", "flag", "g", bool),
out_loc=("Out location", "option", "o", str),
n_sents=("Number of training sentences", "option", "n", int),
n_iter=("Number of training iterations", "option", "i", int),
verbose=("Verbose error reporting", "flag", "v", bool),
debug=("Debug mode", "flag", "d", bool),
)
def main(train_loc, dev_loc, model_dir, n_sents=0, n_iter=15, out_loc="", verbose=False,
debug=False, corruption_level=0.0, gold_preproc=False, eval_only=False):
if not eval_only:
gold_train = list(read_json_file(train_loc))
train(English, gold_train, model_dir,
feat_set='basic' if not debug else 'debug',
gold_preproc=gold_preproc, n_sents=n_sents,
corruption_level=corruption_level, n_iter=n_iter,
verbose=verbose)
#if out_loc:
# write_parses(English, dev_loc, model_dir, out_loc, beam_width=beam_width)
scorer = evaluate(English, list(read_json_file(dev_loc)),
model_dir, gold_preproc=gold_preproc, verbose=verbose)
print('TOK', scorer.token_acc)
print('POS', scorer.tags_acc)
print('UAS', scorer.uas)
print('LAS', scorer.las)
print('NER P', scorer.ents_p)
print('NER R', scorer.ents_r)
print('NER F', scorer.ents_f)
if __name__ == '__main__':
plac.call(main)
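# A hedged invocation sketch (the paths are placeholders; the flags mirror
# the plac annotations above):
#
#     python bin/tagger/train.py train.json dev.json models/en -i 15 -n 5000
#
# i.e. train the tagger for 15 iterations on the first 5000 sentences, then
# report the TOK/POS/UAS/LAS/NER figures on the development data.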

View File

@ -0,0 +1,160 @@
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import os
from os import path
import shutil
import io
import random
import time
import gzip
import ujson
import plac
import cProfile
import pstats
import spacy.util
from spacy.de import German
from spacy.gold import GoldParse
from spacy.tagger import Tagger
from spacy.scorer import PRFScore
from spacy.tagger import P2_orth, P2_cluster, P2_shape, P2_prefix, P2_suffix, P2_pos, P2_lemma, P2_flags
from spacy.tagger import P1_orth, P1_cluster, P1_shape, P1_prefix, P1_suffix, P1_pos, P1_lemma, P1_flags
from spacy.tagger import W_orth, W_cluster, W_shape, W_prefix, W_suffix, W_pos, W_lemma, W_flags
from spacy.tagger import N1_orth, N1_cluster, N1_shape, N1_prefix, N1_suffix, N1_pos, N1_lemma, N1_flags
from spacy.tagger import N2_orth, N2_cluster, N2_shape, N2_prefix, N2_suffix, N2_pos, N2_lemma, N2_flags, N_CONTEXT_FIELDS
def default_templates():
return spacy.tagger.Tagger.default_templates()
def default_templates_without_clusters():
return (
(W_orth,),
(P1_lemma, P1_pos),
(P2_lemma, P2_pos),
(N1_orth,),
(N2_orth,),
(W_suffix,),
(W_prefix,),
(P1_pos,),
(P2_pos,),
(P1_pos, P2_pos),
(P1_pos, W_orth),
(P1_suffix,),
(N1_suffix,),
(W_shape,),
(W_flags,),
(N1_flags,),
(N2_flags,),
(P1_flags,),
(P2_flags,),
)
def make_tagger(vocab, templates):
model = spacy.tagger.TaggerModel(templates)
    return spacy.tagger.Tagger(vocab, model)
def read_conll(file_):
def sentences():
words, tags = [], []
for line in file_:
line = line.strip()
if line:
                word, tag = line.split('\t')[1::3][:2]  # get columns 1 and 4 (CoNLL09)
words.append(word)
tags.append(tag)
elif words:
yield words, tags
words, tags = [], []
if words:
yield words, tags
    return list(sentences())
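# Worked example of the column slice above, on an invented CoNLL09-style row
# (tab-separated columns: ID, FORM, LEMMA, PLEMMA, POS, PPOS, ...):
#
#     >>> line = '1\tHaus\thaus\thaus\tNN\tNN'
#     >>> line.split('\t')[1::3][:2]
#     ['Haus', 'NN']
#
# [1::3] keeps every third column starting at index 1 (FORM, POS, ...), and
# [:2] truncates that to the first two.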
def score_model(score, nlp, words, gold_tags):
tokens = nlp.tokenizer.tokens_from_list(words)
    assert len(tokens) == len(gold_tags)
    nlp.tagger(tokens)
    for token, gold_tag in zip(tokens, gold_tags):
        score.score_set(set([token.tag_]), set([gold_tag]))
def train(Language, train_sents, dev_sents, model_dir, n_iter=15, seed=21):
# make shuffling deterministic
random.seed(seed)
# set up directory for model
pos_model_dir = path.join(model_dir, 'pos')
if path.exists(pos_model_dir):
shutil.rmtree(pos_model_dir)
os.mkdir(pos_model_dir)
nlp = Language(data_dir=model_dir, tagger=False, parser=False, entity=False)
    nlp.tagger = make_tagger(nlp.vocab, default_templates())
print("Itn.\ttrain acc %\tdev acc %")
for itn in range(n_iter):
# train on train set
#train_acc = PRFScore()
correct, total = 0., 0.
for words, gold_tags in train_sents:
tokens = nlp.tokenizer.tokens_from_list(words)
correct += nlp.tagger.train(tokens, gold_tags)
total += len(words)
train_acc = correct/total
# test on dev set
dev_acc = PRFScore()
for words, gold_tags in dev_sents:
score_model(dev_acc, nlp, words, gold_tags)
random.shuffle(train_sents)
print('%d:\t%6.2f\t%6.2f' % (itn, 100*train_acc, 100*dev_acc.precision))
print('end training')
nlp.end_training(model_dir)
print('done')
@plac.annotations(
train_loc=("Location of CoNLL 09 formatted training file"),
dev_loc=("Location of CoNLL 09 formatted development file"),
model_dir=("Location of output model directory"),
eval_only=("Skip training, and only evaluate", "flag", "e", bool),
n_iter=("Number of training iterations", "option", "i", int),
)
def main(train_loc, dev_loc, model_dir, eval_only=False, n_iter=15):
# training
if not eval_only:
with io.open(train_loc, 'r', encoding='utf8') as trainfile_, \
io.open(dev_loc, 'r', encoding='utf8') as devfile_:
train_sents = read_conll(trainfile_)
dev_sents = read_conll(devfile_)
train(German, train_sents, dev_sents, model_dir, n_iter=n_iter)
# testing
with io.open(dev_loc, 'r', encoding='utf8') as file_:
dev_sents = read_conll(file_)
nlp = German(data_dir=model_dir)
dev_acc = PRFScore()
for words, gold_tags in dev_sents:
score_model(dev_acc, nlp, words, gold_tags)
print('POS: %6.2f %%' % (100*dev_acc.precision))
if __name__ == '__main__':
plac.call(main)

25
buildbot.json Normal file
View File

@ -0,0 +1,25 @@
{
"build": {
"sdist": [
"pip install -r requirements.txt",
"pip install \"numpy<1.8\"",
"python setup.py sdist"
],
"install": [
"pip install -v source.tar.gz"
],
"wheel": [
"python untar.py source.tar.gz .",
"python setup.py bdist_wheel",
"python cpdist.py dist"
]
},
"test": {
"after": ["install", "wheel"],
"run": [
"python -m spacy.en.download --force"
],
"package": "spacy",
"args": "--tb=native -x --models --vectors --slow"
}
}
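For orientation, a minimal hedged sketch of how a runner might consume this config; only the JSON keys above come from the file, the driver itself is hypothetical:

import json
import subprocess

# Hypothetical driver: run the "sdist" build steps from buildbot.json in order.
with open('buildbot.json') as f:
    config = json.load(f)
for step in config['build']['sdist']:
    subprocess.check_call(step, shell=True)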

95
contributors/NSchrading.md Normal file
View File

@ -0,0 +1,95 @@
Syllogism Contributor Agreement
===============================
This Syllogism Contributor Agreement (“SCA”) is based on the Oracle Contributor
Agreement. The SCA applies to any contribution that you make to any product or
project managed by us (the “project”), and sets out the intellectual property
rights you grant to us in the contributed materials. The term “us” shall mean
Syllogism Co. The term "you" shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested below
and include the filled-in version with your first pull-request, under the file
contributors/. The name of the file should be your GitHub username, with the
extension .md. For example, the user example_user would create the file
spaCy/contributors/example_user.md.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
1. The term 'contribution' or contributed materials means any source code,
object code, patch, tool, sample, graphic, specification, manual, documentation,
or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and registrations,
in your contribution:
* you hereby assign to us joint ownership, and to the extent that such assignment
is or becomes invalid, ineffective or unenforceable, you hereby grant to us a perpetual,
irrevocable, non-exclusive, worldwide, no-charge, royalty-free, unrestricted license
to exercise all rights under those copyrights. This includes, at our option, the
right to sublicense these same rights to third parties through multiple levels of
sublicensees or other licensing arrangements;
* you agree that each of us can do all things in relation to your contribution
as if each of us were the sole owners, and if one of us makes a derivative work
of your contribution, the one who makes the derivative work (or has it made) will
be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution against
us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and exercise
all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the consent
of, pay or render an accounting to the other for any use or distribution of your
contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable, non-exclusive,
worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer your
contribution in whole or in part, alone or in combination with
or included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through multiple
levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective on
the date you first submitted a contribution to us, even if your submission took
place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of authorship
and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any third
party's copyrights, trademarks, patents, or other intellectual property rights; and
* each contribution shall be in compliance with U.S. export control laws and other
applicable export and import laws. You agree to notify us if you become aware of
any circumstance which would make any of the foregoing representations inaccurate
in any respect. Syllogism Co. may publicly disclose your participation in the project,
including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable U.S.
Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
_x__ I am signing on behalf of myself as an individual and no other person or entity, including my employer, has or will have rights with respect to my contributions.
____ I am signing on behalf of my employer or a legal entity and I have the actual authority to contractually bind that entity.
| Field | Entry |
|------------------------------- | -------------------- |
| Name | J Nicolas Schrading |
| Company's name (if applicable) | |
| Title or Role (if applicable) | |
| Date | 2015-08-24 |
| GitHub username | NSchrading |
| Website (optional) | nicschrading.com |

95
contributors/chrisdubois.md Normal file
View File

@ -0,0 +1,95 @@
Syllogism Contributor Agreement
===============================
This Syllogism Contributor Agreement (“SCA”) is based on the Oracle Contributor
Agreement. The SCA applies to any contribution that you make to any product or
project managed by us (the “project”), and sets out the intellectual property
rights you grant to us in the contributed materials. The term “us” shall mean
Syllogism Co. The term "you" shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested below
and include the filled-in version with your first pull-request, under the file
contributors/. The name of the file should be your GitHub username, with the
extension .md. For example, the user example_user would create the file
spaCy/contributors/example_user.md.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
1. The term 'contribution' or contributed materials means any source code,
object code, patch, tool, sample, graphic, specification, manual, documentation,
or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and registrations,
in your contribution:
* you hereby assign to us joint ownership, and to the extent that such assignment
is or becomes invalid, ineffective or unenforceable, you hereby grant to us a perpetual,
irrevocable, non-exclusive, worldwide, no-charge, royalty-free, unrestricted license
to exercise all rights under those copyrights. This includes, at our option, the
right to sublicense these same rights to third parties through multiple levels of
sublicensees or other licensing arrangements;
* you agree that each of us can do all things in relation to your contribution
as if each of us were the sole owners, and if one of us makes a derivative work
of your contribution, the one who makes the derivative work (or has it made) will
be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution against
us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and exercise
all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the consent
of, pay or render an accounting to the other for any use or distribution of your
contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable, non-exclusive,
worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer your
contribution in whole or in part, alone or in combination with
or included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through multiple
levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective on
the date you first submitted a contribution to us, even if your submission took
place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of authorship
and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any third
party's copyrights, trademarks, patents, or other intellectual property rights; and
* each contribution shall be in compliance with U.S. export control laws and other
applicable export and import laws. You agree to notify us if you become aware of
any circumstance which would make any of the foregoing representations inaccurate
in any respect. Syllogism Co. may publicly disclose your participation in the project,
including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable U.S.
Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
x I am signing on behalf of myself as an individual and no other person or entity, including my employer, has or will have rights with respect to my contributions.
____ I am signing on behalf of my employer or a legal entity and I have the actual authority to contractually bind that entity.
| Field | Entry |
|------------------------------- | -------------------- |
| Name | Chris DuBois |
| Company's name (if applicable) | |
| Title or Role (if applicable) | |
| Date | 2015.10.07 |
| GitHub username | chrisdubois |
| Website (optional) | |

13
contributors/cla.md Normal file
View File

@ -0,0 +1,13 @@
Signing the Contributors License Agreement
==========================================
spaCy is a commercial open-source project, owned by Syllogism Co. We require that contributors to spaCy sign our Contributors License Agreement, which is based on the Oracle Contributor Agreement.
The CLA must be signed on your first pull request. To do this, simply fill in the file cla_template.md and include the filled-in form in your first pull request.
$ git clone https://github.com/honnibal/spaCy
$ cp spaCy/contributors/cla_template.md spaCy/contributors/<your GitHub username>.md
<Now fill in the file spaCy/contributors/<your GitHub username>.md>
$ git add -A spaCy/contributors/<your GitHub username>.md
Now finish your pull request, and you're done.

95
contributors/cla_template.md Normal file
View File

@ -0,0 +1,95 @@
Syllogism Contributor Agreement
===============================
This Syllogism Contributor Agreement (“SCA”) is based on the Oracle Contributor
Agreement. The SCA applies to any contribution that you make to any product or
project managed by us (the “project”), and sets out the intellectual property
rights you grant to us in the contributed materials. The term “us” shall mean
Syllogism Co. The term "you" shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested below
and include the filled-in version with your first pull-request, under the file
contributors/. The name of the file should be your GitHub username, with the
extension .md. For example, the user example_user would create the file
spaCy/contributors/example_user.md.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
1. The term 'contribution' or contributed materials means any source code,
object code, patch, tool, sample, graphic, specification, manual, documentation,
or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and registrations,
in your contribution:
* you hereby assign to us joint ownership, and to the extent that such assignment
is or becomes invalid, ineffective or unenforceable, you hereby grant to us a perpetual,
irrevocable, non-exclusive, worldwide, no-charge, royalty-free, unrestricted license
to exercise all rights under those copyrights. This includes, at our option, the
right to sublicense these same rights to third parties through multiple levels of
sublicensees or other licensing arrangements;
* you agree that each of us can do all things in relation to your contribution
as if each of us were the sole owners, and if one of us makes a derivative work
of your contribution, the one who makes the derivative work (or has it made) will
be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution against
us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and exercise
all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the consent
of, pay or render an accounting to the other for any use or distribution of your
contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable, non-exclusive,
worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer your
contribution in whole or in part, alone or in combination with
or included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through multiple
levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective on
the date you first submitted a contribution to us, even if your submission took
place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of authorship
and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any third
party's copyrights, trademarks, patents, or other intellectual property rights; and
* each contribution shall be in compliance with U.S. export control laws and other
applicable export and import laws. You agree to notify us if you become aware of
any circumstance which would make any of the foregoing representations inaccurate
in any respect. Syllogism Co. may publicly disclose your participation in the project,
including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable U.S.
Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
____ I am signing on behalf of myself as an individual and no other person or entity, including my employer, has or will have rights with respect to my contributions.
____ I am signing on behalf of my employer or a legal entity and I have the actual authority to contractually bind that entity.
| Field | Entry |
|------------------------------- | -------------------- |
| Name | |
| Company's name (if applicable) | |
| Title or Role (if applicable) | |
| Date | |
| GitHub username | |
| Website (optional) | |

95
contributors/suchow.md Normal file
View File

@ -0,0 +1,95 @@
Syllogism Contributor Agreement
===============================
This Syllogism Contributor Agreement (“SCA”) is based on the Oracle Contributor
Agreement. The SCA applies to any contribution that you make to any product or
project managed by us (the “project”), and sets out the intellectual property
rights you grant to us in the contributed materials. The term “us” shall mean
Syllogism Co. The term "you" shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested below
and include the filled-in version with your first pull-request, under the file
contributors/. The name of the file should be your GitHub username, with the
extension .md. For example, the user example_user would create the file
spaCy/contributors/example_user.md.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
1. The term 'contribution' or contributed materials means any source code,
object code, patch, tool, sample, graphic, specification, manual, documentation,
or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and registrations,
in your contribution:
* you hereby assign to us joint ownership, and to the extent that such assignment
is or becomes invalid, ineffective or unenforceable, you hereby grant to us a perpetual,
irrevocable, non-exclusive, worldwide, no-charge, royalty-free, unrestricted license
to exercise all rights under those copyrights. This includes, at our option, the
right to sublicense these same rights to third parties through multiple levels of
sublicensees or other licensing arrangements;
* you agree that each of us can do all things in relation to your contribution
as if each of us were the sole owners, and if one of us makes a derivative work
of your contribution, the one who makes the derivative work (or has it made) will
be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution against
us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and exercise
all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the consent
of, pay or render an accounting to the other for any use or distribution of your
contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable, non-exclusive,
worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer your
contribution in whole or in part, alone or in combination with
or included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through multiple
levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective on
the date you first submitted a contribution to us, even if your submission took
place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of authorship
and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any third
party's copyrights, trademarks, patents, or other intellectual property rights; and
* each contribution shall be in compliance with U.S. export control laws and other
applicable export and import laws. You agree to notify us if you become aware of
any circumstance which would make any of the foregoing representations inaccurate
in any respect. Syllogism Co. may publicly disclose your participation in the project,
including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable U.S.
Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
x___ I am signing on behalf of myself as an individual and no other person or entity, including my employer, has or will have rights with respect to my contributions.
____ I am signing on behalf of my employer or a legal entity and I have the actual authority to contractually bind that entity.
| Field | Entry |
|------------------------------- | -------------------- |
| Name | Jordan Suchow |
| Company's name (if applicable) | |
| Title or Role (if applicable) | |
| Date | 2015-04-19 |
| GitHub username | suchow |
| Website (optional) | http://suchow.io |

95
contributors/vsolovyov.md Normal file
View File

@ -0,0 +1,95 @@
Syllogism Contributor Agreement
===============================
This Syllogism Contributor Agreement (“SCA”) is based on the Oracle Contributor
Agreement. The SCA applies to any contribution that you make to any product or
project managed by us (the “project”), and sets out the intellectual property
rights you grant to us in the contributed materials. The term “us” shall mean
Syllogism Co. The term "you" shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested below
and include the filled-in version with your first pull-request, under the file
contributors/. The name of the file should be your GitHub username, with the
extension .md. For example, the user example_user would create the file
spaCy/contributors/example_user.md.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
1. The term 'contribution' or contributed materials means any source code,
object code, patch, tool, sample, graphic, specification, manual, documentation,
or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and registrations,
in your contribution:
* you hereby assign to us joint ownership, and to the extent that such assignment
is or becomes invalid, ineffective or unenforceable, you hereby grant to us a perpetual,
irrevocable, non-exclusive, worldwide, no-charge, royalty-free, unrestricted license
to exercise all rights under those copyrights. This includes, at our option, the
right to sublicense these same rights to third parties through multiple levels of
sublicensees or other licensing arrangements;
* you agree that each of us can do all things in relation to your contribution
as if each of us were the sole owners, and if one of us makes a derivative work
of your contribution, the one who makes the derivative work (or has it made) will
be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution against
us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and exercise
all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the consent
of, pay or render an accounting to the other for any use or distribution of your
contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable, non-exclusive,
worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer your
contribution in whole or in part, alone or in combination with
or included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through multiple
levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective on
the date you first submitted a contribution to us, even if your submission took
place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of authorship
and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any third
party's copyrights, trademarks, patents, or other intellectual property rights; and
* each contribution shall be in compliance with U.S. export control laws and other
applicable export and import laws. You agree to notify us if you become aware of
any circumstance which would make any of the foregoing representations inaccurate
in any respect. Syllogism Co. may publicly disclose your participation in the project,
including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable U.S.
Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
_x__ I am signing on behalf of myself as an individual and no other person or entity, including my employer, has or will have rights with respect to my contributions.
____ I am signing on behalf of my employer or a legal entity and I have the actual authority to contractually bind that entity.
| Field | Entry |
|------------------------------- | -------------------- |
| Name | Vsevolod Solovyov |
| Company's name (if applicable) | |
| Title or Role (if applicable) | |
| Date | 2015-08-24 |
| GitHub username | vsolovyov |
| Website (optional) | |

316709
corpora/en/clusters.txt Normal file

File diff suppressed because it is too large

6
corpora/en/wordnet/AUTHORS Normal file
View File

@ -0,0 +1,6 @@
Cognitive Science Laboratory
Princeton University
http://wordnet.princeton.edu
wordnet@princeton.edu

31
corpora/en/wordnet/COPYING Normal file
View File

@ -0,0 +1,31 @@
WordNet Release 3.0
This software and database is being provided to you, the LICENSEE, by
Princeton University under the following license. By obtaining, using
and/or copying this software and database, you agree that you have
read, understood, and will comply with these terms and conditions:
Permission to use, copy, modify and distribute this software and
database and its documentation for any purpose and without fee or
royalty is hereby granted, provided that you agree to comply with
the following copyright notice and statements, including the disclaimer,
and that the same appear on ALL copies of the software, database and
documentation, including modifications that you make for internal
use or for distribution.
WordNet 3.0 Copyright 2006 by Princeton University. All rights reserved.
THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON
UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON
UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANT-
ABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE
OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT
INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR
OTHER RIGHTS.
The name of Princeton University or Princeton may not be used in
advertising or publicity pertaining to distribution of the software
and/or database. Title to copyright in this software, database and
any associated documentation shall at all times remain with
Princeton University and LICENSEE agrees to preserve same.

9
corpora/en/wordnet/ChangeLog Normal file
View File

@ -0,0 +1,9 @@
Changes between WordNet 2.1 and 3.0
Some changes were made to the graphical interface and WordNet library
with regard to adjective and adverb searches. The adjective search
"Synonyms/Related Nouns" was relabeled "Synonyms", and, similarly, the
adverb search "Synonyms/Stem Adjectives" was relabeled "Synonyms". A
separate "Related Noun" search was inserted for adjectives, and a
separate "Base Adjective" search was added for adverbs.

312
corpora/en/wordnet/INSTALL Normal file
View File

@ -0,0 +1,312 @@
WordNet 3.0 Installation Instructions
Beginning with Version 2.1, we have changed the Unix package to a GNU
Autotools package. With Autotools, a system independent installation
process builds and installs WordNet on your specific platform. Read
both the `Basic Installation' and `WordNet Installation' sections
below before attempting to build and install WordNet.
See the `Running WordNet' section for important information concerning
environment variables and the commands to run WordNet.
The WordNet browser makes use of the open source Tcl and Tk
packages. Many systems come with either or both pre-installed. If
your system doesn't (some systems have Tcl installed, but not Tk),
Tcl/Tk can be downloaded from:
Linux - http://www.tcl.tk/
OS X - http://tcltkaqua.sourceforge.net/ (note that 10.4 comes with
Tcl/Tk preinstalled, but earlier versions may not)
Some Linux systems come with the Tcl/Tk libraries installed, but not
all the header files. If your build fails due to missing Tk headers, a
subset that may be sufficient on your system can be found in the
"include/tk" directory. Copy the header files to the "include" directory
and try the make again. If it fails, you should download and install
a full copy of Tcl and/or Tk from the site above.
Tcl and Tk must be installed BEFORE you build and install WordNet. You
must also have a C compiler before installing Tcl/Tk or WordNet.
WordNet has been built and tested with the GNU gcc compiler. This is
pre-installed on most Unix systems, and can be downloaded from:
http://gcc.gnu.org/
Basic Installation
==================
********************************************************************
These are generic installation instructions. Details specific to
WordNet follow in the `WordNet Installation' section below.
********************************************************************
The `configure' shell script attempts to guess correct values for
various system-dependent variables used during compilation. It uses
those values to create a `Makefile' in each directory of the package.
It may also create one or more `.h' files containing system-dependent
definitions. Finally, it creates a shell script `config.status' that
you can run in the future to recreate the current configuration, and a
file `config.log' containing compiler output (useful mainly for
debugging `configure').
It can also use an optional file (typically called `config.cache'
and enabled with `--cache-file=config.cache' or simply `-C') that saves
the results of its tests to speed up reconfiguring. (Caching is
disabled by default to prevent problems with accidental use of stale
cache files.)
The simplest way to compile this package is:
1. `cd' to the directory containing the package's source code and type
`./configure' to configure the package for your system. If you're
using `csh' on an old version of System V, you might need to type
`sh ./configure' instead to prevent `csh' from trying to execute
`configure' itself.
Running `configure' takes a while. While running, it prints some
messages telling which features it is checking for.
2. Type `make' to compile the package.
3. Type `make install' to install the programs and any data files and
documentation.
4. You can remove the program binaries and object files from the
source code directory by typing `make clean'. To also remove the
files that `configure' created (so you can compile the package for
a different kind of computer), type `make distclean'. There is
also a `make maintainer-clean' target, but that is intended mainly
for the package's developers. If you use it, you may have to get
all sorts of other programs in order to regenerate files that came
with the distribution.
Compilers and Options
=====================
Some systems require unusual options for compilation or linking that
the `configure' script does not know about. Run `./configure --help'
for details on some of the pertinent environment variables.
You can give `configure' initial values for configuration parameters
by setting variables in the command line or in the environment. Here
is an example:
./configure CC=c89 CFLAGS=-O2 LIBS=-lposix
*Note Defining Variables::, for more details.
Compiling For Multiple Architectures
====================================
You can compile the package for more than one kind of computer at the
same time, by placing the object files for each architecture in their
own directory. To do this, you must use a version of `make' that
supports the `VPATH' variable, such as GNU `make'. `cd' to the
directory where you want the object files and executables to go and run
the `configure' script. `configure' automatically checks for the
source code in the directory that `configure' is in and in `..'.
If you have to use a `make' that does not support the `VPATH'
variable, you have to compile the package for one architecture at a
time in the source code directory. After you have installed the
package for one architecture, use `make distclean' before reconfiguring
for another architecture.
Installation Names
==================
By default, `make install' will install the package's files in
`/usr/local/bin', `/usr/local/man', etc. You can specify an
installation prefix other than `/usr/local' by giving `configure' the
option `--prefix=PATH'.
You can specify separate installation prefixes for
architecture-specific files and architecture-independent files. If you
give `configure' the option `--exec-prefix=PATH', the package will use
PATH as the prefix for installing programs and libraries.
Documentation and other data files will still use the regular prefix.
In addition, if you use an unusual directory layout you can give
options like `--bindir=PATH' to specify different values for particular
kinds of files. Run `configure --help' for a list of the directories
you can set and what kinds of files go in them.
If the package supports it, you can cause programs to be installed
with an extra prefix or suffix on their names by giving `configure' the
option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
Optional Features
=================
Some packages pay attention to `--enable-FEATURE' options to
`configure', where FEATURE indicates an optional part of the package.
They may also pay attention to `--with-PACKAGE' options, where PACKAGE
is something like `gnu-as' or `x' (for the X Window System). The
`README' should mention any `--enable-' and `--with-' options that the
package recognizes.
For packages that use the X Window System, `configure' can usually
find the X include and library files automatically, but if it doesn't,
you can use the `configure' options `--x-includes=DIR' and
`--x-libraries=DIR' to specify their locations.
Specifying the System Type
==========================
There may be some features `configure' cannot figure out
automatically, but needs to determine by the type of machine the package
will run on. Usually, assuming the package is built to be run on the
_same_ architectures, `configure' can figure that out, but if it prints
a message saying it cannot guess the machine type, give it the
`--build=TYPE' option. TYPE can either be a short name for the system
type, such as `sun4', or a canonical name which has the form:
CPU-COMPANY-SYSTEM
where SYSTEM can have one of these forms:
OS KERNEL-OS
See the file `config.sub' for the possible values of each field. If
`config.sub' isn't included in this package, then this package doesn't
need to know the machine type.
If you are _building_ compiler tools for cross-compiling, you should
use the `--target=TYPE' option to select the type of system they will
produce code for.
If you want to _use_ a cross compiler, that generates code for a
platform different from the build platform, you should specify the
"host" platform (i.e., that on which the generated programs will
eventually be run) with `--host=TYPE'.
Sharing Defaults
================
If you want to set default values for `configure' scripts to share,
you can create a site shell script called `config.site' that gives
default values for variables like `CC', `cache_file', and `prefix'.
`configure' looks for `PREFIX/share/config.site' if it exists, then
`PREFIX/etc/config.site' if it exists. Or, you can set the
`CONFIG_SITE' environment variable to the location of the site script.
A warning: not all `configure' scripts look for a site script.
Defining Variables
==================
Variables not defined in a site shell script can be set in the
environment passed to `configure'. However, some packages may run
configure again during the build, and the customized values of these
variables may be lost. In order to avoid this problem, you should set
them in the `configure' command line, using `VAR=value'. For example:
./configure CC=/usr/local2/bin/gcc
will cause the specified gcc to be used as the C compiler (unless it is
overridden in the site shell script).
`configure' Invocation
======================
`configure' recognizes the following options to control how it
operates.
`--help'
`-h'
Print a summary of the options to `configure', and exit.
`--version'
`-V'
Print the version of Autoconf used to generate the `configure'
script, and exit.
`--cache-file=FILE'
Enable the cache: use and save the results of the tests in FILE,
traditionally `config.cache'. FILE defaults to `/dev/null' to
disable caching.
`--config-cache'
`-C'
Alias for `--cache-file=config.cache'.
`--quiet'
`--silent'
`-q'
Do not print messages saying which checks are being made. To
suppress all normal output, redirect it to `/dev/null' (any error
messages will still be shown).
`--srcdir=DIR'
Look for the package's source code in directory DIR. Usually
`configure' can determine that directory automatically.
`configure' also accepts some other, not widely useful, options. Run
`configure --help' for more details.
WordNet Installation
====================
By default, WordNet is installed in `/usr/local/WordNet-3.0'. You
must usually be the `root' user to install something here. If you
choose to install WordNet in a different location, you must use the
`--prefix=' option to `configure' and specify an installation
directory.
WordNet relies on the Tcl/Tk package, which you must have installed on
your system prior to building the WordNet package. If you have
installed Tcl/Tk in a non-standard location, you must specify the
`--with-tcl=' and `--with-tk=' options to `configure' and specify the
directory that contains the `tclConfig.sh' and `tkConfig.sh'
configuration scripts, respectively. (Note that these are usually the
same directories.)
If you're running OS X and installed the Aqua Tcl/Tk package from the
web site above, use the following settings:
--with-tcl=/Library/Frameworks/Tcl.framework
--with-tk=/Library/Frameworks/Tk.framework
If `configure' can't find either `tclConfig.sh' or `tkConfig.sh', it
will print an error and stop processing.
After successfully running `configure', you must then build and
install WordNet using these commands:
make
make install
Running WordNet
===============
In order to run WordNet, you must set your PATH variable to include
the directory that contains the WordNet binaries. By default, WordNet
is installed in `/usr/local/WordNet-3.0'.
Several other environment variables may need to be set in order to
run WordNet on your system:
PATH - should include either `/usr/local/WordNet-3.0/bin' or the path
you specified with the `--prefix=' option to `configure', unless you
installed WordNet in a directory that is already in your path.
WNHOME - if you did not install in the default location, you must set
this environment variable to the value you specified on the `--prefix='
option. This tells the WordNet browser where to find the database files.
LD_LIBRARY_PATH - may need to be set to the location of the Tcl/Tk
libraries.
TK_LIBRARY - on OS X, may need to be set to the directory that
contains the `tk.tcl' file (usually a subdirectory of where the Tk
library is installed).
The command `wnb' starts the WordNet browser application. If any
of the above variables is not set, or not set properly, an error will
occur when you run `wnb'.
The command line interface is run with the `wn' command. The `PATH' and
`WNHOME' environment variables must also be set.
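As a concrete illustration, a minimal sketch (assuming the default install prefix; adjust the paths if you used `--prefix=') of setting these variables from Python before launching the browser:

import os
import subprocess

# Assumes the default /usr/local/WordNet-3.0 prefix described above.
env = dict(os.environ)
env['WNHOME'] = '/usr/local/WordNet-3.0'
env['PATH'] = env['WNHOME'] + '/bin:' + env.get('PATH', '')
subprocess.call(['wnb'], env=env)  # or e.g. ['wn', 'dog', '-over'] for the CLI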

31
corpora/en/wordnet/LICENSE Normal file
View File

@ -0,0 +1,31 @@
WordNet Release 3.0
This software and database is being provided to you, the LICENSEE, by
Princeton University under the following license. By obtaining, using
and/or copying this software and database, you agree that you have
read, understood, and will comply with these terms and conditions:
Permission to use, copy, modify and distribute this software and
database and its documentation for any purpose and without fee or
royalty is hereby granted, provided that you agree to comply with
the following copyright notice and statements, including the disclaimer,
and that the same appear on ALL copies of the software, database and
documentation, including modifications that you make for internal
use or for distribution.
WordNet 3.0 Copyright 2006 by Princeton University. All rights reserved.
THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON
UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON
UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANT-
ABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE
OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT
INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR
OTHER RIGHTS.
The name of Princeton University or Princeton may not be used in
advertising or publicity pertaining to distribution of the software
and/or database. Title to copyright in this software, database and
any associated documentation shall at all times remain with
Princeton University and LICENSEE agrees to preserve same.

2
corpora/en/wordnet/Makefile.am Normal file
View File

@ -0,0 +1,2 @@
EXTRA_DIST = README ChangeLog COPYING INSTALL AUTHORS LICENSE doc dict include
SUBDIRS = doc dict include lib src

569
corpora/en/wordnet/Makefile.in Normal file
View File

@ -0,0 +1,569 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = .
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = @INSTALL@
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = .
DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(srcdir)/config.h.in \
$(top_srcdir)/configure AUTHORS COPYING ChangeLog INSTALL NEWS \
compile depcomp install-sh missing
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
configure.lineno configure.status.lineno
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
html-recursive info-recursive install-data-recursive \
install-exec-recursive install-info-recursive \
install-recursive installcheck-recursive installdirs-recursive \
pdf-recursive ps-recursive uninstall-info-recursive \
uninstall-recursive
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = $(SUBDIRS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
distdir = $(PACKAGE)-$(VERSION)
top_distdir = $(distdir)
am__remove_distdir = \
{ test ! -d $(distdir) \
|| { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \
&& rm -fr $(distdir); }; }
DIST_ARCHIVES = $(distdir).tar.gz
GZIP_ENV = --best
distuninstallcheck_listfiles = find . -type f -print
distcleancheck_listfiles = find . -type f -print
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
AMDEP_TRUE = @AMDEP_TRUE@
AMTAR = @AMTAR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
OBJEXT = @OBJEXT@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
TCL_INCLUDE_SPEC = @TCL_INCLUDE_SPEC@
TCL_LIB_SPEC = @TCL_LIB_SPEC@
TK_LIBS = @TK_LIBS@
TK_LIB_SPEC = @TK_LIB_SPEC@
TK_PREFIX = @TK_PREFIX@
TK_XINCLUDES = @TK_XINCLUDES@
VERSION = @VERSION@
ac_ct_CC = @ac_ct_CC@
ac_ct_RANLIB = @ac_ct_RANLIB@
ac_ct_STRIP = @ac_ct_STRIP@
ac_prefix = @ac_prefix@
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build_alias = @build_alias@
datadir = @datadir@
exec_prefix = @exec_prefix@
host_alias = @host_alias@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
prefix = @prefix@
program_transform_name = @program_transform_name@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
EXTRA_DIST = README ChangeLog COPYING INSTALL AUTHORS LICENSE doc dict include
SUBDIRS = doc dict include lib src
all: config.h
$(MAKE) $(AM_MAKEFLAGS) all-recursive
.SUFFIXES:
am--refresh:
@:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \
cd $(srcdir) && $(AUTOMAKE) --gnu \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
echo ' $(SHELL) ./config.status'; \
$(SHELL) ./config.status;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
$(SHELL) ./config.status --recheck
$(top_srcdir)/configure: $(am__configure_deps)
cd $(srcdir) && $(AUTOCONF)
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
config.h: stamp-h1
@if test ! -f $@; then \
rm -f stamp-h1; \
$(MAKE) stamp-h1; \
else :; fi
stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
@rm -f stamp-h1
cd $(top_builddir) && $(SHELL) ./config.status config.h
$(srcdir)/config.h.in: $(am__configure_deps)
cd $(top_srcdir) && $(AUTOHEADER)
rm -f stamp-h1
touch $@
distclean-hdr:
-rm -f config.h stamp-h1
uninstall-info-am:
# This directory's subdirectories are mostly independent; you can cd
# into them and run `make' without going through this Makefile.
# To change the values of `make' variables: instead of editing Makefiles,
# (1) if the variable is set in `config.status', edit `config.status'
# (which will cause the Makefiles to be regenerated when you run `make');
# (2) otherwise, pass the desired values on the `make' command line.
$(RECURSIVE_TARGETS):
@set fnord $$MAKEFLAGS; amf=$$2; \
dot_seen=no; \
target=`echo $@ | sed s/-recursive//`; \
list='$(SUBDIRS)'; for subdir in $$list; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
dot_seen=yes; \
local_target="$$target-am"; \
else \
local_target="$$target"; \
fi; \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
done; \
if test "$$dot_seen" = "no"; then \
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
mostlyclean-recursive clean-recursive distclean-recursive \
maintainer-clean-recursive:
@set fnord $$MAKEFLAGS; amf=$$2; \
dot_seen=no; \
case "$@" in \
distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
*) list='$(SUBDIRS)' ;; \
esac; \
rev=''; for subdir in $$list; do \
if test "$$subdir" = "."; then :; else \
rev="$$subdir $$rev"; \
fi; \
done; \
rev="$$rev ."; \
target=`echo $@ | sed s/-recursive//`; \
for subdir in $$rev; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
local_target="$$target-am"; \
else \
local_target="$$target"; \
fi; \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
done && test -z "$$fail"
tags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
done
ctags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
done
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
mkid -fID $$unique
tags: TAGS
TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
include_option=--etags-include; \
empty_fix=.; \
else \
include_option=--include; \
empty_fix=; \
fi; \
list='$(SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test ! -f $$subdir/TAGS || \
tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
fi; \
done; \
list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$tags $$unique; \
fi
ctags: CTAGS
CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(CTAGS_ARGS)$$tags$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$tags $$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& cd $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) $$here
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
$(am__remove_distdir)
mkdir $(distdir)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test -d "$(distdir)/$$subdir" \
|| $(mkdir_p) "$(distdir)/$$subdir" \
|| exit 1; \
distdir=`$(am__cd) $(distdir) && pwd`; \
top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
(cd $$subdir && \
$(MAKE) $(AM_MAKEFLAGS) \
top_distdir="$$top_distdir" \
distdir="$$distdir/$$subdir" \
distdir) \
|| exit 1; \
fi; \
done
-find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \
! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
! -type d ! -perm -400 -exec chmod a+r {} \; -o \
! -type d ! -perm -444 -exec $(SHELL) $(install_sh) -c -m a+r {} {} \; \
|| chmod -R a+r $(distdir)
dist-gzip: distdir
tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
$(am__remove_distdir)
dist-bzip2: distdir
tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2
$(am__remove_distdir)
dist-tarZ: distdir
tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
$(am__remove_distdir)
dist-shar: distdir
shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
$(am__remove_distdir)
dist-zip: distdir
-rm -f $(distdir).zip
zip -rq $(distdir).zip $(distdir)
$(am__remove_distdir)
dist dist-all: distdir
tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
$(am__remove_distdir)
# This target untars the dist file and tries a VPATH configuration. Then
# it guarantees that the distribution is self-contained by making another
# tarfile.
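# A typical invocation (the flag value here is only illustrative); any
# DISTCHECK_CONFIGURE_FLAGS are passed to the inner configure run below:
#   make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-tcl=/usr/local/lib'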
distcheck: dist
case '$(DIST_ARCHIVES)' in \
*.tar.gz*) \
GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\
*.tar.bz2*) \
bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\
*.tar.Z*) \
uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
*.shar.gz*) \
GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\
*.zip*) \
unzip $(distdir).zip ;;\
esac
chmod -R a-w $(distdir); chmod a+w $(distdir)
mkdir $(distdir)/_build
mkdir $(distdir)/_inst
chmod a-w $(distdir)
dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
&& dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
&& cd $(distdir)/_build \
&& ../configure --srcdir=.. --prefix="$$dc_install_base" \
$(DISTCHECK_CONFIGURE_FLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) dvi \
&& $(MAKE) $(AM_MAKEFLAGS) check \
&& $(MAKE) $(AM_MAKEFLAGS) install \
&& $(MAKE) $(AM_MAKEFLAGS) installcheck \
&& $(MAKE) $(AM_MAKEFLAGS) uninstall \
&& $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
distuninstallcheck \
&& chmod -R a-w "$$dc_install_base" \
&& ({ \
(cd ../.. && umask 077 && mkdir "$$dc_destdir") \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
} || { rm -rf "$$dc_destdir"; exit 1; }) \
&& rm -rf "$$dc_destdir" \
&& $(MAKE) $(AM_MAKEFLAGS) dist \
&& rm -rf $(DIST_ARCHIVES) \
&& $(MAKE) $(AM_MAKEFLAGS) distcleancheck
$(am__remove_distdir)
@(echo "$(distdir) archives ready for distribution: "; \
list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
sed -e '1{h;s/./=/g;p;x;}' -e '$${p;x;}'
distuninstallcheck:
@cd $(distuninstallcheck_dir) \
&& test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \
|| { echo "ERROR: files left after uninstall:" ; \
if test -n "$(DESTDIR)"; then \
echo " (check DESTDIR support)"; \
fi ; \
$(distuninstallcheck_listfiles) ; \
exit 1; } >&2
distcleancheck: distclean
@if test '$(srcdir)' = . ; then \
echo "ERROR: distcleancheck can only run from a VPATH build" ; \
exit 1 ; \
fi
@test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
|| { echo "ERROR: files left in build directory after distclean:" ; \
$(distcleancheck_listfiles) ; \
exit 1; } >&2
check-am: all-am
check: check-recursive
all-am: Makefile config.h
installdirs: installdirs-recursive
installdirs-am:
install: install-recursive
install-exec: install-exec-recursive
install-data: install-data-recursive
uninstall: uninstall-recursive
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-recursive
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-recursive
clean-am: clean-generic mostlyclean-am
distclean: distclean-recursive
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -f Makefile
distclean-am: clean-am distclean-generic distclean-hdr distclean-tags
dvi: dvi-recursive
dvi-am:
html: html-recursive
info: info-recursive
info-am:
install-data-am:
install-exec-am:
install-info: install-info-recursive
install-man:
installcheck-am:
maintainer-clean: maintainer-clean-recursive
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -rf $(top_srcdir)/autom4te.cache
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-recursive
mostlyclean-am: mostlyclean-generic
pdf: pdf-recursive
pdf-am:
ps: ps-recursive
ps-am:
uninstall-am: uninstall-info-am
uninstall-info: uninstall-info-recursive
.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am am--refresh check \
check-am clean clean-generic clean-recursive ctags \
ctags-recursive dist dist-all dist-bzip2 dist-gzip dist-shar \
dist-tarZ dist-zip distcheck distclean distclean-generic \
distclean-hdr distclean-recursive distclean-tags \
distcleancheck distdir distuninstallcheck dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-exec install-exec-am install-info \
install-info-am install-man install-strip installcheck \
installcheck-am installdirs installdirs-am maintainer-clean \
maintainer-clean-generic maintainer-clean-recursive \
mostlyclean mostlyclean-generic mostlyclean-recursive pdf \
pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \
uninstall-info-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

0
corpora/en/wordnet/NEWS Normal file
View File

101
corpora/en/wordnet/README Normal file
View File

@ -0,0 +1,101 @@
This is the README file for WordNet 3.0
1. About WordNet
WordNet was developed at Princeton University's Cognitive Science
Laboratory under the direction of George Miller, James S. McDonnell
Distinguished University Professor of Psychology, Emeritus. Over the
years many linguists, lexicographers, students, and software engineers
have contributed to the project.
WordNet is an online lexical reference system. Word forms in WordNet
are represented in their familiar orthography; word meanings are
represented by synonym sets (synsets) - lists of synonymous word forms
that are interchangeable in some context. Two kinds of relations are
recognized: lexical and semantic. Lexical relations hold between word
forms; semantic relations hold between word meanings.
To learn more about WordNet, the book "WordNet: An Electronic Lexical
Database," containing an updated version of "Five Papers on WordNet"
and additional papers by WordNet users, is available from MIT Press:
http://mitpress.mit.edu/book-home.tcl?isbn=026206197X
2. The WordNet Web Site
We maintain a Web site at:
http://wordnet.princeton.edu
Information about WordNet, access to our online interface, and the
various WordNet packages that you can download are available from our
web site. All of the software documentation is available online, as
well as a FAQ. On this site we also have information about other
applications that use WordNet. If you have an application that you
would like included, please send e-mail to the above address.
3. Contacting Us
Ongoing development work and WordNet-related projects are done by a
small group of researchers, lexicographers, and systems programmers.
Since our resources are VERY limited, we request that you please
confine correspondence to WordNet topics only. Please check the
documentation, FAQ, and other resources for the answer to your
question or problem before contacting us.
If you have trouble installing or downloading WordNet, have a bug to
report, or any other problem, please refer to the online FAQ file
first. If you can heal thyself, please do so. The FAQ will be
updated over time. And if you do find a previously unreported
problem, please use our Bug Report Form:
http://wordnet.princeton.edu/cgi-bin/bugsubmit.pl
When reporting a problem, please be as specific as possible, stating
the computer platform you are using, which interface you are using,
and the exact error. The more details you can provide, the more
likely it is that you will get an answer.
There is a WordNet user discussion group mailing list that we invite
our users to join. Users use this list to ask questions of one
another, announce extensions to WordNet that they've developed, and
other topics of general usefulness to the user community.
Information on joining the user discussion list, reporting bugs, and other
contact information is found on our website at:
http://wordnet.princeton.edu/contact
4. Current Release
WordNet Version 3.0 is the latest version available for download. Two
basic database packages are available - one for Windows and one for
Unix platforms (including Mac OS X). See the file ChangeLog (Unix) or
CHANGES.txt (Windows) for a list of changes from previous versions.
WordNet packages can be downloaded from our web site via:
http://wordnet.princeton.edu/obtain
The Windows package is a self-extracting archive that installs itself
when you double-click on it.
Beginning with Version 2.1, we changed the Unix package to a GNU Autotools
package. The WordNet browser makes use of the open source Tcl and Tk
packages. Many systems come with either or both pre-installed. If
your system doesn't (some systems have Tcl installed, but not Tk),
Tcl/Tk can be downloaded from:
http://www.tcl.tk/
Tcl and Tk must be installed BEFORE you compile WordNet. You must also
have a C compiler before installing Tcl/Tk or WordNet. WordNet has
been built and tested with the GNU gcc compiler. This is
pre-installed on most Unix systems, and can be downloaded from:
http://gcc.gnu.org/
See the file INSTALL for detailed WordNet installation instructions.
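As a quick sketch (the INSTALL file is authoritative, and the prefix shown
is just the package default), the usual GNU Autotools sequence applies:

./configure --prefix=/usr/local/WordNet-3.0
make
make install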

333
corpora/en/wordnet/acinclude.m4 Normal file
View File

@ -0,0 +1,333 @@
#------------------------------------------------------------------------
# SC_PATH_TCLCONFIG --
#
# Locate the tclConfig.sh file and perform a sanity check on
# the Tcl compile flags
#
# Arguments:
# none
#
# Results:
#
# Adds the following arguments to configure:
# --with-tcl=...
#
# Defines the following vars:
# TCL_BIN_DIR Full path to the directory containing
# the tclConfig.sh file
#------------------------------------------------------------------------
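# Usage sketch: configure.ac in this package calls SC_PATH_TCLCONFIG and then
# SC_LOAD_TCLCONFIG, after which configure accepts, for example:
#   ./configure --with-tcl=/usr/local/lib
# (the path is illustrative; the directory must contain tclConfig.sh)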
AC_DEFUN(SC_PATH_TCLCONFIG, [
#
# Ok, let's find the tcl configuration
# First, look for one uninstalled.
# the alternative search directory is invoked by --with-tcl
#
if test x"${no_tcl}" = x ; then
# we reset no_tcl in case something fails here
no_tcl=true
AC_ARG_WITH(tcl, [ --with-tcl directory containing tcl configuration (tclConfig.sh)], with_tclconfig=${withval})
AC_MSG_CHECKING([for Tcl configuration])
AC_CACHE_VAL(ac_cv_c_tclconfig,[
# First check to see if --with-tcl was specified.
if test x"${with_tclconfig}" != x ; then
if test -f "${with_tclconfig}/tclConfig.sh" ; then
ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
else
AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh])
fi
fi
# then check for a private Tcl installation
if test x"${ac_cv_c_tclconfig}" = x ; then
for i in \
../tcl \
`ls -dr ../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ../tcl[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ../tcl[[8-9]].[[0-9]]* 2>/dev/null` \
../../tcl \
`ls -dr ../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ../../tcl[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ../../tcl[[8-9]].[[0-9]]* 2>/dev/null` \
../../../tcl \
`ls -dr ../../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ../../../tcl[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ../../../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do
if test -f "$i/unix/tclConfig.sh" ; then
ac_cv_c_tclconfig=`(cd $i/unix; pwd)`
break
fi
done
fi
# check in a few common install locations
if test x"${ac_cv_c_tclconfig}" = x ; then
for i in `ls -d ${libdir} 2>/dev/null` \
`ls -d /usr/local/lib 2>/dev/null` \
`ls -d /usr/contrib/lib 2>/dev/null` \
`ls -d /usr/lib 2>/dev/null` \
`ls -d /usr/lib64 2>/dev/null` \
; do
if test -f "$i/tclConfig.sh" ; then
ac_cv_c_tclconfig=`(cd $i; pwd)`
break
fi
done
fi
# check in a few other private locations
if test x"${ac_cv_c_tclconfig}" = x ; then
for i in \
${srcdir}/../tcl \
`ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do
if test -f "$i/unix/tclConfig.sh" ; then
ac_cv_c_tclconfig=`(cd $i/unix; pwd)`
break
fi
done
fi
])
if test x"${ac_cv_c_tclconfig}" = x ; then
TCL_BIN_DIR="# no Tcl configs found"
AC_MSG_WARN(Can't find Tcl configuration definitions)
exit 0
else
no_tcl=
TCL_BIN_DIR=${ac_cv_c_tclconfig}
AC_MSG_RESULT(found $TCL_BIN_DIR/tclConfig.sh)
fi
fi
])
#------------------------------------------------------------------------
# SC_PATH_TKCONFIG --
#
# Locate the tkConfig.sh file
#
# Arguments:
# none
#
# Results:
#
# Adds the following arguments to configure:
# --with-tk=...
#
# Defines the following vars:
# TK_BIN_DIR Full path to the directory containing
# the tkConfig.sh file
#------------------------------------------------------------------------
AC_DEFUN(SC_PATH_TKCONFIG, [
#
# Ok, let's find the tk configuration
# First, look for one uninstalled.
# the alternative search directory is invoked by --with-tk
#
if test x"${no_tk}" = x ; then
# we reset no_tk in case something fails here
no_tk=true
AC_ARG_WITH(tk, [ --with-tk directory containing tk configuration (tkConfig.sh)], with_tkconfig=${withval})
AC_MSG_CHECKING([for Tk configuration])
AC_CACHE_VAL(ac_cv_c_tkconfig,[
# First check to see if --with-tkconfig was specified.
if test x"${with_tkconfig}" != x ; then
if test -f "${with_tkconfig}/tkConfig.sh" ; then
ac_cv_c_tkconfig=`(cd ${with_tkconfig}; pwd)`
else
AC_MSG_ERROR([${with_tkconfig} directory doesn't contain tkConfig.sh])
fi
fi
# then check for a private Tk library
if test x"${ac_cv_c_tkconfig}" = x ; then
for i in \
../tk \
`ls -dr ../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ../tk[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ../tk[[8-9]].[[0-9]]* 2>/dev/null` \
../../tk \
`ls -dr ../../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ../../tk[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ../../tk[[8-9]].[[0-9]]* 2>/dev/null` \
../../../tk \
`ls -dr ../../../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ../../../tk[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ../../../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do
if test -f "$i/unix/tkConfig.sh" ; then
ac_cv_c_tkconfig=`(cd $i/unix; pwd)`
break
fi
done
fi
# check in a few common install locations
if test x"${ac_cv_c_tkconfig}" = x ; then
for i in `ls -d ${libdir} 2>/dev/null` \
`ls -d /usr/local/lib 2>/dev/null` \
`ls -d /usr/contrib/lib 2>/dev/null` \
`ls -d /usr/lib 2>/dev/null` \
`ls -d /usr/lib64 2>/dev/null` \
; do
if test -f "$i/tkConfig.sh" ; then
ac_cv_c_tkconfig=`(cd $i; pwd)`
break
fi
done
fi
# check in a few other private locations
if test x"${ac_cv_c_tkconfig}" = x ; then
for i in \
${srcdir}/../tk \
`ls -dr ${srcdir}/../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \
`ls -dr ${srcdir}/../tk[[8-9]].[[0-9]] 2>/dev/null` \
`ls -dr ${srcdir}/../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do
if test -f "$i/unix/tkConfig.sh" ; then
ac_cv_c_tkconfig=`(cd $i/unix; pwd)`
break
fi
done
fi
])
if test x"${ac_cv_c_tkconfig}" = x ; then
TK_BIN_DIR="# no Tk configs found"
AC_MSG_WARN(Can't find Tk configuration definitions)
exit 0
else
no_tk=
TK_BIN_DIR=${ac_cv_c_tkconfig}
AC_MSG_RESULT(found $TK_BIN_DIR/tkConfig.sh)
fi
fi
])
#------------------------------------------------------------------------
# SC_LOAD_TCLCONFIG --
#
# Load the tclConfig.sh file
#
# Arguments:
#
# Requires the following vars to be set:
# TCL_BIN_DIR
#
# Results:
#
# Subst the following vars:
# TCL_BIN_DIR
# TCL_SRC_DIR
# TCL_LIB_FILE
#
#------------------------------------------------------------------------
AC_DEFUN(SC_LOAD_TCLCONFIG, [
AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh])
if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
AC_MSG_RESULT([loading])
. $TCL_BIN_DIR/tclConfig.sh
else
AC_MSG_RESULT([file not found])
fi
#
# If the TCL_BIN_DIR is the build directory (not the install directory),
# then set the common variable name to the value of the build variables.
# For example, the variable TCL_LIB_SPEC will be set to the value
# of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC
# instead of TCL_BUILD_LIB_SPEC since it will work with both an
# installed and uninstalled version of Tcl.
#
if test -f $TCL_BIN_DIR/Makefile ; then
TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC}
TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC}
TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH}
fi
#
# eval is required to do the TCL_DBGX substitution
#
eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\""
eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\""
eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
eval "TCL_INCLUDE_SPEC=\"${TCL_INCLUDE_SPEC}\""
eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\""
eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\""
eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\""
# AC_SUBST(TCL_VERSION)
# AC_SUBST(TCL_BIN_DIR)
# AC_SUBST(TCL_SRC_DIR)
# AC_SUBST(TCL_LIB_FILE)
# AC_SUBST(TCL_LIB_FLAG)
AC_SUBST(TCL_LIB_SPEC)
AC_SUBST(TCL_INCLUDE_SPEC)
# AC_SUBST(TCL_STUB_LIB_FILE)
# AC_SUBST(TCL_STUB_LIB_FLAG)
# AC_SUBST(TCL_STUB_LIB_SPEC)
])
#------------------------------------------------------------------------
# SC_LOAD_TKCONFIG --
#
# Load the tkConfig.sh file
#
# Arguments:
#
# Requires the following vars to be set:
# TK_BIN_DIR
#
# Results:
#
# Sets the following vars that should be in tkConfig.sh:
# TK_BIN_DIR
#------------------------------------------------------------------------
AC_DEFUN(SC_LOAD_TKCONFIG, [
AC_MSG_CHECKING([for existence of $TK_BIN_DIR/tkConfig.sh])
if test -f "$TK_BIN_DIR/tkConfig.sh" ; then
AC_MSG_RESULT([loading])
. $TK_BIN_DIR/tkConfig.sh
else
AC_MSG_RESULT([could not find $TK_BIN_DIR/tkConfig.sh])
fi
AC_SUBST(TK_LIB_SPEC)
AC_SUBST(TK_LIBS)
AC_SUBST(TK_XINCLUDES)
AC_SUBST(TK_PREFIX)
# AC_SUBST(TK_VERSION)
# AC_SUBST(TK_BIN_DIR)
# AC_SUBST(TK_SRC_DIR)
# AC_SUBST(TK_LIB_FILE)
])
dnl From Bruno Haible.
AC_DEFUN([AC_LANGINFO_CODESET],
[
AC_CACHE_CHECK([for nl_langinfo and CODESET], am_cv_langinfo_codeset,
[AC_TRY_LINK([#include <langinfo.h>],
[char* cs = nl_langinfo(CODESET);],
am_cv_langinfo_codeset=yes,
am_cv_langinfo_codeset=no)
])
if test $am_cv_langinfo_codeset = yes; then
AC_DEFINE(HAVE_LANGINFO_CODESET, 1,
[Define if you have <langinfo.h> and nl_langinfo(CODESET).])
fi
])

1021
corpora/en/wordnet/aclocal.m4 vendored Normal file

File diff suppressed because it is too large

136
corpora/en/wordnet/compile Executable file
View File

@ -0,0 +1,136 @@
#! /bin/sh
# Wrapper for compilers which do not understand `-c -o'.
scriptversion=2003-11-09.00
# Copyright (C) 1999, 2000, 2003 Free Software Foundation, Inc.
# Written by Tom Tromey <tromey@cygnus.com>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# This file is maintained in Automake, please report
# bugs to <bug-automake@gnu.org> or send patches to
# <automake-patches@gnu.org>.
case $1 in
'')
echo "$0: No command. Try \`$0 --help' for more information." 1>&2
exit 1;
;;
-h | --h*)
cat <<\EOF
Usage: compile [--help] [--version] PROGRAM [ARGS]
Wrapper for compilers which do not understand `-c -o'.
Remove `-o dest.o' from ARGS, run PROGRAM with the remaining
arguments, and rename the output as expected.
If you are trying to build a whole package this is not the
right script to run: please start by reading the file `INSTALL'.
Report bugs to <bug-automake@gnu.org>.
EOF
exit 0
;;
-v | --v*)
echo "compile $scriptversion"
exit 0
;;
esac
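# Example invocation (hypothetical file names), much as configure would run it:
#   compile cc -c -o sub/foo.o sub/foo.c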
prog=$1
shift
ofile=
cfile=
args=
while test $# -gt 0; do
case "$1" in
-o)
# configure might choose to run compile as `compile cc -o foo foo.c'.
# So we do something ugly here.
ofile=$2
shift
case "$ofile" in
*.o | *.obj)
;;
*)
args="$args -o $ofile"
ofile=
;;
esac
;;
*.c)
cfile=$1
args="$args $1"
;;
*)
args="$args $1"
;;
esac
shift
done
if test -z "$ofile" || test -z "$cfile"; then
# If no `-o' option was seen then we might have been invoked from a
# pattern rule where we don't need one. That is ok -- this is a
# normal compilation that the losing compiler can handle. If no
# `.c' file was seen then we are probably linking. That is also
# ok.
exec "$prog" $args
fi
# Name of file we expect compiler to create.
cofile=`echo $cfile | sed -e 's|^.*/||' -e 's/\.c$/.o/'`
# Create the lock directory.
# Note: use `[/.-]' here to ensure that we don't use the same name
# that we are using for the .o file. Also, base the name on the expected
# object file name, since that is what matters with a parallel build.
lockdir=`echo $cofile | sed -e 's|[/.-]|_|g'`.d
while true; do
if mkdir $lockdir > /dev/null 2>&1; then
break
fi
sleep 1
done
# FIXME: race condition here if user kills between mkdir and trap.
trap "rmdir $lockdir; exit 1" 1 2 15
# Run the compile.
"$prog" $args
status=$?
if test -f "$cofile"; then
mv "$cofile" "$ofile"
fi
rmdir $lockdir
exit $status
# Local Variables:
# mode: shell-script
# sh-indentation: 2
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-end: "$"
# End:

86
corpora/en/wordnet/config.h.in Normal file
View File

@ -0,0 +1,86 @@
/* config.h.in. Generated from configure.ac by autoheader. */
/* Default installation prefix. */
#undef DEFAULTPATH
/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H
/* Define if you have <langinfo.h> and nl_langinfo(CODESET). */
#undef HAVE_LANGINFO_CODESET
/* Define to 1 if you have the <locale.h> header file. */
#undef HAVE_LOCALE_H
/* Define to 1 if your system has a GNU libc compatible `malloc' function, and
to 0 otherwise. */
#undef HAVE_MALLOC
/* Define to 1 if you have the <malloc.h> header file. */
#undef HAVE_MALLOC_H
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
/* Define to 1 if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H
/* Define to 1 if you have the `strchr' function. */
#undef HAVE_STRCHR
/* Define to 1 if you have the `strdup' function. */
#undef HAVE_STRDUP
/* Define to 1 if you have the <strings.h> header file. */
#undef HAVE_STRINGS_H
/* Define to 1 if you have the <string.h> header file. */
#undef HAVE_STRING_H
/* Define to 1 if you have the `strrchr' function. */
#undef HAVE_STRRCHR
/* Define to 1 if you have the `strstr' function. */
#undef HAVE_STRSTR
/* Define to 1 if you have the `strtol' function. */
#undef HAVE_STRTOL
/* Define to 1 if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define to 1 if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H
/* Define to 1 if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
/* Name of package */
#undef PACKAGE
/* Define to the address where bug reports for this package should be sent. */
#undef PACKAGE_BUGREPORT
/* Define to the full name of this package. */
#undef PACKAGE_NAME
/* Define to the full name and version of this package. */
#undef PACKAGE_STRING
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the version of this package. */
#undef PACKAGE_VERSION
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
/* Version number of package */
#undef VERSION
/* Define to rpl_malloc if the replacement function should be used. */
#undef malloc

5755
corpora/en/wordnet/configure vendored Executable file

File diff suppressed because it is too large

74
corpora/en/wordnet/configure.ac Normal file
View File

@ -0,0 +1,74 @@
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
AC_INIT(WordNet, 3.0, [wordnet@princeton.edu], wordnet)
AC_CONFIG_SRCDIR([config.h.in])
AC_CONFIG_HEADER([config.h])
# Checks for programs.
AC_PROG_CC
AC_PROG_RANLIB
AC_PROG_INSTALL
# Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS([locale.h malloc.h stdlib.h string.h])
# Checks for typedefs, structures, and compiler characteristics.
# Checks for library functions.
AC_FUNC_MALLOC
AC_CHECK_FUNCS([strchr strdup strrchr strstr strtol])
# Set HAVE_LANGINFO_CODESET if nl_langinfo is found
AC_LANGINFO_CODESET
AM_INIT_AUTOMAKE(WordNet, 3.0)
SC_PATH_TCLCONFIG
SC_PATH_TKCONFIG
SC_LOAD_TCLCONFIG
SC_LOAD_TKCONFIG
# Set default installation prefix.
AC_PREFIX_DEFAULT([/usr/local/WordNet-3.0])
ac_prefix=$prefix
if test "x$ac_prefix" = "xNONE"; then
ac_prefix=$ac_default_prefix
fi
AC_SUBST(ac_prefix)
AH_TEMPLATE([DEFAULTPATH],[The default search path for WordNet data files])
AC_DEFINE_UNQUOTED(DEFAULTPATH, ["$ac_prefix/dict"], [Default installation prefix.])
#AC_DEFINE_UNQUOTED(DEFAULTPATH,"${prefix}/dict")
# This doesn't do anything
AC_CONFIG_COMMANDS([default])
AC_CONFIG_FILES(Makefile dict/Makefile doc/Makefile doc/html/Makefile doc/man/Makefile doc/pdf/Makefile doc/ps/Makefile include/Makefile include/tk/Makefile
src/Makefile lib/Makefile lib/wnres/Makefile)
AC_OUTPUT
AC_MSG_RESULT(
[
WordNet is now configured
Installation directory: ${prefix}
To build and install WordNet:
make
make install
To run, environment variables should be set as follows:
PATH - include ${bindir}
WNHOME - if not using default installation location, set to ${prefix}
See INSTALL file for details and additional environment variables
which may need to be set on your system.
])
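# Example of a non-default configuration (paths illustrative; --with-tcl and
# --with-tk come from the SC_PATH_* macros in acinclude.m4):
#   ./configure --prefix=$HOME/wordnet --with-tcl=/usr/lib --with-tk=/usr/lib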

522
corpora/en/wordnet/depcomp Executable file
View File

@ -0,0 +1,522 @@
#! /bin/sh
# depcomp - compile a program generating dependencies as side-effects
scriptversion=2004-05-31.23
# Copyright (C) 1999, 2000, 2003, 2004 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
case $1 in
'')
echo "$0: No command. Try \`$0 --help' for more information." 1>&2
exit 1;
;;
-h | --h*)
cat <<\EOF
Usage: depcomp [--help] [--version] PROGRAM [ARGS]
Run PROGRAMS ARGS to compile a file, generating dependencies
as side-effects.
Environment variables:
depmode Dependency tracking mode.
source Source file read by `PROGRAMS ARGS'.
object Object file output by `PROGRAMS ARGS'.
DEPDIR directory where to store dependencies.
depfile Dependency file to output.
tmpdepfile Temporary file to use when outputting dependencies.
libtool Whether libtool is used (yes/no).
Report bugs to <bug-automake@gnu.org>.
EOF
exit 0
;;
-v | --v*)
echo "depcomp $scriptversion"
exit 0
;;
esac
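# Example invocation (illustrative values for the variables documented above):
#   depmode=gcc3 source=foo.c object=foo.o depfile=.deps/foo.Po \
#     ./depcomp gcc -c -o foo.o foo.c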
if test -z "$depmode" || test -z "$source" || test -z "$object"; then
echo "depcomp: Variables source, object and depmode must be set" 1>&2
exit 1
fi
# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
depfile=${depfile-`echo "$object" |
sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
rm -f "$tmpdepfile"
# Some modes work just like other modes, but use different flags. We
# parameterize here, but still list the modes in the big case below,
# to make depend.m4 easier to write. Note that we *cannot* use a case
# here, because this file can only contain one case statement.
if test "$depmode" = hp; then
# HP compiler uses -M and no extra arg.
gccflag=-M
depmode=gcc
fi
if test "$depmode" = dashXmstdout; then
# This is just like dashmstdout with a different argument.
dashmflag=-xM
depmode=dashmstdout
fi
case "$depmode" in
gcc3)
## gcc 3 implements dependency tracking that does exactly what
## we want. Yay! Note: for some reason libtool 1.4 doesn't like
## it if -MD -MP comes after the -MF stuff. Hmm.
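## For illustration (hypothetical names): with object=foo.o and
## tmpdepfile=.deps/foo.TPo, the line below runs something like
##   gcc -c -o foo.o foo.c -MT foo.o -MD -MP -MF .deps/foo.TPo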
"$@" -MT "$object" -MD -MP -MF "$tmpdepfile"
stat=$?
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile"
exit $stat
fi
mv "$tmpdepfile" "$depfile"
;;
gcc)
## There are various ways to get dependency output from gcc. Here's
## why we pick this rather obscure method:
## - Don't want to use -MD because we'd like the dependencies to end
## up in a subdir. Having to rename by hand is ugly.
## (We might end up doing this anyway to support other compilers.)
## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
## -MM, not -M (despite what the docs say).
## - Using -M directly means running the compiler twice (even worse
## than renaming).
if test -z "$gccflag"; then
gccflag=-MD,
fi
"$@" -Wp,"$gccflag$tmpdepfile"
stat=$?
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
echo "$object : \\" > "$depfile"
alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
## The second -e expression handles DOS-style file names with drive letters.
sed -e 's/^[^:]*: / /' \
-e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
## This next piece of magic avoids the `deleted header file' problem.
## The problem is that when a header file which appears in a .P file
## is deleted, the dependency causes make to die (because there is
## typically no way to rebuild the header). We avoid this by adding
## dummy dependencies for each header file. Too bad gcc doesn't do
## this for us directly.
tr ' ' '
' < "$tmpdepfile" |
## Some versions of gcc put a space before the `:'. On the theory
## that the space means something, we add a space to the output as
## well.
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
hp)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
sgi)
if test "$libtool" = yes; then
"$@" "-Wp,-MDupdate,$tmpdepfile"
else
"$@" -MDupdate "$tmpdepfile"
fi
stat=$?
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files
echo "$object : \\" > "$depfile"
# Clip off the initial element (the dependent). Don't try to be
# clever and replace this with sed code, as IRIX sed won't handle
# lines with more than a fixed number of characters (4096 in
# IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
# the IRIX cc adds comments like `#:fec' to the end of the
# dependency line.
tr ' ' '
' < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
tr '
' ' ' >> $depfile
echo >> $depfile
# The second pass generates a dummy entry for each header file.
tr ' ' '
' < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
>> $depfile
else
# The sourcefile does not contain any dependencies, so just
# store a dummy comment line, to avoid errors with the Makefile
# "include basename.Plo" scheme.
echo "#dummy" > "$depfile"
fi
rm -f "$tmpdepfile"
;;
aix)
# The C for AIX Compiler uses -M and outputs the dependencies
# in a .u file. In older versions, this file always lives in the
# current directory. Also, the AIX compiler puts `$object:' at the
# start of each line; $object doesn't have directory information.
# Version 6 uses the directory in both cases.
stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'`
tmpdepfile="$stripped.u"
if test "$libtool" = yes; then
"$@" -Wc,-M
else
"$@" -M
fi
stat=$?
if test -f "$tmpdepfile"; then :
else
stripped=`echo "$stripped" | sed 's,^.*/,,'`
tmpdepfile="$stripped.u"
fi
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile"
exit $stat
fi
if test -f "$tmpdepfile"; then
outname="$stripped.o"
# Each line is of the form `foo.o: dependent.h'.
# Do two passes, one to just change these to
# `$object: dependent.h' and one to simply `dependent.h:'.
sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile"
sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile"
else
# The sourcefile does not contain any dependencies, so just
# store a dummy comment line, to avoid errors with the Makefile
# "include basename.Plo" scheme.
echo "#dummy" > "$depfile"
fi
rm -f "$tmpdepfile"
;;
icc)
# Intel's C compiler understands `-MD -MF file'. However on
# icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c
# ICC 7.0 will fill foo.d with something like
# foo.o: sub/foo.c
# foo.o: sub/foo.h
# which is wrong. We want:
# sub/foo.o: sub/foo.c
# sub/foo.o: sub/foo.h
# sub/foo.c:
# sub/foo.h:
# ICC 7.1 will output
# foo.o: sub/foo.c sub/foo.h
# and will wrap long lines using \ :
# foo.o: sub/foo.c ... \
# sub/foo.h ... \
# ...
"$@" -MD -MF "$tmpdepfile"
stat=$?
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
# Each line is of the form `foo.o: dependent.h',
# or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
# Do two passes, one to just change these to
# `$object: dependent.h' and one to simply `dependent.h:'.
sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
# Some versions of the HPUX 10.20 sed can't process this invocation
# correctly. Breaking it into two sed invocations is a workaround.
sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" |
sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
tru64)
# The Tru64 compiler uses -MD to generate dependencies as a side
# effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'.
# At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
# dependencies in `foo.d' instead, so we check for that too.
# Subdirectories are respected.
dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
test "x$dir" = "x$object" && dir=
base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
if test "$libtool" = yes; then
# Dependencies are output in .lo.d with libtool 1.4.
# With libtool 1.5 they are output both in $dir.libs/$base.o.d
# and in $dir.libs/$base.o.d and $dir$base.o.d. We process the
# latter, because the former will be cleaned when $dir.libs is
# erased.
tmpdepfile1="$dir.libs/$base.lo.d"
tmpdepfile2="$dir$base.o.d"
tmpdepfile3="$dir.libs/$base.d"
"$@" -Wc,-MD
else
tmpdepfile1="$dir$base.o.d"
tmpdepfile2="$dir$base.d"
tmpdepfile3="$dir$base.d"
"$@" -MD
fi
stat=$?
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
exit $stat
fi
if test -f "$tmpdepfile1"; then
tmpdepfile="$tmpdepfile1"
elif test -f "$tmpdepfile2"; then
tmpdepfile="$tmpdepfile2"
else
tmpdepfile="$tmpdepfile3"
fi
if test -f "$tmpdepfile"; then
sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
# That's a tab and a space in the [].
sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
else
echo "#dummy" > "$depfile"
fi
rm -f "$tmpdepfile"
;;
#nosideeffect)
# This comment above is used by automake to tell side-effect
# dependency tracking mechanisms from slower ones.
dashmstdout)
# Important note: in order to support this mode, a compiler *must*
# always write the preprocessed file to stdout, regardless of -o.
"$@" || exit $?
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test $1 != '--mode=compile'; do
shift
done
shift
fi
# Remove `-o $object'.
IFS=" "
for arg
do
case $arg in
-o)
shift
;;
$object)
shift
;;
*)
set fnord "$@" "$arg"
shift # fnord
shift # $arg
;;
esac
done
test -z "$dashmflag" && dashmflag=-M
# Require at least two characters before searching for `:'
# in the target name. This is to cope with DOS-style filenames:
# a dependency such as `c:/foo/bar' could be seen as target `c' otherwise.
"$@" $dashmflag |
sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile"
rm -f "$depfile"
cat < "$tmpdepfile" > "$depfile"
tr ' ' '
' < "$tmpdepfile" | \
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
dashXmstdout)
# This case only exists to satisfy depend.m4. It is never actually
# run, as this mode is specially recognized in the preamble.
exit 1
;;
makedepend)
"$@" || exit $?
# Remove any Libtool call
if test "$libtool" = yes; then
while test $1 != '--mode=compile'; do
shift
done
shift
fi
# X makedepend
shift
cleared=no
for arg in "$@"; do
case $cleared in
no)
set ""; shift
cleared=yes ;;
esac
case "$arg" in
-D*|-I*)
set fnord "$@" "$arg"; shift ;;
# Strip any option that makedepend may not understand. Remove
# the object too, otherwise makedepend will parse it as a source file.
-*|$object)
;;
*)
set fnord "$@" "$arg"; shift ;;
esac
done
obj_suffix="`echo $object | sed 's/^.*\././'`"
touch "$tmpdepfile"
${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
rm -f "$depfile"
cat < "$tmpdepfile" > "$depfile"
sed '1,2d' "$tmpdepfile" | tr ' ' '
' | \
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile" "$tmpdepfile".bak
;;
cpp)
# Important note: in order to support this mode, a compiler *must*
# always write the preprocessed file to stdout.
"$@" || exit $?
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test $1 != '--mode=compile'; do
shift
done
shift
fi
# Remove `-o $object'.
IFS=" "
for arg
do
case $arg in
-o)
shift
;;
$object)
shift
;;
*)
set fnord "$@" "$arg"
shift # fnord
shift # $arg
;;
esac
done
"$@" -E |
sed -n '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' |
sed '$ s: \\$::' > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
cat < "$tmpdepfile" >> "$depfile"
sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
msvisualcpp)
# Important note: in order to support this mode, a compiler *must*
# always write the preprocessed file to stdout, regardless of -o,
# because we must use -o when running libtool.
"$@" || exit $?
IFS=" "
for arg
do
case "$arg" in
"-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
set fnord "$@"
shift
shift
;;
*)
set fnord "$@" "$arg"
shift
shift
;;
esac
done
"$@" -E |
sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::echo "`cygpath -u \\"\1\\"`":p' | sort | uniq > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
. "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
echo " " >> "$depfile"
. "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s::\1\::p' >> "$depfile"
rm -f "$tmpdepfile"
;;
none)
exec "$@"
;;
*)
echo "Unknown depmode $depmode" 1>&2
exit 1
;;
esac
exit 0
# Local Variables:
# mode: shell-script
# sh-indentation: 2
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-end: "$"
# End:

314
corpora/en/wordnet/dict/Makefile Normal file
View File

@ -0,0 +1,314 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# dict/Makefile. Generated from Makefile.in by configure.
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
srcdir = .
top_srcdir = ..
pkgdatadir = $(datadir)/WordNet
pkglibdir = $(libdir)/WordNet
pkgincludedir = $(includedir)/WordNet
top_builddir = ..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = /usr/csl/bin/install -c
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = dict
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
am__installdirs = "$(DESTDIR)$(dictdir)"
dictDATA_INSTALL = $(INSTALL_DATA)
DATA = $(dict_DATA)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run aclocal-1.9
AMDEP_FALSE = #
AMDEP_TRUE =
AMTAR = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run tar
AUTOCONF = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoconf
AUTOHEADER = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoheader
AUTOMAKE = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run automake-1.9
AWK = nawk
CC = gcc
CCDEPMODE = depmode=gcc3
CFLAGS = -g -O2
CPP = gcc -E
CPPFLAGS =
CYGPATH_W = echo
DEFS = -DHAVE_CONFIG_H
DEPDIR = .deps
ECHO_C =
ECHO_N = -n
ECHO_T =
EGREP = egrep
EXEEXT =
INSTALL_DATA = ${INSTALL} -m 644
INSTALL_PROGRAM = ${INSTALL}
INSTALL_SCRIPT = ${INSTALL}
INSTALL_STRIP_PROGRAM = ${SHELL} $(install_sh) -c -s
LDFLAGS =
LIBOBJS =
LIBS =
LTLIBOBJS =
MAKEINFO = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run makeinfo
OBJEXT = o
PACKAGE = WordNet
PACKAGE_BUGREPORT = wordnet@princeton.edu
PACKAGE_NAME = WordNet
PACKAGE_STRING = WordNet 3.0
PACKAGE_TARNAME = wordnet
PACKAGE_VERSION = 3.0
PATH_SEPARATOR = :
RANLIB = ranlib
SET_MAKE =
SHELL = /bin/bash
STRIP =
TCL_INCLUDE_SPEC = -I/usr/csl/include
TCL_LIB_SPEC = -L/usr/csl/lib -ltcl8.4
TK_LIBS = -L/usr/openwin/lib -lX11 -ldl -lpthread -lsocket -lnsl -lm
TK_LIB_SPEC = -L/usr/csl/lib -ltk8.4
TK_PREFIX = /usr/csl
TK_XINCLUDES = -I/usr/openwin/include
VERSION = 3.0
ac_ct_CC = gcc
ac_ct_RANLIB = ranlib
ac_ct_STRIP =
ac_prefix = /usr/local/WordNet-3.0
am__fastdepCC_FALSE = #
am__fastdepCC_TRUE =
am__include = include
am__leading_dot = .
am__quote =
am__tar = ${AMTAR} chof - "$$tardir"
am__untar = ${AMTAR} xf -
bindir = ${exec_prefix}/bin
build_alias =
datadir = ${prefix}/share
exec_prefix = ${prefix}
host_alias =
includedir = ${prefix}/include
infodir = ${prefix}/info
install_sh = /people/wn/src/Release/3.0/Unix/install-sh
libdir = ${exec_prefix}/lib
libexecdir = ${exec_prefix}/libexec
localstatedir = ${prefix}/var
mandir = ${prefix}/man
mkdir_p = $(install_sh) -d
oldincludedir = /usr/include
prefix = /usr/local/WordNet-3.0
program_transform_name = s,x,x,
sbindir = ${exec_prefix}/sbin
sharedstatedir = ${prefix}/com
sysconfdir = ${prefix}/etc
target_alias =
dictdir = $(prefix)/dict
dict_DATA = adj.exc adv.exc cntlist cntlist.rev data.adj data.adv data.noun data.verb frames.vrb index.adj index.adv index.noun index.sense index.verb log.grind.3.0 noun.exc sentidx.vrb sents.vrb verb.Framestext verb.exc lexnames
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu dict/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu dict/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
install-dictDATA: $(dict_DATA)
@$(NORMAL_INSTALL)
test -z "$(dictdir)" || $(mkdir_p) "$(DESTDIR)$(dictdir)"
@list='$(dict_DATA)'; for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
f=$(am__strip_dir) \
echo " $(dictDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(dictdir)/$$f'"; \
$(dictDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(dictdir)/$$f"; \
done
uninstall-dictDATA:
@$(NORMAL_UNINSTALL)
@list='$(dict_DATA)'; for p in $$list; do \
f=$(am__strip_dir) \
echo " rm -f '$(DESTDIR)$(dictdir)/$$f'"; \
rm -f "$(DESTDIR)$(dictdir)/$$f"; \
done
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(DATA)
installdirs:
for dir in "$(DESTDIR)$(dictdir)"; do \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
info: info-am
info-am:
install-data-am: install-dictDATA
install-exec-am:
install-info: install-info-am
install-man:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-dictDATA uninstall-info-am
.PHONY: all all-am check check-am clean clean-generic distclean \
distclean-generic distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am \
install-dictDATA install-exec install-exec-am install-info \
install-info-am install-man install-strip installcheck \
installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
pdf-am ps ps-am uninstall uninstall-am uninstall-dictDATA \
uninstall-info-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

2
corpora/en/wordnet/dict/Makefile.am Normal file
View File

@ -0,0 +1,2 @@
dictdir = $(prefix)/dict
dict_DATA = adj.exc adv.exc cntlist cntlist.rev data.adj data.adv data.noun data.verb frames.vrb index.adj index.adv index.noun index.sense index.verb log.grind.3.0 noun.exc sentidx.vrb sents.vrb verb.Framestext verb.exc lexnames

314
corpora/en/wordnet/dict/Makefile.in Normal file
View File

@ -0,0 +1,314 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = ..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = @INSTALL@
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = dict
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
am__installdirs = "$(DESTDIR)$(dictdir)"
dictDATA_INSTALL = $(INSTALL_DATA)
DATA = $(dict_DATA)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
AMDEP_TRUE = @AMDEP_TRUE@
AMTAR = @AMTAR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
OBJEXT = @OBJEXT@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
TCL_INCLUDE_SPEC = @TCL_INCLUDE_SPEC@
TCL_LIB_SPEC = @TCL_LIB_SPEC@
TK_LIBS = @TK_LIBS@
TK_LIB_SPEC = @TK_LIB_SPEC@
TK_PREFIX = @TK_PREFIX@
TK_XINCLUDES = @TK_XINCLUDES@
VERSION = @VERSION@
ac_ct_CC = @ac_ct_CC@
ac_ct_RANLIB = @ac_ct_RANLIB@
ac_ct_STRIP = @ac_ct_STRIP@
ac_prefix = @ac_prefix@
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build_alias = @build_alias@
datadir = @datadir@
exec_prefix = @exec_prefix@
host_alias = @host_alias@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
prefix = @prefix@
program_transform_name = @program_transform_name@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
dictdir = $(prefix)/dict
dict_DATA = adj.exc adv.exc cntlist cntlist.rev data.adj data.adv data.noun data.verb frames.vrb index.adj index.adv index.noun index.sense index.verb log.grind.3.0 noun.exc sentidx.vrb sents.vrb verb.Framestext verb.exc lexnames
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu dict/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu dict/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
install-dictDATA: $(dict_DATA)
@$(NORMAL_INSTALL)
test -z "$(dictdir)" || $(mkdir_p) "$(DESTDIR)$(dictdir)"
@list='$(dict_DATA)'; for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
f=$(am__strip_dir) \
echo " $(dictDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(dictdir)/$$f'"; \
$(dictDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(dictdir)/$$f"; \
done
uninstall-dictDATA:
@$(NORMAL_UNINSTALL)
@list='$(dict_DATA)'; for p in $$list; do \
f=$(am__strip_dir) \
echo " rm -f '$(DESTDIR)$(dictdir)/$$f'"; \
rm -f "$(DESTDIR)$(dictdir)/$$f"; \
done
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(DATA)
installdirs:
for dir in "$(DESTDIR)$(dictdir)"; do \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
info: info-am
info-am:
install-data-am: install-dictDATA
install-exec-am:
install-info: install-info-am
install-man:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-dictDATA uninstall-info-am
.PHONY: all all-am check check-am clean clean-generic distclean \
distclean-generic distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am \
install-dictDATA install-exec install-exec-am install-info \
install-info-am install-man install-strip installcheck \
installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
pdf-am ps ps-am uninstall uninstall-am uninstall-dictDATA \
uninstall-info-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

File diff suppressed because it is too large

View File

@ -0,0 +1,7 @@
best well
better well
deeper deeply
farther far
further far
harder hard
hardest hard
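
The seven entries above form WordNet's adverb exception list (adv.exc in the dict data): each line maps an irregular inflected form to its base form. A minimal sketch, assuming a local copy named adv.exc (the path is not taken from this commit), of loading such a list for lemma lookup:

# Minimal sketch: load a WordNet *.exc exception list into a lookup table.
# Each line is "<inflected form> <base form(s)>"; some exception files list
# more than one base form, so all of them are kept.
exceptions = {}
with open("adv.exc", encoding="utf-8") as f:
    for line in f:
        fields = line.split()
        if len(fields) >= 2:
            exceptions[fields[0]] = fields[1:]

print(exceptions["best"])  # ['well'], per the excerpt above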

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,35 @@
1 Something ----s
2 Somebody ----s
3 It is ----ing
4 Something is ----ing PP
5 Something ----s something Adjective/Noun
6 Something ----s Adjective/Noun
7 Somebody ----s Adjective
8 Somebody ----s something
9 Somebody ----s somebody
10 Something ----s somebody
11 Something ----s something
12 Something ----s to somebody
13 Somebody ----s on something
14 Somebody ----s somebody something
15 Somebody ----s something to somebody
16 Somebody ----s something from somebody
17 Somebody ----s somebody with something
18 Somebody ----s somebody of something
19 Somebody ----s something on somebody
20 Somebody ----s somebody PP
21 Somebody ----s something PP
22 Somebody ----s PP
23 Somebody's (body part) ----s
24 Somebody ----s somebody to INFINITIVE
25 Somebody ----s somebody INFINITIVE
26 Somebody ----s that CLAUSE
27 Somebody ----s to somebody
28 Somebody ----s to INFINITIVE
29 Somebody ----s whether INFINITIVE
30 Somebody ----s somebody into V-ing something
31 Somebody ----s something with something
32 Somebody ----s INFINITIVE
33 Somebody ----s VERB-ing
34 It ----s that CLAUSE
35 Something ----s INFINITIVE
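
The 35 numbered templates above are WordNet's generic verb frames (evidently the frames.vrb data), where "----s" marks the slot filled by an inflected verb. A sketch, assuming a local copy of this frame file named frames.vrb, of parsing it into a number-to-template map:

# Sketch: parse "<frame number> <template>" lines into a dict.
frames = {}
with open("frames.vrb", encoding="utf-8") as f:
    for line in f:
        number, _, template = line.strip().partition(" ")
        if number.isdigit():
            frames[int(number)] = template.strip()

print(frames[8])  # "Somebody ----s something"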

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,45 @@
00 adj.all 3
01 adj.pert 3
02 adv.all 4
03 noun.Tops 1
04 noun.act 1
05 noun.animal 1
06 noun.artifact 1
07 noun.attribute 1
08 noun.body 1
09 noun.cognition 1
10 noun.communication 1
11 noun.event 1
12 noun.feeling 1
13 noun.food 1
14 noun.group 1
15 noun.location 1
16 noun.motive 1
17 noun.object 1
18 noun.person 1
19 noun.phenomenon 1
20 noun.plant 1
21 noun.possession 1
22 noun.process 1
23 noun.quantity 1
24 noun.relation 1
25 noun.shape 1
26 noun.state 1
27 noun.substance 1
28 noun.time 1
29 verb.body 2
30 verb.change 2
31 verb.cognition 2
32 verb.communication 2
33 verb.competition 2
34 verb.consumption 2
35 verb.contact 2
36 verb.creation 2
37 verb.emotion 2
38 verb.motion 2
39 verb.perception 2
40 verb.possession 2
41 verb.social 2
42 verb.stative 2
43 verb.weather 2
44 adj.ppl 3
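
The 45-line table above is WordNet's lexnames file: lexicographer file number, file name, and a syntactic-category code. A sketch of reading it, assuming the standard lexnames(5WN) category convention (1 = NOUN, 2 = VERB, 3 = ADJECTIVE, 4 = ADVERB) and a local file named lexnames:

# Sketch: read lexnames into {file number: (lexicographer file, POS)}.
POS = {1: "NOUN", 2: "VERB", 3: "ADJECTIVE", 4: "ADVERB"}
lexnames = {}
with open("lexnames", encoding="utf-8") as f:
    for line in f:
        number, name, pos = line.split()
        lexnames[int(number)] = (name, POS[int(pos)])

print(lexnames[18])  # ('noun.person', 'NOUN')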

View File

@ -0,0 +1,89 @@
Processing adj.all...
Processing adj.pert...
Processing adv.all...
Processing noun.Tops...
noun.Tops, line 7: warning: No hypernyms in synset
Processing noun.act...
Processing noun.animal...
Processing noun.artifact...
Processing noun.attribute...
Processing noun.body...
Processing noun.cognition...
Processing noun.communication...
Processing noun.event...
Processing noun.feeling...
Processing noun.food...
Processing noun.group...
Processing noun.location...
Processing noun.motive...
Processing noun.object...
Processing noun.person...
Processing noun.phenomenon...
Processing noun.plant...
Processing noun.possession...
Processing noun.process...
Processing noun.quantity...
Processing noun.relation...
Processing noun.shape...
Processing noun.state...
Processing noun.substance...
Processing noun.time...
Processing verb.body...
Processing verb.change...
Processing verb.cognition...
Processing verb.communication...
Processing verb.competition...
Processing verb.consumption...
Processing verb.contact...
Processing verb.creation...
Processing verb.emotion...
Processing verb.motion...
Processing verb.perception...
Processing verb.possession...
Processing verb.social...
Processing verb.stative...
Processing verb.weather...
Processing adj.ppl...
*** Statistics for ground files:
82115 noun synsets
13767 verb synsets
3812 adj synsets
3621 adv synsets
3651 pertainym synsets
10693 adjective satellite synsets
117659 synsets in total (including satellite and pertainym synsets)
82115 noun synsets have definitional glosses
13767 verb synsets have definitional glosses
3812 adj synsets have definitional glosses
3621 adv synsets have definitional glosses
3651 pertainym synsets have definitional glosses
10693 adjective satellite synsets have definitional glosses
117659 definitional glosses in total (including adjective satellite synsets)
225000 pointers in total
206978 synonyms in synsets
147306 unique word phrases
83118 word phrases of length 1
54533 word phrases of length 2
7766 word phrases of length 3
1454 word phrases of length 4
298 word phrases of length 5
80 word phrases of length 6
28 word phrases of length 7
20 word phrases of length 8
9 word phrases of length 9
Resolving pointers...
Done resolving pointers...
Getting sense counts...
Done with sense counts...
Figuring out byte offsets...
Dumping data files...
Done dumping data files...
Cannot open file: cntlist
Cannot order senses
Dumping index files...
Done dumping index files...
Dumping sense index...
Done dumping sense index...
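
The statistics in this grind log are internally consistent; as a quick check, the per-category counts reported above sum to the stated totals:

# Consistency check (values copied from the log above).
synset_counts = [82115, 13767, 3812, 3621, 3651, 10693]
assert sum(synset_counts) == 117659   # total synsets incl. satellites/pertainyms

phrase_counts = [83118, 54533, 7766, 1454, 298, 80, 28, 20, 9]
assert sum(phrase_counts) == 147306   # unique word phrases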

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,170 @@
1 The children %s to the playground
10 The cars %s down the avenue
100 These glasses %s easily
101 These fabrics %s easily
102 They %s their earnings this year
103 Their earnings %s this year
104 The water %ss
105 They %s the water
106 The animals %s
107 They %s a long time
108 The car %ss the tree
109 John will %s angry
11 They %s the car down the avenue
110 They %s in the city
111 They won't %s the story
112 They %s that there was a traffic accident
113 They %s whether there was a traffic accident
114 They %s her vice president
115 Did he %s his major works over a short period of time?
116 The chefs %s the vegetables
117 They %s the cape
118 The food does %s good
119 The music does %s good
12 They %s the glass tubes
120 The cool air does %s good
121 This food does %s well
122 It was %sing all day long
123 They %s him to write the letter
124 They %s him into writing the letter
125 They %s him from writing the letter
126 The bad news will %s him
127 The good news will %s her
128 The chef wants to %s the eggs
129 Sam wants to %s with Sue
13 The glass tubes %s
130 The fighter managed to %s his opponent
131 These cars won't %s
132 The branches %s from the trees
133 The stock market is going to %s
134 The moon will soon %s
135 The business is going to %s
136 The airplane is sure to %s
137 They %s to move
138 They %s moving
139 Sam and Sue %s the movie
14 Sam and Sue %s
140 They want to %s the prisoners
141 They want to %s the doors
142 The doors %s
143 Did he %s his foot?
144 Did his feet %s?
145 They will %s the duet
146 They %s their hair
147 They %s the trees
148 They %s him of all his money
149 Lights %s on the horizon
15 Sam cannot %s Sue
150 The horizon is %sing with lights
151 The crowds %s in the streets
152 The streets %s with crowds
153 Cars %s in the streets
154 The streets %s with cars
155 You can hear animals %s in the meadows
156 The meadows %s with animals
157 The birds %s in the woods
158 The woods %s with many kinds of birds
159 The performance is likely to %s Sue
16 The ropes %s
160 Sam and Sue %s over the results of the experiment
161 In the summer they like to go out and %s
162 The children %s in the rocking chair
163 There %s some children in the rocking chair
164 Some big birds %s in the tree
165 There %s some big birds in the tree
166 The men %s the area for animals
167 The men %s for animals in the area
168 The customs agents %s the bags for drugs
169 They %s him as chairman
17 The strong winds %s the rope
170 They %s him "Bobby"
18 They %s the sheets
19 The sheets didn't %s
2 The banks %s the check
20 The horses %s across the field
21 They %s the bags on the table
22 The men %s the horses across the field
23 Our properties %s at this point
24 His fields %s mine at this point
25 They %s the hill
26 They %s up the hill
27 They %s the river
28 They %s down the river
29 They %s the countryside
3 The checks %s
30 They %s in the countryside
31 These men %s across the river
32 These men %s the river
33 They %s the food to the people
34 They %s the people the food
35 They %s more bread
36 They %s the object in the water
37 The men %s the bookshelves
38 They %s the money in the closet
39 The lights %s from the ceiling
4 The children %s the ball
40 They %s the lights from the ceiling
41 They %s their rifles on the cabinet
42 The chairs %s in the corner
43 The men %s the chairs
44 The women %s water into the bowl
45 Water and oil %s into the bowl
46 They %s the wire around the stick
47 The wires %s around the stick
48 They %s the bread with melted butter
49 They %s the cart with boxes
5 The balls %s
50 They %s the books into the box
51 They %s sugar over the cake
52 They %s the cake with sugar
53 They %s the fruit with a chemical
54 They %s a chemical into the fruit
55 They %s the field with rye
56 They %s rye in the field
57 They %s notices on the doors
58 They %s the doors with notices
59 They %s money on their grandchild
6 The girls %s the wooden sticks
60 They %s their grandchild with money
61 They %s coins on the image
62 They %s the image with coins
63 They %s butter on the bread
64 They %s the lake with fish
65 The children %s the paper with grease
66 The children %s grease onto the paper
67 They %s papers over the floor
68 They %s the floor with papers
69 They %s the money
7 The wooden sticks %s
70 They %s the newspapers
71 They %s the goods
72 The men %s the boat
73 They %s the animals
74 The books %s the box
75 They %s the halls with holly
76 Holly flowers %s the halls
77 The wind storms %s the area with dust and dirt
78 Dust and dirt %s the area
79 The swollen rivers %s the area with water
8 The coins %s
80 The waters %s the area
81 They %s the cloth with water and alcohol
82 Water and alcohol %s the cloth
83 They %s the snow from the path
84 They %s the path of the snow
85 They %s the water from the sink
86 They %s the sink of water
87 They %s the parcel to their parents
88 They %s them the parcel
89 They %s cars to the tourists
9 They %s the coin
90 They %s the tourists their cars
91 They %s the money to them
92 They %s them the money
93 They %s them the information
94 They %s the information to them
95 The parents %s a French poem to the children
96 The parents %s the children a French poem
97 They %s
98 They %s themselves
99 These balls %s easily
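
The 170 numbered sentences above are example-sentence templates (the sents.vrb data), each with an old-style %s placeholder for the verb; the index mapping verb senses to these template numbers (sentidx.vrb) appears to be among the suppressed diffs above. A sketch of filling a template, with the verb "walk" chosen purely for illustration:

# Sketch: load "<number> <template>" lines and %-format a verb into one.
templates = {}
with open("sents.vrb", encoding="utf-8") as f:
    for line in f:
        number, _, template = line.strip().partition(" ")
        if number.isdigit():
            templates[int(number)] = template.strip()

print(templates[1] % "walk")  # "The children walk to the playground"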

View File

@ -0,0 +1,35 @@
1 Something ----s
2 Somebody ----s
3 It is ----ing
4 Something is ----ing PP
5 Something ----s something Adjective/Noun
6 Something ----s Adjective/Noun
7 Somebody ----s Adjective
8 Somebody ----s something
9 Somebody ----s somebody
10 Something ----s somebody
11 Something ----s something
12 Something ----s to somebody
13 Somebody ----s on something
14 Somebody ----s somebody something
15 Somebody ----s something to somebody
16 Somebody ----s something from somebody
17 Somebody ----s somebody with something
18 Somebody ----s somebody of something
19 Somebody ----s something on somebody
20 Somebody ----s somebody PP
21 Somebody ----s something PP
22 Somebody ----s PP
23 Somebody's (body part) ----s
24 Somebody ----s somebody to INFINITIVE
25 Somebody ----s somebody INFINITIVE
26 Somebody ----s that CLAUSE
27 Somebody ----s to somebody
28 Somebody ----s to INFINITIVE
29 Somebody ----s whether INFINITIVE
30 Somebody ----s somebody into V-ing something
31 Somebody ----s something with something
32 Somebody ----s INFINITIVE
33 Somebody ----s VERB-ing
34 It ----s that CLAUSE
35 Something ----s INFINITIVE

File diff suppressed because it is too large

View File

@ -0,0 +1,426 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# doc/Makefile. Generated from Makefile.in by configure.
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
srcdir = .
top_srcdir = ..
pkgdatadir = $(datadir)/WordNet
pkglibdir = $(libdir)/WordNet
pkgincludedir = $(includedir)/WordNet
top_builddir = ..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = /usr/csl/bin/install -c
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = doc
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
html-recursive info-recursive install-data-recursive \
install-exec-recursive install-info-recursive \
install-recursive installcheck-recursive installdirs-recursive \
pdf-recursive ps-recursive uninstall-info-recursive \
uninstall-recursive
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = $(SUBDIRS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run aclocal-1.9
AMDEP_FALSE = #
AMDEP_TRUE =
AMTAR = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run tar
AUTOCONF = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoconf
AUTOHEADER = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoheader
AUTOMAKE = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run automake-1.9
AWK = nawk
CC = gcc
CCDEPMODE = depmode=gcc3
CFLAGS = -g -O2
CPP = gcc -E
CPPFLAGS =
CYGPATH_W = echo
DEFS = -DHAVE_CONFIG_H
DEPDIR = .deps
ECHO_C =
ECHO_N = -n
ECHO_T =
EGREP = egrep
EXEEXT =
INSTALL_DATA = ${INSTALL} -m 644
INSTALL_PROGRAM = ${INSTALL}
INSTALL_SCRIPT = ${INSTALL}
INSTALL_STRIP_PROGRAM = ${SHELL} $(install_sh) -c -s
LDFLAGS =
LIBOBJS =
LIBS =
LTLIBOBJS =
MAKEINFO = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run makeinfo
OBJEXT = o
PACKAGE = WordNet
PACKAGE_BUGREPORT = wordnet@princeton.edu
PACKAGE_NAME = WordNet
PACKAGE_STRING = WordNet 3.0
PACKAGE_TARNAME = wordnet
PACKAGE_VERSION = 3.0
PATH_SEPARATOR = :
RANLIB = ranlib
SET_MAKE =
SHELL = /bin/bash
STRIP =
TCL_INCLUDE_SPEC = -I/usr/csl/include
TCL_LIB_SPEC = -L/usr/csl/lib -ltcl8.4
TK_LIBS = -L/usr/openwin/lib -lX11 -ldl -lpthread -lsocket -lnsl -lm
TK_LIB_SPEC = -L/usr/csl/lib -ltk8.4
TK_PREFIX = /usr/csl
TK_XINCLUDES = -I/usr/openwin/include
VERSION = 3.0
ac_ct_CC = gcc
ac_ct_RANLIB = ranlib
ac_ct_STRIP =
ac_prefix = /usr/local/WordNet-3.0
am__fastdepCC_FALSE = #
am__fastdepCC_TRUE =
am__include = include
am__leading_dot = .
am__quote =
am__tar = ${AMTAR} chof - "$$tardir"
am__untar = ${AMTAR} xf -
bindir = ${exec_prefix}/bin
build_alias =
datadir = ${prefix}/share
exec_prefix = ${prefix}
host_alias =
includedir = ${prefix}/include
infodir = ${prefix}/info
install_sh = /people/wn/src/Release/3.0/Unix/install-sh
libdir = ${exec_prefix}/lib
libexecdir = ${exec_prefix}/libexec
localstatedir = ${prefix}/var
mandir = ${prefix}/man
mkdir_p = $(install_sh) -d
oldincludedir = /usr/include
prefix = /usr/local/WordNet-3.0
program_transform_name = s,x,x,
sbindir = ${exec_prefix}/sbin
sharedstatedir = ${prefix}/com
sysconfdir = ${prefix}/etc
target_alias =
SUBDIRS = html man pdf ps
all: all-recursive
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu doc/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
# This directory's subdirectories are mostly independent; you can cd
# into them and run `make' without going through this Makefile.
# To change the values of `make' variables: instead of editing Makefiles,
# (1) if the variable is set in `config.status', edit `config.status'
# (which will cause the Makefiles to be regenerated when you run `make');
# (2) otherwise, pass the desired values on the `make' command line.
$(RECURSIVE_TARGETS):
@set fnord $$MAKEFLAGS; amf=$$2; \
dot_seen=no; \
target=`echo $@ | sed s/-recursive//`; \
list='$(SUBDIRS)'; for subdir in $$list; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
dot_seen=yes; \
local_target="$$target-am"; \
else \
local_target="$$target"; \
fi; \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
done; \
if test "$$dot_seen" = "no"; then \
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
mostlyclean-recursive clean-recursive distclean-recursive \
maintainer-clean-recursive:
@set fnord $$MAKEFLAGS; amf=$$2; \
dot_seen=no; \
case "$@" in \
distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
*) list='$(SUBDIRS)' ;; \
esac; \
rev=''; for subdir in $$list; do \
if test "$$subdir" = "."; then :; else \
rev="$$subdir $$rev"; \
fi; \
done; \
rev="$$rev ."; \
target=`echo $@ | sed s/-recursive//`; \
for subdir in $$rev; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
local_target="$$target-am"; \
else \
local_target="$$target"; \
fi; \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
done && test -z "$$fail"
tags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
done
ctags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
done
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
mkid -fID $$unique
tags: TAGS
TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
include_option=--etags-include; \
empty_fix=.; \
else \
include_option=--include; \
empty_fix=; \
fi; \
list='$(SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test ! -f $$subdir/TAGS || \
tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
fi; \
done; \
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$tags $$unique; \
fi
ctags: CTAGS
CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(CTAGS_ARGS)$$tags$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$tags $$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& cd $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) $$here
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test -d "$(distdir)/$$subdir" \
|| $(mkdir_p) "$(distdir)/$$subdir" \
|| exit 1; \
distdir=`$(am__cd) $(distdir) && pwd`; \
top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
(cd $$subdir && \
$(MAKE) $(AM_MAKEFLAGS) \
top_distdir="$$top_distdir" \
distdir="$$distdir/$$subdir" \
distdir) \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-recursive
all-am: Makefile
installdirs: installdirs-recursive
installdirs-am:
install: install-recursive
install-exec: install-exec-recursive
install-data: install-data-recursive
uninstall: uninstall-recursive
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-recursive
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-recursive
clean-am: clean-generic mostlyclean-am
distclean: distclean-recursive
-rm -f Makefile
distclean-am: clean-am distclean-generic distclean-tags
dvi: dvi-recursive
dvi-am:
html: html-recursive
info: info-recursive
info-am:
install-data-am:
install-exec-am:
install-info: install-info-recursive
install-man:
installcheck-am:
maintainer-clean: maintainer-clean-recursive
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-recursive
mostlyclean-am: mostlyclean-generic
pdf: pdf-recursive
pdf-am:
ps: ps-recursive
ps-am:
uninstall-am: uninstall-info-am
uninstall-info: uninstall-info-recursive
.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \
clean clean-generic clean-recursive ctags ctags-recursive \
distclean distclean-generic distclean-recursive distclean-tags \
distdir dvi dvi-am html html-am info info-am install \
install-am install-data install-data-am install-exec \
install-exec-am install-info install-info-am install-man \
install-strip installcheck installcheck-am installdirs \
installdirs-am maintainer-clean maintainer-clean-generic \
maintainer-clean-recursive mostlyclean mostlyclean-generic \
mostlyclean-recursive pdf pdf-am ps ps-am tags tags-recursive \
uninstall uninstall-am uninstall-info-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

View File

@ -0,0 +1 @@
SUBDIRS = html man pdf ps

View File

@ -0,0 +1,426 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = ..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = @INSTALL@
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = doc
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
html-recursive info-recursive install-data-recursive \
install-exec-recursive install-info-recursive \
install-recursive installcheck-recursive installdirs-recursive \
pdf-recursive ps-recursive uninstall-info-recursive \
uninstall-recursive
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = $(SUBDIRS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
AMDEP_TRUE = @AMDEP_TRUE@
AMTAR = @AMTAR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
OBJEXT = @OBJEXT@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
TCL_INCLUDE_SPEC = @TCL_INCLUDE_SPEC@
TCL_LIB_SPEC = @TCL_LIB_SPEC@
TK_LIBS = @TK_LIBS@
TK_LIB_SPEC = @TK_LIB_SPEC@
TK_PREFIX = @TK_PREFIX@
TK_XINCLUDES = @TK_XINCLUDES@
VERSION = @VERSION@
ac_ct_CC = @ac_ct_CC@
ac_ct_RANLIB = @ac_ct_RANLIB@
ac_ct_STRIP = @ac_ct_STRIP@
ac_prefix = @ac_prefix@
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build_alias = @build_alias@
datadir = @datadir@
exec_prefix = @exec_prefix@
host_alias = @host_alias@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
prefix = @prefix@
program_transform_name = @program_transform_name@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
SUBDIRS = html man pdf ps
all: all-recursive
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu doc/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
# This directory's subdirectories are mostly independent; you can cd
# into them and run `make' without going through this Makefile.
# To change the values of `make' variables: instead of editing Makefiles,
# (1) if the variable is set in `config.status', edit `config.status'
# (which will cause the Makefiles to be regenerated when you run `make');
# (2) otherwise, pass the desired values on the `make' command line.
$(RECURSIVE_TARGETS):
@set fnord $$MAKEFLAGS; amf=$$2; \
dot_seen=no; \
target=`echo $@ | sed s/-recursive//`; \
list='$(SUBDIRS)'; for subdir in $$list; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
dot_seen=yes; \
local_target="$$target-am"; \
else \
local_target="$$target"; \
fi; \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
done; \
if test "$$dot_seen" = "no"; then \
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
mostlyclean-recursive clean-recursive distclean-recursive \
maintainer-clean-recursive:
@set fnord $$MAKEFLAGS; amf=$$2; \
dot_seen=no; \
case "$@" in \
distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
*) list='$(SUBDIRS)' ;; \
esac; \
rev=''; for subdir in $$list; do \
if test "$$subdir" = "."; then :; else \
rev="$$subdir $$rev"; \
fi; \
done; \
rev="$$rev ."; \
target=`echo $@ | sed s/-recursive//`; \
for subdir in $$rev; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
local_target="$$target-am"; \
else \
local_target="$$target"; \
fi; \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
done && test -z "$$fail"
tags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
done
ctags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
done
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
mkid -fID $$unique
tags: TAGS
TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
include_option=--etags-include; \
empty_fix=.; \
else \
include_option=--include; \
empty_fix=; \
fi; \
list='$(SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test ! -f $$subdir/TAGS || \
tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
fi; \
done; \
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$tags $$unique; \
fi
ctags: CTAGS
CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(CTAGS_ARGS)$$tags$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$tags $$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& cd $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) $$here
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test -d "$(distdir)/$$subdir" \
|| $(mkdir_p) "$(distdir)/$$subdir" \
|| exit 1; \
distdir=`$(am__cd) $(distdir) && pwd`; \
top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
(cd $$subdir && \
$(MAKE) $(AM_MAKEFLAGS) \
top_distdir="$$top_distdir" \
distdir="$$distdir/$$subdir" \
distdir) \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-recursive
all-am: Makefile
installdirs: installdirs-recursive
installdirs-am:
install: install-recursive
install-exec: install-exec-recursive
install-data: install-data-recursive
uninstall: uninstall-recursive
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-recursive
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-recursive
clean-am: clean-generic mostlyclean-am
distclean: distclean-recursive
-rm -f Makefile
distclean-am: clean-am distclean-generic distclean-tags
dvi: dvi-recursive
dvi-am:
html: html-recursive
info: info-recursive
info-am:
install-data-am:
install-exec-am:
install-info: install-info-recursive
install-man:
installcheck-am:
maintainer-clean: maintainer-clean-recursive
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-recursive
mostlyclean-am: mostlyclean-generic
pdf: pdf-recursive
pdf-am:
ps: ps-recursive
ps-am:
uninstall-am: uninstall-info-am
uninstall-info: uninstall-info-recursive
.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \
clean clean-generic clean-recursive ctags ctags-recursive \
distclean distclean-generic distclean-recursive distclean-tags \
distdir dvi dvi-am html html-am info info-am install \
install-am install-data install-data-am install-exec \
install-exec-am install-info install-info-am install-man \
install-strip installcheck installcheck-am installdirs \
installdirs-am maintainer-clean maintainer-clean-generic \
maintainer-clean-recursive mostlyclean mostlyclean-generic \
mostlyclean-recursive pdf pdf-am ps ps-am tags tags-recursive \
uninstall uninstall-am uninstall-info-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

View File

@ -0,0 +1,313 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# doc/html/Makefile. Generated from Makefile.in by configure.
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
srcdir = .
top_srcdir = ../..
pkgdatadir = $(datadir)/WordNet
pkglibdir = $(libdir)/WordNet
pkgincludedir = $(includedir)/WordNet
top_builddir = ../..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = /usr/csl/bin/install -c
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = doc/html
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
am__installdirs = "$(DESTDIR)$(htmldir)"
htmlDATA_INSTALL = $(INSTALL_DATA)
DATA = $(html_DATA)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run aclocal-1.9
AMDEP_FALSE = #
AMDEP_TRUE =
AMTAR = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run tar
AUTOCONF = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoconf
AUTOHEADER = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoheader
AUTOMAKE = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run automake-1.9
AWK = nawk
CC = gcc
CCDEPMODE = depmode=gcc3
CFLAGS = -g -O2
CPP = gcc -E
CPPFLAGS =
CYGPATH_W = echo
DEFS = -DHAVE_CONFIG_H
DEPDIR = .deps
ECHO_C =
ECHO_N = -n
ECHO_T =
EGREP = egrep
EXEEXT =
INSTALL_DATA = ${INSTALL} -m 644
INSTALL_PROGRAM = ${INSTALL}
INSTALL_SCRIPT = ${INSTALL}
INSTALL_STRIP_PROGRAM = ${SHELL} $(install_sh) -c -s
LDFLAGS =
LIBOBJS =
LIBS =
LTLIBOBJS =
MAKEINFO = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run makeinfo
OBJEXT = o
PACKAGE = WordNet
PACKAGE_BUGREPORT = wordnet@princeton.edu
PACKAGE_NAME = WordNet
PACKAGE_STRING = WordNet 3.0
PACKAGE_TARNAME = wordnet
PACKAGE_VERSION = 3.0
PATH_SEPARATOR = :
RANLIB = ranlib
SET_MAKE =
SHELL = /bin/bash
STRIP =
TCL_INCLUDE_SPEC = -I/usr/csl/include
TCL_LIB_SPEC = -L/usr/csl/lib -ltcl8.4
TK_LIBS = -L/usr/openwin/lib -lX11 -ldl -lpthread -lsocket -lnsl -lm
TK_LIB_SPEC = -L/usr/csl/lib -ltk8.4
TK_PREFIX = /usr/csl
TK_XINCLUDES = -I/usr/openwin/include
VERSION = 3.0
ac_ct_CC = gcc
ac_ct_RANLIB = ranlib
ac_ct_STRIP =
ac_prefix = /usr/local/WordNet-3.0
am__fastdepCC_FALSE = #
am__fastdepCC_TRUE =
am__include = include
am__leading_dot = .
am__quote =
am__tar = ${AMTAR} chof - "$$tardir"
am__untar = ${AMTAR} xf -
bindir = ${exec_prefix}/bin
build_alias =
datadir = ${prefix}/share
exec_prefix = ${prefix}
host_alias =
includedir = ${prefix}/include
infodir = ${prefix}/info
install_sh = /people/wn/src/Release/3.0/Unix/install-sh
libdir = ${exec_prefix}/lib
libexecdir = ${exec_prefix}/libexec
localstatedir = ${prefix}/var
mandir = ${prefix}/man
mkdir_p = $(install_sh) -d
oldincludedir = /usr/include
prefix = /usr/local/WordNet-3.0
program_transform_name = s,x,x,
sbindir = ${exec_prefix}/sbin
sharedstatedir = ${prefix}/com
sysconfdir = ${prefix}/etc
target_alias =
htmldir = $(prefix)/doc/html
html_DATA = binsrch.3WN.html cntlist.5WN.html grind.1WN.html lexnames.5WN.html morph.3WN.html morphy.7WN.html senseidx.5WN.html uniqbeg.7WN.html wn.1WN.html wnb.1WN.html wndb.5WN.html wngloss.7WN.html wngroups.7WN.html wninput.5WN.html wnintro.1WN.html wnintro.3WN.html wnintro.5WN.html wnintro.7WN.html wnlicens.7WN.html wnpkgs.7WN.html wnsearch.3WN.html wnstats.7WN.html wnutil.3WN.html
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/html/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu doc/html/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
install-htmlDATA: $(html_DATA)
@$(NORMAL_INSTALL)
test -z "$(htmldir)" || $(mkdir_p) "$(DESTDIR)$(htmldir)"
@list='$(html_DATA)'; for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
f=$(am__strip_dir) \
echo " $(htmlDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(htmldir)/$$f'"; \
$(htmlDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(htmldir)/$$f"; \
done
uninstall-htmlDATA:
@$(NORMAL_UNINSTALL)
@list='$(html_DATA)'; for p in $$list; do \
f=$(am__strip_dir) \
echo " rm -f '$(DESTDIR)$(htmldir)/$$f'"; \
rm -f "$(DESTDIR)$(htmldir)/$$f"; \
done
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(DATA)
installdirs:
for dir in "$(DESTDIR)$(htmldir)"; do \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
info: info-am
info-am:
install-data-am: install-htmlDATA
install-exec-am:
install-info: install-info-am
install-man:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-htmlDATA uninstall-info-am
.PHONY: all all-am check check-am clean clean-generic distclean \
distclean-generic distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am install-exec \
install-exec-am install-htmlDATA install-info install-info-am \
install-man install-strip installcheck installcheck-am \
installdirs maintainer-clean maintainer-clean-generic \
mostlyclean mostlyclean-generic pdf pdf-am ps ps-am uninstall \
uninstall-am uninstall-htmlDATA uninstall-info-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

View File

@ -0,0 +1,2 @@
htmldir = $(prefix)/doc/html
html_DATA = binsrch.3WN.html cntlist.5WN.html grind.1WN.html lexnames.5WN.html morph.3WN.html morphy.7WN.html senseidx.5WN.html uniqbeg.7WN.html wn.1WN.html wnb.1WN.html wndb.5WN.html wngloss.7WN.html wngroups.7WN.html wninput.5WN.html wnintro.1WN.html wnintro.3WN.html wnintro.5WN.html wnintro.7WN.html wnlicens.7WN.html wnpkgs.7WN.html wnsearch.3WN.html wnstats.7WN.html wnutil.3WN.html

View File

@ -0,0 +1,313 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = ../..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = @INSTALL@
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = doc/html
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
am__installdirs = "$(DESTDIR)$(htmldir)"
htmlDATA_INSTALL = $(INSTALL_DATA)
DATA = $(html_DATA)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
AMDEP_TRUE = @AMDEP_TRUE@
AMTAR = @AMTAR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
OBJEXT = @OBJEXT@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
TCL_INCLUDE_SPEC = @TCL_INCLUDE_SPEC@
TCL_LIB_SPEC = @TCL_LIB_SPEC@
TK_LIBS = @TK_LIBS@
TK_LIB_SPEC = @TK_LIB_SPEC@
TK_PREFIX = @TK_PREFIX@
TK_XINCLUDES = @TK_XINCLUDES@
VERSION = @VERSION@
ac_ct_CC = @ac_ct_CC@
ac_ct_RANLIB = @ac_ct_RANLIB@
ac_ct_STRIP = @ac_ct_STRIP@
ac_prefix = @ac_prefix@
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build_alias = @build_alias@
datadir = @datadir@
exec_prefix = @exec_prefix@
host_alias = @host_alias@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
prefix = @prefix@
program_transform_name = @program_transform_name@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
htmldir = $(prefix)/doc/html
html_DATA = binsrch.3WN.html cntlist.5WN.html grind.1WN.html lexnames.5WN.html morph.3WN.html morphy.7WN.html senseidx.5WN.html uniqbeg.7WN.html wn.1WN.html wnb.1WN.html wndb.5WN.html wngloss.7WN.html wngroups.7WN.html wninput.5WN.html wnintro.1WN.html wnintro.3WN.html wnintro.5WN.html wnintro.7WN.html wnlicens.7WN.html wnpkgs.7WN.html wnsearch.3WN.html wnstats.7WN.html wnutil.3WN.html
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/html/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu doc/html/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
install-htmlDATA: $(html_DATA)
@$(NORMAL_INSTALL)
test -z "$(htmldir)" || $(mkdir_p) "$(DESTDIR)$(htmldir)"
@list='$(html_DATA)'; for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
f=$(am__strip_dir) \
echo " $(htmlDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(htmldir)/$$f'"; \
$(htmlDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(htmldir)/$$f"; \
done
uninstall-htmlDATA:
@$(NORMAL_UNINSTALL)
@list='$(html_DATA)'; for p in $$list; do \
f=$(am__strip_dir) \
echo " rm -f '$(DESTDIR)$(htmldir)/$$f'"; \
rm -f "$(DESTDIR)$(htmldir)/$$f"; \
done
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(DATA)
installdirs:
for dir in "$(DESTDIR)$(htmldir)"; do \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
info: info-am
info-am:
install-data-am: install-htmlDATA
install-exec-am:
install-info: install-info-am
install-man:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-htmlDATA uninstall-info-am
.PHONY: all all-am check check-am clean clean-generic distclean \
distclean-generic distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am install-exec \
install-exec-am install-htmlDATA install-info install-info-am \
install-man install-strip installcheck installcheck-am \
installdirs maintainer-clean maintainer-clean-generic \
mostlyclean mostlyclean-generic pdf pdf-am ps ps-am uninstall \
uninstall-am uninstall-htmlDATA uninstall-info-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

View File

@ -0,0 +1,78 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>BINSRCH(3WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
bin_search, copyfile, replace_line, insert_line
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<P>
<B>char *bin_search(char
*key, FILE *fp); </B> <P>
<B>void copyfile(FILE *fromfp, FILE *tofp); </B> <P>
<B>char *replace_line(char
*new_line, char *key, FILE *fp); </B> <P>
<B>char *insert_line(char *new_line, char
*key, FILE *fp); </B>
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION </A></H2>
<P>
The WordNet library contains several general
purpose functions for performing a binary search and modifying sorted
files. <P>
<B>bin_search()</B> is the primary binary search routine; it searches for
<I>key </I> as the first item on a line in the file pointed to by <I>fp </I>. The delimiter
between the key and the rest of the fields on the line, if any, must be
a space. A pointer to a static variable containing the entire line is
returned. <FONT SIZE=-1><B>NULL </B></FONT>
is returned if a match is not found. <P>
The remaining functions
are not used by WordNet, and are only briefly described. <P>
<B>copyfile()</B> copies
the contents of one file to another. <P>
<B>replace_line()</B> replaces a line in
a file having searchkey <I>key </I> with the contents of <I>new_line </I>. It returns
the original line or <FONT SIZE=-1><B>NULL </B></FONT>
in case of error. <P>
<B>insert_line()</B> finds the proper
place to insert the contents of <I>new_line </I>, having searchkey <I>key </I> in the
sorted file pointed to by <I>fp </I>. It returns <FONT SIZE=-1><B>NULL </B></FONT>
if a line with this searchkey
is already in the file.
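<P>
The following minimal sketch is not part of the original manual page; it
shows one plausible way an application might call <B>bin_search() </B>, using
the declaration from the <FONT SIZE=-1><B>SYNOPSIS </B></FONT>
above. The file name and search key are illustrative only: <P>
<PRE>
#include &lt;stdio.h&gt;
#include &lt;stdlib.h&gt;
#include &lt;string.h&gt;

/* Declaration from the SYNOPSIS above. */
extern char *bin_search(char *key, FILE *fp);

int main(void)
{
    FILE *fp = fopen("index.noun", "r");   /* illustrative file name */
    char *line;

    if (fp == NULL)
        return EXIT_FAILURE;
    line = bin_search("dog", fp);          /* key = first item on a line */
    if (line != NULL) {
        /* bin_search() returns a static buffer (see WARNINGS below),
           so copy it if the result must outlive the next call. */
        char *copy = strdup(line);
        printf("%s", copy);
        free(copy);
    }
    fclose(fp);
    return EXIT_SUCCESS;
}
</PRE>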
<H2><A NAME="sect3" HREF="#toc3">NOTES </A></H2>
The maximum length of <I>key </I> is 1024. <P>
The
maximum line length in a file is 25K. <P>
If there are no additional fields
after the search key, the key must be followed by at least one space before
the newline character.
<H2><A NAME="sect4" HREF="#toc4">SEE ALSO </A></H2>
<B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="morph.3WN.html">morph</B>(3WN)</A>
, <B><A HREF="wnsearch.3WN.html">wnsearch</B>(3WN)</A>
,
<B><A HREF="wnutil.3WN.html">wnutil</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
.
<H2><A NAME="sect5" HREF="#toc5">WARNINGS </A></H2>
<B>bin_search() </B> returns a pointer to
a static character buffer. The returned string should be copied by the
caller if the results need to be saved, as a subsequent call will replace
the contents of the static buffer. <P>
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">NOTES</A></LI>
<LI><A NAME="toc4" HREF="#sect4">SEE ALSO</A></LI>
<LI><A NAME="toc5" HREF="#sect5">WARNINGS</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,125 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>CNTLIST(5WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
cntlist - file listing number of times each tagged sense occurs
in a semantic concordance, sorted most to least frequently tagged <P>
cntlist.rev
- file listing number of times each tagged sense occurs in a semantic concordance,
sorted by sense key
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
A cntlist file for a semantic concordance
lists the number of times each semantically tagged sense occurs in the
concordance and its sense number in the WordNet database. Each line in
the file corresponds to a sense in the WordNet database to which at least
one semantic tag points. Only senses that are tagged in a concordance
are in the concordance's cntlist file. <P>
<H3><A NAME="sect2" HREF="#toc2">WordNet Database <I>cntlist </I> File
</A></H3>
In the WordNet database, words are assigned sense numbers based on frequency
of use in semantically tagged corpora. The cntlist file used by <B><A HREF="grind.1WN.html">grind</B>(1WN)<B></B></A>
to build the WordNet database and assign the sense numbers is a union
of the cntlist files from the various semantic concordances that were
formerly released by Princeton University. This combined cntlist file
is provided with the WordNet package and is found in the <B>WNSEARCHDIR </B>
directory. <P>
The <I>cntlist.rev </I> file is used at run-time by the WordNet library
code and browser interfaces to print in the output display the number
of times each sense has been tagged.
<H3><A NAME="sect3" HREF="#toc3">File Format </A></H3>
Each line in a cntlist
file contains information for one sense. The file is ordered from most
to least frequently tagged sense. The fields are separated by one space,
and each line is terminated with a newline character. Senses having the
same <I>tag_cnt </I> value are listed in reverse alphabetical order of the <I>lemma
</I> field of the <I>sense_key </I>. <P>
Each line in <B>cntlist </B> is of the form: <P>
<blockquote><I>tag_cnt&nbsp;&nbsp;sense_key&nbsp;&nbsp;sense_number
</I> </blockquote>
<P>
where <I>tag_cnt </I> is the decimal number of times the sense is tagged in
the corresponding semantic concordance. <I>sense_key </I> is a WordNet sense
encoding and <I>sense_number </I> is a WordNet sense number, as described in <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>. <P>
The <I>cntlist.rev </I> file contains the same fields described above, in the
following order: <P>
<blockquote><I>sense_key&nbsp;&nbsp;sense_number&nbsp;&nbsp;tag_cnt </I> </blockquote>
<P>
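As an informal illustration (not from the original manual page), a line
in the <B>cntlist </B> format can be pulled apart with <B>sscanf </B>; the buffer
size below is an arbitrary assumption, and <B>cntlist.rev </B> would need the
reversed field order: <P>
<PRE>
#include &lt;stdio.h&gt;

/* Parse one cntlist line: tag_cnt, sense_key and sense_number,
   separated by single spaces (illustrative buffer size). */
static int parse_cntlist_line(const char *line)
{
    int tag_cnt, sense_number;
    char sense_key[256];

    if (sscanf(line, "%d %255s %d",
               &amp;tag_cnt, sense_key, &amp;sense_number) != 3)
        return -1;                       /* malformed line */
    printf("%s: tagged %d time(s), sense %d\n",
           sense_key, tag_cnt, sense_number);
    return 0;
}
</PRE>
<P>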
<H2><A NAME="sect4" HREF="#toc4">NOTES </A></H2>
Princeton
no longer maintains or releases the Semantic Concordance files. The <I>cntlist
</I> file used to order the senses in WordNet 3.0 was generated from the Semantic
Concordance files at the point that they were last updated in 2001. In
general, the order of senses presented usually reflects what the user
would expect; however, sense ordering is now less reliable than in prior
releases and should not be construed as an accurate indicator of frequency
of use.
<H2><A NAME="sect5" HREF="#toc5">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet.
Default is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in which the
WordNet database has been installed. Default is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect6" HREF="#toc6">REGISTRY
(WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B> </DT>
<DD>Base directory for
WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
<DT><B>HKEY_CURRENT_USER\SOFTWARE\WordNet\3.0\wnres</B>
</DT>
<DD>User's default browser options. </DD>
</DL>
<H2><A NAME="sect7" HREF="#toc7">FILES </A></H2>
<DL>
<DT><B>cntlist, cntlist.rev</B> </DT>
<DD>file of combined
semantic concordance <B>cntlist </B> files. Used to assign sense numbers in WordNet
database </DD>
</DL>
<H2><A NAME="sect8" HREF="#toc8">SEE ALSO </A></H2>
<B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">WordNet Database cntlist File</A></LI>
<LI><A NAME="toc3" HREF="#sect3">File Format</A></LI>
</UL>
<LI><A NAME="toc4" HREF="#sect4">NOTES</A></LI>
<LI><A NAME="toc5" HREF="#sect5">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc6" HREF="#sect6">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc7" HREF="#sect7">FILES</A></LI>
<LI><A NAME="toc8" HREF="#sect8">SEE ALSO</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,195 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>GRIND(1WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
grind - process WordNet lexicographer files
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<B>grind </B> [ <B>-v
</B> ] [ <B>-s </B> ] [ <B>-L </B><I>logfile </I> ] [ <B>-a </B> ] [ <B>-d </B> ] [ <B>-i </B> ] [ <B>-o </B> ] [ <B>-n </B> ] <I>filename </I>
[ <I>filename </I>... ]
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION </A></H2>
<B>grind() </B> processes WordNet lexicographer files,
producing database files suitable for use with the WordNet search and
interface code and other applications. The syntactic and structural integrity
of the input files is verified. Warnings and errors are reported via <B>stderr
</B> and a run-time log is produced on <B>stdout </B>. A database is generated only
if there are no errors.
<H3><A NAME="sect3" HREF="#toc3">Input Files </A></H3>
Input files correspond to the syntactic
categories implemented in WordNet - <B>noun</B>, <B>verb</B>, <B>adjective</B> and <B>adverb</B>.
Each input lexicographer file consists of a list of synonym sets (<I>synsets
</I>) for one part of speech. Although the basic synset syntax is the same
for all of the parts of speech, some parts of the syntax only apply to
a particular part of speech. See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)<B></B></A>
for a description of the
input file format. <P>
Each <I>filename </I> specified is of the form: <P>
<blockquote><I>pathname </I>/<I>pos </I>.<I>suffix </I> </blockquote>
<P>
where
<I>pathname </I> is optional and <I>pos </I> is either <B>noun</B>, <B>verb</B>, <B>adj</B> or <B>adv</B>. <I>suffix
</I> may be used to separate groups of synsets into different files, for example
<B>noun.animal </B> and <B>noun.plant </B>. One or more input files, in any combination
of syntactic categories, may be specified. See <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
for a list
of the lexicographer files used to build the complete WordNet database.
<H3><A NAME="sect4" HREF="#toc4">Output Files </A></H3>
<B>grind() </B> produces the following output files: <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>Filename
</B></TD> <TD ALIGN=CENTER>Description </TD> </TR>
<TR> <TD ALIGN=LEFT><B>index.<I>pos </I></B> </TD> <TD ALIGN=LEFT>Index file for each syntactic category </TD>
</TR>
<TR> <TD ALIGN=LEFT><B>data.<I>pos </I></B> </TD> <TD ALIGN=LEFT>Data file for each syntactic category </TD> </TR>
<TR> <TD ALIGN=LEFT><B>index.sense </B> </TD> <TD ALIGN=LEFT>Sense
index </TD> </TR>
</TABLE>
<P>
See <B><A HREF="wndb.5WN.html">wndb</B>(5WN)<B></B></A>
for a description of the database file formats.
<P>
Each time <B>grind() </B> is run, any existing database files are overwritten
with the database files generated from the specified input files. If no
input files from a syntactic category are specified, the corresponding
database files are not overwritten.
<H3><A NAME="sect5" HREF="#toc5">Sense Numbers </A></H3>
Senses are generally
ordered from most to least frequently used, with the most common sense
numbered <B>1 </B>. Frequency of use is determined by the number of times a sense
is tagged in the various semantic concordance texts. Senses that are not
semantically tagged follow the ordered senses in an arbitrary order.
Note that this ordering is only an estimate based on usage in a small
corpus. <P>
The <I>tagsense_cnt </I> field for each entry in the <B>index.<I>pos </I></B> files
indicates how many of the senses in the list have been tagged. <P>
The <B>cntlist
</B> file provided with the database lists the number of times each sense
is tagged in the semantic concordances. <B>grind() </B> uses the data from <B>cntlist
</B> to order the senses of each word. When the <B>index </B>.<I>pos </I> files are generated,
the <I>synset_offset </I>s are output in sense number order, with sense 1 first
in the list. Senses with the same number of semantic tags are assigned
unique but consecutive sense numbers. The WordNet <FONT SIZE=-1><B>OVERVIEW </B></FONT>
search displays
all senses of the specified word, in all syntactic categories, and indicates
which of the senses are represented in the semantically tagged texts.
<H2><A NAME="sect6" HREF="#toc6">OPTIONS </A></H2>
<DL>
<DT><B>-v</B> </DT>
<DD>Verify integrity of input without generating database. </DD>
<DT><B>-s</B> </DT>
<DD>Suppress
generation of warning messages. Usually <B>grind </B> is run with this option
until all syntactic and structural errors are corrected, since the warning
messages may make it difficult to spot error messages. </DD>
<DT><B>-L</B><I>logfile</I> </DT>
<DD>Write
all messages to <I>logfile </I> instead of <B>stderr </B>. </DD>
<DT><B>-a</B> </DT>
<DD>Generate statistical report
on input files processed. </DD>
<DT><B>-d</B> </DT>
<DD>Generate distribution of senses by string
length report on input files processed. </DD>
<DT><B>-i</B> </DT>
<DD>Generate sense index file. </DD>
<DT><B>-o</B>
</DT>
<DD>Order senses using <B>cntlist </B>. </DD>
<DT><B>-n</B> </DT>
<DD>Generate nominalization (derivational
morphology) links in database. </DD>
<DT><I>filename</I> </DT>
<DD>Input file of the form described
in <FONT SIZE=-1><B>Input Files </B></FONT>, above.
</DD>
</DL>
<H2><A NAME="sect7" HREF="#toc7">FILES </A></H2>
<DL>
<DT><B><I>pos </I>.*</B> </DT>
<DD>lexicographer files to use to build database
</DD>
<DT><B>cntlist</B> </DT>
<DD>file of combined semantic concordance <B>cntlist </B> files. Used to
assign sense numbers in WordNet database </DD>
</DL>
<H2><A NAME="sect8" HREF="#toc8">SEE ALSO </A></H2>
<B><A HREF="cntlist.5WN.html">cntlist</B>(5WN)</A>
, <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
,
<B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
, <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
, <B><A HREF="uniqbeg.7WN.html">uniqbeg</B>(7WN)</A>
, <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
.
<H2><A NAME="sect9" HREF="#toc9">DIAGNOSTICS
</A></H2>
Exit status is normally 0. Exit status is -1 if a non-specific error occurs.
If syntactic or structural errors exist, exit status is the number of errors
detected.
<DL>
<DT><B>usage: grind [-v] [-s] [-Llogfile] [-a] [-d] [-i] [-o] [-n] filename
[filename...]</B> </DT>
<DD>Invalid options were specified on the command line. </DD>
<DT><B>No input
files processed.</B> </DT>
<DD>None of the filenames specified were of the appropriate
form. </DD>
<DT><B><I>n </I> syntactic errors found.</B> </DT>
<DD>Syntax errors were found while parsing
the input files. </DD>
<DT><B><I>n </I> structural errors found.</B> </DT>
<DD>Pointer errors were found
that could not be automatically corrected. </DD>
</DL>
<H2><A NAME="sect10" HREF="#toc10">BUGS </A></H2>
Please report bugs to
<B>wordnet@princeton.edu </B>. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc3" HREF="#sect3">Input Files</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Output Files</A></LI>
<LI><A NAME="toc5" HREF="#sect5">Sense Numbers</A></LI>
</UL>
<LI><A NAME="toc6" HREF="#sect6">OPTIONS</A></LI>
<LI><A NAME="toc7" HREF="#sect7">FILES</A></LI>
<LI><A NAME="toc8" HREF="#sect8">SEE ALSO</A></LI>
<LI><A NAME="toc9" HREF="#sect9">DIAGNOSTICS</A></LI>
<LI><A NAME="toc10" HREF="#sect10">BUGS</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,195 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>LEXNAMES(5WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
List of WordNet lexicographer file names and numbers
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION
</A></H2>
During WordNet development, synsets are organized into forty-five lexicographer
files based on syntactic category and logical groupings. <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
processes
these files and produces a database suitable for use with the WordNet
library, interface code, and other applications. The format of the lexicographer
files is described in <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
. <P>
A file number corresponds to each
lexicographer file. File numbers are encoded in several parts of the WordNet
system as an efficient way to indicate a lexicographer file name. The
file <B>lexnames </B> lists the mapping between file names and numbers, and can
be used by programs or end users to correlate the two.
<H3><A NAME="sect2" HREF="#toc2">File Format </A></H3>
Each
line in <B>lexnames </B> contains 3 tab separated fields, and is terminated with
a newline character. The first field is the two digit decimal integer
file number. (The first file in the list is numbered <B>00 </B>.) The second
field is the name of the lexicographer file that is represented by that
number, and the third field is an integer that indicates the syntactic
category of the synsets contained in the file. This is simply a shortcut
for programs and scripts, since the syntactic category is also part of
the lexicographer file's name.
<H3><A NAME="sect3" HREF="#toc3">Syntactic Category </A></H3>
The syntactic category
field is encoded as follows: <P>
<blockquote><B>1 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;NOUN <BR>
<B>2 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;VERB <BR>
<B>3 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE <BR>
<B>4 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADVERB <BR>
</blockquote>
<H3><A NAME="sect4" HREF="#toc4">Lexicographer Files </A></H3>
The names of the lexicographer files and their corresponding
file numbers are listed below, along with a brief description of each file's
contents. <P>
<blockquote> <TABLE BORDER=0>
<TR> <TD ALIGN=LEFT><B>File Number </B> </TD> <TD ALIGN=LEFT><B>Name </B> </TD> <TD ALIGN=LEFT><B>Contents </B> </TD> </TR>
<TR> <TD ALIGN=LEFT>00 </TD> <TD ALIGN=LEFT>adj.all </TD> <TD ALIGN=LEFT>all adjective
clusters </TD> </TR>
<TR> <TD ALIGN=LEFT>01 </TD> <TD ALIGN=LEFT>adj.pert </TD> <TD ALIGN=LEFT>relational adjectives (pertainyms) </TD> </TR>
<TR> <TD ALIGN=LEFT>02 </TD> <TD ALIGN=LEFT>adv.all
</TD> <TD ALIGN=LEFT>all adverbs </TD> </TR>
<TR> <TD ALIGN=LEFT>03 </TD> <TD ALIGN=LEFT>noun.Tops </TD> <TD ALIGN=LEFT>unique beginner for nouns </TD> </TR>
<TR> <TD ALIGN=LEFT>04 </TD> <TD ALIGN=LEFT>noun.act
</TD> <TD ALIGN=LEFT>nouns denoting acts or actions </TD> </TR>
<TR> <TD ALIGN=LEFT>05 </TD> <TD ALIGN=LEFT>noun.animal </TD> <TD ALIGN=LEFT>nouns denoting animals
</TD> </TR>
<TR> <TD ALIGN=LEFT>06 </TD> <TD ALIGN=LEFT>noun.artifact </TD> <TD ALIGN=LEFT>nouns denoting man-made objects </TD> </TR>
<TR> <TD ALIGN=LEFT>07 </TD> <TD ALIGN=LEFT>noun.attribute
</TD> <TD ALIGN=LEFT>nouns denoting attributes of people and objects </TD> </TR>
<TR> <TD ALIGN=LEFT>08 </TD> <TD ALIGN=LEFT>noun.body </TD> <TD ALIGN=LEFT>nouns
denoting body parts </TD> </TR>
<TR> <TD ALIGN=LEFT>09 </TD> <TD ALIGN=LEFT>noun.cognition </TD> <TD ALIGN=LEFT>nouns denoting cognitive processes
and contents </TD> </TR>
<TR> <TD ALIGN=LEFT>10 </TD> <TD ALIGN=LEFT>noun.communication </TD> <TD ALIGN=LEFT>nouns denoting communicative processes
and contents </TD> </TR>
<TR> <TD ALIGN=LEFT>11 </TD> <TD ALIGN=LEFT>noun.event </TD> <TD ALIGN=LEFT>nouns denoting natural events </TD> </TR>
<TR> <TD ALIGN=LEFT>12
</TD> <TD ALIGN=LEFT>noun.feeling </TD> <TD ALIGN=LEFT>nouns denoting feelings and emotions </TD> </TR>
<TR> <TD ALIGN=LEFT>13 </TD> <TD ALIGN=LEFT>noun.food </TD>
<TD ALIGN=LEFT>nouns denoting foods and drinks </TD> </TR>
<TR> <TD ALIGN=LEFT>14 </TD> <TD ALIGN=LEFT>noun.group </TD> <TD ALIGN=LEFT>nouns denoting groupings
of people or objects </TD> </TR>
<TR> <TD ALIGN=LEFT>15 </TD> <TD ALIGN=LEFT>noun.location </TD> <TD ALIGN=LEFT>nouns denoting spatial position
</TD> </TR>
<TR> <TD ALIGN=LEFT>16 </TD> <TD ALIGN=LEFT>noun.motive </TD> <TD ALIGN=LEFT>nouns denoting goals </TD> </TR>
<TR> <TD ALIGN=LEFT>17 </TD> <TD ALIGN=LEFT>noun.object </TD> <TD ALIGN=LEFT>nouns denoting
natural objects (not man-made) </TD> </TR>
<TR> <TD ALIGN=LEFT>18 </TD> <TD ALIGN=LEFT>noun.person </TD> <TD ALIGN=LEFT>nouns denoting people
</TD> </TR>
<TR> <TD ALIGN=LEFT>19 </TD> <TD ALIGN=LEFT>noun.phenomenon </TD> <TD ALIGN=LEFT>nouns denoting natural phenomena </TD> </TR>
<TR> <TD ALIGN=LEFT>20 </TD> <TD ALIGN=LEFT>noun.plant
</TD> <TD ALIGN=LEFT>nouns denoting plants </TD> </TR>
<TR> <TD ALIGN=LEFT>21 </TD> <TD ALIGN=LEFT>noun.possession </TD> <TD ALIGN=LEFT>nouns denoting possession
and transfer of possession </TD> </TR>
<TR> <TD ALIGN=LEFT>22 </TD> <TD ALIGN=LEFT>noun.process </TD> <TD ALIGN=LEFT>nouns denoting natural
processes </TD> </TR>
<TR> <TD ALIGN=LEFT>23 </TD> <TD ALIGN=LEFT>noun.quantity </TD> <TD ALIGN=LEFT>nouns denoting quantities and units of
measure </TD> </TR>
<TR> <TD ALIGN=LEFT>24 </TD> <TD ALIGN=LEFT>noun.relation </TD> <TD ALIGN=LEFT>nouns denoting relations between people
or things or ideas </TD> </TR>
<TR> <TD ALIGN=LEFT>25 </TD> <TD ALIGN=LEFT>noun.shape </TD> <TD ALIGN=LEFT>nouns denoting two and three dimensional
shapes </TD> </TR>
<TR> <TD ALIGN=LEFT>26 </TD> <TD ALIGN=LEFT>noun.state </TD> <TD ALIGN=LEFT>nouns denoting stable states of affairs </TD>
</TR>
<TR> <TD ALIGN=LEFT>27 </TD> <TD ALIGN=LEFT>noun.substance </TD> <TD ALIGN=LEFT>nouns denoting substances </TD> </TR>
<TR> <TD ALIGN=LEFT>28 </TD> <TD ALIGN=LEFT>noun.time </TD> <TD ALIGN=LEFT>nouns
denoting time and temporal relations </TD> </TR>
<TR> <TD ALIGN=LEFT>29 </TD> <TD ALIGN=LEFT>verb.body </TD> <TD ALIGN=LEFT>verbs of grooming,
dressing and bodily care </TD> </TR>
<TR> <TD ALIGN=LEFT>30 </TD> <TD ALIGN=LEFT>verb.change </TD> <TD ALIGN=LEFT>verbs of size, temperature
change, intensifying, etc. </TD> </TR>
<TR> <TD ALIGN=LEFT>31 </TD> <TD ALIGN=LEFT>verb.cognition </TD> <TD ALIGN=LEFT>verbs of thinking, judging,
analyzing, doubting </TD> </TR>
<TR> <TD ALIGN=LEFT>32 </TD> <TD ALIGN=LEFT>verb.communication </TD> <TD ALIGN=LEFT>verbs of telling, asking,
ordering, singing </TD> </TR>
<TR> <TD ALIGN=LEFT>33 </TD> <TD ALIGN=LEFT>verb.competition </TD> <TD ALIGN=LEFT>verbs of fighting, athletic
activities </TD> </TR>
<TR> <TD ALIGN=LEFT>34 </TD> <TD ALIGN=LEFT>verb.consumption </TD> <TD ALIGN=LEFT>verbs of eating and drinking </TD> </TR>
<TR> <TD ALIGN=LEFT>35 </TD> <TD ALIGN=LEFT>verb.contact </TD> <TD ALIGN=LEFT>verbs of touching, hitting, tying, digging </TD> </TR>
<TR> <TD ALIGN=LEFT>36 </TD>
<TD ALIGN=LEFT>verb.creation </TD> <TD ALIGN=LEFT>verbs of sewing, baking, painting, performing </TD> </TR>
<TR> <TD ALIGN=LEFT>37 </TD> <TD ALIGN=LEFT>verb.emotion
</TD> <TD ALIGN=LEFT>verbs of feeling </TD> </TR>
<TR> <TD ALIGN=LEFT>38 </TD> <TD ALIGN=LEFT>verb.motion </TD> <TD ALIGN=LEFT>verbs of walking, flying, swimming
</TD> </TR>
<TR> <TD ALIGN=LEFT>39 </TD> <TD ALIGN=LEFT>verb.perception </TD> <TD ALIGN=LEFT>verbs of seeing, hearing, feeling </TD> </TR>
<TR> <TD ALIGN=LEFT>40 </TD> <TD ALIGN=LEFT>verb.possession
</TD> <TD ALIGN=LEFT>verbs of buying, selling, owning </TD> </TR>
<TR> <TD ALIGN=LEFT>41 </TD> <TD ALIGN=LEFT>verb.social </TD> <TD ALIGN=LEFT>verbs of political
and social activities and events </TD> </TR>
<TR> <TD ALIGN=LEFT>42 </TD> <TD ALIGN=LEFT>verb.stative </TD> <TD ALIGN=LEFT>verbs of being,
having, spatial relations </TD> </TR>
<TR> <TD ALIGN=LEFT>43 </TD> <TD ALIGN=LEFT>verb.weather </TD> <TD ALIGN=LEFT>verbs of raining, snowing,
thawing, thundering </TD> </TR>
<TR> <TD ALIGN=LEFT>44 </TD> <TD ALIGN=LEFT>adj.ppl </TD> <TD ALIGN=LEFT>participial adjectives </TD> </TR>
</TABLE>
</blockquote>
<H2><A NAME="sect5" HREF="#toc5">NOTES
</A></H2>
The lexicographer files are not included in the WordNet database package.
<H2><A NAME="sect6" HREF="#toc6">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet. Default
is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in which the WordNet database
has been installed. Default is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect7" HREF="#toc7">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B>
</DT>
<DD>Base directory for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect8" HREF="#toc8">FILES
</A></H2>
<DL>
<DT><B>lexnames</B> </DT>
<DD>list of lexicographer file names and numbers </DD>
</DL>
<H2><A NAME="sect9" HREF="#toc9">SEE ALSO </A></H2>
<B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
,
<B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">File Format</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Syntactic Category</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Lexicographer Files</A></LI>
</UL>
<LI><A NAME="toc5" HREF="#sect5">NOTES</A></LI>
<LI><A NAME="toc6" HREF="#sect6">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc7" HREF="#sect7">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc8" HREF="#sect8">FILES</A></LI>
<LI><A NAME="toc9" HREF="#sect9">SEE ALSO</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,109 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>MORPH(3WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
morphinit, re_morphinit, morphstr, morphword
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<P>
<B>#include
"wn.h" </B> <P>
<B>int morphinit(void); </B> <P>
<B>int re_morphinit(void); </B> <P>
<B>char *morphstr(char
*origstr, int pos); </B> <P>
<B>char *morphword(char *word, int pos); </B>
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION
</A></H2>
<P>
The WordNet morphological processor, Morphy, is accessed through these
functions: <P>
<B>morphinit()</B> is used to open the exception list files. It returns
<B>0 </B> if successful, <B>-1 </B> otherwise. The exception list files must be opened
before <B>morphstr() </B> or <B>morphword()</B> are called. <P>
<B>re_morphinit()</B> is used to
close the exception list files and reopen them, and is used exclusively
for WordNet development. Return codes are as described above. <P>
<B>morphstr()</B>
is the basic user interface to Morphy. It tries to find the base form
(lemma) of the word or collocation <I>origstr </I> in the specified <I>pos </I>. The
first call (with <I>origstr </I> specified) returns a pointer to the first base
form found. Subsequent calls requesting base forms of the same string
must be made with the first argument of <FONT SIZE=-1><B>NULL. </B></FONT>
When no more base forms
for <I>origstr </I> can be found, <FONT SIZE=-1><B>NULL </B></FONT>
is returned. Note that <B>morphstr() </B> returns
a pointer to a static character buffer. A subsequent call to <B>morphstr()
</B> with a new string (instead of <B>NULL </B>) will overwrite the string pointed
to by a previous call. Users should copy the returned string into a local
buffer, or use the C library function <B>strdup </B> to duplicate the returned
string into a <I>malloc'd </I> buffer. <P>
<B>morphword()</B> tries to find the base form
of <I>word </I> in the specified <I>pos </I>. This function is called by <B>morphstr()</B> for
each individual word in a collocation. Note that <B>morphword() </B> returns a
pointer to a static character buffer. A subsequent call to <B>morphword()
</B> will overwrite the string pointed to by a previous call. Users should
copy the returned string into a local buffer, or use the C library function
<B>strdup </B> to duplicate the returned string into a <I>malloc'd </I> buffer.
<H2><A NAME="sect3" HREF="#toc3">NOTES
</A></H2>
<B>morphinit()</B> is called by <B>wninit() </B> and is not intended to be called directly
by an application. Applications wishing to use WordNet and/or the morphological
functions must call <B>wninit() </B> at the start of the program. See <B><A HREF="wnutil.3WN.html">wnutil</B>(3WN)</A>
for more information. <P>
<I>origstr </I> may be either a word or a collocation formed
by joining individual words with underscore characters (<B>_ </B>). <P>
Usually only
<B>morphstr() </B> is called from applications, as it works on both words and
collocations. <P>
<I>pos </I> must be one of the following: <P>
<blockquote><B>1 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;NOUN <BR>
<B>2 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;VERB <BR>
<B>3 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE
<BR>
<B>4 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADVERB <BR>
<B>5 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE_SATELLITE <BR>
</blockquote>
<P>
If <FONT SIZE=-1><B>ADJECTIVE_SATELLITE </B></FONT>
is passed,
it is treated by <B>morphstr() </B> as <FONT SIZE=-1><B>ADJECTIVE. </B></FONT>
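<P>
As a rough sketch (not part of the original manual page), the calling
protocol described above looks like this in application code; the search
string is illustrative and error handling is minimal: <P>
<PRE>
#include &lt;stdio.h&gt;
#include "wn.h"

int main(void)
{
    char *base;

    if (wninit() != 0)        /* required before any Morphy call */
        return 1;

    /* First call passes the string; later calls pass NULL.
       pos 1 = NOUN, per the table above. */
    for (base = morphstr("axes", 1); base != NULL;
         base = morphstr(NULL, 1))
        printf("base form: %s\n", base);  /* static buffer: copy to keep */

    return 0;
}
</PRE>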
<H2><A NAME="sect4" HREF="#toc4">SEE ALSO </A></H2>
<B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="wnsearch.3WN.html">wnsearch</B>(3WN)</A>
,
<B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="morphy.7WN.html">morphy</B>(7WN)</A>
. <P>
<H2><A NAME="sect5" HREF="#toc5">WARNINGS </A></H2>
Passing an invalid part of speech will
result in a core dump. <P>
The WordNet database files must be open to use
<B>morphstr() </B> or <B>morphword() </B>. <P>
<H2><A NAME="sect6" HREF="#toc6">BUGS </A></H2>
Morphy will allow non-words to be converted
to words if they follow one of the detachment rules described in <B><A HREF="morphy.7WN.html">morphy</B>(7WN)</A>. For example,
it will happily convert <B>plantes </B> to <B>plants </B>. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">NOTES</A></LI>
<LI><A NAME="toc4" HREF="#sect4">SEE ALSO</A></LI>
<LI><A NAME="toc5" HREF="#sect5">WARNINGS</A></LI>
<LI><A NAME="toc6" HREF="#sect6">BUGS</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,221 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>MORPHY(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
morphy - discussion of WordNet's morphological processing
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION
</A></H2>
Although only base forms of words are usually stored in WordNet, searches
may be done on inflected forms. A set of morphology functions, Morphy,
is applied to the search string to generate a form that is present in
WordNet. <P>
Morphology in WordNet uses two types of processes to try to convert
the string passed into one that can be found in the WordNet database. There
are lists of inflectional endings, based on syntactic category, that can
be detached from individual words in an attempt to find a form of the
word that is in WordNet. There are also exception list files, one for
each syntactic category, in which a search for an inflected form is done.
Morphy tries to use these two processes in an intelligent manner to translate
the string passed into a base form found in WordNet. Morphy first checks
for exceptions, then uses the rules of detachment. The Morphy functions
are not independent of WordNet. After each transformation, WordNet is
searched for the resulting string in the syntactic category specified.
<P>
The Morphy functions are passed a string and a syntactic category. A
string is either a single word or a collocation. Since some words, such
as <B>axes </B>, can have more than one base form (<B>axe </B> and <B>axis </B>), Morphy works
in the following manner. The first time that Morphy is called with a specific
string, it returns a base form. For each subsequent call to Morphy made
with a <FONT SIZE=-1><B>NULL </B></FONT>
string argument, Morphy returns another base form. Whenever
Morphy cannot perform a transformation, whether on the first call for
a word or subsequent calls, <FONT SIZE=-1><B>NULL </B></FONT>
is returned. A transformation to a
valid English string will return <FONT SIZE=-1><B>NULL </B></FONT>
if the base form of the string
is not in WordNet. <P>
The morphological functions are found in the WordNet
library. See <B><A HREF="morph.3WN.html">morph</B>(3WN)</A>
for information on using these functions.
<H3><A NAME="sect2" HREF="#toc2">Rules
of Detachment </A></H3>
The following table shows the rules of detachment used by
Morphy. If a word ends with one of the suffixes, it is stripped from the
word and the corresponding ending is added. Then WordNet is searched for
the resulting string. No rules are applicable to adverbs. <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>POS </B> </TD> <TD ALIGN=CENTER><B>Suffix
</B> </TD> <TD ALIGN=CENTER><B>Ending </B> </TD> </TR>
<TR> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>"s" </TD> <TD ALIGN=LEFT>"" </TD> </TR>
<TR> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>"ses" </TD> <TD ALIGN=LEFT>"s" </TD> </TR>
<TR> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>"xes" </TD> <TD ALIGN=LEFT>"x" </TD>
</TR>
<TR> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>"zes" </TD> <TD ALIGN=LEFT>"z" </TD> </TR>
<TR> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>"ches" </TD> <TD ALIGN=LEFT>"ch" </TD> </TR>
<TR> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>"shes" </TD> <TD ALIGN=LEFT>"sh" </TD> </TR>
<TR> <TD ALIGN=LEFT>NOUN
</TD> <TD ALIGN=LEFT>"men" </TD> <TD ALIGN=LEFT>"man" </TD> </TR>
<TR> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>"ies" </TD> <TD ALIGN=LEFT>"y" </TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"s" </TD> <TD ALIGN=LEFT>"" </TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"ies" </TD> <TD ALIGN=LEFT>"y"
</TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"es" </TD> <TD ALIGN=LEFT>"e" </TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"es" </TD> <TD ALIGN=LEFT>"" </TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"ed" </TD> <TD ALIGN=LEFT>"e" </TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"ed"
</TD> <TD ALIGN=LEFT>"" </TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"ing" </TD> <TD ALIGN=LEFT>"e" </TD> </TR>
<TR> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>"ing" </TD> <TD ALIGN=LEFT>"" </TD> </TR>
<TR> <TD ALIGN=LEFT>ADJ </TD> <TD ALIGN=LEFT>"er" </TD> <TD ALIGN=LEFT>"" </TD> </TR>
<TR> <TD ALIGN=LEFT>ADJ </TD> <TD ALIGN=LEFT>"est"
</TD> <TD ALIGN=LEFT>"" </TD> </TR>
<TR> <TD ALIGN=LEFT>ADJ </TD> <TD ALIGN=LEFT>"er" </TD> <TD ALIGN=LEFT>"e" </TD> </TR>
<TR> <TD ALIGN=LEFT>ADJ </TD> <TD ALIGN=LEFT>"est" </TD> <TD ALIGN=LEFT>"e" </TD> </TR>
</TABLE>
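<P>
To make the table concrete, the following informal sketch (not WordNet's
actual implementation) applies a single detachment rule; a real application
would call the Morphy functions described in <B><A HREF="morph.3WN.html">morph</B>(3WN)</A>
instead: <P>
<PRE>
#include &lt;stdio.h&gt;
#include &lt;string.h&gt;

/* If word ends in suffix, replace the suffix with ending and write
   the candidate base form into out; return 1 on success. */
static int detach(const char *word, const char *suffix,
                  const char *ending, char *out, size_t outlen)
{
    size_t wlen = strlen(word), slen = strlen(suffix);

    if (wlen &lt;= slen || strcmp(word + wlen - slen, suffix) != 0)
        return 0;
    if (wlen - slen + strlen(ending) + 1 &gt; outlen)
        return 0;
    memcpy(out, word, wlen - slen);
    strcpy(out + wlen - slen, ending);
    return 1;
}

int main(void)
{
    char candidate[64];

    /* NOUN rule from the table: suffix "xes", ending "x". */
    if (detach("boxes", "xes", "x", candidate, sizeof candidate))
        printf("candidate base form: %s\n", candidate);   /* box */
    return 0;
}
</PRE>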
<H3><A NAME="sect3" HREF="#toc3">Exception Lists </A></H3>
There is one
exception list file for each syntactic category. The exception lists contain
the morphological transformations for strings that are not regular and
therefore cannot be processed in an algorithmic manner. Each line of an
exception list contains an inflected form of a word or collocation, followed
by one or more base forms. The list is kept in alphabetical order and
a binary search is used to find words in these lists. See <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
for
information on the format of the exception list files.
<H3><A NAME="sect4" HREF="#toc4">Single Words </A></H3>
In
general, single words are relatively easy to process. Morphy first looks
for the word in the exception list. If it is found the first base form
is returned. Subsequent calls with a <FONT SIZE=-1><B>NULL </B></FONT>
argument return additional
base forms, if present. A <FONT SIZE=-1><B>NULL </B></FONT>
is returned when there are no more base
forms of the word. <P>
If the word is not found in the exception list corresponding
to the syntactic category, an algorithmic process using the rules of detachment
looks for a matching suffix. If a matching suffix is found, a corresponding
ending is applied (sometimes this ending is a <FONT SIZE=-1><B>NULL </B></FONT>
string, so in effect
the suffix is removed from the word), and WordNet is consulted to see
if the resulting word is found in the desired part of speech.
<H3><A NAME="sect5" HREF="#toc5">Collocations
</A></H3>
As opposed to single words, collocations can be quite difficult to transform
into a base form that is present in WordNet. In general, only base forms
of words, even those comprising collocations, are stored in WordNet, such
as <B>attorney&nbsp;general </B>. Transforming the collocation <B>attorneys&nbsp;general </B>
is then simply a matter of finding the base forms of the individual words
comprising the collocation. This usually works for nouns; therefore, non-conforming
nouns, such as <B>customs&nbsp;duty </B>, are presently entered in the noun exception
list. <P>
Verb collocations that contain prepositions, such as <B>ask&nbsp;for&nbsp;it
</B>, are more difficult. As with single words, the exception list is searched
first. If the collocation is not found, special code in Morphy determines
whether a verb collocation includes a preposition. If it does, a function
is called to try to find the base form in the following manner. It is
assumed that the first word in the collocation is a verb and that the
last word is a noun. The algorithm then builds a search string with the
base forms of the verb and noun, leaving the remainder of the collocation
(usually just the preposition, but more words may be involved) in the
middle. For example, if passed <B>asking&nbsp;for&nbsp;it </B>, the database search would
be performed with <B>ask&nbsp;for&nbsp;it </B>, which is found in WordNet, and therefore
returned from Morphy. If a verb collocation does not contain a preposition,
then the base form of each word in the collocation is found and WordNet
is searched for the resulting string.
<H3><A NAME="sect6" HREF="#toc6">Hyphenation </A></H3>
Hyphenation also presents
special difficulties when searching WordNet. It is often a subjective decision
as to whether a word is hyphenated, joined as one word, or is a collocation
of several words, and which of the various forms are entered into WordNet.
When Morphy breaks a string into "words", it looks for both spaces and
hyphens as delimiters. It also looks for periods in strings and removes
them if an exact match is not found. A search for an abbreviation like
<B>oct. </B> returns the synset for <B>{&nbsp;October,&nbsp;Oct&nbsp;} </B>. Not every pattern of hyphenated
and collocated string is searched for properly, so it may be advantageous
to specify several search strings if the results of a search attempt seem
incomplete.
<H3><A NAME="sect7" HREF="#toc7">Special Processing for nouns ending with 'ful' </A></H3>
Morphy contains
code that searches for nouns ending with <B>ful </B> and performs a transformation
on the substring preceding it. It then appends 'ful' back onto the resulting
string and returns it. For example, if passed the noun <B>boxesful </B>, it will
return <B>boxful </B>.
<H2><A NAME="sect8" HREF="#toc8">BUGS </A></H2>
Since many noun collocations contain prepositions,
such as <B>line&nbsp;of&nbsp;products </B>, an algorithm similar to that used for verbs
should be written for nouns. In the present scheme, if Morphy is passed
<B>lines&nbsp;of&nbsp;products </B>, the search string becomes <B>line&nbsp;of&nbsp;product </B>, which
is not in WordNet. <P>
Morphy will allow non-words to be converted to words
if they follow one of the rules described above. For example, it will
happily convert <B>plantes </B> to <B>plants </B>.
<H2><A NAME="sect9" HREF="#toc9">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B>
</DT>
<DD>Base directory for WordNet. Default is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B>
</DT>
<DD>Directory in which the WordNet database has been installed. Default
is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect10" HREF="#toc10">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B>
</DT>
<DD>Base directory for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect11" HREF="#toc11">FILES
</A></H2>
<DL>
<DT><B><I>pos </I>.exc</B> </DT>
<DD>morphology exception lists </DD>
</DL>
<H2><A NAME="sect12" HREF="#toc12">SEE ALSO </A></H2>
<B><A HREF="wn.1WN.html">wn</B>(1WN)</A>
, <B><A HREF="wnb.1WN.html">wnb</B>(1WN)</A>
, <B><A HREF="binsrch.3WN.html">binsrch</B>(3WN)</A>
,
<B><A HREF="morph.3WN.html">morph</B>(3WN)</A>
, <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wninput.7WN.html">wninput</B>(7WN)</A>
. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">Rules of Detachment</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Exception Lists</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Single Words</A></LI>
<LI><A NAME="toc5" HREF="#sect5">Collocations</A></LI>
<LI><A NAME="toc6" HREF="#sect6">Hyphenation</A></LI>
<LI><A NAME="toc7" HREF="#sect7">Special Processing for nouns ending with 'ful'</A></LI>
</UL>
<LI><A NAME="toc8" HREF="#sect8">BUGS</A></LI>
<LI><A NAME="toc9" HREF="#sect9">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc10" HREF="#sect10">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc11" HREF="#sect11">FILES</A></LI>
<LI><A NAME="toc12" HREF="#sect12">SEE ALSO</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,318 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>PROLOGDB(5WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wn_pl - description of Prolog database files
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
The files
<B>wn_ </B><I>* </I><B>.pl </B> contain the WordNet database in a Prolog-readable format. A Prolog
interface to WordNet is not implemented. <P>
The Prolog database is very large
and may take many minutes to load into the Prolog workspace. A separate
file has been created for each WordNet relation, giving the user the ability
to load only those parts of the database that they are interested in. <P>
See
<B>FILES </B>, below, for a list of the database files and <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
and <B><A HREF="wninput.5WN.html">wninput</B>(5WN)<B></B></A>
for detailed descriptions of the various WordNet relations (referred to
as <I>operators </I> in this manual page).
<H3><A NAME="sect2" HREF="#toc2">File Format </A></H3>
Each prolog database file
contains information corresponding to the synsets and word senses contained
in the WordNet database. In the Prolog version of the database, the <I>synset_id
</I>s (defined below) are used as unique synset identifiers. <P>
Each line of
a file contains an operator that corresponds to a WordNet relation. All
lines with the same <I>operator </I> value are stored in the file <B>wn_ </B><I>operator
</I><B>.pl </B>. <P>
The general format of a line in a prolog database file is as follows:
<P>
<blockquote><I>operator<B>(<I>field1<B>,<I>&nbsp;&nbsp;...&nbsp;&nbsp;<B>,<I>fieldn<B>). </B></I></B></I></B></I></B></I> <BR>
</blockquote>
<P>
Each line contains the name of the
operator, followed by a left parenthesis, a comma-separated list of fields,
a right parenthesis, and a period. Note there are no spaces, and each
line is terminated with a newline character.
<H3><A NAME="sect3" HREF="#toc3">Operators </A></H3>
Each WordNet relation
is represented in a separate file by <I>operator </I> name. Some operators are
reflexive (i.e. the "reverse" relation is implicit). So, for example, if
<B>x </B> is a hypernym of <B>y </B>, <B>y </B> is necessarily a hyponym of <B>x </B>. In the Prolog
database, reflected pointers are usually implied for semantic relations.
<P>
Semantic relations are represented by a pair of <I>synset_id </I>s, in which
the first <I>synset_id </I> is generally the source of the relation and the second
is the target. If two pairs <I>synset_id </I><B>, </B><I>w_num </I> are present, the operator
represents a lexical relation between word forms. <P>
<B>s(<I>synset_id<B>,<I>w_num<B>,'<I>word<B>',<I>ss_type<B>,<I>sense_number<B>,<I>tag_count<B>).
</B></I></B></I></B></I></B></I></B></I></B></I></B><BR>
<blockquote>A <B>s </B> operator is present for every word sense in WordNet. In <B>wn_s.pl
</B>, <I>w_num </I> specifies the word number for <I>word </I> in the synset. </blockquote>
<P>
<B>g(<I>synset_id<B>,'(<I>gloss<B>)').
</B></I></B></I></B><BR>
<blockquote>The <B>g </B> operator specifies the gloss for a synset. </blockquote>
<P>
<B>hyp(<I>synset_id<B>,<I>synset_id<B>).
</B></I></B></I></B><BR>
<blockquote>The <B>hyp </B> operator specifies that the second synset is a hypernym of
the first synset. This relation holds for nouns and verbs. The reflexive
operator, hyponym, implies that the first synset is a hyponym of the second
synset. </blockquote>
<P>
<B>ent(<I>synset_id<B>,<I>synset_id<B>). </B></I></B></I></B><BR>
<blockquote>The <B>ent </B> operator specifies that the
second synset is an entailment of the first synset. This relation only holds
for verbs. </blockquote>
<P>
<B>sim(<I>synset_id<B>,<I>synset_id<B>). </B></I></B></I></B><BR>
<blockquote>The <B>sim </B> operator specifies that
the second synset is similar in meaning to the first synset. This means
that the second synset is a satellite of the first synset, which is the cluster
head. This relation only holds for adjective synsets contained in adjective
clusters. </blockquote>
<P>
<B>mm(<I>synset_id<B>,<I>synset_id<B>). </B></I></B></I></B><BR>
<blockquote>The <B>mm </B> operator specifies that the
second synset is a member meronym of the first synset. This relation only
holds for nouns. The reflexive operator, member holonym, can be implied.
</blockquote>
<P>
<B>ms(<I>synset_id<B>,<I>synset_id<B>). </B></I></B></I></B><BR>
<blockquote>The <B>ms </B> operator specifies that the second
synset is a substance meronym of the first synset. This relation only
holds for nouns. The reflexive operator, substance holonym, can be implied.
</blockquote>
<P>
<B>mp(<I>synset_id<B>,<I>synset_id<B>). </B></I></B></I></B><BR>
<blockquote>The <B>mp </B> operator specifies that the second
synset is a part meronym of the first synset. This relation only holds
for nouns. The reflexive operator, part holonym, can be implied. </blockquote>
<P>
<B>cs(<I>synset_id<B>,<I>synset_id<B>).
</B></I></B></I></B><BR>
<blockquote>The <B>cs </B> operator specifies that the second synset is a cause of the
first synset. This relation only holds for verbs. </blockquote>
<P>
<B>vgp(<I>synset_id<B>,<I>synset_id<B>).
</B></I></B></I></B><BR>
<blockquote>The <B>vgp </B> operator specifies verb synsets that are similar in meaning
and should be grouped together when displayed in response to a grouped
synset search. </blockquote>
<P>
<B>at(<I>synset_id<B>,<I>synset_id<B>). </B></I></B></I></B><BR>
<blockquote>The <B>at </B> operator defines the
attribute relation between noun and adjective synset pairs in which the
adjective is a value of the noun. For each pair, both relations are listed
(i.e. each <I>synset_id </I> is both a source and target). </blockquote>
<P>
<B>ant(<I>synset_id<B>,<I>w_num<B>,<I>synset_id<B>,<I>w_num<B>).
</B></I></B></I></B></I></B></I></B><BR>
<blockquote>The <B>ant </B> operator specifies antonymous <I>word </I>s. This is a lexical relation
that holds for all syntactic categories. For each antonymous pair, both
relations are listed (i.e. each <I>synset_id,w_num </I> pair is both a source and
target word). </blockquote>
<P>
<B>sa(<I>synset_id<B>,<I>w_num<B>,<I>synset_id<B>,<I>w_num<B>). </B></I></B></I></B></I></B></I></B><BR>
<blockquote>The <B>sa </B> operator
specifies that additional information about the first word can be obtained
by seeing the second word. This operator is only defined for verbs and
adjectives. There is no reflexive relation (i.e. it cannot be inferred that
the additional information about the second word can be obtained from
the first word). </blockquote>
<P>
<B>ppl(<I>synset_id<B>,<I>w_num<B>,<I>synset_id<B>,<I>w_num<B>). </B></I></B></I></B></I></B></I></B><BR>
<blockquote>The <B>ppl </B> operator
specifies that the adjective first word is a participle of the verb second
word. The reflexive operator can be implied. </blockquote>
<P>
<B>per(<I>synset_id<B>,<I>w_num<B>,<I>synset_id<B>,<I>w_num<B>).
</B></I></B></I></B></I></B></I></B><BR>
<blockquote>The <B>per </B> operator specifies two different relations based on the parts
of speech involved. If the first word is in an adjective synset, that
word pertains to either the noun or adjective second word. If the first
word is in an adverb synset, that word is derived from the adjective second
word. </blockquote>
<P>
<B>fr(<I>synset_id<B>,<I>f_num<B>,<I>w_num<B>). </B></I></B></I></B></I></B><BR>
<blockquote>The <B>fr </B> operator specifies a generic
sentence frame for one or all words in a synset. The operator is defined
only for verbs. </blockquote>
<H3><A NAME="sect4" HREF="#toc4">Field Definitions </A></H3>
A <I>synset_id </I> is a nine-byte field in
which the first byte defines the syntactic category of the synset and
the remaining eight bytes are a <I>synset_offset </I>, as defined in <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
,
indicating the byte offset in the <B>data. </B><I>pos </I> file that corresponds to the
syntactic category. <P>
The syntactic category is encoded as: <P>
<blockquote><B>1 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;NOUN <BR>
<B>2 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;VERB <BR>
<B>3 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE <BR>
<B>4 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADVERB <BR>
</blockquote>
<P>
<I>w_num </I>, if present, indicates which word
in the synset is being referred to. Word numbers are assigned to the <I>word
</I> fields in a synset, from left to right, beginning with 1. When used to
represent lexical WordNet relations <I>w_num </I> may be 0, indicating that the
relation holds for all words in the synset indicated by the preceding
<I>synset_id </I>. See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for a discussion of semantic and lexical
relations. <P>
<I>ss_type </I> is a one character code indicating the synset type:
<P>
<blockquote><B>n </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;NOUN <BR>
<B>v </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;VERB <BR>
<B>a </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE <BR>
<B>s </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE&nbsp;SATELLITE <BR>
<B>r </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADVERB <BR>
</blockquote>
<P>
<I>sense_number
</I> specifies the sense number of the word, within the part of speech encoded
in the <I>synset_id </I>, in the WordNet database. <P>
<I>word </I> is the ASCII text of
the word as entered in the synset by the lexicographer, with spaces replaced
by underscore characters (<B>_ </B>). The text of the word is case sensitive.
An adjective <I>word </I> is immediately followed by a syntactic marker if one
was specified in the lexicographer file. A syntactic marker is appended,
in parentheses, onto <I>word </I> without any intervening spaces. See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for a list of the syntactic markers for adjectives. <P>
Each synset has a
<I>gloss </I> that may contain a definition, one or more example sentences, or
both. Note that glosses are enclosed in single forward quotes and parentheses:&nbsp;&nbsp;<B>'(<I>gloss<B>)'
</B></I></B>. <P>
<I>f_num </I> specifies the generic sentence frame number for word <I>w_num </I> in
the synset indicated by <I>synset_id </I>. Note that when <I>w_num </I> is <B>0 </B>, the frame
number applies to all words in the synset. If non-zero, the frame applies
to that word in the synset. <P>
In WordNet, sense numbers are assigned as
described in <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
. <I>tag_count </I> is the number of times the sense was
tagged in the Semantic Concordances, and <B>0 </B> if it was not instantiated.
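<P>
Because a <I>synset_id </I> is simply a one-digit syntactic category followed
by an eight-digit <I>synset_offset </I>, it can be split arithmetically. This
is an informal sketch, not part of the original manual page, and the
<I>synset_id </I> value shown is hypothetical: <P>
<PRE>
#include &lt;stdio.h&gt;

int main(void)
{
    long synset_id = 102345678L;          /* hypothetical value */
    int  ss_type = (int)(synset_id / 100000000L);  /* 1=NOUN ... 4=ADVERB */
    long offset  = synset_id % 100000000L;         /* byte offset in data.pos */

    printf("category %d, synset_offset %08ld\n", ss_type, offset);
    return 0;
}
</PRE>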
<H2><A NAME="sect5" HREF="#toc5">NOTES </A></H2>
Since single forward quotes are used to enclose character strings,
single quote characters found in <I>word </I> and <I>gloss </I> fields are represented
as two adjacent single quote characters. <P>
The load time can be greatly
reduced by creating "object language" versions of the files, an option
that is supported by some implementations, such as Quintus Prolog.
<H2><A NAME="sect6" HREF="#toc6">ENVIRONMENT
VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet. Default is <B>/usr/local/WordNet-3.0
</B>. </DD>
</DL>
<H2><A NAME="sect7" HREF="#toc7">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B> </DT>
<DD>Base directory
for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect8" HREF="#toc8">FILES </A></H2>
All files are
in <B>WNHOME/prolog </B> on Unix platforms and <B>WNHome\prolog </B> on Windows platforms.
<DL>
<DT><B>wn_s.pl</B> </DT>
<DD>synset pointers </DD>
<DT><B>wn_g.pl</B> </DT>
<DD>gloss pointers </DD>
<DT><B>wn_hyp.pl</B> </DT>
<DD>hypernym pointers
</DD>
<DT><B>wn_ent.pl</B> </DT>
<DD>entailment pointers </DD>
<DT><B>wn_sim.pl</B> </DT>
<DD>similar pointers </DD>
<DT><B>wn_mm.pl</B> </DT>
<DD>member
meronym pointers </DD>
<DT><B>wn_ms.pl</B> </DT>
<DD>substance meronym pointers </DD>
<DT><B>wn_mp.pl</B> </DT>
<DD>part meronym
pointers </DD>
<DT><B>wn_cs.pl</B> </DT>
<DD>cause pointers </DD>
<DT><B>wn_vgp.pl</B> </DT>
<DD>grouped verb pointers </DD>
<DT><B>wn_at.pl</B>
</DT>
<DD>attribute pointers </DD>
<DT><B>wn_ant.pl</B> </DT>
<DD>antonym pointers </DD>
<DT><B>wn_sa.pl</B> </DT>
<DD>see also pointers
</DD>
<DT><B>wn_ppl.pl</B> </DT>
<DD>participle pointers </DD>
<DT><B>wn_per.pl</B> </DT>
<DD>pertainym pointers </DD>
<DT><B>wn_fr.pl</B> </DT>
<DD>frame
pointers </DD>
</DL>
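<P>
As a hedged illustration, the pointer files can be scanned with a few lines of code. The Python sketch below assumes one clause of the form <B>hyp( </B><I>synset_id </I><B>, </B><I>synset_id </I><B>) </B> per line in <B>wn_hyp.pl </B>; operators with other arities would need a different pattern:
<PRE>
import re

# Sketch: extract (synset_id, synset_id) pairs from wn_hyp.pl.
# Assumes one hyp(...) fact per line; adjust the regex for other operators.
HYP = re.compile(r"hyp\((\d{9}),(\d{9})\)\.")

def hypernym_pairs(path="wn_hyp.pl"):    # path relative to WNHOME/prolog
    with open(path) as f:
        for line in f:
            m = HYP.match(line)
            if m:
                yield int(m.group(1)), int(m.group(2))
</PRE>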
<H2><A NAME="sect9" HREF="#toc9">SEE ALSO </A></H2>
<B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
, <B><A HREF="wngroups.7WN.html">wngroups</B>(7WN)</A>
, <B><A HREF="wnpkgs.7WN.html">wnpkgs</B>(7WN)</A>
.
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">File Format</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Operators</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Field Definitions</A></LI>
</UL>
<LI><A NAME="toc5" HREF="#sect5">NOTES</A></LI>
<LI><A NAME="toc6" HREF="#sect6">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc7" HREF="#sect7">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc8" HREF="#sect8">FILES</A></LI>
<LI><A NAME="toc9" HREF="#sect9">SEE ALSO</A></LI>
</UL>
</BODY></HTML>


@ -0,0 +1,184 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>SENSEIDX(5WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
index.sense, sense.idx - WordNet's sense index
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
The WordNet
sense index provides an alternate method for accessing synsets and word
senses in the WordNet database. It is useful to applications that retrieve
synsets or other information related to a specific sense in WordNet, rather
than all the senses of a word or collocation. It can also be used with
tools like <B>grep </B> and Perl to find all senses of a word in one or more
parts of speech. A specific WordNet sense, encoded as a <I>sense_key </I>, can
be used as an index into this file to obtain its WordNet sense number,
the database byte offset of the synset containing the sense, and the number
of times it has been tagged in the semantic concordance texts. <P>
Concatenating
the <I>lemma </I> and <I>lex_sense </I> fields of a semantically tagged word (represented
in a <B>&lt;wf&nbsp; </B>...&nbsp;<B>&gt; </B> attribute/value pair) in a semantic concordance file, using
<B>% </B> as the concatenation character, creates the <I>sense_key </I> for that sense,
which can in turn be used to search the sense index file. <P>
A <I>sense_key
</I> is the best way to represent a sense in semantic tagging or other systems
that refer to WordNet senses. <I>sense_key </I>s are independent of WordNet sense
numbers and <I>synset_offset </I>s, which vary between versions of the database.
Using the sense index and a <I>sense_key </I>, the corresponding synset (via
the <I>synset_offset </I>) and WordNet sense number can easily be obtained. A
mapping from noun <I>sense_key </I>s in WordNet 1.6 to corresponding 2.0 <I>sense_key
</I>s is provided with version 2.0, and is described in <B><A HREF="sensemap.5WN.html">sensemap</B>(5WN)</A>
. <P>
See
<B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
for a thorough discussion of the WordNet database files.
<H3><A NAME="sect2" HREF="#toc2">File
Format </A></H3>
The sense index file lists all of the senses in the WordNet database
with each line representing one sense. The file is in alphabetical order,
fields are separated by one space, and each line is terminated with a
newline character. <P>
Each line is of the form: <P>
<blockquote><I>sense_key&nbsp;&nbsp;synset_offset&nbsp;&nbsp;sense_number&nbsp;&nbsp;tag_cnt
</I> </blockquote>
<P>
<I>sense_key </I> is an encoding of the word sense. Programs can construct
a sense key in this format and use it as a binary search key into the
sense index file. The format of a <I>sense_key </I> is described below. <P>
<I>synset_offset
</I> is the byte offset that the synset containing the sense is found at in
the database "data" file corresponding to the part of speech encoded in
the <I>sense_key </I>. <I>synset_offset </I> is an 8 digit, zero-filled decimal integer,
and can be used with <B><A HREF="fseek.3.html">fseek</B>(3)</A>
to read a synset from the data file. When
passed to the WordNet library function <B>read_synset() </B> along with the syntactic
category, a data structure containing the parsed synset is returned. <P>
<I>sense_number
</I> is a decimal integer indicating the sense number of the word, within
the part of speech encoded in <I>sense_key </I>, in the WordNet database. See
<B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
for information about how sense numbers are assigned. <P>
<I>tag_cnt
</I> represents the decimal number of times the sense is tagged in various
semantic concordance texts. A <I>tag_cnt </I> of <B>0 </B> indicates that the sense
has not been semantically tagged.
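<P>
Because every line holds exactly these four space-separated fields, and the file is sorted, a program can parse a line directly (or run a standard binary search over the sorted keys). The Python sketch below is illustrative only; the sample line and file path are assumptions, not taken from the database:
<PRE>
# Sketch: parse one index.sense line and seek to its synset.
# The sample line (the tag count especially) is hypothetical.
def parse_sense_line(line):
    sense_key, offset, sense_number, tag_cnt = line.split()
    return sense_key, int(offset), int(sense_number), int(tag_cnt)

key, offset, num, tags = parse_sense_line("dog%1:05:00:: 02084071 1 42")

with open("data.noun") as data:          # path assumed: WNSEARCHDIR/data.noun
    data.seek(offset)                    # synset_offset is a byte offset
    synset_line = data.readline()
</PRE>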
<H3><A NAME="sect3" HREF="#toc3">Sense Key Encoding </A></H3>
A <I>sense_key </I> is represented
as: <P>
<blockquote><I>lemma </I><B>% </B><I>lex_sense </I> </blockquote>
<P>
where <I>lex_sense </I> is encoded as: <P>
<blockquote><I>ss_type<B>:<I>lex_filenum<B>:<I>lex_id<B>:<I>head_word<B>:<I>head_id
</I></B></I></B></I></B></I></B></I> </blockquote>
<P>
<I>lemma </I> is the ASCII text of the word or collocation as found in the
WordNet database index file corresponding to <I>pos </I>. <I>lemma </I> is in lower case,
and collocations are formed by joining individual words with an underscore
(<B>_ </B>) character. <P>
<I>ss_type </I> is a one digit decimal integer representing the
synset type for the sense. See <FONT SIZE=-1><B>Synset Type </B></FONT>
below for a listing of the
numbers corresponding to each synset type. <P>
<I>lex_filenum </I> is a two digit
decimal integer representing the name of the lexicographer file containing
the synset for the sense. See <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
for the list of lexicographer
file names and their corresponding numbers. <P>
<I>lex_id </I> is a two digit decimal
integer that, when appended onto <I>lemma </I>, uniquely identifies a sense within
a lexicographer file. <I>lex_id </I> numbers usually start with <B>00 </B>, and are incremented
as additional senses of the word are added to the same file, although
there is no requirement that the numbers be consecutive or begin with
<B>00 </B>. Note that a value of <B>00 </B> is the default, and therefore is not present
in lexicographer files. Only non-default <I>lex_id </I> values must be explicitly
assigned in lexicographer files. See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for information on the
format of lexicographer files. <P>
<I>head_word </I> is only present if the sense
is in an adjective satellite synset. It is the lemma of the first word
of the satellite's head synset. <P>
<I>head_id </I> is a two digit decimal integer
that, when appended onto <I>head_word </I>, uniquely identifies the sense of
<I>head_word </I> within a lexicographer file, as described for <I>lex_id </I>. There
is a value in this field only if <I>head_word </I> is present.
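<P>
The fixed layout makes <I>sense_key </I>s easy to take apart. A minimal Python sketch, assuming the five <I>lex_sense </I> fields described above (for non-satellite senses the last two fields are empty, as noted in NOTES below):
<PRE>
# Sketch: split a sense_key into its documented components.
def parse_sense_key(sense_key):
    lemma, lex_sense = sense_key.split("%")
    ss_type, lex_filenum, lex_id, head_word, head_id = lex_sense.split(":")
    return {
        "lemma": lemma,                  # lower case; '_' joins collocations
        "ss_type": int(ss_type),         # 1-5, see Synset Type below
        "lex_filenum": int(lex_filenum),
        "lex_id": int(lex_id),
        "head_word": head_word or None,  # empty unless adjective satellite
        "head_id": int(head_id) if head_id else None,
    }

print(parse_sense_key("dog%1:05:00::")["ss_type"])   # 1 (noun)
</PRE>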
<H3><A NAME="sect4" HREF="#toc4">Synset Type </A></H3>
The
synset type is encoded as follows: <P>
<blockquote><B>1 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;NOUN <BR>
<B>2 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;VERB <BR>
<B>3 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE <BR>
<B>4 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADVERB
<BR>
<B>5 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE SATELLITE <BR>
</blockquote>
<H2><A NAME="sect5" HREF="#toc5">NOTES </A></H2>
For non-satellite senses the <I>head_word
</I> and <I>head_id </I> fields have no values; however, the field separator character
(<B>: </B>) is present.
<H2><A NAME="sect6" HREF="#toc6">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory
for WordNet. Default is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in
which the WordNet database has been installed. Default is <B>WNHOME/dict
</B>. </DD>
</DL>
<H2><A NAME="sect7" HREF="#toc7">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B> </DT>
<DD>Base directory
for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect8" HREF="#toc8">FILES </A></H2>
<DL>
<DT><B>index.sense</B> </DT>
<DD>sense
index </DD>
</DL>
<H2><A NAME="sect9" HREF="#toc9">SEE ALSO </A></H2>
<B><A HREF="binsrch.3WN.html">binsrch</B>(3WN)</A>
, <B><A HREF="wnsearch.3WN.html">wnsearch</B>(3WN)</A>
, <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
,
<B><A HREF="sensemap.5WN.html">sensemap</B>(5WN)</A>
, <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">File Format</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Sense Key Encoding</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Synset Type</A></LI>
</UL>
<LI><A NAME="toc5" HREF="#sect5">NOTES</A></LI>
<LI><A NAME="toc6" HREF="#sect6">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc7" HREF="#sect7">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc8" HREF="#sect8">FILES</A></LI>
<LI><A NAME="toc9" HREF="#sect9">SEE ALSO</A></LI>
</UL>
</BODY></HTML>


@ -0,0 +1,53 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>UNIQBEG(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
uniqbeg - unique beginners for noun hierarchies
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
All
of the WordNet noun synsets are organized into hierarchies, headed by
the unique beginner synset for <B>entity </B> in the file <B>noun.Tops </B>. <P>
<blockquote>{ entity
(that which is perceived or known or inferred to have its own <BR>
distinct
existence (living or nonliving)) } <BR>
<P>
</blockquote>
<H2><A NAME="sect2" HREF="#toc2">NOTES </A></H2>
The lexicographer files are
not included in the WordNet database package.
<H2><A NAME="sect3" HREF="#toc3">FILES </A></H2>
<DL>
<DT><B>noun.Tops</B> </DT>
<DD>unique beginners
for nouns </DD>
</DL>
<H2><A NAME="sect4" HREF="#toc4">SEE ALSO </A></H2>
<B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
, <B><A HREF="wnintro.7WN.html">wnintro</B>(7WN)</A>
, <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
.
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<LI><A NAME="toc2" HREF="#sect2">NOTES</A></LI>
<LI><A NAME="toc3" HREF="#sect3">FILES</A></LI>
<LI><A NAME="toc4" HREF="#sect4">SEE ALSO</A></LI>
</UL>
</BODY></HTML>


@ -0,0 +1,388 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WN(1WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wn - command line interface to WordNet lexical database
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS
</A></H2>
<B>wn </B> [ <I>searchstr </I> ] [ <B>-h </B>] [ <B>-g </B> ] [ <B>-a </B> ] [ <B>-l </B> ] [ <B>-o </B> ] [ <B>-s </B> ] [ <B>-n<I># </I></B> ] [
<I>search_option </I>... ]
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION </A></H2>
<B>wn() </B> provides a command line interface
to the WordNet database, allowing synsets and relations to be displayed
as formatted text. For each word, different searches are provided, based
on syntactic category and pointer types. Although only base forms of words
are usually stored in WordNet, users may search for inflected forms. A
morphological process is applied to the search string to generate a form
that is present in WordNet. <P>
The command line interface is often useful
when writing scripts to extract information from the WordNet database.
Post-processing of the output with various scripting tools can reformat
the results as desired.
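<P>
For example, a script can invoke <B>wn </B> and post-process its output; the Python sketch below (illustrative, using only options documented here, and assuming <B>wn </B> is on the command search path) captures the <B>-over </B> display for a word:
<PRE>
import subprocess

# Sketch: run `wn WORD -over` and capture the overview text.
def overview(word):
    result = subprocess.run(["wn", word, "-over"],
                            capture_output=True, text=True)
    return result.stdout

print(overview("dog")[:400])             # first part of the overview
</PRE>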
<H2><A NAME="sect3" HREF="#toc3">OPTIONS </A></H2>
<DL>
<DT><B>-h</B> </DT>
<DD>Print help text before search results.
</DD>
<DT><B>-g</B> </DT>
<DD>Display textual glosses associated with synsets. </DD>
<DT><B>-a</B> </DT>
<DD>Display lexicographer
file information. </DD>
<DT><B>-o</B> </DT>
<DD>Display synset offset of each synset. </DD>
<DT><B>-s</B> </DT>
<DD>Display each
word's sense numbers in synsets. </DD>
<DT><B>-l</B> </DT>
<DD>Display the WordNet copyright notice,
version number, and license. </DD>
<DT><B>-n<I># </I></B> </DT>
<DD>Perform search on sense number <I># </I> only.
</DD>
<DT><B>-over </B> </DT>
<DD>Display overview of all senses of <I>searchstr </I> in all syntactic categories.
</DD>
</DL>
<H3><A NAME="sect4" HREF="#toc4">Search Options </A></H3>
Note that the last letter of <I>search_option </I> generally
denotes the part of speech that the search applies to: <B>n </B> for nouns, <B>v
</B> for verbs, <B>a </B> for adjectives, and <B>r </B> for adverbs. Multiple searches may
be done for <I>searchstr </I> with a single command by specifying all the appropriate
search options. <P>
<DL>
<DT><B>-syns </B>(<I>n </I> | <I>v </I> | <I>a </I> | <I>r </I>) </DT>
<DD>Display synonyms and immediate
hypernyms of synsets containing <I>searchstr </I>. Synsets are ordered by estimated
frequency of use. For adjectives, if <I>searchstr </I> is in a head synset, the
cluster's satellite synsets are displayed in place of hypernyms. If <I>searchstr
</I> is in a satellite synset, its head synset is also displayed. </DD>
<DT><B>-simsv </B> </DT>
<DD>Display
verb synonyms and immediate hypernyms of synsets containing <I>searchstr
</I>. Synsets are grouped by similarity of meaning. </DD>
<DT><B>-ants </B>(<I>n </I> | <I>v </I> | <I>a </I> | <I>r </I>)
</DT>
<DD>Display synsets containing antonyms of <I>searchstr </I>. For adjectives, if <I>searchstr
</I> is in a head synset, <I>searchstr </I> has a direct antonym. The head synset
for the direct antonym is displayed along with the direct antonym's satellite
synsets. If <I>searchstr </I> is in a satellite synset, <I>searchstr </I> has an indirect
antonym via the head synset, which is displayed. </DD>
<DT><B>-faml </B>(<I>n </I> | <I>v </I> | <I>a </I> | <I>r </I>)
</DT>
<DD>Display familiarity and polysemy information for <I>searchstr </I>. </DD>
<DT><B>-hype </B>(<I>n </I>
| <I>v </I>) </DT>
<DD>Recursively display hypernym (superordinate) tree for <I>searchstr
</I> (<I>searchstr </I> <I>IS A KIND OF _____ </I> relation). </DD>
<DT><B>-hypo </B>(<I>n </I> | <I>v </I>) </DT>
<DD>Display immediate
hyponyms (subordinates) for <I>searchstr </I> (<I>_____ IS A KIND OF </I> <I>searchstr
</I> relation). </DD>
<DT><B>-tree </B>(<I>n </I> | <I>v </I>) </DT>
<DD>Display hyponym (subordinate) tree for <I>searchstr
</I>. This is a recursive search that finds the hyponyms of each hyponym. </DD>
<DT><B>-coor
</B>(<I>n </I> | <I>v </I>) </DT>
<DD>Display the coordinates (sisters) of <I>searchstr </I>. This search
prints the immediate hypernym for each synset that contains <I>searchstr
</I> and the hypernym's immediate hyponyms. </DD>
<DT><B>-deri </B>(<I>n </I> | <I>v </I>) </DT>
<DD>Display derivational
morphology links between noun and verb forms. </DD>
<DT><B>-domn </B>(<I>n </I> | <I>v </I> | <I>a </I> | <I>r </I>) </DT>
<DD>Display
domain that <I>searchstr </I> has been classified in. </DD>
<DT><B>-domt </B>(<I>n </I> | <I>v </I> | <I>a </I> | <I>r </I>) </DT>
<DD>Display
all terms classified as members of the <I>searchstr </I>'s domain. </DD>
<DT><B>-subsn</B> </DT>
<DD>Display
substance meronyms of <I>searchstr </I> (<I>HAS SUBSTANCE </I> relation). </DD>
<DT><B>-partn</B> </DT>
<DD>Display
part meronyms of <I>searchstr </I> (<I>HAS PART </I> relation). </DD>
<DT><B>-membn</B> </DT>
<DD>Display member
meronyms of <I>searchstr </I> (<I>HAS MEMBER </I> relation). </DD>
<DT><B>-meron</B> </DT>
<DD>Display all meronyms
of <I>searchstr </I> (<I>HAS PART, HAS MEMBER, HAS SUBSTANCE </I> relations). </DD>
<DT><B>-hmern</B>
</DT>
<DD>Display meronyms for <I>searchstr </I> tree. This is a recursive search that
prints all the meronyms of <I>searchstr </I> and all of its hypernyms. </DD>
<DT><B>-sprtn</B>
</DT>
<DD>Display <I>part of </I> holonyms of <I>searchstr </I> (<I>PART OF </I> relation). </DD>
<DT><B>-smemn</B> </DT>
<DD>Display
<I>member of </I> holonyms of <I>searchstr </I> (<I>MEMBER OF </I> relation). </DD>
<DT><B>-ssubn</B> </DT>
<DD>Display
<I>substance of </I> holonyms of <I>searchstr </I> (<I>SUBSTANCE OF </I> relation). </DD>
<DT><B>-holon</B> </DT>
<DD>Display
all holonyms of <I>searchstr </I> (<I>PART OF, MEMBER OF, SUBSTANCE OF </I> relations).
</DD>
<DT><B>-hholn</B> </DT>
<DD>Display holonyms for <I>searchstr </I> tree. This is a recursive search
that prints all the holonyms of <I>searchstr </I> and all of each holonym's holonyms.
</DD>
<DT><B>-entav</B> </DT>
<DD>Display entailment relations of <I>searchstr </I>. </DD>
<DT><B>-framv</B> </DT>
<DD>Display applicable
verb sentence frames for <I>searchstr </I>. </DD>
<DT><B>-causv</B> </DT>
<DD>Display <I>cause to </I> relations
of <I>searchstr </I>. </DD>
<DT><B> -pert </B>(<I>a </I> | <I>r </I>) </DT>
<DD>Display pertainyms of <I>searchstr </I>. </DD>
<DT><B> -attr </B>(<I>n
</I> | <I>a </I>) </DT>
<DD>Display adjective values for noun attribute, or noun attributes
of adjective values. </DD>
<DT><B>-grep </B>(<I>n </I> | <I>v </I> | <I>a </I> | <I>r </I>) </DT>
<DD>List compound words containing
<I>searchstr </I> as a substring. </DD>
</DL>
<H2><A NAME="sect5" HREF="#toc5">SEARCH RESULTS </A></H2>
The results of a search are
written to the standard output. For each search, the output consists of a
one-line description of the search, followed by the search results. <P>
All
searches other than <B>-over </B> list all senses matching the search results
in the following general format. Items enclosed in italicized square brackets
(<I>[&nbsp;...&nbsp;] </I>) may not be present. <P>
<blockquote>One line listing the number of senses matching
the search request. <P>
Each sense matching the search requested displayed
as follows: <P>
<tt> </tt>&nbsp;<tt> </tt>&nbsp;<B>Sense <I>n </I></B> <BR>
<tt> </tt>&nbsp;<tt> </tt>&nbsp;<I>[<B>{<I>synset_offset<B>}<I>] [<B>&lt;<I>lex_filename<B>&gt;<I>]&nbsp;&nbsp;word1[<B>#<I>sense_number][,&nbsp;&nbsp;word2...]
</I></B></I></B></I></B></I></B></I></B></I> <BR>
<P>
Where <I>n </I> is the sense number of the search word, <I>synset_offset </I> is
the byte offset of the synset in the <B>data.<I>pos </I></B> file corresponding to the
syntactic category, <I>lex_filename </I> is the name of the lexicographer file
that the synset comes from, <I>word1 </I> is the first word in the synset (note
that this is not necessarily the search word) and <I>sense_number </I> is the
WordNet sense number assigned to the preceding word. <I>synset_offset, lex_filename
</I>, and <I>sense_number </I> are generated when the <B>-o, -a, </B> and <B>-s </B> options, respectively,
are specified. <P>
The synsets matching the search requested are printed below
each sense's synset output described above. Each line of output is preceded
by a marker (usually <B>=&gt; </B>), then a synset, formatted as described above.
If a search traverses more than one level of the tree, then successive lines
are indented by spaces corresponding to their level in the hierarchy. When
the <B>-g </B> option is specified, synset glosses are displayed in parentheses
at the end of each synset. Each synset is printed on one line. <P>
Senses
are generally ordered from most to least frequently used, with the most
common sense numbered <B>1 </B>. Frequency of use is determined by the number
of times a sense is tagged in the various semantic concordance texts.
Senses that are not semantically tagged follow the ordered senses. Note
that this ordering is only an estimate based on usage in a small corpus.
<P>
Verb senses can be grouped by similarity of meaning, rather than ordered
by frequency of use. The <B>-simsv </B> search prints all senses that are close
in meaning together, with a line of dashes indicating the end of a group.
See <B><A HREF="wngroups.7WN.html">wngroups</B>(7WN)</A>
for a discussion of how senses are grouped. <P>
The <B>-over
</B> search displays an overview of all the senses of the search word in all
syntactic categories. The results of this search are similar to the <B>-syns
</B> search; however, no additional (e.g. hypernym) synsets are displayed, and
synset glosses are always printed. The senses are grouped by syntactic
category, and each synset is annotated as described above with <I>synset_offset
</I>, <I>lex_filename </I>, and <I>sense_number </I> as dictated by the <B>-o, -a, </B> and <B>-s </B> options.
The overview search also indicates how many of the senses in each syntactic
category are represented in the tagged texts. This is a way for the user
to determine whether a sense's sense number is based on semantic tagging
data, or was arbitrarily assigned. For each sense that has appeared in
such texts, the number of semantic tags to that sense is indicated in
parentheses after the sense number. <P>
If a search cannot be performed on
some senses of <I>searchstr </I>, the search results are headed by a string of
the form: <tt> </tt>&nbsp;<tt> </tt>&nbsp;X of Y senses of <I>searchstr </I> <BR>
<P>
The output of the <B>-deri </B> search
shows word forms that are morphologically related to <B>searchstr </B>. Each word
form pointed to from <I>searchstr </I> is displayed, preceded by <B>RELATED TO-&gt; </B>
and the syntactic category of the link, followed, on the next line, by
its synset. Printed after the word form is <B># </B><I>n </I> where <I>n </I> indicates the
WordNet sense number of the term pointed to. <P>
The <B>-domn </B> and <B>-domt </B> searches
show the domain that a synset has been classified in and, conversely,
all of the terms that have been assigned to a specific domain. A domain
is either a <B>TOPIC, </B> <B>REGION </B> or <B>USAGE, </B> as reflected in the specific pointer
character stored in the database, and displayed in the output. A <B>-domn
</B> search on a term shows the domain, if any, that each synset containing
<I>searchstr </I> has been classified in. The output display shows the domain
type (<B>TOPIC, </B> <B>REGION </B> or <B>USAGE </B>), followed by the syntactic category of
the domain synset and the terms in the synset. Each term is followed by
<B># </B><I>n </I> where <I>n </I> indicates the WordNet sense number of the term. The converse
search, <B>-domt </B>, shows all of the synsets that have been placed into the
domain <I>searchstr </I>, with analogous markers. <P>
When <B>-framv </B> is specified,
sample illustrative sentences and generic sentence frames are displayed.
If a sample sentence is found, the base form of <I>search </I> is substituted
into the sentence, and it is printed below the synset, preceded with the
<B>EX: </B> marker. When no sample sentences are found, the generic sentence
frames are displayed. Sentence frames that are acceptable for all words
in a synset are preceded by the marker <B>*&gt; </B>. If a frame is acceptable for
the search word only, it is preceded by the marker <B>=&gt; </B>. <P>
Search results
for adjectives are slightly different from those for other parts of speech.
When an adjective is printed, its direct antonym, if it has one, is also
printed in parentheses. When <I>searchstr </I> is in a head synset, all of the
head synset's satellites are also displayed. The position of an adjective
in relation to the noun may be restricted to the <I>prenominal </I>, <I>postnominal
</I> or <I>predicative </I> position. Where present, these restrictions are noted
in parentheses. <P>
When an adjective is a participle of a verb, the output
indicates the verb and displays its synset. <P>
When an adverb is derived
from an adjective, the specific adjectival sense on which it is based
is indicated. <P>
The morphological transformations performed by the search
code may result in more than one word to search for. WordNet automatically
performs the requested search on all of the strings and returns the results
grouped by word. For example, the verb <B>saw </B> is both the present tense
of <B>saw </B> and the past tense of <B>see </B>. When passed <I>searchstr </I> <B>saw </B>, WordNet
performs the desired search first on <B>saw </B> and next on <B>see </B>, returning
the list of <B>saw </B> senses and search results, followed by those for <B>see
</B>. </blockquote>
<H2><A NAME="sect6" HREF="#toc6">EXIT STATUS </A></H2>
<B>wn() </B> normally exits with the number of senses displayed.
If <I>searchstr </I> is not found in WordNet, it exits with <B>0 </B>. <P>
If the WordNet
database cannot be opened, an error message is displayed and <B>wn() </B> exits
with <B>-1 </B>.
<H2><A NAME="sect7" HREF="#toc7">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet.
Default is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in which the
WordNet database has been installed. Default is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect8" HREF="#toc8">REGISTRY
(WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B> </DT>
<DD>Base directory for
WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect9" HREF="#toc9">FILES </A></H2>
<DL>
<DT><B>index.<I>pos </I></B> </DT>
<DD>database
index files </DD>
<DT><B>data.<I>pos </I></B> </DT>
<DD>database data files </DD>
<DT><B>*.vrb</B> </DT>
<DD>files of sentences illustrating
the use of verbs </DD>
<DT><B><I>pos </I>.exc</B> </DT>
<DD>morphology exception lists </DD>
</DL>
<H2><A NAME="sect10" HREF="#toc10">SEE ALSO </A></H2>
<B><A HREF="wnintro.1WN.html">wnintro</B>(1WN)</A>
,
<B><A HREF="wnb.1WN.html">wnb</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
, <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
<B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
,<B></B> <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
,
<B><A HREF="morphy.7WN.html">morphy</B>(7WN)</A>
, <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
, <B><A HREF="wngroups.7WN.html">wngroups</B>(7WN)</A>
.
<H2><A NAME="sect11" HREF="#toc11">BUGS </A></H2>
Please report bugs to wordnet@princeton.edu.
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">OPTIONS</A></LI>
<UL>
<LI><A NAME="toc4" HREF="#sect4">Search Options</A></LI>
</UL>
<LI><A NAME="toc5" HREF="#sect5">SEARCH RESULTS</A></LI>
<LI><A NAME="toc6" HREF="#sect6">EXIT STATUS</A></LI>
<LI><A NAME="toc7" HREF="#sect7">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc8" HREF="#sect8">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc9" HREF="#sect9">FILES</A></LI>
<LI><A NAME="toc10" HREF="#sect10">SEE ALSO</A></LI>
<LI><A NAME="toc11" HREF="#sect11">BUGS</A></LI>
</UL>
</BODY></HTML>


@ -0,0 +1,524 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNB(1WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnb - WordNet window-based browser interface
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<P>
<B>wnb </B>
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION
</A></H2>
<B>wnb() </B> provides a window-based interface for browsing the WordNet database,
allowing synsets and relations to be displayed as formatted text. For
each search word, different searches are available based on syntactic
category and information available in the database. <P>
<B>wnb </B> is written
in Tcl/Tk, which is available for Unix and Windows platforms. This allows
the same code to work on all supported WordNet platforms without modification.
<H2><A NAME="sect3" HREF="#toc3">WNB WINDOWS </A></H2>
<B>wnb() </B> was developed with the philosophy that only those
searches and buttons that are applicable at the current time are displayed.
As a result, the appearance of the interface changes as it is used. Use
the standard windowing system mouse functions to open and close the WordNet
Browser Window, move the window, and change its size. <P>
The WordNet Browser
Window contains the following areas, from top to bottom:
<DL>
<DT>Menubar </DT>
<DD>A menubar
runs along the top of the browser window with pulldown menus and button
entitled <B>File </B>, <B>History </B>, <B>Options </B>, and <B>Help </B>. </DD>
<DT>Search Word Entry </DT>
<DD>Below
the Menubar is a line for entering the search word. A search word can
be a single word, hyphenated string, or a collocation. Case is ignored.
Although only uninflected forms of words are usually stored in WordNet,
users may search for inflected forms. WordNet's morphological processor
finds the base form automatically. </DD>
<DT>Search Selection </DT>
<DD>Below the Search Word
Entry line is an area for selecting the search type and senses to search.
Until a search word is entered this area is blank. After a search word
is entered, buttons appear corresponding to each syntactic category (<B>Noun
</B>, <B>Verb </B>, <B>Adjective </B>, <B>Adverb </B>) in which the search string is defined in
WordNet. </DD>
</DL>
<P>
At the right edge of the Search Selection line is a box for
entering sense numbers. When this box is empty, search results for all
senses of the search word that match the search type are displayed. The
search may be restricted to one or more specific senses by entering a
comma or space separated list of sense numbers in the <B>Senses </B> box. These
sense numbers remain in effect until either the user changes or deletes
them, or a new search word is entered.
<DL>
<DT>Results Window </DT>
<DD>Most of the browser
window consists of a large text buffer for displaying the results of WordNet
searches. Horizontal and vertical scroll bars are present for scrolling
through the output. </DD>
<DT>Status Line </DT>
<DD>A status line is at the bottom of the
browser window. When search results are displayed in the Results Window,
this status line reflects the type of search selected. When there is no
search word entered, you are prompted to <B>"Enter search word and press
return." </B> If the search word entered is not in WordNet, the message <B>"Sorry,
no matches found." </B> is displayed. </DD>
</DL>
<H2><A NAME="sect4" HREF="#toc4">SEARCHING THE DATABASE </A></H2>
The WordNet browser
navigates through WordNet in two steps. First a search word is entered
and an overview of all the senses of the word in all syntactic categories
is displayed in the Results Window. The senses are grouped by syntactic
category, and each synset is annotated as described above with <I>synset_offset
</I>, <I>lex_filename </I>, and <I>sense_number </I> as dictated by the advanced search
options set. The overview search also indicates how many of the senses
in each syntactic category are represented in the tagged texts. This is
a way for the user to determine whether a sense's sense number is based
on semantic tagging data, or was arbitrarily assigned. For each sense
that has appeared in such texts, the number of semantic tags to that sense
is indicated in parentheses after the sense number. <P>
Then, within a syntactic
category, a specific search is selected. The desired search is performed
and the search results are displayed in the Results Window. Additional
searches on the same word can be performed, or a new search word can be
entered. <P>
To enter a search word, click the mouse in the horizontal box
labeled <B>Search Word </B>, type a single word, hyphenated string, or collocation
and press <FONT SIZE=-1><B>RETURN. </B></FONT>
<P>
<B>wnb() </B> responds by making a set of Part of Speech
buttons appear in the Search Selection line. Each button corresponds to
a syntactic category in which the search string is defined in WordNet.
At the same time, an Overview of the synsets for all senses of the search
word is displayed in the Results Window. The Overview includes the gloss
for each synset and also indicates which of the senses have appeared in
the semantically tagged texts. For each sense that has appeared in such
texts, the number of semantic tags to that sense is indicated in parentheses
after the sense number. <P>
The pulldown menus in the Search Selection line
list all of the WordNet searches that can be performed for the search
word in that part of speech. To select a search, highlight it by dragging
the mouse to it, and release the mouse while it is highlighted. Drag the
mouse outside of the pulldown list and release to hide the menu without
making a selection. Dragging the mouse across the Part of Speech buttons
displays the available searches for each syntactic category. <P>
To restrict
a search to one or more senses within a syntactic category, enter a comma
or space separated list of sense numbers in the <B>Senses </B> box before selecting
a search. <P>
After a search is selected, <B>wnb() </B> performs the search on the
WordNet database and displays the formatted results in the Results Window.
Whenever search results are displayed, a button entitled <B>Redisplay Overview
</B> is present at the right edge of the Search Word Entry line. Clicking
on this button redisplays the Overview of all synsets for the search word
in the Results Window.
<H3><A NAME="sect5" HREF="#toc5">Changing the Search Word </A></H3>
A new search word can
be entered at any time by moving to the Search Word Entry box, if necessary
highlighting it by clicking, erasing the old string, typing a new one
and pressing <FONT SIZE=-1><B>RETURN. </B></FONT>
The <B>Senses </B> box is cleared if necessary, the Part
of Speech buttons applicable to the new search word appear, and the Overview
for the new search word is displayed. <P>
The middle mouse button can also
be used to select a new search word by placing the mouse over any word
in the Results Window and clicking. The selected word will replace the
text in the Search Word Entry box, and the overview for that word will
automatically be displayed. <P>
To select a new search string collocation
from text in the Results Window, highlight the text with the mouse and
press <FONT SIZE=-1><B>CONTROL-S. </B></FONT>
<P>
<H3><A NAME="sect6" HREF="#toc6">Interrupting a Search </A></H3>
When a search is in progress
the message <B>"Searching...(press escape to abort)" </B> is displayed in the Status
Line. Note that most searches return very quickly, so this message isn't
noticeable. As indicated, pressing the <FONT SIZE=-1><B>ESCAPE </B></FONT>
key will interrupt the
search. The results of the search obtained before the time the search
was interrupted are displayed in the Results Window.
<H2><A NAME="sect7" HREF="#toc7">MENUS </A></H2>
<H3><A NAME="sect8" HREF="#toc8">File Menu
</A></H3>
<blockquote>
<DL>
<DT>Find keywords by substring </DT>
<DD>Display a popup window for specifying a search
of WordNet for words or collocations that contain a specific substring.
If a search word is currently entered in the <B>Search Word </B> box, it is
used as the substring to search for by default. The Substring Search Window
contains a box for entering a substring, a pulldown menu to its right
for specifying the part of speech to search, a large area for displaying
the search results, and action buttons at the bottom entitled <B>Search </B>,
<B>Save </B>, <B>Print </B>, and <B>Dismiss </B>. </DD>
</DL>
<P>
Once a substring is entered and a part of speech
selected, clicking on the <B>Search </B> button causes a search to be done for
all words and collocations in WordNet, in that syntactic category, that
contain the substring according to the following criteria: <P>
1. The substring
can appear at the beginning or end of a word, hyphenated string, or collocation.
<P>
2. The substring can appear in the middle of a hyphenated string or collocation,
but only delimited on both sides by spaces or hyphens. <P>
The search results
are displayed in the large buffer. Clicking on an item from the search
results list causes <B>wnb() </B> to automatically enter that word in the <B>Search
Word </B> box of the WordNet Browser Window and perform the Overview search.
<P>
Clicking the <B>Save </B> button generates a popup dialog for specifying a filename
to save the substring search results to. Clicking the <B>Print </B> button generates
a popup dialog in which a print command can be specified. <P>
Selecting <B>Dismiss
</B> closes the Substring Search Window.
<DL>
<DT>Save current display </DT>
<DD>Display a popup
dialog for specifying a filename to save the current Results Window contents
to. </DD>
<DT>Print current display </DT>
<DD>Display a popup dialog in which to specify a
print command to which the current Results Window contents can be piped.
Note - this option does not exist in the Windows version. </DD>
<DT>Clear current
display </DT>
<DD>Clear the <B>Search Word </B> and <B>Senses </B> boxes, and Results Window. </DD>
<DT>Exit
</DT>
<DD>Does what you would expect. </DD>
</DL>
</blockquote>
<H3><A NAME="sect9" HREF="#toc9">History </A></H3>
This pulldown menu contains a list
of the last searches performed. Selecting an item from this list performs
that search again. The maximum number of searches stored in the list can
be adjusted from the <B>Options </B> menu. The default is 10.
<H3><A NAME="sect10" HREF="#toc10">Options </A></H3>
<blockquote>
<DL>
<DT>Show help
with each search </DT>
<DD>When this checkbox is selected search results are preceded
by some explanatory text about the type of search selected. This is off
by default. </DD>
<DT>Show descriptive gloss </DT>
<DD>When this checkbox is selected, synset
glosses are displayed in all search results. This is set by default. Note
that glosses are always displayed in the Overview. </DD>
<DT>Wrap Lines </DT>
<DD>When this
checkbox is selected, lines in the Results Window that are wider than
the window are automatically wrapped. This is set by default. If not selected,
a horizontal scroll bar is present if any lines are longer than the width
of the window. </DD>
<DT>Set advanced search options... </DT>
<DD>Selecting this item displays
a popup window for setting the following search options: <B>Lexical file
information; Synset location in database file; Sense number </B>. Choices
for each are: </DD>
</DL>
<P>
<tt> </tt>&nbsp;<tt> </tt>&nbsp;<B>Don't show </B> (default) <BR>
<tt> </tt>&nbsp;<tt> </tt>&nbsp;<B>Show with searches </B> <BR>
<tt> </tt>&nbsp;<tt> </tt>&nbsp;<B>Show with searches
and overview </B> <BR>
<P>
When lexical file information is shown, the name of the
lexicographer file is printed before each synset, enclosed in angle brackets
(<B>&lt;&nbsp;&nbsp;<I>...<B>&nbsp;&nbsp;&gt; </B></I></B>). When both lexical file information and synset location information
are displayed, the synset location information appears first. If within
one lexicographer file more than one sense of a word is entered, an integer
<I>lex_id </I> is appended onto all but one of the word's instances to uniquely
identify it. In each synset, each word having a non-zero <I>lex_id </I> is printed
with the <I>lex_id </I> value printed immediately following the word. If both
lexicographer information and sense numbers are displayed, <I>lex_id </I>s, if
present, precede sense numbers. <P>
When synset location is shown, the byte
offset of the synset in the database "data" file corresponding to the
syntactic category of the synset is printed before each synset, enclosed
in curly braces (<B>{&nbsp;&nbsp;<I>...<B>&nbsp;&nbsp;} </B></I></B>). When both lexical file information and synset
location information are displayed, the synset location information appears
first. <P>
When sense numbers are shown, the sense number of each word in
each synset is printed immediately after the word, and is preceded by
a number sign (<B># </B>).
<DL>
<DT>Set maximum history length... </DT>
<DD>Display a popup dialog in
which the maximum number of previous searches to be kept on the History
list can be set. </DD>
<DT>Set font...&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; </DT>
<DD>Display a popup window for setting
the font (typeface) and font size to use for the Results Window. Choices
for typeface are: <B>Courier </B>, <B>Helvetica </B>, and <B>Times </B> (default). Font size
can be <B>small </B>, <B>medium </B> (default), or <B>large </B>. </DD>
<DT>Save current options as default
</DT>
<DD>Save the currently set options. Next time the browser is started, these
options will be used as the user defaults. </DD>
</DL>
</blockquote>
<H3><A NAME="sect11" HREF="#toc11">Help </A></H3>
<blockquote>
<DL>
<DT>Help on using the WordNet
browser </DT>
<DD>Display this manual page. </DD>
<DT>Help on WordNet terminology </DT>
<DD>Display the
<B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
manual page. </DD>
<DT>Display the WordNet license </DT>
<DD>Display the WordNet
copyright notice and license agreement. </DD>
<DT>About the WordNet browser </DT>
<DD>Information
about this application. </DD>
</DL>
</blockquote>
<H2><A NAME="sect12" HREF="#toc12">SHORCUTS </A></H2>
Clicking on any word in the Results
Window while holding down the <FONT SIZE=-1><B>SHIFT </B></FONT>
key on the keyboard causes the
browser to replace <B>Search Word </B> with the word and display its Overview
and available searches. Clicking on any word in the Results Window with
the middle mouse button does the same thing. <P>
Pressing the <FONT SIZE=-1><B>CONTROL-S </B></FONT>
keys
causes the browser to do as above on the text that is currently highlighted.
Under Unix, this will work even if the highlighted text is in another
window. This works on hyphenated strings and collocations, as well as
individual words. <P>
Pressing the <FONT SIZE=-1><B>CONTROL-G </B></FONT>
keys displays the Substring
Search Window. <P>
<H2><A NAME="sect13" HREF="#toc13">SEARCH RESULTS </A></H2>
The results of a search of the WordNet
database are displayed in the Results Window. Horizontal and vertical
scroll bars are present for scrolling through the search results. <P>
All
searches other than the Overview list all senses matching the search results
in the following general format. Items enclosed in italicized square brackets
(<I>[&nbsp;...&nbsp;] </I>) may not be present. <P>
If a search cannot be performed on some senses
of <I>searchstr </I>, the search results are headed by a string of the form:
<tt> </tt>&nbsp;<tt> </tt>&nbsp;X of Y senses of <I>searchstr </I> <BR>
<P>
<blockquote>One line listing the number of senses matching
the search selected. <P>
Each sense matching the search selected displayed
as follows: <P>
<tt> </tt>&nbsp;<tt> </tt>&nbsp;<B>Sense <I>n </I></B> <BR>
<tt> </tt>&nbsp;<tt> </tt>&nbsp;<I>[<B>{<I>synset_offset<B>}<I>] [<B>&lt;<I>lex_filename<B>&gt;<I>]&nbsp;&nbsp;word1[<B>#<I>sense_number][,&nbsp;&nbsp;word2...]
</I></B></I></B></I></B></I></B></I></B></I> <BR>
<P>
Where <I>n </I> is the sense number of the search word, <I>synset_offset </I> is
the byte offset of the synset in the <B>data.<I>pos </I></B> file corresponding to the
syntactic category, <I>lex_filename </I> is the name of the lexicographer file
that the synset comes from, <I>word1 </I> is the first word in the synset (note
that this is not necessarily the search word) and <I>sense_number </I> is the
WordNet sense number assigned to the preceding word. <I>synset_offset </I>, <I>lex_filename
</I>, and <I>sense_number </I> are generated if the appropriate Options are specified.
<P>
The synsets matching the search selected are printed below each sense's
synset output described above. Each line of output is preceded by a marker
(usually <B>=&gt; </B>), then a synset, formatted as described above. If a search
traverses more than one level of the tree, then successive lines are indented
by spaces corresponding to their level in the hierarchy. Glosses are displayed
in parentheses at the end of each synset if the appropriate Option is
set. Each synset is printed on one line. <P>
Senses are ordered from most
to least frequently used, with the most common sense numbered <B>1 </B>. Frequency
of use is determined by the number of times a sense is tagged in the various
semantic concordance texts. Senses that are not semantically tagged follow
the ordered senses. Note that this ordering is only an estimate based on
usage in a small corpus. <P>
Verb senses can be grouped by similarity of meaning,
rather than ordered by frequency of use. When the <B>"Synonyms, grouped by
similarity" </B> search is selected, senses that are close in meaning are
printed together, with a line of dashes indicating the end of a group.
See <B><A HREF="wngroups.7WN.html">wngroups</B>(7WN)</A>
for a discussion of how senses are grouped. <P>
The output
of the <B>"Derivationally Related Forms" </B> search shows word forms that are
morphologically related to <B>searchstr </B>. Each word form pointed to from <I>searchstr
</I> is displayed, preceded by <B>RELATED TO-&gt; </B> and the syntactic category of the
link, followed, on the next line, by its synset. Printed after the word
form is <B># </B><I>n </I> where <I>n </I> indicates the WordNet sense number of the term pointed
to. <P>
The <B>"Domain" </B> and <B>"Domain Terms" </B> searches show the domain that a
synset has been classified in and, conversely, all of the terms that have
been assigned to a specific domain. A domain is either a <B>TOPIC, </B> <B>REGION
</B> or <B>USAGE, </B> as reflected in the specific pointer character stored in the
database, and displayed in the output. A <B>Domain </B> search on a term shows
the domain, if any, that each synset containing <I>searchstr </I> has been classified
in. The output display shows the domain type (<B>TOPIC, </B> <B>REGION </B> or <B>USAGE
</B>), followed by the syntactic category of the domain synset and the terms
in the synset. Each term is followed by <B># </B><I>n </I> where <I>n </I> indicates the WordNet
sense number of the term. The converse search, <B>Domain Terms </B>, shows all
of the synsets that have been placed into the domain <I>searchstr </I>, with
analogous markers. <P>
When the <B>"Sentence Frames" </B> search is specified, sample
illustrative sentences and generic sentence frames are displayed. If a
sample sentence is found, the base form of the search word is substituted
into the sentence, and it is printed below the synset, preceded with the
<B>EX: </B> marker. When no sample sentences are found, the generic sentence
frames are displayed. Sentence frames that are acceptable for all words
in a synset are preceded by the marker <B>*&gt; </B>. If a frame is acceptable for
the search word only, it is preceded by the marker <B>=&gt; </B>. <P>
Search results
for adjectives are slightly different from those for other parts of speech.
When an adjective is printed, its direct antonym, if it has one, is also
printed in parentheses. When the search word is in a head synset, all
of the head synset's satellites are also displayed. The position of an
adjective in relation to the noun may be restricted to the <I>prenominal
</I>, <I>postnominal </I> or <I>predicative </I> position. Where present, these restrictions
are noted in parentheses. <P>
When an adjective is a participle of a verb,
the output indicates the verb and displays its synset. <P>
When an adverb
is derived from an adjective, the specific adjectival sense on which it
is based is indicated. <P>
The morphological transformations performed by
the search code may result in more than one word to search for. <B>wnb()
</B> automatically performs the requested search on all of the strings and
returns the results grouped by word. For example, the verb <B>saw </B> is both
the present tense of <B>saw </B> and the past tense of <B>see </B>. When there is more
than one word to search for, search results are grouped by word. </blockquote>
<H2><A NAME="sect14" HREF="#toc14">DIAGNOSTICS
</A></H2>
If the WordNet database files cannot be opened, error messages are displayed.
This is usually corrected by setting the environment variables described
below to the proper location of the WordNet database for your installation.
<H2><A NAME="sect15" HREF="#toc15">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet. Default
is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in which the WordNet database
has been installed. Default is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect16" HREF="#toc16">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B>
</DT>
<DD>Base directory for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
<DT><B>HKEY_CURRENT_USER\SOFTWARE\WordNet\3.0\wnres</B>
</DT>
<DD>User's default browser options. </DD>
</DL>
<H2><A NAME="sect17" HREF="#toc17">FILES </A></H2>
<DL>
<DT><B>index.<I>pos </I></B> </DT>
<DD>database index files
</DD>
<DT><B>data.<I>pos </I></B> </DT>
<DD>database data files </DD>
<DT><B>*.vrb</B> </DT>
<DD>files of sentences illustrating the
use of verbs </DD>
<DT><B><I>pos </I>.exc</B> </DT>
<DD>morphology exception lists </DD>
</DL>
<H2><A NAME="sect18" HREF="#toc18">SEE ALSO </A></H2>
<B><A HREF="wnintro.1WN.html">wnintro</B>(1WN)</A>
,
<B><A HREF="wn.1WN.html">wn</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
, <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
, <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
,<B></B> <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
,
<B><A HREF="morphy.7WN.html">morphy</B>(7WN)</A>
,<B></B> <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
, <B><A HREF="wngroups.7WN.html">wngroups</B>(7WN)</A>
.
<H2><A NAME="sect19" HREF="#toc19">BUGS </A></H2>
Please report bugs to
wordnet@princeton.edu. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">WNB WINDOWS</A></LI>
<LI><A NAME="toc4" HREF="#sect4">SEARCHING THE DATABASE</A></LI>
<UL>
<LI><A NAME="toc5" HREF="#sect5">Changing the Search Word</A></LI>
<LI><A NAME="toc6" HREF="#sect6">Interrupting a Search</A></LI>
</UL>
<LI><A NAME="toc7" HREF="#sect7">MENUS</A></LI>
<UL>
<LI><A NAME="toc8" HREF="#sect8">File Menu</A></LI>
<LI><A NAME="toc9" HREF="#sect9">History</A></LI>
<LI><A NAME="toc10" HREF="#sect10">Options</A></LI>
<LI><A NAME="toc11" HREF="#sect11">Help</A></LI>
</UL>
<LI><A NAME="toc12" HREF="#sect12">SHORCUTS</A></LI>
<LI><A NAME="toc13" HREF="#sect13">SEARCH RESULTS</A></LI>
<LI><A NAME="toc14" HREF="#sect14">DIAGNOSTICS</A></LI>
<LI><A NAME="toc15" HREF="#sect15">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc16" HREF="#sect16">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc17" HREF="#sect17">FILES</A></LI>
<LI><A NAME="toc18" HREF="#sect18">SEE ALSO</A></LI>
<LI><A NAME="toc19" HREF="#sect19">BUGS</A></LI>
</UL>
</BODY></HTML>


@ -0,0 +1,398 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNDB(5WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
index.noun, data.noun, index.verb, data.verb, index.adj, data.adj, index.adv,
data.adv - WordNet database files <P>
noun.exc, verb.exc, adj.exc, adv.exc - morphology
exception lists <P>
sentidx.vrb, sents.vrb - files used by search code to display
sentences illustrating the use of some specific verbs
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
For
each syntactic category, two files are needed to represent the contents
of the WordNet database - <B>index. </B><I>pos </I> and <B>data. </B><I>pos </I>, where <I>pos </I> is <B>noun
</B>, <B>verb </B>, <B>adj </B> and <B>adv </B>. The other auxiliary files are used by the WordNet
library's searching functions and are needed to run the various WordNet
browsers. <P>
Each index file is an alphabetized list of all the words found
in WordNet in the corresponding part of speech. On each line, following
the word, is a list of byte offsets (<I>synset_offset </I>s) in the corresponding
data file, one for each synset containing the word. Words in the index
file are in lower case only, regardless of how they were entered in the
lexicographer files. This folds various orthographic representations of
the word into one line enabling database searches to be case insensitive.
See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for a detailed description of the lexicographer files.
<P>
A data file for a syntactic category contains information corresponding
to the synsets that were specified in the lexicographer files, with relational
pointers resolved to <I>synset_offset </I>s. Each line corresponds to a synset.
Pointers are followed and hierarchies traversed by moving from one synset
to another via the <I>synset_offset </I>s. <P>
The exception list files, <I>pos </I><B>.exc
</B>, are used to help the morphological processor find base forms from irregular
inflections. <P>
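<P>
Each exception list is a plain ASCII file whose lines give an inflected form followed by one or more base forms, separated by spaces (see <B><A HREF="morphy.7WN.html">morphy</B>(7WN)</A>
 ). A minimal Python sketch for loading one; the filename is an example:
<PRE>
# Sketch: load a morphology exception list (e.g. verb.exc) into a dict.
# Each line: inflected_form base_form [base_form...]
def load_exceptions(path="verb.exc"):
    exc = {}
    with open(path) as f:
        for line in f:
            fields = line.split()
            if fields:
                exc[fields[0]] = fields[1:]
    return exc

# e.g. load_exceptions()["saw"] would yield ["see"]
</PRE>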
The files <B>sentidx.vrb </B> and <B>sents.vrb </B> contain sentences illustrating
the use of specific senses of some verbs. These files are used by the
searching software in response to a request for verb sentence frames.
Generic sentence frames are displayed when an illustrative sentence is
not present. <P>
The various database files are in ASCII formats that are
easily read by both humans and machines. All fields, unless otherwise
noted, are separated by one space character, and all lines are terminated
by a newline character. Fields enclosed in italicized square brackets
may not be present. <P>
See <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
for a glossary of WordNet terminology
and a discussion of the database's content and logical organization.
<H3><A NAME="sect2" HREF="#toc2">Index
File Format </A></H3>
Each index file begins with several lines containing a copyright
notice, version number and license agreement. These lines all begin with
two spaces and the line number so they do not interfere with the binary
search algorithm that is used to look up entries in the index files. All
other lines are in the following format. In the field descriptions, <B>number
</B> always refers to a decimal integer unless otherwise defined. <P>
<I>lemma&nbsp;&nbsp;pos&nbsp;&nbsp;synset_cnt&nbsp;&nbsp;p_cnt&nbsp;&nbsp;[ptr_symbol...]&nbsp;&nbsp;sense_cnt&nbsp;&nbsp;tagsense_cnt
&nbsp;&nbsp;synset_offset&nbsp;&nbsp;[synset_offset...] </I> <BR>
<P>
<DL>
<DT><I>lemma</I> </DT>
<DD>lower case ASCII text of word
or collocation. Collocations are formed by joining individual words with
an underscore (<B>_ </B>) character. </DD>
<DT><I>pos</I> </DT>
<DD>Syntactic category: <B>n </B> for noun files,
<B>v </B> for verb files, <B>a </B> for adjective files, <B>r </B> for adverb files. </DD>
</DL>
<P>
<P>
All remaining
fields are with respect to senses of <I>lemma </I> in <I>pos </I>. <P>
<DL>
<DT><I>synset_cnt</I> </DT>
<DD>Number
of synsets that <I>lemma </I> is in. This is the number of senses of the word
in WordNet. See <FONT SIZE=-1><B>Sense Numbers </B></FONT>
below for a discussion of how sense numbers
are assigned and the order of <I>synset_offset </I>s in the index files. </DD>
<DT><I>p_cnt</I>
</DT>
<DD>Number of different pointers that <I>lemma </I> has in all synsets containing
it. </DD>
<DT><I>ptr_symbol</I> </DT>
<DD>A space separated list of <I>p_cnt </I> different types of pointers
that <I>lemma </I> has in all synsets containing it. See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for a list
of <I>pointer_symbol </I>s. If all senses of <I>lemma </I> have no pointers, this field
is omitted and <I>p_cnt </I> is <B>0 </B>. </DD>
<DT><I>sense_cnt</I> </DT>
<DD>Same as <I>synset_cnt </I> above. This
is redundant, but the field was preserved for compatibility reasons. </DD>
<DT><I>tagsense_cnt</I>
</DT>
<DD>Number of senses of <I>lemma </I> that are ranked according to their frequency
of occurrence in semantic concordance texts. </DD>
<DT><I>synset_offset</I> </DT>
<DD>Byte offset
in <B>data.<I>pos </I></B> file of a synset containing <I>lemma </I>. Each <I>synset_offset </I> in
the list corresponds to a different sense of <I>lemma </I> in WordNet. <I>synset_offset
</I> is an 8 digit, zero-filled decimal integer that can be used with <B><A HREF="fseek.3.html">fseek</B>(3)</A>
to read a synset from the data file. When passed to <B><A HREF="read_synset.3WN.html">read_synset</B>(3WN)</A>
along
with the syntactic category, a data structure containing the parsed synset
is returned. </DD>
</DL>
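<P>
As a concrete (if unofficial) illustration, the index entry format above can
be parsed with a few lines of Python. This sketch is not part of the WordNet
distribution; the helper name and the returned dictionary are hypothetical:
<P>
<PRE>
def parse_index_line(line):
    # Sketch: parse one entry from an index.pos file. Assumes a
    # well-formed entry line, not one of the leading license lines.
    fields = line.split()
    lemma, pos = fields[0], fields[1]
    synset_cnt = int(fields[2])
    p_cnt = int(fields[3])
    ptr_symbols = fields[4:4 + p_cnt]          # p_cnt pointer symbols
    rest = fields[4 + p_cnt:]
    sense_cnt, tagsense_cnt = int(rest[0]), int(rest[1])
    synset_offsets = [int(off) for off in rest[2:]]
    return {"lemma": lemma, "pos": pos, "synset_cnt": synset_cnt,
            "ptr_symbols": ptr_symbols, "sense_cnt": sense_cnt,
            "tagsense_cnt": tagsense_cnt, "synset_offsets": synset_offsets}
</PRE>
<P>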
<H3><A NAME="sect3" HREF="#toc3">Data File Format </A></H3>
Each data file begins with several lines
containing a copyright notice, version number and license agreement. These
lines all begin with two spaces and the line number. All other lines are
in the following format. Integer fields are of fixed length, and are zero-filled.
<P>
<I>synset_offset&nbsp;&nbsp;lex_filenum&nbsp;&nbsp;ss_type&nbsp;&nbsp;w_cnt&nbsp;&nbsp;word&nbsp;&nbsp;lex_id&nbsp;&nbsp;[word&nbsp;&nbsp;lex_id...]&nbsp;&nbsp;p_cnt&nbsp;&nbsp;[ptr...]&nbsp;&nbsp;[frames...]&nbsp;&nbsp;<B>|
</B></I><I>&nbsp;&nbsp;gloss </I> <BR>
<P>
<DL>
<DT><I>synset_offset</I> </DT>
<DD>Current byte offset in the file represented
as an 8 digit decimal integer. </DD>
<DT><I>lex_filenum</I> </DT>
<DD>Two digit decimal integer
corresponding to the lexicographer file name containing the synset. See
<B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
for the list of filenames and their corresponding numbers.
</DD>
<DT><I>ss_type</I> </DT>
<DD>One character code indicating the synset type: </DD>
</DL>
<P>
<blockquote><B>n </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;NOUN <BR>
<B>v </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;VERB
<BR>
<B>a </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE <BR>
<B>s </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE SATELLITE <BR>
<B>r </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADVERB <BR>
</blockquote>
<DL>
<DT><I>w_cnt</I> </DT>
<DD>Two digit hexadecimal
integer indicating the number of words in the synset. </DD>
<DT><I>word</I> </DT>
<DD>ASCII form
of a word as entered in the synset by the lexicographer, with spaces replaced
by underscore characters (<B>_ </B>). The text of the word is case sensitive,
in contrast to its form in the corresponding <B>index. </B><I>pos </I> file, which contains
only lower-case forms. In <B>data.adj </B>, a <I>word </I> is followed by a syntactic
marker if one was specified in the lexicographer file. A syntactic marker
is appended, in parentheses, onto <I>word </I> without any intervening spaces.
See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for a list of the syntactic markers for adjectives. </DD>
<DT><I>lex_id</I>
</DT>
<DD>One digit hexadecimal integer that, when appended onto <I>lemma </I>, uniquely
identifies a sense within a lexicographer file. <I>lex_id </I> numbers usually
start with <B>0 </B>, and are incremented as additional senses of the word are
added to the same file, although there is no requirement that the numbers
be consecutive or begin with <B>0 </B>. Note that a value of <B>0 </B> is the default,
and therefore is not present in lexicographer files. </DD>
<DT><I>p_cnt</I> </DT>
<DD>Three digit
decimal integer indicating the number of pointers from this synset to
other synsets. If <I>p_cnt </I> is <B>000 </B> the synset has no pointers. </DD>
<DT><I>ptr</I> </DT>
<DD>A pointer
from this synset to another. <I>ptr </I> is of the form: </DD>
</DL>
<P>
<I>pointer_symbol&nbsp;&nbsp;synset_offset&nbsp;&nbsp;pos&nbsp;&nbsp;source/target
</I> <BR>
<P>
where <I>synset_offset </I> is the byte offset of the target synset in the
data file corresponding to <I>pos </I>. <P>
The <I>source/target </I> field distinguishes
lexical and semantic pointers. It is a four byte field, containing two
two-digit hexadecimal integers. The first two digits indicate the word
number in the current (source) synset, the last two digits indicate the
word number in the target synset. A value of <B>0000 </B> means that <I>pointer_symbol
</I> represents a semantic relation between the current (source) synset and
the target synset indicated by <I>synset_offset </I>. <P>
A lexical relation between
two words in different synsets is represented by non-zero values in the
source and target word numbers. The first and last two bytes of this field
indicate the word numbers in the source and target synsets, respectively,
between which the relation holds. Word numbers are assigned to the <I>word
</I> fields in a synset, from left to right, beginning with <B>1 </B>. <P>
See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for a list of <I>pointer_symbol </I>s, and semantic and lexical pointer classifications.
<DL>
<DT><I>frames</I> </DT>
<DD>In <B>data.verb </B> only, a list of numbers corresponding to the generic
verb sentence frames for <I>word </I>s in the synset. <I>frames </I> is of the form:
</DD>
</DL>
<P>
<I>f_cnt&nbsp;&nbsp; </I> <B>+ </B> <I>&nbsp;&nbsp;f_num&nbsp;&nbsp;w_num&nbsp;&nbsp;[ </I> <B>+ </B> <I>&nbsp;&nbsp;f_num&nbsp;&nbsp;w_num...] </I> <BR>
<P>
where <I>f_cnt </I> is a two
digit decimal integer indicating the number of generic frames listed,
<I>f_num </I> is a two digit decimal integer frame number, and <I>w_num </I> is a two
digit hexadecimal integer indicating the word in the synset that the frame
applies to. As with pointers, if this number is <B>00 </B>, <I>f_num </I> applies to
all <I>word </I>s in the synset. If non-zero, it is applicable only to the word
indicated. Word numbers are assigned as described for pointers. Each <I>f_num&nbsp;&nbsp;w_num
</I> pair is preceded by a <B>+ </B>. See <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
for the text of the generic
sentence frames.
<DL>
<DT><I>gloss</I> </DT>
<DD>Each synset contains a gloss. A <I>gloss </I> is represented
as a vertical bar (<B>| </B>), followed by a text string that continues until
the end of the line. The gloss may contain a definition, one or more example
sentences, or both. </DD>
</DL>
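<P>
Again as an unofficial illustration, the fixed field layout above can be
decoded in Python. The sketch below seeks to a <I>synset_offset </I> in the
manner described for <B>fseek</B>(3), and omits the verb-only <I>frames </I>
field for brevity; the function name (distinct from the C library's
<B>read_synset</B>(3WN)) and the returned dictionary are hypothetical:
<P>
<PRE>
def read_synset_at(data_file, offset):
    # Sketch: read and parse the synset at the given byte offset in an
    # open data.pos file (text mode, ASCII). Verb frames are skipped.
    data_file.seek(offset)
    record, _, gloss = data_file.readline().partition("|")
    fields = record.split()
    w_cnt = int(fields[3], 16)                 # two digit hexadecimal
    words = []
    for i in range(w_cnt):
        word = fields[4 + 2 * i]
        lex_id = int(fields[5 + 2 * i], 16)    # one digit hexadecimal
        words.append((word, lex_id))
    rest = fields[4 + 2 * w_cnt:]
    p_cnt = int(rest[0])                       # three digit decimal
    pointers = []
    for i in range(p_cnt):
        sym, target_off, pos, st = rest[1 + 4 * i:5 + 4 * i]
        source, target = int(st[:2], 16), int(st[2:], 16)
        # source == target == 0 marks a semantic (synset-level) pointer
        pointers.append((sym, int(target_off), pos, source, target))
    return {"offset": int(fields[0]), "lex_filenum": int(fields[1]),
            "ss_type": fields[2], "words": words, "pointers": pointers,
            "gloss": gloss.strip()}
</PRE>
<P>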
<H3><A NAME="sect4" HREF="#toc4">Sense Numbers </A></H3>
Senses in WordNet are generally ordered
from most to least frequently used, with the most common sense numbered
<B>1 </B>. Frequency of use is determined by the number of times a sense is tagged
in the various semantic concordance texts. Senses that are not semantically
tagged follow the ordered senses. The <I>tagsense_cnt </I> field for each entry
in the <B>index.<I>pos </I></B> files indicates how many of the senses in the list have
been tagged. <P>
The <B><A HREF="cntlist.5WN.html">cntlist</B>(5WN)</A>
file provided with the database lists the
number of times each sense is tagged in the semantic concordances. The
data from <B>cntlist </B> is used by <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
to order the senses of each word.
When the <B>index </B>.<I>pos </I> files are generated, the <I>synset_offset </I>s are output
in sense number order, with sense 1 first in the list. Senses with the
same number of semantic tags are assigned unique but consecutive sense
numbers. The WordNet <FONT SIZE=-1><B>OVERVIEW </B></FONT>
search displays all senses of the specified
word, in all syntactic categories, and indicates which of the senses are
represented in the semantically tagged texts.
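<P>
The ordering rule can be stated compactly in code. A minimal sketch, assuming
<B>senses </B> is a list of <I>(synset_offset, tag_count) </I> pairs derived
from <B>cntlist </B>:
<P>
<PRE>
# Most-tagged senses first; Python's sort is stable, so senses with
# equal tag counts keep their relative order and receive unique but
# consecutive sense numbers, as described above.
senses.sort(key=lambda s: s[1], reverse=True)
sense_number = {offset: i + 1 for i, (offset, _) in enumerate(senses)}
</PRE>
<P>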
<H3><A NAME="sect5" HREF="#toc5">Exception List File Format
</A></H3>
Exception lists are alphabetized lists of inflected forms of words and
their base forms. The first field of each line is an inflected form, followed
by a space separated list of one or more base forms of the word. There
is one exception list file for each syntactic category. <P>
Note that the
noun and verb exception lists were automatically generated from a machine-readable
dictionary, and contain many words that are not in WordNet. Also, for
many of the inflected forms, base forms could be easily derived using
the standard rules of detachment programmed into Morphy (See <B><A HREF="morph.7WN.html">morph</B>(7WN)</A>
).
These anomalies are allowed to remain in the exception list files, as
they do no harm. <P>
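<P>
Because each line is simply an inflected form followed by its base forms, an
exception list can be loaded into a lookup table directly. A minimal Python
sketch (the function name is hypothetical):
<P>
<PRE>
def load_exception_list(path):
    # Sketch: map each inflected form to its list of base forms,
    # e.g. a noun.exc line such as "geese goose".
    exceptions = {}
    with open(path) as f:
        for line in f:
            inflected, *base_forms = line.split()
            exceptions[inflected] = base_forms
    return exceptions
</PRE>
<P>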
<H3><A NAME="sect6" HREF="#toc6">Verb Example Sentences </A></H3>
For some verb senses, example
sentences illustrating the use of the verb sense can be displayed. Each
line of the file <B>sentidx.vrb </B> contains a <I>sense_key </I> followed by a space
and a comma separated list of example sentence template numbers, in decimal.
The file <B>sents.vrb </B> lists all of the example sentence templates. Each
line begins with the template number followed by a space. The rest of
the line is the text of a template example sentence, with <B>%s </B> used as
a placeholder in the text for the verb. Both files are sorted alphabetically
so that the <I>sense_key </I> and template sentence number can be used as indices,
via <B><A HREF="binsrch.3WN.html">binsrch</B>(3WN)</A>
, into the appropriate file. <P>
When a request for <FONT SIZE=-1><B>FRAMES
</B></FONT>
is made, the WordNet search code looks for the sense in <B>sentidx.vrb </B>.
If found, the sentence template(s) listed is retrieved from <B>sents.vrb
</B>, and the <B>%s </B> is replaced with the verb. If the sense is not found, the
applicable generic sentence frame(s) listed in <I>frames </I> is displayed.
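<P>
The lookup just described amounts to a dictionary search plus a string
substitution. A minimal sketch, assuming <B>sentidx </B> maps each
<I>sense_key </I> to its list of template numbers (from <B>sentidx.vrb </B>)
and <B>sents </B> maps each template number to its <B>%s </B> template text
(from <B>sents.vrb </B>); the function name is hypothetical:
<P>
<PRE>
def example_sentences(sense_key, verb, sentidx, sents):
    # Sketch of the FRAMES lookup: return rendered example sentences,
    # or None so the caller can fall back to the generic frames.
    template_nums = sentidx.get(sense_key)
    if template_nums is None:
        return None
    return [sents[num] % verb for num in template_nums]
</PRE>
<P>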
<H2><A NAME="sect7" HREF="#toc7">NOTES
</A></H2>
Information in the <B>data.<I>pos </I></B> and <B>index.<I>pos </I></B> files represents all of the
word senses and synsets in the WordNet database. The <I>word </I>, <I>lex_id </I>, and
<I>lex_filenum </I> fields together uniquely identify each word sense in WordNet.
These can be encoded in a <I>sense_key </I> as described in <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
. Each
synset in the database can be uniquely identified by combining the <I>synset_offset
</I> for the synset with a code for the syntactic category (since it is possible
for synsets in different <B>data.<I>pos </I></B> files to have the same <I>synset_offset
</I>). <P>
The WordNet system provides both command line and window-based browser
interfaces to the database. Both interfaces utilize a common library of
search and morphology code. The source code for the library and interfaces
is included in the WordNet package. See <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
for an overview of
the WordNet source code.
<H2><A NAME="sect8" HREF="#toc8">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory
for WordNet. Default is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in
which the WordNet database has been installed. Default is <B>WNHOME/dict
</B>. </DD>
</DL>
<H2><A NAME="sect9" HREF="#toc9">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B> </DT>
<DD>Base directory
for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect10" HREF="#toc10">FILES </A></H2>
<DL>
<DT><B>index.<I>pos </I></B> </DT>
<DD>database
index files </DD>
<DT><B>data.<I>pos </I></B> </DT>
<DD>database data files </DD>
<DT><B>*.vrb</B> </DT>
<DD>files of sentences illustrating
the use of verbs </DD>
<DT><B><I>pos </I>.exc</B> </DT>
<DD>morphology exception lists </DD>
</DL>
<H2><A NAME="sect11" HREF="#toc11">SEE ALSO </A></H2>
<B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
,
<B><A HREF="wn.1WN.html">wn</B>(1WN)</A>
, <B><A HREF="wnb.1WN.html">wnb</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="binsrch.3WN.html">binsrch</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="cntlist.5WN.html">cntlist</B>(5WN)</A>
,
<B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
, <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
, <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
, <B><A HREF="morphy.7WN.html">morphy</B>(7WN)</A>
, <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
,
<B><A HREF="wngroups.7WN.html">wngroups</B>(7WN)</A>
, <B><A HREF="wnstats.7WN.html">wnstats</B>(7WN)</A>
. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">Index File Format</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Data File Format</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Sense Numbers</A></LI>
<LI><A NAME="toc5" HREF="#sect5">Exception List File Format</A></LI>
<LI><A NAME="toc6" HREF="#sect6">Verb Example Sentences</A></LI>
</UL>
<LI><A NAME="toc7" HREF="#sect7">NOTES</A></LI>
<LI><A NAME="toc8" HREF="#sect8">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc9" HREF="#sect9">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc10" HREF="#sect10">FILES</A></LI>
<LI><A NAME="toc11" HREF="#sect11">SEE ALSO</A></LI>
</UL>
</BODY></HTML>
View File

@ -0,0 +1,325 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNGLOSS(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wngloss - glossary of terms used in WordNet system
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION
</A></H2>
The <I>WordNet Reference Manual </I> consists of Unix-style manual pages divided
into sections as follows: <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>Section </B> </TD> <TD ALIGN=CENTER><B>Description </B> </TD> </TR>
<TR> <TD ALIGN=CENTER>1 </TD> <TD ALIGN=LEFT>WordNet User
Commands </TD> </TR>
<TR> <TD ALIGN=CENTER>3 </TD> <TD ALIGN=LEFT>WordNet Library Functions </TD> </TR>
<TR> <TD ALIGN=CENTER>5 </TD> <TD ALIGN=LEFT>WordNet File Formats </TD> </TR>
<TR> <TD ALIGN=CENTER>7 </TD> <TD ALIGN=LEFT>Miscellaneous Information about WordNet </TD> </TR>
</TABLE>
<P>
<H3><A NAME="sect2" HREF="#toc2">System Description </A></H3>
The
WordNet system consists of lexicographer files, code to convert these
files into a database, and search routines and interfaces that display
information from the database. The lexicographer files organize nouns,
verbs, adjectives and adverbs into groups of synonyms, and describe relations
between synonym groups. <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
converts the lexicographer files into
a database that encodes the relations between the synonym groups. The
different interfaces to the WordNet database utilize a common library
of search routines to display these relations. Note that the lexicographer
files and <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
program are not generally distributed. <P>
<H3><A NAME="sect3" HREF="#toc3">Database
Organization </A></H3>
Information in WordNet is organized around logical groupings
called synsets. Each synset consists of a list of synonymous words or
collocations (e.g. <B>"fountain pen" </B>, <B>"take in" </B>), and pointers that describe
the relations between this synset and other synsets. A word or collocation
may appear in more than one synset, and in more than one part of speech.
The words in a synset are grouped such that they are interchangeable
in some context. <P>
Two kinds of relations are represented by pointers: lexical
and semantic. Lexical relations hold between semantically related word
forms; semantic relations hold between word meanings. These relations
include (but are not limited to) hypernymy/hyponymy (superordinate/subordinate),
antonymy, entailment, and meronymy/holonymy. <P>
Nouns and verbs are organized
into hierarchies based on the hypernymy/hyponymy relation between synsets.
Additional pointers are used to indicate other relations. <P>
Adjectives
are arranged in clusters containing head synsets and satellite synsets.
Each cluster is organized around antonymous pairs (and occasionally antonymous
triplets). The antonymous pairs (or triplets) are indicated in the head
synsets of a cluster. Most head synsets have one or more satellite synsets,
each of which represents a concept that is similar in meaning to the concept
represented by the head synset. One way to think of the adjective cluster
organization is to visualize a wheel, with a head synset as the hub and
satellite synsets as the spokes. Two or more wheels are logically connected
via antonymy, which can be thought of as an axle between the wheels. <P>
Pertainyms
are relational adjectives and do not follow the structure just described.
Pertainyms do not have antonyms; the synset for a pertainym most often
contains only one word or collocation and a lexical pointer to the noun
that the adjective is "pertaining to". Participial adjectives have lexical
pointers to the verbs that they are derived from. <P>
Adverbs are often derived
from adjectives, and sometimes have antonyms; therefore the synset for
an adverb usually contains a lexical pointer to the adjective from which
it is derived. <P>
See <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
for a detailed description of the database
files and how the data are represented.
<H2><A NAME="sect4" HREF="#toc4">GLOSSARY OF TERMS </A></H2>
Many terms
used in the <I>WordNet Reference Manual </I> are unique to the WordNet system.
Other general terms have specific meanings when used in the WordNet documentation.
Definitions for many of these terms are given to help with the interpretation
and understanding of the reference manual, and in the use of the WordNet
system. <P>
In the following definitions, <B>word </B> is used in place of <B>word or collocation
</B>.
<DL>
<DT><B>adjective cluster</B> </DT>
<DD>A group of adjective synsets that are organized around
antonymous pairs or triplets. An adjective cluster contains two or more
<B>head synsets </B> which represent antonymous concepts. Each head synset has
one or more <B>satellite synsets </B>. </DD>
<DT><B>attribute</B> </DT>
<DD>A noun for which adjectives
express values. The noun <B>weight </B> is an attribute, for which the adjectives
<B>light </B> and <B>heavy </B> express values. </DD>
<DT><B>base form</B> </DT>
<DD>The base form of a word
or collocation is the form to which inflections are added. </DD>
<DT><B>basic synset</B>
</DT>
<DD>Syntactically, same as <B>synset </B>. Term is used in <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
to help
explain differences in entering synsets in lexicographer files. </DD>
<DT><B>collocation</B>
</DT>
<DD>A collocation in WordNet is a string of two or more words, connected
by spaces or hyphens. Examples are: <B>man-eating&nbsp;shark </B>, <B>blue-collar </B>, <B>depend&nbsp;on
</B>, <B>line&nbsp;of&nbsp;products </B>. In the database files spaces are represented as underscore
(<B>_ </B>) characters. </DD>
<DT><B>coordinate</B> </DT>
<DD>Coordinate terms are nouns or verbs that have
the same <B>hypernym </B>. </DD>
<DT><B>cross-cluster pointer</B> </DT>
<DD>A <B>semantic pointer </B> from one
adjective cluster to another. </DD>
<DT><B>derivationally related forms</B> </DT>
<DD>Terms in different
syntactic categories that have the same root form and are semantically
related. </DD>
<DT><B>direct antonyms</B> </DT>
<DD>A pair of words between which there is an associative
bond resulting from their frequent co-occurrence. In <B>adjective clusters
</B>, direct antonyms appear only in <B>head synsets </B>. </DD>
<DT><B>domain</B> </DT>
<DD>A topical classification
to which a synset has been linked with a CATEGORY, REGION or USAGE pointer.
</DD>
<DT><B>domain term</B> </DT>
<DD>A synset belonging to a topical class. A domain term is further
identified as being a CATEGORY_TERM, REGION_TERM or USAGE_TERM. </DD>
<DT><B>entailment</B>
</DT>
<DD>A verb <B>X </B> entails <B>Y </B> if <B>X </B> cannot be done unless <B>Y </B> is, or has been,
done. </DD>
<DT><B>exception list</B> </DT>
<DD>Morphological transformations for words that are
not regular and therefore cannot be processed in an algorithmic manner.
</DD>
<DT><B>group</B> </DT>
<DD>Verb senses that are similar in meaning and have been manually grouped
together. </DD>
<DT><B>gloss</B> </DT>
<DD>Each synset contains a <B>gloss </B> consisting of a definition
and optionally example sentences. </DD>
<DT><B>head synset</B> </DT>
<DD>Synset in an adjective <B>cluster
</B> containing at least one word that has a <B>direct antonym </B>. </DD>
<DT><B>holonym</B> </DT>
<DD>The
name of the whole of which the meronym names a part. <B>Y </B> is a holonym
of <B>X </B> if <B>X </B> is a part of <B>Y </B>. </DD>
<DT><B>hypernym</B> </DT>
<DD>The generic term used to designate
a whole class of specific instances. <B>Y </B> is a hypernym of <B>X </B> if <B>X </B> is a
(kind of) <B>Y </B>. </DD>
<DT><B>hyponym</B> </DT>
<DD>The specific term used to designate a member of
a class. <B>X </B> is a hyponym of <B>Y </B> if <B>X </B> is a (kind of) <B>Y </B>. </DD>
<DT><B>indirect antonym</B>
</DT>
<DD>An adjective in a <B>satellite synset </B> that does not have a <B>direct antonym
</B> has an indirect antonym via the direct antonym of the <B>head synset </B>. </DD>
<DT><B>instance</B>
</DT>
<DD>A proper noun that refers to a particular, unique referent (as distinguished
from nouns that refer to classes). This is a specific form of hyponym.
</DD>
<DT><B>lemma</B> </DT>
<DD>Lower case ASCII text of word as found in the WordNet database
index files. Usually the <B>base form </B> for a word or collocation. </DD>
<DT><B>lexical
pointer</B> </DT>
<DD>A lexical pointer indicates a relation between words in synsets
(word forms). </DD>
<DT><B>lexicographer file</B> </DT>
<DD>Files containing the raw data for WordNet
synsets, edited by lexicographers, that are input to the <B>grind </B> program
to generate a WordNet database. </DD>
<DT><B>lexicographer id (lex id)</B> </DT>
<DD>A decimal integer
that, when appended onto <B>lemma </B>, uniquely identifies a sense within a
lexicographer file. </DD>
<DT><B>monosemous</B> </DT>
<DD>Having only one sense in a syntactic category.
</DD>
<DT><B>meronym</B> </DT>
<DD>The name of a constituent part of, the substance of, or a member
of something. <B>X </B> is a meronym of <B>Y </B> if <B>X </B> is a part of <B>Y </B>. </DD>
<DT><B>part of speech</B>
</DT>
<DD>WordNet defines "part of speech" as either noun, verb, adjective, or
adverb. Same as <B>syntactic category </B>. </DD>
<DT><B>participial adjective</B> </DT>
<DD>An adjective
that is derived from a verb. </DD>
<DT><B>pertainym</B> </DT>
<DD>A relational adjective. Adjectives
that are pertainyms are usually defined by such phrases as "of or pertaining
to" and do not have antonyms. A pertainym can point to a noun or another
pertainym. </DD>
<DT><B>polysemous</B> </DT>
<DD>Having more than one sense in a syntactic category.
</DD>
<DT><B>polysemy count</B> </DT>
<DD>Number of senses of a word in a syntactic category, in
WordNet. </DD>
<DT><B>postnominal</B> </DT>
<DD>A postnominal adjective occurs only immediately following
the noun that it modifies. </DD>
<DT><B>predicative</B> </DT>
<DD>An adjective that can be used
only in predicate positions. If <B>X </B> is a predicate adjective, it can only
be used in such phrases as "it is <B>X </B>" and never prenominally. </DD>
<DT><B>prenominal</B>
</DT>
<DD>An adjective that can occur only before the noun that it modifies: it
cannot be used predicatively. </DD>
<DT><B>satellite synset</B> </DT>
<DD>Synset in an adjective
<B>cluster </B> representing a concept that is similar in meaning to the concept
represented by its <B>head synset </B>. </DD>
<DT><B>semantic concordance</B> </DT>
<DD>A textual corpus
(e.g. the Brown Corpus) and a lexicon (e.g. WordNet) so combined that every
substantive word in the text is linked to its appropriate sense in the
lexicon via a <B>semantic tag </B>. </DD>
<DT><B>semantic tag</B> </DT>
<DD>A pointer from a word in a text
file to a specific sense of that word in the WordNet database. A semantic
tag in a semantic concordance is represented by a <B>sense key </B>. </DD>
<DT><B>semantic
pointer</B> </DT>
<DD>A semantic pointer indicates a relation between synsets (concepts).
</DD>
<DT><B>sense</B> </DT>
<DD>A meaning of a word in WordNet. Each sense of a word is in a different
<B>synset </B>. </DD>
<DT><B>sense key</B> </DT>
<DD>Information necessary to find a sense in the WordNet
database. A sense key combines a <B>lemma </B> field and codes for the synset
type, lexicographer id, lexicographer file number, and information about
a satellite's <B>head synset </B>, if required. See <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
for a description
of the format of a sense key. </DD>
<DT><B>subordinate</B> </DT>
<DD>Same as <B>hyponym </B>. </DD>
<DT><B>superordinate</B>
</DT>
<DD>Same as <B>hypernym </B>. </DD>
<DT><B>synset</B> </DT>
<DD>A synonym set; a set of words that are interchangeable
in some context without changing the truth value of the proposition in
which they are embedded. </DD>
<DT><B>troponym</B> </DT>
<DD>A verb expressing a specific manner
elaboration of another verb. <B>X </B> is a troponym of <B>Y </B> if <B>to X </B> is <B>to Y </B> in
some manner. </DD>
<DT><B>unique beginner</B> </DT>
<DD>A noun synset with no <B>superordinate </B>. </DD>
</DL>
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">System Description</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Database Organization</A></LI>
</UL>
<LI><A NAME="toc4" HREF="#sect4">GLOSSARY OF TERMS</A></LI>
</UL>
</BODY></HTML>
View File

@ -0,0 +1,80 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNGROUPS(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wngroups - discussion of WordNet search code to group similar verb
senses
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
Some similar senses of verbs have been grouped by
the lexicographers. This grouping is done statically in the lexicographer
source files using the semantic <I>pointer_symbol </I> <B>$ </B>. Transitivity is used
to combine groups of overlapping senses into the largest sense groups
possible.
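<P>
Combining overlapping pairs transitively is a standard union-find problem.
A minimal Python sketch, assuming <B>pairs </B> is an iterable of two-sense
tuples extracted from the <B>$ </B> pointers (the helper names are
hypothetical):
<P>
<PRE>
def merge_sense_groups(pairs):
    # Sketch: union-find with path halving; every chain of overlapping
    # $-grouped pairs collapses into one maximal group.
    parent = {}

    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    for a, b in pairs:
        parent[find(a)] = find(b)
    groups = {}
    for sense in parent:
        groups.setdefault(find(sense), set()).add(sense)
    return list(groups.values())
</PRE>
<P>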
<H2><A NAME="sect2" HREF="#toc2">NOTES </A></H2>
Coverage of verb groups is incomplete.
<H2><A NAME="sect3" HREF="#toc3">ENVIRONMENT VARIABLES
(UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet. Default is <B>/usr/local/WordNet-3.0
</B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in which the WordNet database has been installed.
Default is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect4" HREF="#toc4">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B>
</DT>
<DD>Base directory for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect5" HREF="#toc5">FILES
</A></H2>
<DL>
<DT><B>sentidx.vrb</B> </DT>
<DD>verb sense keys and sentence frame numbers </DD>
<DT><B>sents.vrb</B> </DT>
<DD>example
sentence frames </DD>
</DL>
<H2><A NAME="sect6" HREF="#toc6">SEE ALSO </A></H2>
<B><A HREF="wn.1WN.html">wn</B>(1WN)</A>
, <B><A HREF="wnb.1WN.html">wnb</B>(1WN)</A>
, <B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
, <B><A HREF="wnsearch.3WN.html">wnsearch</B>(3WN)</A>
,
<B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wnintro.7WN.html">wnintro</B>(7WN)</A>
. <P>
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<LI><A NAME="toc2" HREF="#sect2">NOTES</A></LI>
<LI><A NAME="toc3" HREF="#sect3">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc4" HREF="#sect4">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc5" HREF="#sect5">FILES</A></LI>
<LI><A NAME="toc6" HREF="#sect6">SEE ALSO</A></LI>
</UL>
</BODY></HTML>
View File

@ -0,0 +1,491 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNINPUT(5WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
noun.<I>suffix </I>, verb.<I>suffix </I>, adj.<I>suffix </I>, adv.<I>suffix </I> - WordNet lexicographer
files that are input to <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
WordNet's source files
are written by lexicographers. They are the product of a detailed relational
analysis of lexical semantics: a variety of lexical and semantic relations
are used to represent the organization of lexical knowledge. Two kinds
of building blocks are distinguished in the source files: word forms and
word meanings. Word forms are represented in their familiar orthography;
word meanings are represented by synonym sets (<I>synset </I>s) - lists of synonymous
word forms that are interchangeable in some context. Two kinds of relations
are recognized: lexical and semantic. Lexical relations hold between word
forms; semantic relations hold between word meanings. <P>
Lexicographer files
correspond to the syntactic categories implemented in WordNet - noun, verb,
adjective and adverb. All of the synsets in a lexicographer file are in
the same syntactic category. Each synset consists of a list of synonymous
words or collocations (e.g. <B>"fountain pen" </B>, <B>"take in" </B>), and pointers that
describe the relations between this synset and other synsets. These relations
include (but are not limited to) hypernymy/hyponymy, antonymy, entailment,
and meronymy/holonymy. A word or collocation may appear in more than one
synset, and in more than one part of speech. Each use of a word in a synset
represents a sense of that word in the part of speech corresponding to
the synset. <P>
Adjectives may be organized into clusters containing head
synsets and satellite synsets. Adverbs generally point to the adjectives
from which they are derived. <P>
See <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
for a glossary of WordNet
terminology and a discussion of the database's content and logical organization.
<H3><A NAME="sect2" HREF="#toc2">Lexicographer File Names </A></H3>
The names of the lexicographer files are of
the form: <P>
<blockquote><I>pos</I>.<I>suffix</I> </blockquote>
<P>
where <I>pos </I> is either <B>noun </B>, <B>verb </B>, <B>adj </B> or <B>adv
</B>. <I>suffix </I> may be used to organize groups of synsets into different files,
for example <B>noun.animal </B> and <B>noun.plant </B>. See <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
for a list of
lexicographer file names that are used in building WordNet.
<H3><A NAME="sect3" HREF="#toc3">Pointers </A></H3>
Pointers
are used to represent the relations between the words in one synset and
another. Semantic pointers represent relations between word meanings,
and therefore pertain to all of the words in the source and target synsets.
Lexical pointers represent relations between word forms, and pertain
only to specific words in the source and target synsets. The following
pointer types are usually used to indicate lexical relations: Antonym,
Pertainym, Participle, Also See, Derivationally Related. The remaining
pointer types are generally used to represent semantic relations. <P>
A relation
from a source to a target synset is formed by specifying a word from the
target synset in the source synset, followed by the <I>pointer_symbol </I> indicating
the pointer type. The location of a pointer within a synset defines it
as either lexical or semantic. The <FONT SIZE=-1><B>Lexicographer File Format </B></FONT>
section
describes the syntax for entering a semantic pointer, and <FONT SIZE=-1><B>Word Syntax
</B></FONT>
describes the syntax for entering a lexical pointer. <P>
Although there
are many pointer types, only certain types of relations are permitted
between synsets of each syntactic category. <P>
The <I>pointer_symbol </I>s for nouns
are: <blockquote><B>! </B> <tt> </tt>&nbsp;<tt> </tt>&nbsp;Antonym <BR>
<B>@ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Hypernym <BR>
<B>@i </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Instance Hypernym <BR>
<B>~ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Hyponym <BR>
<B>~i </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Instance
Hyponym <BR>
<B>#m </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Member holonym <BR>
<B>#s </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Substance holonym <BR>
<B>#p </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Part holonym <BR>
<B>%m
</B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Member meronym <BR>
<B>%s </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Substance meronym <BR>
<B>%p </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Part meronym <BR>
<B>= </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Attribute <BR>
<B>+
</B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Derivationally related form<tt> </tt>&nbsp;<tt> </tt>&nbsp;<tt> </tt>&nbsp;<tt> </tt>&nbsp; <BR>
<B>;c </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - TOPIC <BR>
<B>-c </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Member of this
domain - TOPIC <BR>
<B>;r </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - REGION <BR>
<B>-r </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Member of this domain - REGION
<BR>
<B>;u </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - USAGE <BR>
<B>-u </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Member of this domain - USAGE <BR>
</blockquote>
<P>
The <I>pointer_symbol
</I>s for verbs are: <blockquote><B>! </B> <tt> </tt>&nbsp;<tt> </tt>&nbsp;Antonym <BR>
<B>@ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Hypernym <BR>
<B>~ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Hyponym <BR>
<B>* </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Entailment <BR>
<B>&gt; </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Cause
<BR>
<B>^ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Also see <BR>
<B>$ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Verb Group <BR>
<B>+ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Derivationally related form<tt> </tt>&nbsp;<tt> </tt>&nbsp;<tt> </tt>&nbsp;<tt> </tt>&nbsp; <BR>
<B>;c </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of
synset - TOPIC <BR>
<B>;r </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - REGION <BR>
<B>;u </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - USAGE
<BR>
</blockquote>
<P>
The <I>pointer_symbol </I>s for adjectives are: <blockquote><B>! </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Antonym <BR>
<B>&amp; </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Similar to <BR>
<B>&lt;
</B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Participle of verb <BR>
<B>\ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Pertainym (pertains to noun) <BR>
<B>= </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Attribute <BR>
<B>^ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Also
see <BR>
<B>;c </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - TOPIC <BR>
<B>;r </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - REGION <BR>
<B>;u </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain
of synset - USAGE <BR>
</blockquote>
<P>
The <I>pointer_symbol </I>s for adverbs are: <blockquote><B>! </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Antonym <BR>
<B>\ </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Derived from adjective <BR>
<B>;c </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - TOPIC <BR>
<B>;r </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset
- REGION <BR>
<B>;u </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;Domain of synset - USAGE <BR>
</blockquote>
<P>
Many pointer types are reflexive,
meaning that if a synset contains a pointer to another synset, the other
synset should contain a corresponding reflexive pointer. <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
automatically
inserts missing reflexive pointers for the following pointer types: <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>Pointer </B> </TD> <TD ALIGN=CENTER><B>Reflect </B> </TD> </TR>
<TR> <TD ALIGN=LEFT>Antonym </TD> <TD ALIGN=LEFT>Antonym </TD> </TR>
<TR> <TD ALIGN=LEFT>Hyponym </TD> <TD ALIGN=LEFT>Hypernym </TD> </TR>
<TR> <TD ALIGN=LEFT>Hypernym
</TD> <TD ALIGN=LEFT>Hyponym </TD> </TR>
<TR> <TD ALIGN=LEFT>Instance Hyponym </TD> <TD ALIGN=LEFT>Instance Hypernym </TD> </TR>
<TR> <TD ALIGN=LEFT>Instance Hypernym </TD>
<TD ALIGN=LEFT>Instance Hyponym </TD> </TR>
<TR> <TD ALIGN=LEFT>Holonym </TD> <TD ALIGN=LEFT>Meronym </TD> </TR>
<TR> <TD ALIGN=LEFT>Meronym </TD> <TD ALIGN=LEFT>Holonym </TD> </TR>
<TR> <TD ALIGN=LEFT>Similar to
</TD> <TD ALIGN=LEFT>Similar to </TD> </TR>
<TR> <TD ALIGN=LEFT>Attribute </TD> <TD ALIGN=LEFT>Attribute </TD> </TR>
<TR> <TD ALIGN=LEFT>Verb Group </TD> <TD ALIGN=LEFT>Verb Group </TD> </TR>
<TR> <TD ALIGN=LEFT>Derivationally
Related </TD> <TD ALIGN=LEFT>Derivationally Related </TD> </TR>
<TR> <TD ALIGN=LEFT>Domain of synset </TD> <TD ALIGN=LEFT>Member of Domain </TD>
</TR>
</TABLE>
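<P>
The table above translates directly into a symbol-to-symbol mapping. A
minimal sketch of the completion step (the dictionary covers only a few of
the <I>pointer_symbol </I>s listed in <FONT SIZE=-1><B>Pointers </B></FONT>,
and the function name is hypothetical):
<P>
<PRE>
REFLECTS = {"!": "!",       # Antonym      reflects  Antonym
            "@": "~",       # Hypernym     reflects  Hyponym
            "~": "@",       # Hyponym      reflects  Hypernym
            "&amp;": "&amp;",       # Similar to   reflects  Similar to
            "=": "=",       # Attribute    reflects  Attribute
            "$": "$"}       # Verb Group   reflects  Verb Group

def add_missing_reflexive(pointers):
    # Sketch: pointers is a set of (source_synset, symbol, target_synset)
    # triples; insert the mirror of any pointer whose reflection is absent.
    for src, sym, dst in list(pointers):
        mirror_sym = REFLECTS.get(sym)
        if mirror_sym and (dst, mirror_sym, src) not in pointers:
            pointers.add((dst, mirror_sym, src))
</PRE>
<P>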
<H3><A NAME="sect4" HREF="#toc4">Verb Frames </A></H3>
Each verb synset contains a list of generic sentence frames
illustrating the types of simple sentences in which the verbs in the synset
can be used. For some verb senses, example sentences illustrating actual
uses of the verb are provided. (See <FONT SIZE=-1><B>Verb Example Sentences </B></FONT>
in <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
.)
Whenever there is no example sentence, the generic sentence frames specified
by the lexicographer are used. The generic sentence frames are entered
in a synset as a comma-separated list of integer frame numbers. The following
list is the text of the generic frames, preceded by their frame numbers:
<P>
<blockquote>1<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something ----s <BR>
2<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s <BR>
3<tt> </tt>&nbsp;<tt> </tt>&nbsp;It is ----ing <BR>
4<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something is ----ing PP <BR>
5<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something
----s something Adjective/Noun <BR>
6<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something ----s Adjective/Noun <BR>
7<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s Adjective
<BR>
8<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s something <BR>
9<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s somebody <BR>
10<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something ----s somebody <BR>
11<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something ----s something <BR>
12<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something ----s to somebody <BR>
13<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s on something
<BR>
14<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s somebody something <BR>
15<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s something to somebody <BR>
16<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s something from somebody <BR>
17<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s somebody with something
<BR>
18<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s somebody of something <BR>
19<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s something on somebody
<BR>
20<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s somebody PP <BR>
21<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s something PP <BR>
22<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s PP
<BR>
23<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody's (body part) ----s <BR>
24<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s somebody to INFINITIVE <BR>
25<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody
----s somebody INFINITIVE <BR>
26<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s that CLAUSE <BR>
27<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s to somebody
<BR>
28<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s to INFINITIVE <BR>
29<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s whether INFINITIVE <BR>
30<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody
----s somebody into V-ing something <BR>
31<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s something with something
<BR>
32<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s INFINITIVE <BR>
33<tt> </tt>&nbsp;<tt> </tt>&nbsp;Somebody ----s VERB-ing <BR>
34<tt> </tt>&nbsp;<tt> </tt>&nbsp;It ----s that CLAUSE <BR>
35<tt> </tt>&nbsp;<tt> </tt>&nbsp;Something
----s INFINITIVE <BR>
</blockquote>
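<P>
Displaying a generic frame for a particular verb is then a simple
substitution into the <B>----s </B> slot. A minimal sketch (the table below
abridges the full list above):
<P>
<PRE>
GENERIC_FRAMES = {1: "Something ----s",
                  2: "Somebody ----s",
                  8: "Somebody ----s something",
                  22: "Somebody ----s PP"}

def render_frame(f_num, verb):
    # Sketch: render_frame(8, "interpret")
    # yields "Somebody interprets something".
    return GENERIC_FRAMES[f_num].replace("----", verb)
</PRE>
<P>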
<H3><A NAME="sect5" HREF="#toc5">Lexicographer File Format </A></H3>
Synsets are entered one per
line, and each line is terminated with a newline character. A line containing
a synset may be as long as necessary, but no newlines can be entered within
a synset. Within a synset, spaces or tabs may be used to separate entities.
Items enclosed in italicized square brackets may not be present. <P>
The
general synset syntax is: <P>
<blockquote><B>{ </B> <I>&nbsp;&nbsp;words&nbsp;&nbsp;pointers&nbsp;&nbsp; </I> <B>( </B> <I>&nbsp;gloss&nbsp; </I> <B>)&nbsp;&nbsp;} </B>
<BR>
</blockquote>
<P>
Synsets of this form are valid for all syntactic categories except
verb, and are referred to as basic synsets. At least one <I>word </I> and a <I>gloss
</I> are required to form a valid synset. Pointers entered following all the
<I>words </I> in a synset represent semantic relations between all the words
in the source and target synsets. <P>
For verbs, the basic synset syntax is
defined as follows: <P>
<blockquote><B>{ </B> <I>&nbsp;&nbsp;words&nbsp;&nbsp;pointers&nbsp;&nbsp;frames&nbsp;&nbsp; </I> <B>( </B> &nbsp;<I>gloss&nbsp; </I> <B>)&nbsp;&nbsp;}
</B> <BR>
</blockquote>
<P>
Adjectives may be organized into clusters containing one or more head
synsets and optional satellite synsets. Adjective clusters are of the
form: <P>
<blockquote><B>[ </B><BR>
<I>head synset </I><BR>
[satellite synsets] <BR>
[-] <BR>
[additional head/satellite
synsets] <BR>
<B>] </B> <BR>
</blockquote>
<P>
Each adjective cluster is enclosed in square brackets,
and may have one or more parts. Each part consists of a head synset and
optional satellite synsets that are conceptually similar to the head synset's
meaning. Parts of a cluster are separated by one or more hyphens (<B>- </B>) on
a line by themselves, with the terminating square bracket following the
last synset. Head and satellite synsets follow the syntax of basic synsets;
however, a "Similar to" pointer must be specified in a head synset for
each of its satellite synsets. Most adjective clusters contain two antonymous
parts. See <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
for a discussion of adjective clusters, and <FONT SIZE=-1><B>Special
Adjective Syntax </B></FONT>
for more information on adjective cluster syntax. <P>
Synsets
for relational adjectives (pertainyms) and participial adjectives do not
adhere to the cluster structure. They use the basic synset syntax. <P>
Comments
can be entered in a lexicographer file by enclosing the text of the comment
in parentheses. Note that comments <B>cannot </B> appear within a synset, as
parentheses within a synset have an entirely different meaning (see <FONT SIZE=-1><B>Gloss
Syntax </B></FONT>
). However, entire synsets (or adjective clusters) can be "commented
out" by enclosing them in parentheses. This is often used by the lexicographers
to verify the syntax of files under development or to leave a note to
oneself while working on entries.
<H3><A NAME="sect6" HREF="#toc6">Word Syntax </A></H3>
A synset must have at least
one word, and the words of a synset must appear after the opening brace
and before any other synset constructs. A word may be entered in either
the simple word or word/pointer syntax. <P>
A simple word is of the form:
<P>
<blockquote><I>word[ </I> <B>( </B> <I>marker </I> <B>) </B> <I>][lex_id] </I> <B>, </B> <BR>
</blockquote>
<P>
<I>word </I> may be entered in any combination
of upper and lower case unless it is in an adjective cluster. A collocation
is entered by joining the individual words with an underscore character
(<B>_ </B>). Numbers (integer or real) may be entered, either by themselves or
as part of a word string, by following the number with a double quote
(<B>" </B>). <P>
See <FONT SIZE=-1><B>Special Adjective Syntax </B></FONT>
for a description of adjective clusters
and markers. <P>
<I>word </I> may be followed by an integer <I>lex_id </I> from <B>1 </B> to <B>15
</B>. The <I>lex_id </I> is used to distinguish different senses of the same word
within a lexicographer file. The lexicographer assigns <I>lex_id </I> values,
usually in ascending order, although there is no requirement that the
numbers be consecutive. The default is <B>0 </B>, and does not have to be specified.
A <I>lex_id </I> must be used on pointers if the desired sense has a non-zero
<I>lex_id </I> in its synset specification. <P>
Word/pointer syntax is of the form:
<P>
<blockquote><B>[&nbsp;&nbsp; </B> <I>word[ </I> <B>( </B> <I>marker </I> <B>) </B> <I>][lex_id] </I> <B>, </B> <I>&nbsp;&nbsp;pointers&nbsp;&nbsp; </I> <B>] </B> <BR>
</blockquote>
<P>
This syntax
is used when one or more pointers correspond only to the specific word
in the word/pointer set, rather than all the words in the synset, and
represents a lexical relation. Note that a word/pointer set appears within
a synset, therefore the square brackets used to enclose it are treated
differently from those used to define an adjective cluster. Only one word
can be specified in each word/pointer set, and any number of pointers
may be included. A synset can have any number of word/pointer sets. Each
is treated by <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
essentially as a <I>word </I>, so they all must appear
before any synset <I>pointers </I> representing semantic relations. <P>
For verbs,
the word/pointer syntax is extended in the following manner to allow the
user to specify generic sentence frames that, like pointers, correspond
only to a specific word, rather than all the words in the synset. In this
case, <I>pointers </I> are optional. <P>
<blockquote><B>[&nbsp;&nbsp; </B> <I>word </I> <B>, </B> &nbsp;&nbsp;<I>[pointers]&nbsp;&nbsp;frames&nbsp;&nbsp; </I> <B>]
</B> <BR>
</blockquote>
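<P>
The simple word and word/marker forms above are regular enough to split with
one expression. A minimal, unofficial Python sketch (it assumes the markers
<B>(p) </B>, <B>(a) </B>, <B>(ip) </B> and the 1-15 <I>lex_id </I> range
described in this section):
<P>
<PRE>
import re

# Groups: word, optional marker (p, a or ip), optional lex_id (1-15).
WORD_RE = re.compile(r'^(.+?)(?:\((p|a|ip)\))?(1[0-5]|[1-9])?,$')

word, marker, lex_id = WORD_RE.match('lukewarm(a),').groups()
# -> ('lukewarm', 'a', None); 'dog1,' -> ('dog', None, '1')
</PRE>
<P>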
<H3><A NAME="sect7" HREF="#toc7">Pointer Syntax </A></H3>
Pointers are optional in synsets. If a pointer is specified
outside of a word/pointer set, the relation is applied to all of the words
in the synset, including any words specified using the word/pointer syntax.
This indicates a semantic relation between the meanings of the words
in the synsets. If specified within a word/pointer set, the relation corresponds
only to the word in the set and represents a lexical relation. <P>
A pointer
is of the form: <P>
<blockquote><I>[lex_filename </I><B>: </B> <I>]word[lex_id] </I><B>, </B><I>pointer_symbol </I> <BR>
</blockquote>
<P>
or: <P>
<blockquote><I>[lex_filename </I><B>: </B> <I>]word[lex_id] </I><B>^ </B><I>word[lex_id] </I><B>, </B><I>pointer_symbol </I> <BR>
</blockquote>
<P>
For pointers, <I>word </I> indicates a word in another synset. When the second
form of a pointer is used, the first <I>word </I> indicates a word in a head
synset, and the second is a word in a satellite of that cluster. <I>word
</I> may be followed by a <I>lex_id </I> that is used to match the pointer to the
correct target synset. The synset containing <I>word </I> may reside in another
lexicographer file. In this case, <I>word </I> is preceded by <I>lex_filename </I> as
shown. <P>
See <FONT SIZE=-1><B>Pointers </B></FONT>
for a list of <I>pointer_symbol </I>s and their meanings.
<H3><A NAME="sect8" HREF="#toc8">Verb Frame List Syntax </A></H3>
Frame numbers corresponding to generic sentence
frames must be entered in each verb synset. If a frame list is specified
outside of a word/pointer set, the verb frames in the list apply to all
of the words in the synset, including any words specified using the word/pointer
syntax. If specified within a word/pointer set, the verb frames in the
list correspond only to the word in the set. <P>
A frame number list is entered
as follows: <P>
<blockquote><B>frames: </B>&nbsp;&nbsp;<I>f_num </I>[<B>, </B><I>f_num...] </I> </blockquote>
<P>
Where <I>f_num </I> specifies a generic
frame number. See <FONT SIZE=-1><B>Verb Frames </B></FONT>
for a list of generic sentences and their
corresponding frame numbers.
<H3><A NAME="sect9" HREF="#toc9">Gloss Syntax </A></H3>
A gloss is included in all synsets.
The lexicographer may enter a text string of any length desired. A gloss
is simply a string enclosed in parentheses with no embedded carriage returns.
It provides a definition of what the synset represents and/or example
sentences.
<H3><A NAME="sect10" HREF="#toc10">Special Adjective Syntax </A></H3>
The syntax for representing antonymous
adjective synsets requires several additional conditions. <P>
The first word
of a head synset <B>must </B> be entered in upper case, and can be thought of
as the head word of the head synset. The <I>word </I> part of a pointer from
one head synset to another head synset within the same cluster (usually
an antonym) must also be entered in upper case. Usually antonymous adjectives
are entered using the word/pointer syntax described in <FONT SIZE=-1><B>Word Syntax </B></FONT>
to
indicate a lexical relation. There is no restriction on the number of
parts that a cluster may have, and some clusters have three parts, representing
antonymous triplets, such as <B>solid </B>, <B>liquid </B>, and <B>gas </B>. <P>
A cross-cluster
pointer may be specified, allowing a head or satellite synset to point
to a head synset in a different cluster. A cross-cluster pointer is indicated
by entering the <I>word </I> part of the pointer in upper case. <P>
An adjective
may be annotated with a syntactic marker indicating a limitation on the
syntactic position the adjective may have in relation to the noun that it
modifies. If so marked, the marker appears between the word and its following
comma. If a <I>lex_id </I> is specified, the marker immediately follows it. The
syntactic markers are: <blockquote><B>(p) </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;predicate position <BR>
<B>(a) </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;prenominal (attributive)
position <BR>
<B>(ip) </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;immediately postnominal position<tt> </tt>&nbsp;<tt> </tt>&nbsp;<tt> </tt>&nbsp;<tt> </tt>&nbsp; <BR>
</blockquote>
<H2><A NAME="sect11" HREF="#toc11">EXAMPLES </A></H2>
<I>(Note that
these are hypothetical examples not found in the WordNet lexicographer
files.) </I> <P>
Sample noun synsets: <blockquote>{ canine, [ dog1, cat,! ] pooch, canid,@
} <BR>
{ collie, dog1,@ (large multi-colored dog with pointy nose) } <BR>
{ hound,
hunting_dog, pack,#m dog1,@ } <BR>
{ dog, } <BR>
</blockquote>
<P>
Sample verb synsets: <blockquote>{ [ confuse,
clarify,! frames: 1 ] blur, obscure, frames: 8, 10 } <BR>
{ [ clarify, confuse,!
] make_clear, interpret,@ frames: 8 } <BR>
{ interpret, construe, understand,@
frames: 8 } <BR>
</blockquote>
<P>
Sample adjective clusters: <blockquote>[ <BR>
{ [ HOT, COLD,! ] lukewarm(a),
TEPID,^ (hot to the touch) } <BR>
{ warm, } <BR>
- <BR>
{ [ COLD, HOT,! ] frigid, (cold
to the touch) } <BR>
{ freezing, } <BR>
] <BR>
</blockquote>
<P>
Sample adverb synsets: <blockquote>{ [ basically,
adj.all:essential^basic,\ ] [ essentially, adj.all:basic^fundamental,\ ] ( by
one's very nature )} <BR>
{ pointedly, adj.all:pungent^pointed,\ } <BR>
{ [ badly,
adj.all:bad,\ well,! ] ill, ("He was badly prepared") } <BR>
</blockquote>
<H2><A NAME="sect12" HREF="#toc12">SEE ALSO </A></H2>
<B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
,
<B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
, <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="uniqbeg.7WN.html">uniqbeg</B>(7WN)</A>
, <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
. <P>
Fellbaum,
C. (1998), ed. <I>"WordNet: An Electronic Lexical Database" </I>. MIT Press, Cambridge,
MA. <P>
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">Lexicographer File Names</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Pointers</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Verb Frames</A></LI>
<LI><A NAME="toc5" HREF="#sect5">Lexicographer File Format</A></LI>
<LI><A NAME="toc6" HREF="#sect6">Word Syntax</A></LI>
<LI><A NAME="toc7" HREF="#sect7">Pointer Syntax</A></LI>
<LI><A NAME="toc8" HREF="#sect8">Verb Frame List Syntax</A></LI>
<LI><A NAME="toc9" HREF="#sect9">Gloss Syntax</A></LI>
<LI><A NAME="toc10" HREF="#sect10">Special Adjective Syntax</A></LI>
</UL>
<LI><A NAME="toc11" HREF="#sect11">EXAMPLES</A></LI>
<LI><A NAME="toc12" HREF="#sect12">SEE ALSO</A></LI>
</UL>
</BODY></HTML>
View File

@ -0,0 +1,81 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNINTRO(1WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnintro - WordNet user commands
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<P>
<B>wn </B> - command line interface
to WordNet database <P>
<B>wnb </B> - window based WordNet browser
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION </A></H2>
This
section of the <I>WordNet Reference Manual </I> contains manual pages that describe
commands available with the various WordNet system packages. <P>
The WordNet
interfaces <B><A HREF="wn.1WN.html">wn</B>(1WN)</A>
and <B><A HREF="wnb.1WN.html">wnb</B>(1WN)</A>
allow the user to search the WordNet
database and display the information textually.
<H2><A NAME="sect3" HREF="#toc3">ENVIRONMENT VARIABLES
(UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet. Default is <B>/usr/local/WordNet-3.0
</B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in which the WordNet database has been installed.
Default is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect4" HREF="#toc4">REGISTRY (WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B>
</DT>
<DD>Base directory for WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect5" HREF="#toc5">SEE
ALSO </A></H2>
<B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
, <B><A HREF="wn.1WN.html">wn</B>(1WN)</A>
, <B><A HREF="wnb.1WN.html">wnb</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="wnintro.7WN.html">wnintro</B>(7WN)</A>
.
<P>
Fellbaum, C. (1998), ed. <I>"WordNet: An Electronic Lexical Database" </I>. MIT
Press, Cambridge, MA.
<H2><A NAME="sect6" HREF="#toc6">AVAILABILITY </A></H2>
WordNet has a World Wide Web site at
<B><A HREF="http://wordnet.princeton.edu">http://wordnet.princeton.edu</A>
</B>. From this web site users can learn about
the WordNet project, run several different interfaces to the WordNet database,
and download various WordNet system packages and <I>"Five Papers on WordNet"
</I>. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc4" HREF="#sect4">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc5" HREF="#sect5">SEE ALSO</A></LI>
<LI><A NAME="toc6" HREF="#sect6">AVAILABILITY</A></LI>
</UL>
</BODY></HTML>
View File

@ -0,0 +1,365 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNINTRO(3WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnintro - introduction to WordNet library functions
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION
</A></H2>
This section of the <I>WordNet Reference Manual </I> contains manual pages that
describe the WordNet library functions and API. <P>
Functions are organized
into the following categories: <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=LEFT><B>Category </B> </TD> <TD ALIGN=LEFT><B>Manual Page </B> </TD> <TD ALIGN=LEFT><B>Object File
</B> </TD> </TR>
<TR> <TD ALIGN=LEFT>Database Search </TD> <TD ALIGN=LEFT>wnsearch (3WN) </TD> <TD ALIGN=LEFT>search.o </TD> </TR>
<TR> <TD ALIGN=LEFT>Morphology </TD> <TD ALIGN=LEFT>morph (3WN)
</TD> <TD ALIGN=LEFT>morph.o </TD> </TR>
<TR> <TD ALIGN=LEFT>Misc. Utility </TD> <TD ALIGN=LEFT>wnutil (3WN) </TD> <TD ALIGN=LEFT>wnutil.o </TD> </TR>
<TR> <TD ALIGN=LEFT>Binary Search </TD> <TD ALIGN=LEFT>binsrch
(3WN) </TD> <TD ALIGN=LEFT>binsrch.o </TD> </TR>
</TABLE>
<P>
The WordNet library is used by all of the searching
interfaces provided with the various WordNet packages. Additional programs
in the system, such as <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
, also use functions in this library.
<P>
The WordNet library is provided in both source and binary forms (on some
platforms) to allow users to build applications and tools to their own
specifications that utilize the WordNet database. We do not provide programming
support or assistance. <P>
The code conforms to ANSI C standards. Functions
are defined with function prototypes. If you do not have a compiler that
accepts prototypes, you must edit the source code and remove the prototypes
before compiling.
<H2><A NAME="sect2" HREF="#toc2">LIST OF WORDNET LIBRARY FUNCTIONS </A></H2>
Not all library functions
are listed below. Missing are mainly functions that are called by documented
ones, or ones that were written for specific applications or tools used
during WordNet development. Data structures are defined in <B>wn.h </B>. <P>
<H3><A NAME="sect3" HREF="#toc3">Database
Searching Functions (search.o) </A></H3>
<P>
<DL>
<DT><B>findtheinfo </B> </DT>
<DD>Primary search function for
WordNet database. Returns formatted search results in text buffer. Used
by WordNet interfaces to perform requested search. </DD>
<DT><B>findtheinfo_ds</B> </DT>
<DD>Primary
search function for WordNet database. Returns search results in linked
list data structure. </DD>
<DT><B>is_defined</B> </DT>
<DD>Set bit for each search type that is valid
for the search word passed and return bit mask. </DD>
<DT><B>in_wn</B> </DT>
<DD>Set bit for each
syntactic category that search word is in. </DD>
<DT><B>index_lookup</B> </DT>
<DD>Find word in index
file and return parsed entry in data structure. Input word must be exact
match of string in database. Called by <B>getindex() </B>. </DD>
<DT><B>getindex</B> </DT>
<DD>Find word
in index file, trying different techniques - replace hyphens with underscores,
replace underscores with hyphens, strip hyphens and underscores, strip
periods. </DD>
<DT><B>read_synset</B> </DT>
<DD>Read synset from data file at byte offset passed
and return parsed entry in data structure. Calls <B>parse_synset() </B>. </DD>
<DT><B>parse_synset</B>
</DT>
<DD>Read synset at current byte offset in file and return parsed entry in
data structure. </DD>
<DT><B>free_syns</B> </DT>
<DD>Free a synset linked list allocated by <B>findtheinfo_ds()
</B>. </DD>
<DT><B>free_synset</B> </DT>
<DD>Free a synset structure. </DD>
<DT><B>free_index</B> </DT>
<DD>Free an index structure.
</DD>
<DT><B>traceptrs_ds</B> </DT>
<DD>Recursive search algorithm to trace a pointer tree and return
results in linked list. </DD>
<DT><B>do_trace</B> </DT>
<DD>Do requested search on synset passed
returning formatted output in buffer. </DD>
</DL>
<P>
<H3><A NAME="sect4" HREF="#toc4">Morphology Functions (morph.o) </A></H3>
<P>
<DL>
<DT><B>morphinit</B> </DT>
<DD>Open exception list files. </DD>
<DT><B>re_morphinit</B> </DT>
<DD>Close exception list
files and reopen. </DD>
<DT><B>morphstr</B> </DT>
<DD>Try to find base form (lemma) of word or collocation
in syntactic category passed. Calls <B>morphword() </B> for each word in string
passed. </DD>
<DT><B>morphword</B> </DT>
<DD>Try to find base form (lemma) of individual word in
syntactic category passed. </DD>
</DL>
<P>
<H3><A NAME="sect5" HREF="#toc5">Utility Functions (wnutil.o) </A></H3>
<P>
<DL>
<DT><B>wninit</B> </DT>
<DD>Top level
function to open database files and morphology exception lists. </DD>
<DT><B>re_wninit</B>
</DT>
<DD>Top level function to close and reopen database files and morphology
exception lists. </DD>
<DT><B>cntwords</B> </DT>
<DD>Count the number of underscore or space separated
words in a string. </DD>
<DT><B>strtolower</B> </DT>
<DD>Convert string to lower case and remove
trailing adjective marker if found. </DD>
<DT><B>ToLowerCase</B> </DT>
<DD>Convert string passed
to lower case. </DD>
<DT><B>strsubst</B> </DT>
<DD>Replace all occurrences of <I>from </I> with <I>to </I> in <I>str
</I>. </DD>
<DT><B>getptrtype</B> </DT>
<DD>Return code for pointer type character passed. </DD>
<DT><B>getpos</B> </DT>
<DD>Return
syntactic category code for string passed. </DD>
<DT><B>getsstype</B> </DT>
<DD>Return synset type
code for string passed. </DD>
<DT><B>FmtSynset</B> </DT>
<DD>Reconstruct synset string from synset
pointer. </DD>
<DT><B>StrToPos</B> </DT>
<DD>Passed string for syntactic category, returns corresponding
integer value. </DD>
<DT><B>GetSynsetForSense</B> </DT>
<DD>Return synset for sense key passed. </DD>
<DT><B>GetDataOffset</B>
</DT>
<DD>Find synset offset for sense. </DD>
<DT><B>GetPolyCount</B> </DT>
<DD>Find polysemy count for sense
passed. </DD>
<DT><B>GetWORD</B> </DT>
<DD>Return word part of sense key. </DD>
<DT><B>GetPOS</B> </DT>
<DD>Return syntactic
category code for sense key passed. </DD>
<DT><B>WNSnsToStr</B> </DT>
<DD>Generate sense key for
index entry passed. </DD>
<DT><B>GetValidIndexPointer</B> </DT>
<DD>Search for string and/or base
form of word in database and return index structure for word if found.
</DD>
<DT><B>GetWNSense</B> </DT>
<DD>Return sense number in database for sense key. </DD>
<DT><B>GetSenseIndex</B>
</DT>
<DD>Return parsed sense index entry for sense key passed. </DD>
<DT><B>default_display_message</B>
</DT>
<DD>Default function to use as value of <B>display_message </B>. Simply returns
<B>-1 </B>. </DD>
</DL>
<P>
<H3><A NAME="sect6" HREF="#toc6">Binary Search Functions (binsrch.o) </A></H3>
<P>
<DL>
<DT><B>bin_search</B> </DT>
<DD>General purpose binary
search function to search for key as first item on line in sorted file.
</DD>
<DT><B>copyfile</B> </DT>
<DD>Copy contents from one file to another. </DD>
<DT><B>replace_line</B> </DT>
<DD>Replace
a line in a sorted file. </DD>
<DT><B>insert_line</B> </DT>
<DD>Insert a line into a sorted file.
</DD>
</DL>
<H2><A NAME="sect7" HREF="#toc7">HEADER FILE </A></H2>
<DL>
<DT><B>wn.h</B> </DT>
<DD>WordNet include file of constants, data structures,
external declarations for global variables initialized in <B>wnglobal.c </B>. Also
lists function prototypes for library API. It must be included to use any
WordNet library functions. </DD>
</DL>
<H2><A NAME="sect8" HREF="#toc8">NOTES </A></H2>
All library functions that access the
database files expect the files to be open. The function <B><A HREF="wninit.3WN.html">wninit</B>(3WN)</A>
must
be called before other database access functions such as <B><A HREF="findtheinfo.3WN.html">findtheinfo</B>(3WN)</A>
or <B><A HREF="read_synset.3WN.html">read_synset</B>(3WN)</A>
. <P>
Inclusion of the header file <B>wn.h </B> is necessary. <P>
The
command line interface is a good example of a simple application that
uses several WordNet library functions. <P>
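A minimal sketch of such an application follows. It is illustrative only:
the search word is arbitrary, and the constants <B>NOUN </B>, <B>HYPERPTR </B> and
<FONT SIZE=-1><B>ALLSENSES </B></FONT>
are assumed to be defined in <B>wn.h </B>. <P>
<PRE>
/* Minimal WordNet client: print formatted hypernyms of the noun "dog".
   Sketch only -- error handling beyond the init check is omitted. */
#include &lt;stdio.h&gt;
#include "wn.h"

int main(void)
{
    char *result;

    if (wninit() == -1) {            /* open the database files first */
        fprintf(stderr, "wninit failed\n");
        return 1;
    }
    /* Formatted results for all senses are returned in a text buffer. */
    result = findtheinfo("dog", NOUN, HYPERPTR, ALLSENSES);
    if (result != NULL)
        printf("%s\n", result);
    return 0;
}
</PRE>
<P>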
Many of the library functions
are passed or return syntactic category or synset type information. The
following table lists the possible categories as integer codes, synset
type constant names, syntactic category constant names, single characters
and character strings. <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>Integer </B> </TD> <TD ALIGN=CENTER><B>Synset Type </B> </TD> <TD ALIGN=CENTER><B>Syntactic Category </B>
</TD> <TD ALIGN=CENTER><B>Char </B> </TD> <TD ALIGN=CENTER><B>String </B> </TD> </TR>
<TR> <TD ALIGN=CENTER>1 </TD> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=LEFT>NOUN </TD> <TD ALIGN=CENTER>n </TD> <TD ALIGN=LEFT>noun </TD> </TR>
<TR> <TD ALIGN=CENTER>2 </TD> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=LEFT>VERB </TD> <TD ALIGN=CENTER>v </TD> <TD ALIGN=LEFT>verb
</TD> </TR>
<TR> <TD ALIGN=CENTER>3 </TD> <TD ALIGN=LEFT>ADJ </TD> <TD ALIGN=LEFT>ADJ </TD> <TD ALIGN=CENTER>a </TD> <TD ALIGN=LEFT>adj </TD> </TR>
<TR> <TD ALIGN=CENTER>4 </TD> <TD ALIGN=LEFT>ADV </TD> <TD ALIGN=LEFT>ADV </TD> <TD ALIGN=CENTER>r </TD> <TD ALIGN=LEFT>adv </TD> </TR>
<TR> <TD ALIGN=CENTER>5 </TD> <TD ALIGN=LEFT>SATELLITE </TD> <TD ALIGN=LEFT>ADJ </TD> <TD ALIGN=CENTER>s
</TD> <TD ALIGN=LEFT><I>n/a </I> </TD> </TR>
</TABLE>
<H2><A NAME="sect9" HREF="#toc9">ENVIRONMENT VARIABLES (UNIX) </A></H2>
<DL>
<DT><B>WNHOME</B> </DT>
<DD>Base directory for WordNet.
Default is <B>/usr/local/WordNet-3.0 </B>. </DD>
<DT><B>WNSEARCHDIR</B> </DT>
<DD>Directory in which the
WordNet database has been installed. Default is <B>WNHOME/dict </B>. </DD>
</DL>
<H2><A NAME="sect10" HREF="#toc10">REGISTRY
(WINDOWS) </A></H2>
<DL>
<DT><B>HKEY_LOCAL_MACHINE\SOFTWARE\WordNet\3.0\WNHome</B> </DT>
<DD>Base directory for
WordNet. Default is <B>C:\Program&nbsp;Files\WordNet\3.0 </B>. </DD>
</DL>
<H2><A NAME="sect11" HREF="#toc11">FILES </A></H2>
<DL>
<DT><B>lib/libwn.a</B> </DT>
<DD>WordNet
library (Unix) </DD>
<DT><B>lib\wn.lib</B> </DT>
<DD>WordNet library (Windows) </DD>
<DT><B>include</B> </DT>
<DD>header files
for use with WordNet library </DD>
</DL>
<H2><A NAME="sect12" HREF="#toc12">SEE ALSO </A></H2>
<B><A HREF="wnintro.1WN.html">wnintro</B>(1WN)</A>
, <B><A HREF="binsrch.3WN.html">binsrch</B>(3WN)</A>
, <B><A HREF="morph.3WN.html">morph</B>(3WN)</A>
,
<B><A HREF="wnsearch.3WN.html">wnsearch</B>(3WN)</A>
, <B><A HREF="wnutil.3WN.html">wnutil</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="wnintro.7WN.html">wnintro</B>(7WN)</A>
. <P>
Fellbaum, C. (1998),
ed. <I>"WordNet: An Electronic Lexical Database" </I>. MIT Press, Cambridge, MA.
<H2><A NAME="sect13" HREF="#toc13">BUGS </A></H2>
Please report bugs to <B>wordnet@princeton.edu </B>. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<LI><A NAME="toc2" HREF="#sect2">LIST OF WORDNET LIBRARY FUNCTIONS</A></LI>
<UL>
<LI><A NAME="toc3" HREF="#sect3">Database Searching Functions (search.o)</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Morphology Functions (morph.o)</A></LI>
<LI><A NAME="toc5" HREF="#sect5">Utility Functions (wnutil.o)</A></LI>
<LI><A NAME="toc6" HREF="#sect6">Binary Search Functions (binsrch.o)</A></LI>
</UL>
<LI><A NAME="toc7" HREF="#sect7">HEADER FILE</A></LI>
<LI><A NAME="toc8" HREF="#sect8">NOTES</A></LI>
<LI><A NAME="toc9" HREF="#sect9">ENVIRONMENT VARIABLES (UNIX)</A></LI>
<LI><A NAME="toc10" HREF="#sect10">REGISTRY (WINDOWS)</A></LI>
<LI><A NAME="toc11" HREF="#sect11">FILES</A></LI>
<LI><A NAME="toc12" HREF="#sect12">SEE ALSO</A></LI>
<LI><A NAME="toc13" HREF="#sect13">BUGS</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,71 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNINTRO(5WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnintro - introduction to descriptions of WordNet file formats
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<P>
<B>cntlist </B> - format of <B>cntlist </B> and <B>cntlist.rev </B> files <P>
<B>lexnames </B>
- list of lexicographer file names and numbers <P>
<B>prologdb </B> - description of
Prolog database files <P>
<B>senseidx </B> - format of sense index file <P>
<B>sensemap </B>
- mapping from senses in WordNet 2.1 to corresponding 3.0 senses <P>
<B>wndb </B> - format
of WordNet database files <P>
<B>wninput </B> - format of WordNet lexicographer files
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION </A></H2>
This section of the <I>WordNet Reference Manual </I> contains manual
pages that describe the formats of the various files included in different
WordNet 3.0 packages.
<H2><A NAME="sect3" HREF="#toc3">NOMENCLATURE </A></H2>
All files are in ASCII. Fields are generally
separated by one space, unless otherwise noted, and each line is terminated
with a newline character. In the file format descriptions, terms in <I>italics
</I> refer to field names. Characters or strings in <B>boldface </B> represent an
actual character or string as it appears in the file. Items enclosed in
italicized square brackets (<I>[&nbsp;&nbsp;] </I>) may not be present. Since several files
contain fields that have the identical meaning, field names are consistently
defined. For example, several WordNet files contain one or more <I>synset_offset
</I> fields. In each case, the definition of <I>synset_offset </I> is identical.
<H2><A NAME="sect4" HREF="#toc4">SEE ALSO </A></H2>
<B><A HREF="wnintro.1WN.html">wnintro</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="cntlist.5WN.html">cntlist</B>(5WN)</A>
, <B><A HREF="lexnames.5WN.html">lexnames</B>(5WN)</A>
, <B><A HREF="prologdb.5WN.html">prologdb</B>(5WN)</A>
,
<B><A HREF="senseidx.5WN.html">senseidx</B>(5WN)</A>
, <B><A HREF="sensemap.5WN.html">sensemap</B>(5WN)</A>
, <B><A HREF="wndb.5WN.html">wndb</B>(5WN)</A>
, <B><A HREF="wninput.5WN.html">wninput</B>(5WN)</A>
, <B><A HREF="wnintro.7WN.html">wnintro</B>(7WN)</A>
, <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
.
<P>
Fellbaum, C. (1998), ed. <I>"WordNet: An Electronic Lexical Database" </I>. MIT
Press, Cambridge, MA. <P>
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">NOMENCLATURE</A></LI>
<LI><A NAME="toc4" HREF="#sect4">SEE ALSO</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,57 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNINTRO(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnintro - introduction to miscellaneous WordNet information
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS
</A></H2>
<P>
<B>morphy </B> - discussion of WordNet's morphological processing <P>
<B>uniqbeg </B> - unique
beginners for noun hierarchies <P>
<B>wngloss </B> - glossary of terms used in WordNet
<P>
<B>wngroups </B> - discussion of WordNet search code to group similar senses <P>
<B>wnlicens
</B> - text of WordNet license agreement <P>
<B>wnpkgs </B> - information about WordNet
packages and distribution <P>
<B>wnstats </B> - database statistics
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION </A></H2>
This
section of the <I>WordNet Reference Manual </I> contains manual pages that describe
various topics related to WordNet and the semantic concordances, and a
glossary of terms.
<H2><A NAME="sect3" HREF="#toc3">SEE ALSO </A></H2>
<B><A HREF="wnintro.1WN.html">wnintro</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
,
<B><A HREF="morphy.7WN.html">morphy</B>(7WN)</A>
, <B><A HREF="uniqbeg.7WN.html">uniqbeg</B>(7WN)</A>
, <B><A HREF="wngroups.7WN.html">wngroups</B>(7WN)</A>
, <B><A HREF="wnlicens.7WN.html">wnlicens</B>(7WN)</A>
, <B><A HREF="wnpkgs.7WN.html">wnpkgs</B>(7WN)</A>
,
<B><A HREF="wnstats.7WN.html">wnstats</B>(7WN)</A>
, <B><A HREF="wngloss.7WN.html">wngloss</B>(7WN)</A>
. <P>
Fellbaum, C. (1998), ed. <I>"WordNet: An Electronic
Lexical Database" </I>. MIT Press, Cambridge, MA. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">SEE ALSO</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,45 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNLICENS(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnlicens - text of WordNet license
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
WordNet Release 3.0
<P>
This software and database is being provided to you, the LICENSEE, by
Princeton University under the following license. By obtaining, using
and/or copying this software and database, you agree that you have
read, understood, and will comply with these terms and conditions:
Permission to use, copy, modify and distribute this software and
database and its documentation for any purpose and without fee or royalty
is hereby granted, provided that you agree to comply with the following
copyright notice and statements, including the disclaimer, and that
the same appear on ALL copies of the software, database and documentation,
including modifications that you make for internal use or for distribution.
WordNet 3.0 Copyright 2006 by Princeton University. All rights reserved.
THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON UNIVERSITY
MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, PRINCETON UNIVERSITY MAKES NO REPRESENTATIONS
OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE
OR THAT THE USE OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION
WILL NOT INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR
OTHER RIGHTS. The name of Princeton University or Princeton may
not be used in advertising or publicity pertaining to distribution of
the software and/or database. Title to copyright in this software, database
and any associated documentation shall at all times remain with Princeton
University and LICENSEE agrees to preserve same. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,95 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNPKGS(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnpkgs - description of various WordNet system packages
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION
</A></H2>
WordNet 3.0 is distributed in several formats and in various packages. All
of the packages are available via anonymous FTP from <B>ftp.cogsci.princeton.edu
</B> and from the WordNet Web site at <B><A HREF="http://wordnet.princeton.edu">http://wordnet.princeton.edu</A>
</B>.
<H3><A NAME="sect2" HREF="#toc2">Packages
Available Via FTP and WWW </A></H3>
The following WordNet packages can be downloaded
using a web browser from <B>ftp://ftp.cogsci.princeton.edu/wordnet/3.0 </B>, or from
the Web site noted above. Users can also FTP directly from <B>ftp.cogsci.princeton.edu
</B>, directory <B>wordnet/3.0 </B>. <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>Package </B> </TD> <TD ALIGN=CENTER><B>Filename </B> </TD> <TD ALIGN=CENTER><B>Platform </B> </TD> <TD ALIGN=CENTER><B>Description
</B> </TD> </TR>
<TR> <TD ALIGN=LEFT>Database </TD> <TD ALIGN=LEFT><B>WordNet-3.0.tar.gz </B> </TD> <TD ALIGN=LEFT>Unix/OS X </TD> <TD ALIGN=LEFT>WordNet 3.0 database, interfaces,
sense index, interface and library source code, documentation. </TD> </TR>
<TR> <TD ALIGN=LEFT>Database
</TD> <TD ALIGN=LEFT><B>WordNet-3.0.exe </B> </TD> <TD ALIGN=LEFT>Windows </TD> <TD ALIGN=LEFT>WordNet 3.0 database, interfaces, sense index,
interface and library source code, documentation. </TD> </TR>
<TR> <TD ALIGN=LEFT>Prolog Database </TD>
<TD ALIGN=LEFT><B>WNprolog-3.0.tar.gz </B> </TD> <TD ALIGN=LEFT>All </TD> <TD ALIGN=LEFT>WordNet 3.0 database files in Prolog-readable format,
documentation. </TD> </TR>
<TR> <TD ALIGN=LEFT>Sense Map </TD> <TD ALIGN=LEFT><B>WNsnsmap-3.0.tar.gz </B> </TD> <TD ALIGN=LEFT>All </TD> <TD ALIGN=LEFT>Mapping of 2.1 to 3.0
senses, documentation. </TD> </TR>
</TABLE>
<P>
<H3><A NAME="sect3" HREF="#toc3">Database Package </A></H3>
The database package is a
complete installation for WordNet 3.0 users. It includes the 3.0 database
files, source code for the WordNet browsers and library, and documentation.
The other packages are not included - they must be downloaded and installed
separately. <P>
Note that with this version of WordNet for Unix platforms,
only source code is provided. Users should carefully read the README and
INSTALL files for detailed information on compiling WordNet and dependencies.
<P>
<H3><A NAME="sect4" HREF="#toc4">Prolog Database Package </A></H3>
The WordNet 3.0 database files are available
in this package in a Prolog-readable format. Documentation describing the
file format is included. This package is only downloadable in compressed
tar file format, although once unpackaged it can be used from Windows
systems since the files are in ASCII. Many Windows utilities, such as
WinZip, can deal with a compressed tar file.
<H3><A NAME="sect5" HREF="#toc5">Sense Map Package </A></H3>
To help
users automatically convert 2.1 noun and verb senses to their corresponding
3.0 senses, we provide sense mapping information in this package. This
package contains files to map polysemous and monosemous words, and documentation
that describes the format of these files. As with the Prolog database,
this package is only downloadable in compressed tar format, but the files
are also in ASCII.
<H2><A NAME="sect6" HREF="#toc6">NOTES </A></H2>
The lexicographer files and <B><A HREF="grind.1WN.html">grind</B>(1WN)</A>
program
are not generally distributed. <P>
Some of the packages described above may
not be available at the time of release of the 3.0 database package.
<H2><A NAME="sect7" HREF="#toc7">SEE
ALSO </A></H2>
<B><A HREF="wnintro.1WN.html">wnintro</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="wnintro.7WN.html">wnintro</B>(7WN)</A>
. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">Packages Available Via FTP and WWW</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Database Package</A></LI>
<LI><A NAME="toc4" HREF="#sect4">Prolog Database Package</A></LI>
<LI><A NAME="toc5" HREF="#sect5">Sense Map Package</A></LI>
</UL>
<LI><A NAME="toc6" HREF="#sect6">NOTES</A></LI>
<LI><A NAME="toc7" HREF="#sect7">SEE ALSO</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,338 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNSEARCH(3WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
findtheinfo, findtheinfo_ds, is_defined, in_wn, index_lookup, parse_index,
getindex, read_synset, parse_synset, free_syns, free_synset, free_index,
traceptrs_ds, do_trace
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<P>
<B>#include "wn.h" <P>
<B>char *findtheinfo(char
*searchstr, int pos, int ptr_type, int sense_num); </B> <P>
<B>SynsetPtr findtheinfo_ds(char
*searchstr, int pos, int ptr_type, int sense_num ); </B> <P>
<B>unsigned int is_defined(char
*searchstr, int pos); </B> <P>
<B>unsigned int in_wn(char *searchstr, int pos); </B>
<P>
<B>IndexPtr index_lookup(char *searchstr, int pos); </B> <P>
<B>IndexPtr parse_index(long
offset, int dbase, char *line); </B> <P>
<B>IndexPtr getindex(char *searchstr, int
pos); </B> <P>
<B>SynsetPtr read_synset(int pos, long synset_offset, char *searchstr);
</B> <P>
<B>SynsetPtr parse_synset(FILE *fp, int pos, char *searchstr); </B> <P>
<B>void free_syns(SynsetPtr
synptr); </B> <P>
<B>void free_synset(SynsetPtr synptr); </B> <P>
<B>void free_index(IndexPtr
idx); </B> <P>
<B>SynsetPtr traceptrs_ds(SynsetPtr synptr, int ptr_type, int pos,
int depth); </B> <P>
<B>char *do_trace(SynsetPtr synptr, int ptr_type, int pos, int
depth); </B>
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION </A></H2>
<P>
These functions are used for searching the WordNet
database. They generally fall into several categories: functions for reading
and parsing index file entries; functions for reading and parsing synsets
in data files; functions for tracing pointers and hierarchies; functions
for freeing space occupied by data structures allocated with <B><A HREF="malloc.3.html">malloc</B>(3)</A>
.
<P>
In the following function descriptions, <I>pos </I> is one of the following:
<P>
<blockquote><B>1 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;NOUN <BR>
<B>2 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;VERB <BR>
<B>3 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADJECTIVE <BR>
<B>4 </B><tt> </tt>&nbsp;<tt> </tt>&nbsp;ADVERB <BR>
</blockquote>
<P>
<B>findtheinfo()</B> is the primary
search algorithm for use with database interface applications. Search
results are automatically formatted, and a pointer to the text buffer
is returned. All searches listed in <B>WNHOME/include/wn.h</B> can be done by
<B>findtheinfo()</B>. <B>findtheinfo_ds()</B> can be used to perform most of the searches,
with results returned in a linked list data structure. This is for use
with applications that need to analyze the search results rather than
just display them. <P>
Both functions are passed the same arguments: <I>searchstr
</I> is the word or collocation to search for; <I>pos </I> indicates the syntactic
category to search in; <I>ptr_type </I> is one of the valid search types for
<I>searchstr </I> in <I>pos </I>. (Available searches can be obtained by calling <B>is_defined()</B>
described below.) <I>sense_num </I> should be <FONT SIZE=-1><B>ALLSENSES </B></FONT>
if the search is to
be done on all senses of <I>searchstr </I> in <I>pos </I>, or a positive integer indicating
which sense to search. <P>
<B>findtheinfo_ds() </B> returns a linked list of data structures
representing synsets. Senses are linked through the <I>nextss </I> field of a
<B>Synset </B> data structure. For each sense, synsets that match the search
specified with <I>ptr_type </I> are linked through the <I>ptrlist </I> field. See <FONT SIZE=-1><B>Synset
Navigation </B></FONT>
below, for detailed information on the linked lists returned.
<P>
<B>is_defined() </B> sets a bit for each search type that is valid for <I>searchstr
</I> in <I>pos </I>, and returns the resulting unsigned integer. Each bit number
corresponds to a pointer type constant defined in <B>WNHOME/include/wn.h </B>.
For example, if bit 2 is set, the <FONT SIZE=-1><B>HYPERPTR </B></FONT>
search is valid for <I>searchstr
</I>. There are 29 possible searches. <P>
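A caller can test the returned mask before searching, as in the sketch
below; it assumes, per the example above, that bit <I>n </I> (counting from
<B>1 </B>) corresponds to search type <I>n </I>. <P>
<PRE>
#include &lt;stdio.h&gt;
#include "wn.h"

/* Run a hypernym search only if is_defined() reports it as valid.
   Sketch: bit n of the mask (counting from 1) is assumed to mark
   search type n, as the HYPERPTR example above suggests. */
void search_if_valid(char *word)
{
    unsigned int mask = is_defined(word, NOUN);

    if (mask &amp; (1U &lt;&lt; (HYPERPTR - 1)))
        printf("%s\n", findtheinfo(word, NOUN, HYPERPTR, ALLSENSES));
}
</PRE>
<P>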
<B>in_wn() </B> is used to find the syntactic
categories in the WordNet database that contain one or more senses of
<I>searchstr </I>. If <I>pos </I> is <FONT SIZE=-1><B>ALL_POS, </B></FONT>
all syntactic categories are checked.
Otherwise, only the part of speech passed is checked. An unsigned integer
is returned with a bit set corresponding to each syntactic category containing
<I>searchstr </I>. The bit number matches the number for the part of speech.
<B>0 </B> is returned if <I>searchstr </I> is not present in <I>pos </I>. <P>
<B>index_lookup() </B> finds
<I>searchstr </I> in the index file for <I>pos </I> and returns a pointer to the parsed
entry in an <B>Index </B> data structure. <I>searchstr </I> must exactly match the form
of the word (lower case only, hyphens and underscores in the same places)
in the index file. <FONT SIZE=-1><B>NULL </B></FONT>
is returned if a match is not found. <P>
<B>parse_index()
</B> parses an entry from an index file and returns a pointer to the parsed
entry in an <B>Index </B> data structure. Passed the byte <I>offset </I> and syntactic
category, it reads the index entry at the desired location in the corresponding
file. If passed <I>line </I>, <I>line </I> contains an index file entry and the database
index file is not consulted. However, <I>offset </I> and <I>dbase </I> should still
be passed so the information can be stored in the <B>Index </B> structure. <P>
<B>getindex()
</B> is a "smart" search for <I>searchstr </I> in the index file corresponding to
<I>pos </I>. It applies to <I>searchstr </I> an algorithm that replaces underscores
with hyphens, hyphens with underscores, removes hyphens and underscores,
and removes periods in an attempt to find a form of the string that is
an exact match for an entry in the index file corresponding to <I>pos </I>. <B>index_lookup()
</B> is called on each transformed string until a match is found or all the
different strings have been tried. It returns a pointer to the parsed
<B>Index </B> data structure for <I>searchstr </I>, or <FONT SIZE=-1><B>NULL </B></FONT>
if a match is not found.
<P>
<B>read_synset() </B> is used to read a synset from a byte offset in a data
file. It performs an <B><A HREF="fseek.3.html">fseek </B>(3)</A>
to <I>synset_offset </I> in the data file corresponding
to <I>pos </I>, and calls <B>parse_synset() </B> to read and parse the synset. A pointer
to the <B>Synset </B> data structure containing the parsed synset is returned.
<P>
<B>parse_synset() </B> reads the synset at the current offset in the file indicated
by <I>fp </I>. <I>pos </I> is the syntactic category, and <I>searchstr </I>, if not <FONT SIZE=-1><B>NULL, </B></FONT>
indicates the word in the synset that the caller is interested in. An
attempt is made to match <I>searchstr </I> to one of the words in the synset.
If an exact match is found, the <I>whichword </I> field in the <B>Synset </B> structure
is set to that word's number in the synset (beginning to count from <B>1 </B>).
<P>
<B>free_syns() </B> is used to free a linked list of <B>Synset </B> structures allocated
by <B>findtheinfo_ds() </B>. <I>synptr </I> is a pointer to the list to free. <P>
<B>free_synset()
</B> frees the <B>Synset </B> structure pointed to by <I>synptr </I>. <P>
<B>free_index() </B> frees
the <B>Index </B> structure pointed to by <I>idx </I>. <P>
<B>traceptrs_ds() </B> is a recursive
search algorithm that traces pointers matching <I>ptr_type </I> starting with
the synset pointed to by <I>synptr </I>. Setting <I>depth </I> to <B>1 </B> when <B>traceptrs_ds()
</B> is called indicates a recursive search; <B>0 </B> indicates a non-recursive call.
<I>synptr </I> points to the data structure representing the synset to search
for a pointer of type <I>ptr_type </I>. When a pointer type match is found, the
synset pointed to is read and linked onto the <I>nextss </I> chain. Levels of
the tree generated by a recursive search are linked via the <I>ptrlist </I> field
structure until <FONT SIZE=-1><B>NULL </B></FONT>
is found, indicating the top (or bottom) of the
tree. This function is usually called from <B>findtheinfo_ds() </B> for each
sense of the word. See <FONT SIZE=-1><B>Synset Navigation </B></FONT>
below, for detailed information
on the linked lists returned. <P>
<B>do_trace() </B> performs the search indicated
by <I>ptr_type </I> on synset synptr in syntactic category <I>pos </I>. <I>depth </I> is
defined as above. <B>do_trace() </B> returns the search results formatted in
a text buffer.
<H3><A NAME="sect3" HREF="#toc3">Synset Navigation </A></H3>
Since the <B>Synset </B> structure is used to
represent the synsets for both word senses and pointers, the <I>ptrlist </I>
and <I>nextss </I> fields have different meanings depending on whether the structure
is a word sense or pointer. This can make navigation through the lists
returned by <B>findtheinfo_ds() </B> confusing. <P>
Navigation through the returned
list involves the following: <P>
Following the <I>nextss </I> chain from the synset
returned moves through the various senses of <I>searchstr </I>. <FONT SIZE=-1><B>NULL </B></FONT>
indicates
the end of the chain of senses. <P>
Following the <I>ptrlist </I> chain from a <B>Synset
</B> structure representing a sense traces the hierarchy of the search results
for that sense. Subsequent links in the <I>ptrlist </I> chain indicate the next
level (up or down, depending on the search) in the hierarchy. <FONT SIZE=-1><B>NULL </B></FONT>
indicates
the end of the chain of search result synsets. <P>
If a synset pointed to
by <I>ptrlist </I> has a value in the <I>nextss </I> field, it represents another pointer
of the same type at that level in the hierarchy. For example, some noun
synsets have two hypernyms. Following this <I>nextss </I> pointer, and then the
<I>ptrlist </I> chain from the <B>Synset </B> structure pointed to, traces another,
parallel, hierarchy, until the end is indicated by <FONT SIZE=-1><B>NULL </B></FONT>
on that <I>ptrlist
</I> chain. So, a <B>synset </B> representing a pointer (versus a sense of <I>searchstr
</I>) having a non-NULL value in <I>nextss </I> has another chain of search results
linked through the <I>ptrlist </I> chain of the synset pointed to by <I>nextss </I>.
<P>
If <I>searchstr </I> contains more than one base form in WordNet (as in the
noun <B>axes </B>, which has base forms <B>axe </B> and <B>axis </B>), synsets representing
the search results for each base form are linked through the <I>nextform
</I> pointer of the <B>Synset </B> structure.
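The sketch below walks these lists; it is illustrative only, and the
<I>words </I> and <I>wcount </I> field names are assumed from <B>wn.h </B> rather than
documented here. <P>
<PRE>
#include &lt;stdio.h&gt;
#include "wn.h"

/* Walk the lists returned by findtheinfo_ds(): the nextss chain links
   the senses of the search word, and the ptrlist chain links successive
   levels of the search hierarchy.  Sketch only: parallel pointers
   (nextss on a pointer synset) and nextform are not followed. */
void print_hierarchy(char *word)
{
    SynsetPtr senses = findtheinfo_ds(word, NOUN, HYPERPTR, ALLSENSES);
    SynsetPtr sense, level;
    int i;

    for (sense = senses; sense != NULL; sense = sense-&gt;nextss) {
        printf("sense:\n");
        for (level = sense-&gt;ptrlist; level != NULL; level = level-&gt;ptrlist) {
            for (i = 0; i &lt; level-&gt;wcount; i++)
                printf("  %s", level-&gt;words[i]);
            printf("\n");
        }
    }
    free_syns(senses);   /* release the entire list with one call */
}
</PRE>
<P>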
<H3><A NAME="sect4" HREF="#toc4">WordNet Searches </A></H3>
There is no extensive
description of what each search type is or the results returned. Using
the WordNet interface, examining the source code, and reading <B><A HREF="wndb.5WN.html">wndb</B>(5WN)<B></B></A>
are the best ways to see what types of searches are available and the
data returned for each. <P>
Listed below are the valid searches that can be
passed as <I>ptr_type </I> to <B>findtheinfo() </B>. Passing a negative value (when
applicable) causes a recursive, hierarchical search by setting <I>depth </I>
to <B>1 </B> when <B>traceptrs() </B> is called. <P>
<TABLE BORDER=0>
<TR> <TD ALIGN=LEFT><B>ptr_type </B> </TD> <TD ALIGN=CENTER><B>Value </B> </TD> <TD ALIGN=CENTER><B>Pointer </B> </TD> <TD ALIGN=LEFT><B>Search
</B> </TD> </TR>
<TR> <TD ALIGN=LEFT> </TD> <TD ALIGN=CENTER> </TD> <TD ALIGN=CENTER><B>Symbol </B> </TD> </TR>
<TR> <TD ALIGN=LEFT>ANTPTR </TD> <TD ALIGN=CENTER>1 </TD> <TD ALIGN=CENTER>! </TD> <TD ALIGN=LEFT>Antonyms </TD> </TR>
<TR> <TD ALIGN=LEFT>HYPERPTR </TD> <TD ALIGN=CENTER>2 </TD> <TD ALIGN=CENTER>@ </TD> <TD ALIGN=LEFT>Hypernyms
</TD> </TR>
<TR> <TD ALIGN=LEFT>HYPOPTR </TD> <TD ALIGN=CENTER>3 </TD> <TD ALIGN=CENTER>~ </TD> <TD ALIGN=LEFT>Hyponyms </TD> </TR>
<TR> <TD ALIGN=LEFT>ENTAILPTR </TD> <TD ALIGN=CENTER>4 </TD> <TD ALIGN=CENTER>* </TD> <TD ALIGN=LEFT>Entailment </TD> </TR>
<TR> <TD ALIGN=LEFT>SIMPTR </TD> <TD ALIGN=CENTER>5
</TD> <TD ALIGN=CENTER>&amp; </TD> <TD ALIGN=LEFT>Similar </TD> </TR>
<TR> <TD ALIGN=LEFT>ISMEMBERPTR </TD> <TD ALIGN=CENTER>6 </TD> <TD ALIGN=CENTER>#m </TD> <TD ALIGN=LEFT>Member meronym </TD> </TR>
<TR> <TD ALIGN=LEFT>ISSTUFFPTR </TD> <TD ALIGN=CENTER>7 </TD> <TD ALIGN=CENTER>#s
</TD> <TD ALIGN=LEFT>Substance meronym </TD> </TR>
<TR> <TD ALIGN=LEFT>ISPARTPTR </TD> <TD ALIGN=CENTER>8 </TD> <TD ALIGN=CENTER>#p </TD> <TD ALIGN=LEFT>Part meronym </TD> </TR>
<TR> <TD ALIGN=LEFT>HASMEMBERPTR </TD>
<TD ALIGN=CENTER>9 </TD> <TD ALIGN=CENTER>%m </TD> <TD ALIGN=LEFT>Member holonym </TD> </TR>
<TR> <TD ALIGN=LEFT>HASSTUFFPTR </TD> <TD ALIGN=CENTER>10 </TD> <TD ALIGN=CENTER>%s </TD> <TD ALIGN=LEFT>Substance holonym </TD> </TR>
<TR> <TD ALIGN=LEFT>HASPARTPTR
</TD> <TD ALIGN=CENTER>11 </TD> <TD ALIGN=CENTER>%p </TD> <TD ALIGN=LEFT>Part holonym </TD> </TR>
<TR> <TD ALIGN=LEFT>MERONYM </TD> <TD ALIGN=CENTER>12 </TD> <TD ALIGN=CENTER>% </TD> <TD ALIGN=LEFT>All meronyms </TD> </TR>
<TR> <TD ALIGN=LEFT>HOLONYM </TD> <TD ALIGN=CENTER>13 </TD>
<TD ALIGN=CENTER># </TD> <TD ALIGN=LEFT>All holonyms </TD> </TR>
<TR> <TD ALIGN=LEFT>CAUSETO </TD> <TD ALIGN=CENTER>14 </TD> <TD ALIGN=CENTER>&gt; </TD> <TD ALIGN=LEFT>Cause </TD> </TR>
<TR> <TD ALIGN=LEFT>PPLPTR </TD> <TD ALIGN=CENTER>15 </TD> <TD ALIGN=CENTER>&lt; </TD> <TD ALIGN=LEFT>Participle of
verb </TD> </TR>
<TR> <TD ALIGN=LEFT>SEEALSOPTR </TD> <TD ALIGN=CENTER>16 </TD> <TD ALIGN=CENTER>^ </TD> <TD ALIGN=LEFT>Also see </TD> </TR>
<TR> <TD ALIGN=LEFT>PERTPTR </TD> <TD ALIGN=CENTER>17 </TD> <TD ALIGN=CENTER>\ </TD> <TD ALIGN=LEFT>Pertains to noun
or derived from adjective </TD> </TR>
<TR> <TD ALIGN=LEFT>ATTRIBUTE </TD> <TD ALIGN=CENTER>18 </TD> <TD ALIGN=CENTER>= </TD> <TD ALIGN=LEFT>Attribute </TD> </TR>
<TR> <TD ALIGN=LEFT>VERBGROUP
</TD> <TD ALIGN=CENTER>19 </TD> <TD ALIGN=CENTER>$ </TD> <TD ALIGN=LEFT>Verb group </TD> </TR>
<TR> <TD ALIGN=LEFT>DERIVATION </TD> <TD ALIGN=CENTER>20 </TD> <TD ALIGN=CENTER>+ </TD> <TD ALIGN=LEFT>Derivationally related form </TD>
</TR>
<TR> <TD ALIGN=LEFT>CLASSIFICATION </TD> <TD ALIGN=CENTER>21 </TD> <TD ALIGN=CENTER>; </TD> <TD ALIGN=LEFT>Domain of synset </TD> </TR>
<TR> <TD ALIGN=LEFT>CLASS </TD> <TD ALIGN=CENTER>22 </TD> <TD ALIGN=CENTER>- </TD> <TD ALIGN=LEFT>Member of this
domain </TD> </TR>
<TR> <TD ALIGN=LEFT>SYNS </TD> <TD ALIGN=CENTER>23 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Find synonyms </TD> </TR>
<TR> <TD ALIGN=LEFT>FREQ </TD> <TD ALIGN=CENTER>24 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Polysemy </TD>
</TR>
<TR> <TD ALIGN=LEFT>FRAMES </TD> <TD ALIGN=CENTER>25 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Verb example sentences and generic frames </TD> </TR>
<TR> <TD ALIGN=LEFT>COORDS
</TD> <TD ALIGN=CENTER>26 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Noun coordinates </TD> </TR>
<TR> <TD ALIGN=LEFT>RELATIVES </TD> <TD ALIGN=CENTER>27 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Group related senses
</TD> </TR>
<TR> <TD ALIGN=LEFT>HMERONYM </TD> <TD ALIGN=CENTER>28 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Hierarchical meronym search </TD> </TR>
<TR> <TD ALIGN=LEFT>HHOLONYM </TD> <TD ALIGN=CENTER>29 </TD> <TD ALIGN=CENTER><I>n/a
</I> </TD> <TD ALIGN=LEFT>Hierarchical holonym search </TD> </TR>
<TR> <TD ALIGN=LEFT>WNGREP </TD> <TD ALIGN=CENTER>30 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Find keywords by substring
</TD> </TR>
<TR> <TD ALIGN=LEFT>OVERVIEW </TD> <TD ALIGN=CENTER>31 </TD> <TD ALIGN=CENTER><I>n/a </I> </TD> <TD ALIGN=LEFT>Show all synsets for word </TD> </TR>
<TR> <TD ALIGN=LEFT>CLASSIF_CATEGORY </TD>
<TD ALIGN=CENTER>32 </TD> <TD ALIGN=CENTER>;c </TD> <TD ALIGN=LEFT>Show domain topic </TD> </TR>
<TR> <TD ALIGN=LEFT>CLASSIF_USAGE </TD> <TD ALIGN=CENTER>33 </TD> <TD ALIGN=CENTER>;u </TD> <TD ALIGN=LEFT>Show domain usage
</TD> </TR>
<TR> <TD ALIGN=LEFT>CLASSIF_REGIONAL </TD> <TD ALIGN=CENTER>34 </TD> <TD ALIGN=CENTER>;r </TD> <TD ALIGN=LEFT>Show domain region </TD> </TR>
<TR> <TD ALIGN=LEFT>CLASS_CATEGORY </TD> <TD ALIGN=CENTER>35
</TD> <TD ALIGN=CENTER>-c </TD> <TD ALIGN=LEFT>Show domain terms for topic </TD> </TR>
<TR> <TD ALIGN=LEFT>CLASS_USAGE </TD> <TD ALIGN=CENTER>36 </TD> <TD ALIGN=CENTER>-u </TD> <TD ALIGN=LEFT>Show domain terms
for usage </TD> </TR>
<TR> <TD ALIGN=LEFT>CLASS_REGIONAL </TD> <TD ALIGN=CENTER>37 </TD> <TD ALIGN=CENTER>-r </TD> <TD ALIGN=LEFT>Show domain terms for region </TD> </TR>
<TR> <TD ALIGN=LEFT>INSTANCE
</TD> <TD ALIGN=CENTER>38 </TD> <TD ALIGN=CENTER>@i </TD> <TD ALIGN=LEFT>Instance of </TD> </TR>
<TR> <TD ALIGN=LEFT>INSTANCES </TD> <TD ALIGN=CENTER>39 </TD> <TD ALIGN=CENTER>~i </TD> <TD ALIGN=LEFT>Show instances </TD> </TR>
</TABLE>
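<P>
For example, the following sketch (with an illustrative word) traces the
complete hypernym chain by negating the search type, as described above: <P>
<PRE>
#include &lt;stdio.h&gt;
#include "wn.h"

/* A negative ptr_type requests a recursive, hierarchical search:
   this prints the full hypernym chain rather than a single level. */
void print_hypernym_chain(void)
{
    printf("%s\n", findtheinfo("dog", NOUN, -HYPERPTR, ALLSENSES));
}
</PRE>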
<P>
<B>findtheinfo_ds()
</B> cannot perform the following searches: <P>
<blockquote>SEEALSOPTR <BR>
PERTPTR <BR>
VERBGROUP
<BR>
FREQ <BR>
FRAMES <BR>
RELATIVES <BR>
WNGREP <BR>
OVERVIEW <BR>
</blockquote>
<H2><A NAME="sect5" HREF="#toc5">NOTES </A></H2>
Applications that
use WordNet and/or the morphological functions must call <B>wninit() </B> at
the start of the program. See <B><A HREF="wnutil.3WN.html">wnutil</B>(3WN)</A>
for more information. <P>
In all
function calls, <I>searchstr </I> may be either a word or a collocation formed
by joining individual words with underscore characters (<B>_ </B>). <P>
The <B>SearchResults
</B> structure defines fields in the <I>wnresults </I> global variable that are set
by the various search functions. This is a way to get additional information,
such as the number of senses the word has, from the search functions. The
<I>searchds </I> field is set by <B>findtheinfo_ds() </B>. <P>
The <I>pos </I> passed to <B>traceptrs_ds()
</B> is not used. <P>
<H2><A NAME="sect6" HREF="#toc6">SEE ALSO </A></H2>
<B><A HREF="wn.1WN.html">wn</B>(1WN)</A>
, <B><A HREF="wnb.1WN.html">wnb</B>(1WN)</A>
, <B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
, <B><A HREF="binsrch.3WN.html">binsrch</B>(3WN)</A>
,
<B><A HREF="malloc.3.html">malloc</B>(3)</A>
, <B><A HREF="morph.3WN.html">morph</B>(3WN)</A>
, <B><A HREF="wnutil.3WN.html">wnutil</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
.
<H2><A NAME="sect7" HREF="#toc7">WARNINGS </A></H2>
<B>parse_synset()
</B> must find an exact match between the <I>searchstr </I> passed and a word in
the synset to set <I>whichword </I>. No attempt is made to translate hyphens
and underscores, as is done in <B>getindex() </B>. <P>
The WordNet database and exception
list files must be opened with <B>wninit </B> prior to using any of the searching
functions. <P>
A large search may cause <B>findtheinfo() </B> to run out of buffer
space. The maximum buffer size is determined by the computer platform. If the
buffer size is exceeded the following message is printed in the output
buffer: <B>"Search too large. Narrow search and try again..." </B>. <P>
Passing an invalid
<I>pos </I> will probably result in a core dump. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc3" HREF="#sect3">Synset Navigation</A></LI>
<LI><A NAME="toc4" HREF="#sect4">WordNet Searches</A></LI>
</UL>
<LI><A NAME="toc5" HREF="#sect5">NOTES</A></LI>
<LI><A NAME="toc6" HREF="#sect6">SEE ALSO</A></LI>
<LI><A NAME="toc7" HREF="#sect7">WARNINGS</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,80 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNSTATS(7WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wnstats - WordNet 3.0 database statistics
<H2><A NAME="sect1" HREF="#toc1">DESCRIPTION </A></H2>
<H3><A NAME="sect2" HREF="#toc2">Number of
words, synsets, and senses </A></H3>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>POS </B> </TD> <TD ALIGN=CENTER><B>Unique </B> </TD> <TD ALIGN=CENTER><B>Synsets </B> </TD> <TD ALIGN=CENTER><B>Total </B> </TD> </TR>
<TR> <TD ALIGN=CENTER> </TD> <TD ALIGN=CENTER><B>Strings
</B> </TD> <TD ALIGN=CENTER> </TD> <TD ALIGN=CENTER><B>Word-Sense Pairs </B> </TD> </TR>
<TR> <TD ALIGN=LEFT>Noun </TD> <TD ALIGN=RIGHT>117798 </TD> <TD ALIGN=RIGHT>82115 </TD> <TD ALIGN=RIGHT>146312 </TD> </TR>
<TR> <TD ALIGN=LEFT>Verb </TD> <TD ALIGN=RIGHT>11529 </TD>
<TD ALIGN=RIGHT>13767 </TD> <TD ALIGN=RIGHT>25047 </TD> </TR>
<TR> <TD ALIGN=LEFT>Adjective </TD> <TD ALIGN=RIGHT>21479 </TD> <TD ALIGN=RIGHT>18156 </TD> <TD ALIGN=RIGHT>30002 </TD> </TR>
<TR> <TD ALIGN=LEFT>Adverb </TD> <TD ALIGN=RIGHT>4481 </TD> <TD ALIGN=RIGHT>3621 </TD>
<TD ALIGN=RIGHT>5580 </TD> </TR>
<TR> <TD ALIGN=LEFT>Totals </TD> <TD ALIGN=RIGHT>155287 </TD> <TD ALIGN=RIGHT>117659 </TD> <TD ALIGN=RIGHT>206941 </TD> </TR>
</TABLE>
<P>
<H3><A NAME="sect3" HREF="#toc3">Polysemy information </A></H3>
<P>
<TABLE BORDER=0>
<TR>
<TD ALIGN=CENTER><B>POS </B> </TD> <TD ALIGN=CENTER><B>Monosemous </B> </TD> <TD ALIGN=CENTER><B>Polysemous </B> </TD> <TD ALIGN=CENTER><B>Polysemous </B> </TD> </TR>
<TR> <TD ALIGN=CENTER> </TD> <TD ALIGN=CENTER><B>Words and Senses </B> </TD> <TD ALIGN=CENTER><B>Words
</B> </TD> <TD ALIGN=CENTER><B>Senses </B> </TD> </TR>
<TR> <TD ALIGN=LEFT>Noun </TD> <TD ALIGN=RIGHT>101863 </TD> <TD ALIGN=RIGHT>15935 </TD> <TD ALIGN=RIGHT>44449 </TD> </TR>
<TR> <TD ALIGN=LEFT>Verb </TD> <TD ALIGN=RIGHT>6277 </TD> <TD ALIGN=RIGHT>5252 </TD> <TD ALIGN=RIGHT>18770 </TD>
</TR>
<TR> <TD ALIGN=LEFT>Adjective </TD> <TD ALIGN=RIGHT>16503 </TD> <TD ALIGN=RIGHT>4976 </TD> <TD ALIGN=RIGHT>14399 </TD> </TR>
<TR> <TD ALIGN=LEFT>Adverb </TD> <TD ALIGN=RIGHT>3748 </TD> <TD ALIGN=RIGHT>733 </TD> <TD ALIGN=RIGHT>1832 </TD> </TR>
<TR> <TD ALIGN=LEFT>Totals
</TD> <TD ALIGN=RIGHT>128391 </TD> <TD ALIGN=RIGHT>26896 </TD> <TD ALIGN=RIGHT>79450 </TD> </TR>
</TABLE>
<P>
<TABLE BORDER=0>
<TR> <TD ALIGN=CENTER><B>POS </B> </TD> <TD ALIGN=CENTER><B>Average Polysemy </B> </TD> <TD ALIGN=CENTER><B>Average Polysemy
</B> </TD> </TR>
<TR> <TD ALIGN=CENTER> </TD> <TD ALIGN=CENTER><B>Including Monosemous Words </B> </TD> <TD ALIGN=CENTER><B>Excluding Monosemous Words </B> </TD> </TR>
<TR> <TD ALIGN=LEFT>Noun
</TD> <TD ALIGN=RIGHT>1.24 </TD> <TD ALIGN=RIGHT>2.79 </TD> </TR>
<TR> <TD ALIGN=LEFT>Verb </TD> <TD ALIGN=RIGHT>2.17 </TD> <TD ALIGN=RIGHT>3.57 </TD> </TR>
<TR> <TD ALIGN=LEFT>Adjective </TD> <TD ALIGN=RIGHT>1.40 </TD> <TD ALIGN=RIGHT>2.71 </TD> </TR>
<TR> <TD ALIGN=LEFT>Adverb </TD> <TD ALIGN=RIGHT>1.25 </TD> <TD ALIGN=RIGHT>2.50
</TD> </TR>
</TABLE>
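<P>
As the tables imply, average polysemy including monosemous words is the
number of word-sense pairs divided by the number of unique strings (for
nouns, 146312 / 117798 = 1.24, rounded), and average polysemy excluding
monosemous words is the number of polysemous senses divided by the number
of polysemous words (44449 / 15935 = 2.79, rounded).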
<P>
<H2><A NAME="sect4" HREF="#toc4">NOTES </A></H2>
Statistics for all types of adjectives and adjective satellites
are combined. <P>
The total of all unique noun, verb, adjective, and adverb
strings is actually 147278. However, many strings are unique within a syntactic
category, but are in more than one syntactic category. The figures in
the table represent the unique strings in each syntactic category. <P>
<P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">DESCRIPTION</A></LI>
<UL>
<LI><A NAME="toc2" HREF="#sect2">Number of words, synsets, and senses</A></LI>
<LI><A NAME="toc3" HREF="#sect3">Polysemy information</A></LI>
</UL>
<LI><A NAME="toc4" HREF="#sect4">NOTES</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,154 @@
<!-- manual page source format generated by PolyglotMan v3.0.3a12, -->
<!-- available via anonymous ftp from ftp.cs.berkeley.edu:/ucb/people/phelps/tcltk/rman.tar.Z -->
<HTML>
<HEAD>
<TITLE>WNUTIL(3WN) manual page</TITLE>
</HEAD>
<BODY>
<A HREF="#toc">Table of Contents</A><P>
<H2><A NAME="sect0" HREF="#toc0">NAME </A></H2>
wninit, re_wninit, cntwords, strtolower, ToLowerCase, strsubst,
getptrtype, getpos, getsstype, StrToPos, GetSynsetForSense, GetDataOffset,
GetPolyCount, WNSnsToStr, GetValidIndexPointer, GetWNSense, GetSenseIndex,
default_display_message
<H2><A NAME="sect1" HREF="#toc1">SYNOPSIS </A></H2>
<P>
<B>#include "wn.h" </B> <P>
<B>int wninit(void); </B> <P>
<B>int
re_wninit(void); </B> <P>
<B>int cntwords(char *str, char separator); </B> <P>
<B>char *strtolower(char
*str); </B> <P>
<B>char *ToLowerCase(char *str); </B> <P>
<B>char *strsubst(char *str, char
from, char to); </B> <P>
<B>int getptrtype(char *ptr_symbol); </B> <P>
<B>int getpos(char *ss_type);
</B> <P>
<B>int getsstype(char *ss_type); </B> <P>
<B>int StrToPos(char *pos); </B> <P>
<B>SynsetPtr GetSynsetForSense(char
*sense_key); </B> <P>
<B>long GetDataOffset(char *sense_key); </B> <P>
<B>int GetPolyCount(char
*sense_key); </B> <P>
<B>char *WNSnsToStr(IndexPtr idx, int sense_num); </B> <P>
<B>IndexPtr
GetValidIndexPointer(char *str, int pos); </B> <P>
<B>int GetWNSense(char *lemma,
char *lex_sense); </B> <P>
<B>SnsIndexPtr GetSenseIndex(char *sense_key); </B> <P>
<B>int GetTagcnt(IndexPtr
idx, int sense); </B> <P>
<B>int default_display_message(char *msg); </B>
<H2><A NAME="sect2" HREF="#toc2">DESCRIPTION
</A></H2>
<P>
The WordNet library contains many utility functions used by the interface
code, other library functions, and various applications and tools. Only
those of importance to the WordNet search code, or that are generally
useful, are described here. <P>
<B>wninit()</B> opens the files necessary for using
WordNet with the WordNet library functions. The database files are opened,
and <B>morphinit()</B> is called to open the exception list files. Returns <B>0
</B> if successful, <B>-1 </B> otherwise. The database and exception list files must
be open before the WordNet search and morphology functions are used. If
the database is successfully opened, the global variable <B>OpenDB </B> is set
to <B>1 </B>. Note that it is possible for the database files to be opened (<B>OpenDB
== 1 </B>), but not the exception list files. <P>
<B>re_wninit()</B> is used to close
the database files and reopen them, and is used exclusively for WordNet
development. <B>re_morphinit() </B> is called to close and reopen the exception
list files. Return codes are as described above. <P>
<B>cntwords()</B> counts the
number of underscore or space separated words in <I>str </I>. A hyphen is passed
in <I>separator </I> if it is to be considered a word delimiter. Otherwise <I>separator
</I> can be any other character, or an underscore if another character is
not desired. <P>
<B>strtolower()</B> converts <I>str </I> to lower case and removes a trailing
adjective marker, if present. <I>str </I> is actually modified by this function,
and a pointer to the modified string is returned. <P>
<B>ToLowerCase()</B> converts
<I>str </I> to lower case as above, without removing an adjective marker. <P>
<B>strsubst()</B>
replaces all occurrences of <I>from </I> with <I>to </I> in <I>str </I> and returns resulting
string. <P>
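The fragment below sketches these utilities on an illustrative buffer; it
assumes <B>strsubst() </B> edits <I>str </I> in place, as <B>strtolower() </B> is documented
to do. <P>
<PRE>
#include &lt;stdio.h&gt;
#include "wn.h"

/* Sketch: count and rewrite the words of a collocation buffer. */
void string_utils_demo(void)
{
    char buf[] = "fast_food";

    printf("%d\n", cntwords(buf, '_'));   /* prints 2                */
    strsubst(buf, '_', ' ');              /* buf becomes "fast food" */
    printf("%s\n", strtolower(buf));      /* prints "fast food"      */
}
</PRE>
<P>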
<B>getptrtype()</B> returns the integer <I>ptr_type </I> corresponding to the
pointer character passed in <I>ptr_symbol </I>. See <B><A HREF="wnsearch.3WN.html">wnsearch</B>(3WN)</A>
for a table
of pointer symbols and types. <P>
<B>getpos()</B> returns the integer constant corresponding
to the synset type passed. <I>ss_type </I> may be one of the following: <B>n, v,
a, r, s </B>. If <B>s </B> is passed, <FONT SIZE=-1><B>ADJ </B></FONT>
is returned. Exits with <B>-1 </B> if <I>ss_type
</I> is invalid. <P>
<B>getsstype()</B> works like <B>getpos() </B>, but returns <FONT SIZE=-1><B>SATELLITE </B></FONT>
if <I>ss_type </I> is <B>s </B>. <P>
<B>StrToPos()</B> returns the integer constant corresponding
to the syntactic category passed in <I>pos </I>. <I>pos </I> must be one of the following:
<B>noun, verb, adj, adv </B>. <B>-1 </B> is returned if <I>pos </I> is invalid. <P>
<B>GetSynsetForSense()</B>
returns the synset that contains the word sense <I>sense_key </I> and <FONT SIZE=-1><B>NULL </B></FONT>
in case of error. <P>
<B>GetDataOffset()</B> returns the synset offset for synset
that contains the word sense <I>sense_key </I>, and <B>0 </B> if <I>sense_key </I> is not in
sense index file. <P>
<B>GetPolyCount()</B> returns the polysemy count (number of
senses in WordNet) for <I>lemma </I> encoded in <I>sense_key </I> and <B>0 </B> if word is
not found. <P>
<B>WNSnsToStr()</B> returns sense key encoding for <I>sense_num </I> entry
in <I>idx </I>. <P>
<B>GetValidIndexPointer()</B> returns the Index structure for <I>word </I>
in <I>pos </I>. Calls <B><A HREF="morphstr.3WN.html">morphstr</B>(3WN)</A>
to find a valid base form if <I>word </I> is inflected.
<P>
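The sketch below (with an illustrative word) chains several of these
functions through a sense key: <P>
<PRE>
#include &lt;stdio.h&gt;
#include "wn.h"

/* Sketch: map the first sense of a word to its synset and data file
   offset via a sense key, using only functions documented above. */
void first_sense_synset(void)
{
    IndexPtr idx = GetValidIndexPointer("dog", NOUN);

    if (idx != NULL) {
        char *key = WNSnsToStr(idx, 1);          /* key for sense 1 */
        long off = GetDataOffset(key);           /* 0 if not found  */
        SynsetPtr ss = GetSynsetForSense(key);   /* NULL on error   */

        if (ss != NULL)
            printf("sense 1 is at offset %ld\n", off);
    }
}
</PRE>
<P>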
<B>GetWNSense()</B> returns the WordNet sense number for the sense key encoding
represented by <I>lemma </I> and <I>lex_sense </I>. <P>
<B>GetSenseIndex()</B> returns parsed sense
index entry for <I>sense_key </I> and <FONT SIZE=-1><B>NULL </B></FONT>
if <I>sense_key </I> is not in sense index.
<P>
<B>GetTagcnt()</B> returns the number of times the sense passed has been tagged
according to the <I>cntlist </I> file. <P>
<B>default_display_message()</B> simply returns
<B>-1 </B>. This is the default value for the global variable <B>display_message
</B>, which points to a function called to display an error message. In general,
applications (including the WordNet interfaces) define an application
specific function and set <B>display_message </B> to point to it.
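A minimal sketch of such an override follows; it assumes <B>display_message
</B> is declared in <B>wn.h </B>, with the callback signature of <B>default_display_message()
</B> shown in the SYNOPSIS. <P>
<PRE>
#include &lt;stdio.h&gt;
#include "wn.h"

/* Sketch: route WordNet messages to stderr instead of discarding them. */
static int my_display_message(char *msg)
{
    fputs(msg, stderr);
    return -1;               /* mimic the default return value */
}

/* Call early in the application, before wninit() and any searches. */
void install_message_handler(void)
{
    display_message = my_display_message;
}
</PRE>
<P>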
<H2><A NAME="sect3" HREF="#toc3">NOTES </A></H2>
<B>include/wn.h
</B> lists all the pointer and search types and their corresponding constant
values. There is no description of what each search type is or the results
returned. Using the WordNet interface is the best way to see what types
of searches are available, and the data returned for each.
<H2><A NAME="sect4" HREF="#toc4">SEE ALSO </A></H2>
<B><A HREF="wnintro.3WN.html">wnintro</B>(3WN)</A>
,
<B><A HREF="wnsearch.3WN.html">wnsearch</B>(3WN)</A>
, <B><A HREF="morph.3WN.html">morph</B>(3WN)</A>
, <B><A HREF="wnintro.5WN.html">wnintro</B>(5WN)</A>
, <B><A HREF="wnintro.7WN.html">wnintro</B>(7WN)</A>
. <P>
<H2><A NAME="sect5" HREF="#toc5">WARNINGS </A></H2>
Error
checking on passed arguments is not rigorous. Passing <FONT SIZE=-1><B>NULL </B></FONT>
pointers
or invalid values will often cause an application to die. <P>
<HR><P>
<A NAME="toc"><B>Table of Contents</B></A><P>
<UL>
<LI><A NAME="toc0" HREF="#sect0">NAME</A></LI>
<LI><A NAME="toc1" HREF="#sect1">SYNOPSIS</A></LI>
<LI><A NAME="toc2" HREF="#sect2">DESCRIPTION</A></LI>
<LI><A NAME="toc3" HREF="#sect3">NOTES</A></LI>
<LI><A NAME="toc4" HREF="#sect4">SEE ALSO</A></LI>
<LI><A NAME="toc5" HREF="#sect5">WARNINGS</A></LI>
</UL>
</BODY></HTML>

View File

@ -0,0 +1,478 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# doc/man/Makefile. Generated from Makefile.in by configure.
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
srcdir = .
top_srcdir = ../..
pkgdatadir = $(datadir)/WordNet
pkglibdir = $(libdir)/WordNet
pkgincludedir = $(includedir)/WordNet
top_builddir = ../..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = /usr/csl/bin/install -c
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = doc/man
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
man1dir = $(mandir)/man1
am__installdirs = "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man3dir)" \
"$(DESTDIR)$(man5dir)" "$(DESTDIR)$(man7dir)"
man3dir = $(mandir)/man3
man5dir = $(mandir)/man5
man7dir = $(mandir)/man7
NROFF = nroff
MANS = $(man_MANS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run aclocal-1.9
AMDEP_FALSE = #
AMDEP_TRUE =
AMTAR = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run tar
AUTOCONF = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoconf
AUTOHEADER = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run autoheader
AUTOMAKE = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run automake-1.9
AWK = nawk
CC = gcc
CCDEPMODE = depmode=gcc3
CFLAGS = -g -O2
CPP = gcc -E
CPPFLAGS =
CYGPATH_W = echo
DEFS = -DHAVE_CONFIG_H
DEPDIR = .deps
ECHO_C =
ECHO_N = -n
ECHO_T =
EGREP = egrep
EXEEXT =
INSTALL_DATA = ${INSTALL} -m 644
INSTALL_PROGRAM = ${INSTALL}
INSTALL_SCRIPT = ${INSTALL}
INSTALL_STRIP_PROGRAM = ${SHELL} $(install_sh) -c -s
LDFLAGS =
LIBOBJS =
LIBS =
LTLIBOBJS =
MAKEINFO = ${SHELL} /people/wn/src/Release/3.0/Unix/missing --run makeinfo
OBJEXT = o
PACKAGE = WordNet
PACKAGE_BUGREPORT = wordnet@princeton.edu
PACKAGE_NAME = WordNet
PACKAGE_STRING = WordNet 3.0
PACKAGE_TARNAME = wordnet
PACKAGE_VERSION = 3.0
PATH_SEPARATOR = :
RANLIB = ranlib
SET_MAKE =
SHELL = /bin/bash
STRIP =
TCL_INCLUDE_SPEC = -I/usr/csl/include
TCL_LIB_SPEC = -L/usr/csl/lib -ltcl8.4
TK_LIBS = -L/usr/openwin/lib -lX11 -ldl -lpthread -lsocket -lnsl -lm
TK_LIB_SPEC = -L/usr/csl/lib -ltk8.4
TK_PREFIX = /usr/csl
TK_XINCLUDES = -I/usr/openwin/include
VERSION = 3.0
ac_ct_CC = gcc
ac_ct_RANLIB = ranlib
ac_ct_STRIP =
ac_prefix = /usr/local/WordNet-3.0
am__fastdepCC_FALSE = #
am__fastdepCC_TRUE =
am__include = include
am__leading_dot = .
am__quote =
am__tar = ${AMTAR} chof - "$$tardir"
am__untar = ${AMTAR} xf -
bindir = ${exec_prefix}/bin
build_alias =
datadir = ${prefix}/share
exec_prefix = ${prefix}
host_alias =
includedir = ${prefix}/include
infodir = ${prefix}/info
install_sh = /people/wn/src/Release/3.0/Unix/install-sh
libdir = ${exec_prefix}/lib
libexecdir = ${exec_prefix}/libexec
localstatedir = ${prefix}/var
mandir = ${prefix}/man
mkdir_p = $(install_sh) -d
oldincludedir = /usr/include
prefix = /usr/local/WordNet-3.0
program_transform_name = s,x,x,
sbindir = ${exec_prefix}/sbin
sharedstatedir = ${prefix}/com
sysconfdir = ${prefix}/etc
target_alias =
man_MANS = binsrch.3 cntlist.5 grind.1 lexnames.5 morph.3 morphy.7 senseidx.5 uniqbeg.7 wn.1 wnb.1 wndb.5 wngloss.7 wngroups.7 wninput.5 wnintro.1 wnintro.3 wnintro.5 wnintro.7 wnlicens.7 wnpkgs.7 wnsearch.3 wnstats.7 wnutil.3
all: all-am
.SUFFIXES:
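# Regenerate Makefile.in with automake whenever Makefile.am or one of the
# aclocal inputs changes; dependencies listed in $(am__configure_deps) are
# instead refreshed through the top-level "am--refresh" target.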
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/man/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu doc/man/Makefile
.PRECIOUS: Makefile
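# Rebuild Makefile from Makefile.in by re-running config.status for this
# subdirectory; if config.status itself is out of date, fall back to the
# top-level "am--refresh" target.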
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
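# install-man1 filters the *.1* entries out of $(man_MANS), normalizes the
# extension to "1", renames each page through $(transform), and copies it
# into $(DESTDIR)$(man1dir) with $(INSTALL_DATA); uninstall-man1 recomputes
# the same installed names and removes them.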
install-man1: $(man1_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man1dir)" || $(mkdir_p) "$(DESTDIR)$(man1dir)"
@list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.1*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
1*) ;; \
*) ext='1' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \
done
uninstall-man1:
@$(NORMAL_UNINSTALL)
@list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.1*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
1*) ;; \
*) ext='1' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man1dir)/$$inst"; \
done
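# The install/uninstall rules for sections 3, 5 and 7 below repeat the
# man1 pattern, differing only in the section number and target directory.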
install-man3: $(man3_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man3dir)" || $(mkdir_p) "$(DESTDIR)$(man3dir)"
@list='$(man3_MANS) $(dist_man3_MANS) $(nodist_man3_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.3*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
3*) ;; \
*) ext='3' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst"; \
done
uninstall-man3:
@$(NORMAL_UNINSTALL)
@list='$(man3_MANS) $(dist_man3_MANS) $(nodist_man3_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.3*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
3*) ;; \
*) ext='3' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man3dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man3dir)/$$inst"; \
done
install-man5: $(man5_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man5dir)" || $(mkdir_p) "$(DESTDIR)$(man5dir)"
@list='$(man5_MANS) $(dist_man5_MANS) $(nodist_man5_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.5*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
5*) ;; \
*) ext='5' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man5dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man5dir)/$$inst"; \
done
uninstall-man5:
@$(NORMAL_UNINSTALL)
@list='$(man5_MANS) $(dist_man5_MANS) $(nodist_man5_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.5*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
5*) ;; \
*) ext='5' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man5dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man5dir)/$$inst"; \
done
install-man7: $(man7_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man7dir)" || $(mkdir_p) "$(DESTDIR)$(man7dir)"
@list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.7*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
7*) ;; \
*) ext='7' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \
done
uninstall-man7:
@$(NORMAL_UNINSTALL)
@list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.7*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
7*) ;; \
*) ext='7' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man7dir)/$$inst"; \
done
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
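# distdir copies every file in $(DISTFILES) into the distribution tree,
# recreating subdirectories as needed and preserving attributes (cp -p/-pR).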
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(MANS)
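# installdirs pre-creates all four man section directories under $(DESTDIR).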
installdirs:
for dir in "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man3dir)" "$(DESTDIR)$(man5dir)" "$(DESTDIR)$(man7dir)"; do \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
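# install-strip re-runs "make install" with INSTALL_PROGRAM pointed at the
# stripping wrapper so that installed executables are stripped.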
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
info: info-am
info-am:
install-data-am: install-man
install-exec-am:
install-info: install-info-am
install-man: install-man1 install-man3 install-man5 install-man7
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-info-am uninstall-man
uninstall-man: uninstall-man1 uninstall-man3 uninstall-man5 \
uninstall-man7
.PHONY: all all-am check check-am clean clean-generic distclean \
distclean-generic distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am install-exec \
install-exec-am install-info install-info-am install-man \
install-man1 install-man3 install-man5 install-man7 \
install-strip installcheck installcheck-am installdirs \
maintainer-clean maintainer-clean-generic mostlyclean \
mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am \
uninstall-info-am uninstall-man uninstall-man1 uninstall-man3 \
uninstall-man5 uninstall-man7
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

1 doc/man/Makefile.am Normal file
View File

@ -0,0 +1 @@
man_MANS = binsrch.3 cntlist.5 grind.1 lexnames.5 morph.3 morphy.7 senseidx.5 uniqbeg.7 wn.1 wnb.1 wndb.5 wngloss.7 wngroups.7 wninput.5 wnintro.1 wnintro.3 wnintro.5 wnintro.7 wnlicens.7 wnpkgs.7 wnsearch.3 wnstats.7 wnutil.3

478 doc/man/Makefile.in Normal file
View File

@ -0,0 +1,478 @@
# Makefile.in generated by automake 1.9 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
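# Values of the form @NAME@ below are substitution placeholders that
# config.status replaces with configured values when generating Makefile.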
@SET_MAKE@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = ../..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = @INSTALL@
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
subdir = doc/man
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
SOURCES =
DIST_SOURCES =
man1dir = $(mandir)/man1
am__installdirs = "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man3dir)" \
"$(DESTDIR)$(man5dir)" "$(DESTDIR)$(man7dir)"
man3dir = $(mandir)/man3
man5dir = $(mandir)/man5
man7dir = $(mandir)/man7
NROFF = nroff
MANS = $(man_MANS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
AMDEP_TRUE = @AMDEP_TRUE@
AMTAR = @AMTAR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
OBJEXT = @OBJEXT@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
TCL_INCLUDE_SPEC = @TCL_INCLUDE_SPEC@
TCL_LIB_SPEC = @TCL_LIB_SPEC@
TK_LIBS = @TK_LIBS@
TK_LIB_SPEC = @TK_LIB_SPEC@
TK_PREFIX = @TK_PREFIX@
TK_XINCLUDES = @TK_XINCLUDES@
VERSION = @VERSION@
ac_ct_CC = @ac_ct_CC@
ac_ct_RANLIB = @ac_ct_RANLIB@
ac_ct_STRIP = @ac_ct_STRIP@
ac_prefix = @ac_prefix@
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build_alias = @build_alias@
datadir = @datadir@
exec_prefix = @exec_prefix@
host_alias = @host_alias@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
prefix = @prefix@
program_transform_name = @program_transform_name@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
man_MANS = binsrch.3 cntlist.5 grind.1 lexnames.5 morph.3 morphy.7 senseidx.5 uniqbeg.7 wn.1 wnb.1 wndb.5 wngloss.7 wngroups.7 wninput.5 wnintro.1 wnintro.3 wnintro.5 wnintro.7 wnlicens.7 wnpkgs.7 wnsearch.3 wnstats.7 wnutil.3
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/man/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --gnu doc/man/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
uninstall-info-am:
install-man1: $(man1_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man1dir)" || $(mkdir_p) "$(DESTDIR)$(man1dir)"
@list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.1*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
1*) ;; \
*) ext='1' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \
done
uninstall-man1:
@$(NORMAL_UNINSTALL)
@list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.1*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
1*) ;; \
*) ext='1' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man1dir)/$$inst"; \
done
install-man3: $(man3_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man3dir)" || $(mkdir_p) "$(DESTDIR)$(man3dir)"
@list='$(man3_MANS) $(dist_man3_MANS) $(nodist_man3_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.3*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
3*) ;; \
*) ext='3' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst"; \
done
uninstall-man3:
@$(NORMAL_UNINSTALL)
@list='$(man3_MANS) $(dist_man3_MANS) $(nodist_man3_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.3*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
3*) ;; \
*) ext='3' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man3dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man3dir)/$$inst"; \
done
install-man5: $(man5_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man5dir)" || $(mkdir_p) "$(DESTDIR)$(man5dir)"
@list='$(man5_MANS) $(dist_man5_MANS) $(nodist_man5_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.5*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
5*) ;; \
*) ext='5' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man5dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man5dir)/$$inst"; \
done
uninstall-man5:
@$(NORMAL_UNINSTALL)
@list='$(man5_MANS) $(dist_man5_MANS) $(nodist_man5_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.5*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
5*) ;; \
*) ext='5' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man5dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man5dir)/$$inst"; \
done
install-man7: $(man7_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man7dir)" || $(mkdir_p) "$(DESTDIR)$(man7dir)"
@list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.7*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
7*) ;; \
*) ext='7' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \
done
uninstall-man7:
@$(NORMAL_UNINSTALL)
@list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.7*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
7*) ;; \
*) ext='7' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man7dir)/$$inst"; \
done
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(MANS)
installdirs:
for dir in "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man3dir)" "$(DESTDIR)$(man5dir)" "$(DESTDIR)$(man7dir)"; do \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
info: info-am
info-am:
install-data-am: install-man
install-exec-am:
install-info: install-info-am
install-man: install-man1 install-man3 install-man5 install-man7
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-info-am uninstall-man
uninstall-man: uninstall-man1 uninstall-man3 uninstall-man5 \
uninstall-man7
.PHONY: all all-am check check-am clean clean-generic distclean \
distclean-generic distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am install-exec \
install-exec-am install-info install-info-am install-man \
install-man1 install-man3 install-man5 install-man7 \
install-strip installcheck installcheck-am installdirs \
maintainer-clean maintainer-clean-generic mostlyclean \
mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am \
uninstall-info-am uninstall-man uninstall-man1 uninstall-man3 \
uninstall-man5 uninstall-man7
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

Some files were not shown because too many files have changed in this diff.