Code example #1
File: main.py  Project: arunchaganty/nn-semparse
import argparse      # standard-library imports needed by the code below
import collections

# Local imports
import atislexicon
import augmentation
from encoderdecoder import EncoderDecoderModel
from attention import AttentionModel
from example import Example
import spec as specutil
from vocabulary import Vocabulary

# Registry of available model architectures, keyed by name.
MODELS = collections.OrderedDict([
    ('encoderdecoder', EncoderDecoderModel),
    ('attention', AttentionModel),
])

# Registry of vocabulary constructors, keyed by type name; 'glove' enables GloVe embeddings.
VOCAB_TYPES = collections.OrderedDict([
    ('raw', lambda s, e, **kwargs: Vocabulary.from_sentences(s, e, **kwargs)),
    ('glove', lambda s, e, **kwargs: Vocabulary.from_sentences(
        s, e, use_glove=True, **kwargs))
])

# Global options
OPTIONS = None

# Global statistics
STATS = {}

def _parse_args():
  global OPTIONS
  parser = argparse.ArgumentParser(
      description='A neural semantic parser.',
      formatter_class=argparse.RawTextHelpFormatter)
  # ... (the remaining argument definitions are truncated in this excerpt)
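The two OrderedDicts above act as name-to-class registries: a string supplied on the command line selects which model class or vocabulary constructor to use. Below is a minimal, self-contained sketch of that lookup pattern; the --model flag, the StubModel class, and main() are illustrative assumptions, not the project's actual interface.

import argparse
import collections


class StubModel:
  """Stand-in for EncoderDecoderModel / AttentionModel."""
  def __init__(self, name):
    self.name = name


# Hypothetical registry mirroring MODELS above: name -> callable that builds the model.
MODELS = collections.OrderedDict([
    ('encoderdecoder', lambda: StubModel('encoderdecoder')),
    ('attention', lambda: StubModel('attention')),
])


def main():
  parser = argparse.ArgumentParser(description='Registry-lookup sketch.')
  # Restricting choices to the registry keys gives validation and help text for free.
  parser.add_argument('--model', choices=list(MODELS), default='attention')
  args = parser.parse_args()
  model = MODELS[args.model]()  # look up the factory by name and build the model
  print('built model:', model.name)


if __name__ == '__main__':
  main()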
Code example #2
File: main.py  Project: dadashkarimi/seq2subj
# (Earlier imports, including the model classes used below, are elided in this excerpt.)
import collections

from example import Example
import spec as specutil
from vocabulary import Vocabulary
from tqdm import tqdm
from lib.common import count_lines
import MySQLdb
from lib import common

# Same registry pattern as above; this fork adds an 'attn2hist' entry (Attention2HistoryModel).
MODELS = collections.OrderedDict([
    ('encoderdecoder', EncoderDecoderModel),
    ('attention', AttentionModel),
    ('attn2hist', Attention2HistoryModel),
])

VOCAB_TYPES = collections.OrderedDict([
    ('raw', lambda s, e, **kwargs: Vocabulary.from_sentences(s, e, **kwargs)),
    ('glove', lambda s, e, **kwargs: Vocabulary.from_sentences(
        s, e, use_glove=True, **kwargs))
])

# x,y Statistics in Training Data
#PAIRS = {}

# Global options
OPTIONS = None

# Global statistics
STATS = {}


def _parse_args():
  ...  # function body truncated in this excerpt
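In both excerpts, VOCAB_TYPES holds factory lambdas rather than classes: each lambda forwards its positional and keyword arguments to Vocabulary.from_sentences, and only the 'glove' entry pins use_glove=True. The sketch below reproduces that forwarding pattern in isolation; the Vocab stub and its from_sentences signature are assumptions standing in for the project's Vocabulary class.

import collections


class Vocab:
  """Stand-in for the project's Vocabulary class."""

  @classmethod
  def from_sentences(cls, sentences, emb_size, use_glove=False, **kwargs):
    v = cls()
    v.sentences, v.emb_size, v.use_glove = sentences, emb_size, use_glove
    return v


# Hypothetical registry mirroring VOCAB_TYPES: each value is a factory lambda.
VOCAB_TYPES = collections.OrderedDict([
    ('raw', lambda s, e, **kw: Vocab.from_sentences(s, e, **kw)),
    ('glove', lambda s, e, **kw: Vocab.from_sentences(s, e, use_glove=True, **kw)),
])

# The caller picks a factory by name and passes the shared arguments once.
vocab = VOCAB_TYPES['glove'](['list all flights from boston'], 100)
print(vocab.use_glove)  # True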