Code Example #1
                             "class_based_rnnlm.gz")

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default="")
    ap.add_argument("--small", action="store_true")
    args = ap.parse_args()

    vocab, lmdata = load_data(small=args.small, history_len=5, batch_size=64)
    model = NeuralLM(vocab.size)
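    # A class-based output layer factors the softmax into a class distribution
    # and a within-class word distribution, which is much cheaper to normalize
    # than a full softmax over a large vocabulary.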
    model.stack(
        RNN(hidden_size=100,
            output_type="sequence",
            hidden_activation='sigmoid',
            persistent_state=True,
            batch_size=lmdata.size,
            reset_state_for_input=0),
        ClassOutputLayer(output_size=100, class_size=100))

    if os.path.exists(args.model):
        model.load_params(args.model)

    trainer = SGDTrainer(
        model, {
            "learning_rate": LearningRateAnnealer.learning_rate(1.2),
            "weight_l2": 1e-7
        })
    annealer = LearningRateAnnealer()

    trainer.run(lmdata, epoch_controllers=[annealer])
Code Example #2
File: lstm_rnnlm.py Project: zuxfoucault/deepy
logging.basicConfig(level=logging.INFO)

default_model = os.path.join(os.path.dirname(__file__), "models",
                             "lstm_rnnlm.gz")

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default="")
    ap.add_argument("--small", action="store_true")
    args = ap.parse_args()

    vocab, lmdata = load_data(small=args.small, history_len=5, batch_size=64)
    model = NeuralLM(vocab.size, test_data=None)
    model.stack(
        LSTM(hidden_size=100,
             output_type="sequence",
             persistent_state=True,
             batch_size=lmdata.size,
             reset_state_for_input=0), FullOutputLayer(vocab.size))

    if os.path.exists(args.model):
        model.load_params(args.model)

    trainer = SGDTrainer(
        model, {
            "learning_rate": LearningRateAnnealer.learning_rate(1.2),
            "weight_l2": 1e-7
        })
    annealer = LearningRateAnnealer(trainer)

    trainer.run(lmdata, controllers=[annealer])
Code Example #3
default_model = os.path.join(os.path.dirname(__file__), "models",
                             "baseline_rnnlm.gz")

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default="")
    ap.add_argument("--small", action="store_true")
    args = ap.parse_args()

    vocab, lmdata = load_data(small=args.small, history_len=5, batch_size=64)
    model = NeuralLM(vocab.size)
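    # FullOutputLayer computes a softmax over the entire vocabulary at each
    # step: the simple baseline that the class-based variant speeds up.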
    model.stack(
        RNN(hidden_size=100,
            output_type="sequence",
            hidden_activation="sigmoid",
            persistent_state=True,
            batch_size=lmdata.size,
            reset_state_for_input=0), FullOutputLayer(vocab.size))

    if os.path.exists(args.model):
        model.load_params(args.model)

    trainer = SGDTrainer(
        model, {
            "learning_rate": LearningRateAnnealer.learning_rate(1.2),
            "weight_l2": 1e-7
        })
    annealer = LearningRateAnnealer()

    trainer.run(lmdata, controllers=[annealer])
Code Example #4
File: baseline_rnnlm.py Project: JunjieHu/deepy
from layers import FullOutputLayer


logging.basicConfig(level=logging.INFO)

default_model = os.path.join(os.path.dirname(__file__), "models", "baseline_rnnlm.gz")

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default="")
    ap.add_argument("--small", action="store_true")
    args = ap.parse_args()

    vocab, lmdata = load_data(small=args.small, history_len=5, batch_size=64)
    model = NeuralLM(vocab.size)
    model.stack(RNN(hidden_size=100, output_type="sequence", hidden_activation="sigmoid",
                    persistent_state=True, batch_size=lmdata.size,
                    reset_state_for_input=0),
                FullOutputLayer(vocab.size))

    if os.path.exists(args.model):
        model.load_params(args.model)

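    # Plain SGD with a small L2 penalty; the annealer lowers the learning
    # rate during training once progress on the validation cost stalls.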
    trainer = SGDTrainer(model, {"learning_rate": LearningRateAnnealer.learning_rate(1.2),
                                 "weight_l2": 1e-7})
    annealer = LearningRateAnnealer(trainer)

    trainer.run(lmdata, controllers=[annealer])

    model.save_params(default_model)
Code Example #5
File: lstm_rnnlm.py Project: tangyaohua/dl4mt
default_model = os.path.join(os.path.dirname(__file__), "models", "lstm_rnnlmnew.gz")
default_dict  = '/home/tangyaohua/dl4mt/data/larger.corpus/vocab.chinese.pkl'
# default_dict = '/home/tangyh/Dropbox/PycharmProjects/dl4mt/session2/lm/resources/vocab.chinese.pkl'

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default='')
    ap.add_argument("--dictpath", default=default_dict)
    ap.add_argument("--small", action="store_true")
    args = ap.parse_args()

    vocab, lmdata = load_datagivendict(dictpath=args.dictpath, small=args.small, history_len=5, batch_size=16)
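    # inputx is a Theano symbolic integer matrix (T is theano.tensor, imported
    # above this excerpt); passing it as input_tensor lets the same tensor be
    # reused elsewhere in a larger graph.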
    inputx = T.imatrix('x')
    print("len(vocab): %d" % len(vocab))
    model = NeuralLM(len(vocab), test_data=None, input_tensor=inputx)
    model.stack(LSTM(hidden_size=100, output_type="sequence",
                    persistent_state=True, batch_size=lmdata.size,
                    reset_state_for_input=0),
                FullOutputLayer(len(vocab)))

    if os.path.exists(args.model):
        model.load_params(args.model)

    trainer = SGDTrainer(model, {"learning_rate": LearningRateAnnealer.learning_rate(1.2),
                                 "weight_l2": 1e-7})
    annealer = LearningRateAnnealer(trainer)

    trainer.run(lmdata, controllers=[annealer])

    model.save_params(default_model)
Code Example #6
File: char_deep_rnn.py Project: zhp562176325/deepy
from deepy.layers import RNN, Dense


logging.basicConfig(level=logging.INFO)

resource_dir = os.path.abspath(os.path.dirname(__file__)) + os.sep + "resources"

vocab_path = os.path.join(resource_dir, "ptb.train.txt")
train_path = os.path.join(resource_dir, "ptb.train.txt")
valid_path = os.path.join(resource_dir, "ptb.valid.txt")
vocab = Vocab(char_based=True)
vocab.load(vocab_path, max_size=1000)

model = NeuralLM(input_dim=vocab.size, input_tensor=3)
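# Two stacked recurrent layers deepen the character-level model; the final
# softmax Dense layer predicts the next character.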
model.stack(
    RNN(hidden_size=100, output_type="sequence"),
    RNN(hidden_size=100, output_type="sequence"),
    Dense(vocab.size, "softmax"))


if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default=os.path.join(os.path.dirname(__file__), "models", "char_rnn_model1.gz"))
    ap.add_argument("--sample", default="")
    args = ap.parse_args()

    if os.path.exists(args.model):
        model.load_params(args.model)

    lmdata = LMDataset(vocab, train_path, valid_path, history_len=30, char_based=True, max_tokens=300)
    batch = SequentialMiniBatches(lmdata, batch_size=20)
Code Example #7

logging.basicConfig(level=logging.INFO)

default_model = os.path.join(os.path.dirname(__file__), "models", "class_based_rnnlm.gz")

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default="")
    ap.add_argument("--small", action="store_true")
    args = ap.parse_args()

    vocab, lmdata = load_data(small=args.small, history_len=5, batch_size=64)
    model = NeuralLM(vocab.size)
    model.stack(RNN(hidden_size=100, output_type="sequence", hidden_activation='sigmoid',
                    persistent_state=True, batch_size=lmdata.size,
                    reset_state_for_input=0),
                ClassOutputLayer(output_size=100, class_size=100))

    if os.path.exists(args.model):
        model.load_params(args.model)

    trainer = SGDTrainer(model, {"learning_rate": LearningRateAnnealer.learning_rate(1.2),
                                 "weight_l2": 1e-7})
    annealer = LearningRateAnnealer()

    trainer.run(lmdata, controllers=[annealer])

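    # Persist the trained parameters so a later run can resume via --model.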
    model.save_params(default_model)
Code Example #8
File: char_lstm.py Project: zuxfoucault/deepy
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from deepy.layers import LSTM, Dense

logging.basicConfig(level=logging.INFO)

resource_dir = os.path.abspath(
    os.path.dirname(__file__)) + os.sep + "resources"

vocab_path = os.path.join(resource_dir, "ptb.train.txt")
train_path = os.path.join(resource_dir, "ptb.train.txt")
valid_path = os.path.join(resource_dir, "ptb.valid.txt")
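# Character-level vocabulary built from the PTB training text, capped at
# 1000 symbols.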
vocab = Vocab(char_based=True)
vocab.load(vocab_path, max_size=1000)

model = NeuralLM(input_dim=vocab.size, input_tensor=3)
model.stack(LSTM(hidden_size=100, output_type="sequence"),
            Dense(vocab.size, activation="softmax"))

default_model = os.path.join(os.path.dirname(__file__), "models",
                             "char_lstm_model1.gz")

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default=default_model)
    ap.add_argument("--sample", default="")
    args = ap.parse_args()

    if os.path.exists(args.model):
        model.load_params(args.model)

    lmdata = LMDataset(vocab,
                       train_path,
                       valid_path,
                       history_len=30,
                       char_based=True,
                       max_tokens=300)
Code Example #9
File: char_deep_rnn.py Project: OlafLee/deepy

logging.basicConfig(level=logging.INFO)

resource_dir = os.path.abspath(os.path.dirname(__file__)) + os.sep + "resources"

vocab_path = os.path.join(resource_dir, "ptb.train.txt")
train_path = os.path.join(resource_dir, "ptb.train.txt")
valid_path = os.path.join(resource_dir, "ptb.valid.txt")
vocab = Vocab(char_based=True)
vocab.load(vocab_path, max_size=1000)

model = NeuralLM(input_dim=vocab.size, input_tensor=3)
model.stack(
    RNN(hidden_size=100, output_type="sequence"),
    RNN(hidden_size=100, output_type="sequence"),
    Dense(vocab.size, "softmax"),
)


if __name__ == "__main__":
    ap = ArgumentParser()
    ap.add_argument("--model", default=os.path.join(os.path.dirname(__file__), "models", "char_rnn_model1.gz"))
    ap.add_argument("--sample", default="")
    args = ap.parse_args()

    if os.path.exists(args.model):
        model.load_params(args.model)

    lmdata = LMDataset(vocab, train_path, valid_path, history_len=30, char_based=True, max_tokens=300)
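    # SequentialMiniBatches presumably yields batches in corpus order, so
    # consecutive batches continue the same stream of text.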
    batch = SequentialMiniBatches(lmdata, batch_size=20)
Code Example #10
File: char_lstm.py Project: 52nlp/deepy
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from deepy.layers import LSTM, Dense


logging.basicConfig(level=logging.INFO)

resource_dir = os.path.abspath(os.path.dirname(__file__)) + os.sep + "resources"

vocab_path = os.path.join(resource_dir, "ptb.train.txt")
train_path = os.path.join(resource_dir, "ptb.train.txt")
valid_path = os.path.join(resource_dir, "ptb.valid.txt")
vocab = Vocab(char_based=True)
vocab.load(vocab_path, max_size=1000)

model = NeuralLM(input_dim=vocab.size, input_tensor=3)
model.stack(LSTM(hidden_size=100, output_type="sequence"),
            Dense(vocab.size, activation="softmax"))

default_model = os.path.join(os.path.dirname(__file__), "models", "char_lstm_model1.gz")

if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default=default_model)
    ap.add_argument("--sample", default="")
    args = ap.parse_args()

    if os.path.exists(args.model):
        model.load_params(args.model)

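    # history_len=30 truncates each training sequence to 30 characters;
    # max_tokens=300 appears to cap the dataset size for a quick demo.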
    lmdata = LMDataset(vocab, train_path, valid_path, history_len=30, char_based=True, max_tokens=300)
    batch = SequentialMiniBatches(lmdata, batch_size=20)