# Example #1 (score: 0)
def main():
    """Train a two-layer character-level RNN on the Shakespeare corpus."""
    # Character-level text data: each target is the next character (n_future=1).
    shakespeare = TextDataset(
        path='../../../../datasets/shakespeare_input.txt',
        source="http://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt",
        target_n_future=1,
        sequence_length=50,
    )

    vocab_size = len(shakespeare.vocab)

    # Recurrent model over the character vocabulary with dropout noise
    # that decays exponentially during training.
    model = RNN(
        outdir='outputs/rnn/',
        input_size=vocab_size,
        hidden_size=128,
        output_size=vocab_size,
        layers=2,
        activation='softmax',
        hidden_activation='relu',
        mrg=RNG_MRG.MRG_RandomStreams(1),
        weights_init='uniform',
        weights_interval='montreal',
        bias_init=0.0,
        r_weights_init='identity',
        r_bias_init=0.0,
        cost_function='nll',
        cost_args=None,
        noise='dropout',
        noise_level=.7,
        noise_decay='exponential',
        noise_decay_amount=.99,
        direction='forward',
    )

    # Track the training cost on the validation and test splits only.
    monitor = Monitor(
        "cost",
        model.get_train_cost(),
        train=False,
        valid=True,
        test=True,
    )

    # RMSProp with soft gradient clipping and exponential LR decay.
    trainer = RMSProp(
        model=model,
        dataset=shakespeare,
        grad_clip=5.,
        hard_clip=False,
        learning_rate=2e-3,
        lr_decay='exponential',
        lr_decay_factor=0.97,
        decay=0.95,
        batch_size=50,
        epochs=50,
    )
    trainer.train(monitor_channels=monitor)
def main(sequence):
    """Train an RNN-GSN on MNIST digits ordered into sequences.

    :param sequence: sequence-ordering scheme passed through to MNIST.
    """
    model = RNN_GSN()

    # MNIST arranged into sequences; the RNN expects 3D input
    # (sequence, batch, feature), hence seq_3d=True.
    dataset = MNIST(sequence_number=sequence, seq_3d=True, seq_length=50)

    # RMSProp with early stopping (patience 30) and soft gradient clipping.
    trainer = RMSProp(
        model=model,
        dataset=dataset,
        epochs=500,
        batch_size=50,
        save_freq=10,
        stop_patience=30,
        stop_threshold=.9995,
        learning_rate=1e-6,
        decay=.95,
        max_scaling=1e5,
        grad_clip=5.,
        hard_clip=False,
    )
    trainer.train()
def main(sequence):
    """Fit an RNN-GSN model to sequenced MNIST.

    :param sequence: sequence-ordering scheme handed to the MNIST loader.
    """
    net = RNN_GSN()

    # The recurrent model needs 3D batches, so request the sequenced,
    # 3D variant of MNIST with length-50 sequences.
    digits = MNIST(sequence_number=sequence, seq_3d=True, seq_length=50)

    # Optimize with RMSProp: periodic checkpoints every 10 epochs,
    # early stopping, and soft clipping of gradients at norm 5.
    rmsprop = RMSProp(model=net,
                      dataset=digits,
                      epochs=500,
                      batch_size=50,
                      save_freq=10,
                      stop_patience=30,
                      stop_threshold=.9995,
                      learning_rate=1e-6,
                      decay=.95,
                      max_scaling=1e5,
                      grad_clip=5.,
                      hard_clip=False)
    rmsprop.train()
# Example #4 (score: 0)
def main():
    """Train a character-level RNN language model on Shakespeare text."""
    # Next-character prediction dataset built from the raw corpus file.
    corpus = TextDataset(
        path='../../../../datasets/shakespeare_input.txt',
        source="http://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt",
        target_n_future=1,
        sequence_length=50,
    )

    n_chars = len(corpus.vocab)

    # Two stacked recurrent layers; softmax output over the vocabulary,
    # dropout noise that anneals exponentially over training.
    net = RNN(
        outdir='outputs/rnn/',
        input_size=n_chars,
        hidden_size=128,
        output_size=n_chars,
        layers=2,
        activation='softmax',
        hidden_activation='relu',
        mrg=RNG_MRG.MRG_RandomStreams(1),
        weights_init='uniform',
        weights_interval='montreal',
        bias_init=0.0,
        r_weights_init='identity',
        r_bias_init=0.0,
        cost_function='nll',
        cost_args=None,
        noise='dropout',
        noise_level=.7,
        noise_decay='exponential',
        noise_decay_amount=.99,
        direction='forward',
    )

    # Report the cost channel on validation/test splits but not training.
    channel = Monitor("cost", net.get_train_cost(),
                      train=False, valid=True, test=True)

    # RMSProp optimizer with exponentially decaying learning rate and
    # soft gradient clipping at 5.
    opt = RMSProp(model=net, dataset=corpus,
                  grad_clip=5., hard_clip=False,
                  learning_rate=2e-3, lr_decay='exponential',
                  lr_decay_factor=0.97,
                  decay=0.95, batch_size=50, epochs=50)
    opt.train(monitor_channels=channel)