Example #1
import logging

import numpy as np
import tensorflow as tf
from tflearn.data_utils import pad_sequences
from tflearn.datasets import imdb

# Training utilities from the tefla framework; the module paths below are
# assumed and may need adjusting for your install. `DataSet` is assumed to be
# defined elsewhere; a hypothetical `model` sketch follows `main()` below.
from tefla.core.learning import SupervisedLearner
from tefla.core.lr_policy import StepDecayPolicy
from tefla.utils import util


def main():
    train, test, _ = imdb.load_data(path='imdb.pkl',
                                    n_words=10000,
                                    valid_portion=0.1)
    trainX, trainY = train
    testX, testY = test

    trainX = pad_sequences(trainX, maxlen=100, value=0.)
    testX = pad_sequences(testX, maxlen=100, value=0.)
    trainY = np.asarray(trainY)
    testY = np.asarray(testY)
    data_set = DataSet(trainX, trainY, testX, testY)
    training_cnf = {
        'classification': True,
        'batch_size_train': 32,
        'batch_size_test': 32,
        'validation_scores': [('accuracy', tf.metrics.accuracy)],
        'num_epochs': 50,
        'input_size': (100, ),
        'lr_policy': StepDecayPolicy(schedule={
            0: 0.01,
            30: 0.001,
        })
    }
    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO)

    learner = SupervisedLearner(model,
                                training_cnf,
                                classification=training_cnf['classification'],
                                is_summary=False,
                                num_classes=2)
    learner.fit(data_set, weights_from=None, start_epoch=1)
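

# `model` is never defined in this snippet. Purely as an illustration, a
# graph-building function for the padded length-100 word-ID sequences might
# look like the sketch below; the name, signature, and
# embedding -> mean-pool -> dense architecture are all assumptions, and the
# callable tefla's SupervisedLearner actually expects may differ.
def model(inputs, is_training=True, reuse=None, num_classes=2):
    with tf.variable_scope('model', reuse=reuse):
        # Embedding table for the 10,000-word vocabulary loaded above.
        embeddings = tf.get_variable('embeddings', shape=[10000, 128])
        embedded = tf.nn.embedding_lookup(embeddings, inputs)  # (batch, 100, 128)
        pooled = tf.reduce_mean(embedded, axis=1)              # (batch, 128)
        return tf.layers.dense(pooled, num_classes)            # (batch, num_classes)


if __name__ == '__main__':
    main()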
Example #2
from keras.preprocessing import sequence

# `imdb` is a project-local loader that returns train/validation/test splits
# (stock keras.datasets.imdb returns only train and test); its import is
# assumed to happen elsewhere.

# The embedding and convolution hyperparameters are not defined in the
# original snippet; the values below are assumed from Keras's imdb_cnn_lstm
# example.
max_features = 20000
maxlen = 100
embedding_size = 128

# Convolution
kernel_size = 5
filters = 64
pool_size = 4

# GRU
gru_output_size = 70

# Training
batch_size = 30
epochs = 5

'''
Note:
batch_size is highly sensitive.
Only a few epochs are needed, as the dataset is very small.
'''

print('Loading data...')
(x_train, y_train), (x_val, y_val), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_val), 'validation sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')

x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_val = sequence.pad_sequences(x_val, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

print('x_train shape:', x_train.shape)
print('x_val shape:', x_val.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
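
# The snippet ends at the build step. Purely as a sketch, here is a
# Conv1D + GRU classifier consistent with the hyperparameters above, in the
# style of Keras's imdb_cnn_lstm example; the architecture used by the
# original project is not shown, so treat this as an assumption.
from keras.models import Sequential
from keras.layers import Activation, Conv1D, Dense, Dropout, Embedding, GRU, MaxPooling1D

model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(GRU(gru_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_val, y_val))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)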
Example #3
from keras.preprocessing import sequence

# As in Example #2, `imdb` is a project-local three-split loader, and the
# embedding/convolution hyperparameters below are assumed values not present
# in the original snippet.
max_features = 20000
maxlen = 100
embedding_size = 128

# Convolution
kernel_size = 5
filters = 64
pool_size = 4

# RNN
rnn_output_size = 70

# Training
batch_size = 30
epochs = 2

'''
Note:
batch_size is highly sensitive.
Only 2 epochs are needed as the dataset is very small.
'''

print('Loading data...')
(x_train, y_train), (x_val, y_val), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_val), 'validation sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')

x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_val = sequence.pad_sequences(x_val, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

print('x_train shape:', x_train.shape)
print('x_val shape:', x_val.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
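
# As with Example #2, the model itself is not shown. Below is an analogous
# sketch with SimpleRNN in place of GRU; the architecture is assumed, not
# taken from the original project.
from keras.models import Sequential
from keras.layers import Activation, Conv1D, Dense, Dropout, Embedding, MaxPooling1D, SimpleRNN

model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(SimpleRNN(rnn_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_val, y_val))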