Example #1
# Assumes `model`, `criterion`, `optimizer`, `device`, `data_dir`, and the
# `data_loader` helper are defined in the enclosing scope.
import copy
import time

import torch


def train_model(num_epochs=1):
    best_val_accu = 0.0
    best_model_wts = copy.deepcopy(model.state_dict())
    for epoch in range(num_epochs):
        model.train()
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        running_loss = 0.0
        running_corrects = 0.0
        since = time.time()
        trainloaders, train_size, _ = data_loader('train', data_dir)
        for inputs, labels in trainloaders:
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)  # call the module, not .forward(), so hooks run
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
        time_elapsed = time.time() - since

        model.eval()
        val_loss = 0.0
        val_correct = 0.0
        validloaders, valid_size, _ = data_loader('valid', data_dir)
        with torch.no_grad():  # gradients are not needed for validation
            for inputs, labels in validloaders:
                inputs = inputs.to(device)
                labels = labels.to(device)

                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)
                val_loss += loss.item() * inputs.size(0)
                val_correct += torch.sum(preds == labels.data)
        val_acc = val_correct.item() / valid_size
        if val_acc > best_val_accu:
            best_val_accu = val_acc
            best_model_wts = copy.deepcopy(model.state_dict())

        print('training took: {:.1f}s'.format(time_elapsed))
        print('train loss:{:.4f}'.format(running_loss / train_size))
        print('train accuracy:{:.4f}\n'.format(
            (running_corrects.item() / train_size)))

        print('validation loss:{:.4f}'.format(val_loss / valid_size))
        print('validation accuracy:{:.4f}\n'.format(val_correct.item() /
                                                    valid_size))
    model.load_state_dict(best_model_wts)
    print('best accuracy: {:.4f}'.format(best_val_accu))
    return model
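A minimal way to drive this function, assuming the globals it relies on have been set up; the network, loss, optimizer, and data directory below are illustrative stand-ins, not part of the original snippet:

import torch
import torch.nn as nn
from torchvision import models

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = models.resnet18(pretrained=True).to(device)  # any classifier works here
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
data_dir = 'flowers'  # hypothetical dataset root expected by data_loader

best_model = train_model(num_epochs=5)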
Example #2
# Assumes project helpers `make_dir`, `generator_network`,
# `discriminator_network`, `end_to_end_gan`, `content_loss`, and
# `data_loader` are defined elsewhere.
import numpy as np
from tensorflow.keras.optimizers import Adam


def train(batch_size, image_size, steps, ckpt_path):

    make_dir(ckpt_path)

    generator_net = generator_network(image_size)
    discriminator_net = discriminator_network(image_size)

    # Give each model its own optimizer instance; sharing one Adam object
    # across several compiled models can raise errors in TF2 Keras.
    generator_net.compile(Adam(learning_rate=0.01), loss=content_loss)
    discriminator_net.compile(Adam(learning_rate=0.01),
                              loss="binary_crossentropy")

    # Freeze the discriminator *before* compiling the combined model:
    # Keras takes `trainable` into account at compile time, so flipping
    # the flag afterwards does not affect an already-compiled model.
    discriminator_net.trainable = False
    end_to_end = end_to_end_gan(discriminator_net, generator_net, image_size)
    end_to_end.compile(Adam(learning_rate=0.01),
                       loss=[content_loss, 'binary_crossentropy'],
                       loss_weights=[1., 1e-3])

    data_iterator = data_loader(batch_size)

    for step in range(steps):

        real_labels = np.ones((batch_size, 1))
        fake_labels = np.zeros((batch_size, 1))

        correct_batch, degraded_batch = next(data_iterator)

        gen_hr = generator_net.predict(degraded_batch)

        # The discriminator still learns here: its own compile above captured
        # trainable=True, so train_on_batch updates its weights.

        disc_loss_1 = discriminator_net.train_on_batch(correct_batch,
                                                       real_labels)
        disc_loss_2 = discriminator_net.train_on_batch(gen_hr, fake_labels)
        disc_loss = 0.5 * np.add(disc_loss_1, disc_loss_2)

        end_loss = end_to_end.train_on_batch(degraded_batch,
                                             [correct_batch, real_labels])

        print('step: ' + str(step))
        print('discriminator loss: ', disc_loss)
        print('end_loss: ', end_loss)

        if step % 100 == 0:
            generator_net.save(ckpt_path + '/generator_' + str(step) + '.h5')
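A sketch of how this trainer might be invoked; every argument value below is a placeholder, not something taken from the original:

train(batch_size=16, image_size=128, steps=10000, ckpt_path='checkpoints')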
Example #3
# Assumes `encoder_inputs` and an `encoder` LSTM built with return_state=True,
# plus `neurons`, `num_output_features`, `optimizer`, and `loss`, are defined
# earlier in the script.
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]

decoder_inputs = Input(shape=(None, num_output_features))
decoder_lstm = LSTM(neurons, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_output_features, activation='linear')
decoder_outputs = decoder_dense(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer=optimizer, loss=loss)

pacing_text = ['LV', 'RV', 'BiV', 'LBBB']
data_dir = '../parse_data'
X, y = data_loader(pacing_text, data_dir, normalize=True)
X_train = X[[0, 2, 3], :, :]
y_train = y[[0, 2, 3], :, :]
X_test = np.expand_dims(X[1, :, :], axis=0)
y_test = np.expand_dims(y[1, :, :], axis=0)
train_data = data_generator(X_train, y_train)
val_data = data_generator(X_test, y_test)
# TF2's Model.fit consumes generators directly; fit_generator is deprecated.
model.fit(train_data,
          epochs=50,
          steps_per_epoch=8,
          validation_data=val_data,
          validation_steps=1)

encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(neurons, ))
decoder_state_input_c = Input(shape=(neurons, ))
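The example breaks off while building the inference models. Assuming the names above keep their meanings, the standard Keras seq2seq pattern completes the inference decoder like this (a sketch, not the original author's code):

decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs] + decoder_states)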
Example #4
import itertools
import sys

import numpy as np
from lasagne import layers  # `layers` below matches Lasagne's API
# KFold moved out of the long-removed `sklearn.cross_validation` module.
from sklearn.model_selection import KFold

from load_data import data_loader

# Send all subsequent prints to a log file.
sys.stdout = open('log.txt', 'w')


def float32(x):
    # `np.cast` was removed in NumPy 2.0; asarray with an explicit dtype
    # does the same job.
    return np.asarray(x, dtype=np.float32)


NTRAIN = 50000
NTEST = 200
EPOCHS = 100

loader = data_loader()

# One path per training image: "train/1.png" ... "train/50000.png".
pathes = ["train/%s.png" % i for i in range(1, NTRAIN + 1)]
X, X_hog, y = loader.load_data(pathes)

lin = layers.InputLayer((None, 3, 32, 32))
lhog = layers.InputLayer((None, 324))
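The example stops right after declaring its two input branches. Purely as an illustration of where such a network usually goes (every layer below is an assumption, not the original architecture), the image and HOG branches would typically be merged with a ConcatLayer before the classifier head:

from lasagne import nonlinearities

# Hypothetical continuation; filter counts and unit sizes are illustrative.
lconv = layers.Conv2DLayer(lin, num_filters=32, filter_size=(3, 3))
lpool = layers.MaxPool2DLayer(lconv, pool_size=(2, 2))
lflat = layers.FlattenLayer(lpool)
lmerge = layers.ConcatLayer([lflat, lhog])  # join image + HOG features
lout = layers.DenseLayer(lmerge, num_units=10,
                         nonlinearity=nonlinearities.softmax)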
Example #5
# Excerpt from inside a per-dataset training loop: `i` indexes the current
# dataset and `t` the epoch; `args`, `alpha`, `alpha_list`, `data_names`,
# `time_start`, `test_insts`, and `test_labels` come from the enclosing scope.
            running_loss += loss.item()

        if args.method == 'mdmn' or (args.method == 'darn' and args.mode == 'L2'):

            logger.info("Epoch %d, Alpha on %s: %s" % (t, data_names[i], alpha))
            alpha_list[i, :, t] = alpha

        logger.info("Epoch %d, loss = %.6g" % (t, running_loss))

    logger.info("Finish training %s in %.6g seconds" % (data_names[i],
                                                        time.time() - time_start))

    model.eval()

    # Test (use another hold-out target)
    test_loader = data_loader(test_insts[i], test_labels[i], batch_size=1000, shuffle=False)
    test_acc = 0.
    with torch.no_grad():  # inference only; no autograd bookkeeping
        for xt, yt in test_loader:
            xt = torch.as_tensor(xt, dtype=torch.float32).to(device)
            yt = torch.as_tensor(yt, dtype=torch.int64).to(device)
            preds_labels = torch.squeeze(torch.max(model.inference(xt), 1)[1])
            test_acc += torch.sum(preds_labels == yt).item()
    test_acc /= test_insts[i].shape[0]
    logger.info("Test accuracy on %s = %.6g" % (data_names[i], test_acc))
    test_results[data_names[i]] = test_acc
    np_test_results[i] = test_acc

logger.info("All test accuracies: ")
logger.info(test_results)

# Save results to files
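# The excerpt ends at the comment above; a minimal sketch of that step,
# assuming `np_test_results` is a NumPy array (the file name is invented):
np.savetxt('test_results.csv', np_test_results, delimiter=',')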
Example #6

model = train_model(epoch)  # `epoch` (the epoch count) is assumed to be defined earlier

model.eval()
test_loss = 0
test_correct = 0
model = model.to(device)
testloaders, test_size, class_to_idx = data_loader('test', data_dir)
with torch.no_grad():  # no gradients needed at test time
    for inputs, labels in testloaders:
        inputs = inputs.to(device)
        labels = labels.to(device)
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, labels)
        test_loss += loss.item() * inputs.size(0)
        test_correct += torch.sum(preds == labels.data)

print('test loss:{:.4f}'.format(test_loss / test_size))
print('test accuracy:{:.4f}\n'.format(test_correct.item() / test_size))

check_point = {
    'class_to_idx': class_to_idx,
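    # (the snippet is cut off here; the keys and save call below are a typical
    # completion, not part of the original source)
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(check_point, 'checkpoint.pth')  # hypothetical file name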