Example #1
def test_model(model, X, Y, batch_size):
    # Evaluate on a held-out slice of the data, then visualize a few
    # reconstructed predictions against their inputs and targets.
    test_loss = model.evaluate(X[1000:1300], Y[1000:1300], batch_size)
    print('Test Loss {:.4f}'.format(test_loss))

    y1 = model.predict(X[50], 10)
    y2 = model.predict(X[1000], 10)
    y3 = model.predict(X[1500], 10)
    y4 = model.predict(X[1345], 10)

    plot_result(restore_patch(X[50].numpy(), (4, 4)),
                restore_patch(Y[50].numpy(), (4, 4)),
                restore_patch(y1, (4, 4)))

    plot_result(restore_patch(X[1000].numpy(), (4, 4)),
                restore_patch(Y[1000].numpy(), (4, 4)),
                restore_patch(y2, (4, 4)))

    plot_result(restore_patch(X[1500].numpy(), (4, 4)),
                restore_patch(Y[1500].numpy(), (4, 4)),
                restore_patch(y3, (4, 4)))

    plot_result(restore_patch(X[1345].numpy(), (4, 4)),
                restore_patch(Y[1345].numpy(), (4, 4)),
                restore_patch(y4, (4, 4)))
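restore_patch is not defined in any of these snippets. A minimal sketch of what the calls above seem to assume (a hypothetical helper, not the original): each sample is a row-major stack of equally sized patches that tile a (rows, cols) grid.

import numpy as np

def restore_patch(patches, grid):
    # Hypothetical helper: reassemble a row-major stack of patches,
    # shaped (rows * cols, patch_h, patch_w), into a single image.
    rows, cols = grid
    n, ph, pw = patches.shape
    assert n == rows * cols, "grid does not match the patch count"
    # (rows, cols, ph, pw) -> (rows, ph, cols, pw) -> (rows*ph, cols*pw)
    return (patches.reshape(rows, cols, ph, pw)
                   .transpose(0, 2, 1, 3)
                   .reshape(rows * ph, cols * pw))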
Example #2
def test_model(model, X, Y, batch_size):

    test_loss = model.evaluate(X[300:], Y[300:], batch_size)
    print('Test Loss {:.4f}'.format(test_loss))

    y1 = model.predict(X[50], 5)
    y2 = model.predict(X[365], 5)
    y3 = model.predict(X[410], 5)
    y4 = model.predict(X[446], 5)

    plot_result(restore_patch(X[50].numpy(), (3, 4)),
                restore_patch(Y[50].numpy(), (3, 4)),
                restore_patch(y1, (3, 4)))

    plot_result(restore_patch(X[365].numpy(), (3, 4)),
                restore_patch(Y[365].numpy(), (3, 4)),
                restore_patch(y2, (3, 4)))

    plot_result(restore_patch(X[410].numpy(), (3, 4)),
                restore_patch(Y[410].numpy(), (3, 4)),
                restore_patch(y3, (3, 4)))

    plot_result(restore_patch(X[446].numpy(), (3, 4)),
                restore_patch(Y[446].numpy(), (3, 4)),
                restore_patch(y4, (3, 4)))
Example #3
def test_model(model, X, Y):
    #e1 = model.evaluate(X[700:800], Y[700:800], True)
    test_loss = model.evaluate(X[800:], Y[800:], False)
    print('Test Loss {:.4f}'.format(test_loss))

    y1 = model.predict(X[50], 10)
    y2 = model.predict(X[915], 10)
    y3 = model.predict(X[936], 10)
    y4 = model.predict(X[956], 10)

    plot_result(
        restore_patch(X[50].numpy(), (2, 2)),
        restore_patch(Y[50].numpy(), (2, 2)),
        restore_patch(y1, (2, 2))
    )
    
    plot_result(
        restore_patch(X[915].numpy(), (2, 2)),
        restore_patch(Y[915].numpy(), (2, 2)),
        restore_patch(y2, (2, 2))
    )
    
    plot_result(
        restore_patch(X[936].numpy(), (2, 2)),
        restore_patch(Y[936].numpy(), (2, 2)),
        restore_patch(y3, (2, 2))
    )
    
    plot_result(
        restore_patch(X[956].numpy(), (2, 2)),
        restore_patch(Y[956].numpy(), (2, 2)),
        restore_patch(y4, (2, 2))
    )
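plot_result is likewise left undefined. A minimal sketch under the same assumptions (grayscale 2-D images; the helper's body is a guess, only its name and call shape come from the snippets above):

import matplotlib.pyplot as plt

def plot_result(x, y_true, y_pred):
    # Hypothetical helper: show input, ground truth, and prediction
    # side by side for visual comparison.
    fig, axes = plt.subplots(1, 3, figsize=(12, 4))
    for ax, img, title in zip(axes, (x, y_true, y_pred),
                              ("input", "ground truth", "prediction")):
        ax.imshow(img, cmap="gray")
        ax.set_title(title)
        ax.axis("off")
    plt.show()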
Example #4
def train(trainloader, generator, discriminator, loss, optimizer_g, optimizer_d):
    ctr = 0
    minibatch_disc_losses = []
    minibatch_gen_losses = []

    fixed_noise = Variable(torch.FloatTensor(8 * 8, z_dim, 1, 1).normal_(0, 1), volatile=True)

    if cuda_available:
        print("CUDA is available!")
        fixed_noise = fixed_noise.cuda()  # .cuda() is not in-place; rebind the result

    print("Epoch, Inception Score, MMD Score", file=open("logs/eval.csv", "a"))

    for epoch in range(50):
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            ctr += 1
            if cuda_available:
                inputs, targets = inputs.cuda(), targets.cuda()

            inputs, targets = Variable(inputs), Variable(targets)

            zeros = Variable(torch.zeros(inputs.size(0)))
            ones = Variable(torch.ones(inputs.size(0)))

            if cuda_available:
                zeros, ones = zeros.cuda(), ones.cuda()

            # print("Updating discriminator...")
            minibatch_noise = sample_noise(inputs.size(0), z_dim)

            # Zero gradients for the discriminator
            optimizer_d.zero_grad()

            # Train with real examples
            d_real = discriminator(inputs)

            if discriminator.model_name == 'DCGAN':
                d_real_loss = loss(d_real, ones)  # Train discriminator to recognize real examples
            else:
                d_real_loss = 0.5 * torch.mean((d_real - ones) ** 2)

            # print("Applying gradients to discriminator...")
            d_real_loss.backward()

            # print("Train with fake examples from the generator")
            fake = generator(minibatch_noise).detach()  # Detach to prevent backpropping through the generator

            d_fake = discriminator(fake)

            d_fake_loss = loss(d_fake, zeros)  # Train discriminator to recognize generator samples
            d_fake_loss.backward()
            minibatch_disc_losses.append(d_real_loss.data[0] + d_fake_loss.data[0])

            # Update the discriminator
            optimizer_d.step()

            # print("Updating the generator...")
            optimizer_g.zero_grad()

            # print("Sample z ~ N(0, 1)")
            minibatch_noise = sample_noise(inputs.size(0), z_dim)

            d_fake = discriminator(generator(minibatch_noise))
            if generator.model_name == 'DCGAN':
                g_loss = loss(d_fake, ones)  # Train generator to fool the discriminator into thinking these are real.
            else:
                g_loss = 0.5 * torch.mean((d_fake - ones) ** 2)
            g_loss.backward()

            # print("Applying gradients to generator...")
            optimizer_g.step()

            minibatch_gen_losses.append(g_loss.data[0])
            if ctr % 10 == 0:
                print("Iteration {} of epoch {}".format(ctr, epoch))

        print('Generator loss : %.3f' % (np.mean(minibatch_gen_losses)))
        print('Discriminator loss : %.3f' % (np.mean(minibatch_disc_losses)))

        inc_score = inception_score.evaluate(generator, z_dim, cuda=cuda_available)
        mmd_score = eval_mmd(generator, z_dim)
        print('MMD score      : {}'.format(mmd_score))
        print('Inception score: {}'.format(inc_score))
        print("{}, {}, {}".format(epoch, inc_score, mmd_score), file=open("logs/eval.csv", "a"))

        utility.plot_result(generator, fixed_noise, epoch)
        loss_name = "{0}_epoch{1}".format(generator.model_name, epoch)
        utility.save_losses(minibatch_disc_losses, minibatch_gen_losses, loss_name)
        utility.save(discriminator, generator, epoch)
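sample_noise is called but never shown. A minimal sketch consistent with the call sites and with fixed_noise above (an assumption, written in the same pre-0.4 PyTorch Variable style as the rest of the loop):

def sample_noise(batch_size, z_dim):
    # Hypothetical helper: draw z ~ N(0, 1) shaped (batch, z_dim, 1, 1)
    # for a DCGAN-style generator.
    noise = torch.FloatTensor(batch_size, z_dim, 1, 1).normal_(0, 1)
    if cuda_available:
        noise = noise.cuda()
    return Variable(noise)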
Example #5
# From: https://campus.datacamp.com
# Import the libraries
import utility as u

train, test = u.load_data()
train = u.prepare_data(train)

# Create train_two with the newly defined feature
train_two = train.copy()
train_two["Family_size"] = 1
train_two["Family_size"] = train_two["SibSp"] + train_two["Parch"] + 1
train_two["Family_size"] = train_two["Family_size"].fillna(1)
print(train_two)

# Create a new feature set and add the new feature
feature_list = ["Pclass", "Sex", "Age", "Fare", "SibSp", "Parch", "Family_size"]

# Control overfitting by setting max_depth to 10 and min_samples_split to 5: my_tree_two
max_depth = 10
min_samples_split = 5
my_tree, features, target = u.train_model(train_two, feature_list, max_depth, min_samples_split)

# Look at the importance and score of the included features
print(my_tree.feature_importances_)
print(my_tree.score(features, target))

u.plot_result(feature_list, my_tree, features, target)
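u.train_model is opaque here; given the DataCamp exercise this comes from, it plausibly fits a decision tree. A hedged sketch (the body is an assumption; only the signature and return values are taken from the call above):

from sklearn import tree

def train_model(train, feature_list, max_depth, min_samples_split):
    # Hypothetical reconstruction: fit a decision tree classifier on
    # the selected features and return it with the arrays it was fit on.
    target = train["Survived"].values
    features = train[feature_list].values
    my_tree = tree.DecisionTreeClassifier(max_depth=max_depth,
                                          min_samples_split=min_samples_split,
                                          random_state=1)
    my_tree = my_tree.fit(features, target)
    return my_tree, features, target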
Example #6
def run_experiment(hidden, af, optimizer, timesteps, batch_size, nb_epochs,
                   T, p, l_rate, decay, b_1, b_2, mom, rho, fname):
    # NOTE: the snippet is truncated; this signature is a hypothetical
    # reconstruction from the hyperparameters printed below.
    print(hidden, af, optimizer, timesteps, batch_size, nb_epochs, T, p,
          l_rate, decay, b_1, b_2, mom, rho, fname)

    #load data
    char_dict, all_music_encoded = load_data()

    input_dim = len(char_dict.keys())

    #get model
    rnn = model.build_simplernn_model(input_dim, hidden, af, optimizer, T, p,
                                      l_rate, decay, b_1, b_2, mom, rho)

    #Train model
    hist = model.train_rnn(rnn, all_music_encoded, timesteps, batch_size,
                           nb_epochs)
    util.plot_result(hist.history, fname)

    #Generation of music

    #load trained weights from file to model
    #rnn.load_weights("../model/weights.09-2.05.hdf5")

    #pick a random input pattern as our seed sequence, then generate music character by character
    prime_text = '$'  # alternative seed, e.g. 'K:F\r\nX:2\r\n'

    generated_music = model.generate_music_soft(rnn, prime_text, char_dict)

    with open("../music/music_soft.txt", "w") as text_file:
        text_file.write("%s" % generated_music)
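model.generate_music_soft is not shown either. A heavily hedged sketch of the "soft" (sampling, not argmax) generation it presumably performs, assuming the network maps a one-hot character sequence to a distribution over the next character; every name and shape here is an assumption:

import numpy as np

def generate_music_soft(rnn, prime_text, char_dict, length=500):
    # Hypothetical sketch: one-hot encode the text so far, get the
    # network's distribution over the next character, and sample from
    # it instead of taking the argmax.
    idx_to_char = {i: c for c, i in char_dict.items()}
    generated = prime_text
    for _ in range(length):
        x = np.zeros((1, len(generated), len(char_dict)))
        for t, ch in enumerate(generated):
            x[0, t, char_dict[ch]] = 1.0
        probs = rnn.predict(x, verbose=0)[0]
        probs = probs / probs.sum()  # guard against rounding drift
        next_idx = np.random.choice(len(probs), p=probs)
        generated += idx_to_char[next_idx]
    return generated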
Example #7
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
import utility as u

train, test = u.load_data()
train = u.prepare_data(train)
test = u.prepare_data(test)

# We want the Pclass, Age, Sex, Fare, SibSp, Parch, and Embarked variables
feature_list = ["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]
features_forest = train[feature_list].values
target = train["Survived"].values

# Building and fitting my_forest
forest = RandomForestClassifier(max_depth=10,
                                min_samples_split=2,
                                n_estimators=100,
                                random_state=1)
my_forest = forest.fit(features_forest, target)

# Print the score of the fitted random forest
print(my_forest.score(features_forest, target))

# Compute predictions on our test set features then print the length of the prediction vector
test_features = test[feature_list].values
pred_forest = my_forest.predict(test_features)
print(len(pred_forest))

u.plot_result(feature_list, my_forest, features_forest, target)
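If the goal is a Kaggle submission, the predictions can be packaged into the expected two-column CSV. This assumes the prepared test frame still carries its PassengerId column:

import pandas as pd

# Hypothetical follow-up: write predictions in the PassengerId/Survived
# format that Kaggle's Titanic competition expects.
submission = pd.DataFrame({"PassengerId": test["PassengerId"],
                           "Survived": pred_forest})
submission.to_csv("my_solution.csv", index=False)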