def evaluate_model():
    nn_regressor = nn_model.build_model()
    test_features = pd.read_csv("test_features.csv")
    test_prices = pd.read_csv("test_price.csv")

    eval_input_fn = tf.estimator.inputs.pandas_input_fn(x=test_features,
                                                        y=test_prices["price"],
                                                        batch_size=32,
                                                        shuffle=False,
                                                        num_epochs=1)
    result = nn_regressor.evaluate(input_fn=eval_input_fn)
    print(result)
Example #2
def train_model():
    # Show training progress
    logging.getLogger().setLevel(logging.INFO)

    nn_regressor = nn_model.build_model()
    train_features = pd.read_csv("train_features.csv")
    train_prices = pd.read_csv("train_price.csv")

    training_input_fn = tf.estimator.inputs.pandas_input_fn(
        x=train_features,
        y=train_prices["price"],
        batch_size=128,
        shuffle=True,
        num_epochs=1000)
    nn_regressor.train(input_fn=training_input_fn)
Example #3
def predict():
    nn_regressor = nn_model.build_model()
    test_features = pd.read_csv("test_features.csv")
    test_prices = pd.read_csv("test_price.csv")

    predict_input_fn = tf.estimator.inputs.pandas_input_fn(
        x=test_features,
        y=None,
        batch_size=32,
        shuffle=False,
        num_epochs=1)
    results = nn_regressor.predict(input_fn=predict_input_fn)
    
    for pair in zip(results, test_prices.values):
        result = pair[0]
        predicted_price = result["predictions"][0]
        actual_price = pair[1][0]

        # Get price from log values
        print("Predicted:", np.exp(predicted_price),
              "Actual:", np.exp(actual_price))
Example #4
# Choose the directory in which the trained model was stored;
# output files of this script will also be stored there

import csv

import bib_utils
import nn_model

save_path = './Results/'

# recover number of filters the NN was trained with
with open(save_path + 'model_options.log') as inputfile:
    for row in csv.reader(inputfile):
        print(row[0])
        if 'filters' in row[0]:
            filters = int(row[0][9:])

# build model
model = nn_model.build_model(filters)

# load trained parameters
model_parameters = save_path + 'model_parameters.hdf'
model.load_weights(model_parameters)

# calculate new reconstructions with the NN
g_nn = model.predict(f)
# resize to original resolution
g_nn = bib_utils.resize_NN_image(g_nn, training=False)

print('g_nn:', g_nn.shape, g_nn.dtype)

# -------------------------------------------------------------------------
# Plot grid of reconstructions
Example #5
# checkpoint_dir = "./layers_exp_2/selected/training_checkpoints_"+db_type+"_" + rnn_type + "_units_"+units
checkpoint_dir = checkpoints_folder + "/training_checkpoints_" + db_type + "_" + rnn_type + "_units_" + units

model_params = pickle.load(open(checkpoint_dir + "/model_params.p", "rb"))

vocab_size = model_params["vocab_size"]
embedding_dim = model_params["embedding_dim"]
rnn_units = model_params["rnn_units"]
min_note = model_params["min_note"]

# import pdb; pdb.set_trace()
tf.train.latest_checkpoint(checkpoint_dir)

model = nn_model.build_model(vocab_size,
                             embedding_dim,
                             rnn_units,
                             batch_size=1,
                             rnn_type=rnn_type)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))

model.summary()

# ### The prediction loop
#
# The following code block generates the text:
# * It starts by choosing a start string, initializing the RNN state and setting the number of characters to generate.
# * Get the prediction distribution of the next character using the start string and the RNN state.
# * Use a multinomial distribution to calculate the index of the predicted character. Use this predicted character as our next input to the model.
# * The RNN state returned by the model is fed back into the model so that it now has more context, instead of only one character. After predicting the next character, the updated RNN state is again fed back into the model, which is how the model accumulates context from the previously predicted characters.
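#
# A minimal sketch of that loop, assuming the model built above (batch_size=1)
# returns per-timestep logits of shape (1, seq_len, vocab_size); the helper
# name, the seed ids and the stateful-RNN reset are assumptions, not taken
# from the original script:
def generate_sequence(model, start_ids, num_generate=100, temperature=1.0):
    # start_ids: a short list of integer token ids used to seed the model.
    input_eval = tf.expand_dims(start_ids, 0)
    generated = list(start_ids)
    model.reset_states()  # assumes a stateful RNN, as in the TF text-generation tutorial
    for _ in range(num_generate):
        predictions = tf.squeeze(model(input_eval), 0)  # drop the batch dimension
        predictions = predictions / temperature
        # Sample the next id from the categorical distribution over the vocabulary.
        predicted_id = int(tf.random.categorical(predictions, num_samples=1)[-1, 0])
        # Feed the sampled id (plus the carried RNN state) back in as the next input.
        input_eval = tf.expand_dims([predicted_id], 0)
        generated.append(predicted_id)
    return generated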
Example #6
testImages, testClassification, fileNameList = load_image_samples(
    "test_images")

#Number of classes
K = testClassification.shape[1]
#Number of test samples
m = testImages.shape[0]

#Image dimensions
imageHeight = testImages[0].shape[0]
imageWidth = testImages[0].shape[1]
numChannels = testImages[0].shape[2]  #Should be 3 for RGB

print "Number of classes:", K
print "Shape of training samples:", imageHeight, imageWidth, numChannels

model = build_model(K, imageHeight, imageWidth, numChannels)

model.load("./marker-classifier.tfl")

predictedClassification = model.predict(testImages)

for idx, c in enumerate(testClassification):
    print("Expected for:", fileNameList[idx])
    print(c)

    print("Predicted")
    print(predictedClassification[idx])
    print("=======")
Example #7
rnn_units = args.rnn_units
epochs = args.epochs

dataset_train, dataset_validation, vocab_size, BATCH_SIZE, steps_per_epoch, min_note = data_preparation.prepare_data(
    db_type)
# ## Build The Model

# Length of the vocabulary in chars
# vocab_size = len(vocab)

# The embedding dimension
embedding_dim = 256

model = nn_model.build_model(vocab_size=vocab_size,
                             embedding_dim=embedding_dim,
                             rnn_units=rnn_units,
                             batch_size=BATCH_SIZE,
                             rnn_type=rnn_type)

model.summary()

# ## Train the model


# At this point the problem can be treated as a standard classification problem. Given the previous RNN state, and the input this time step, predict the class of the next character.
# ### Attach an optimizer, and a loss function
# The standard `tf.keras.losses.sparse_categorical_crossentropy` loss function works in this case because it is applied across the last dimension of the predictions.
# Because our model returns logits, we need to set the `from_logits` flag.
def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                           logits,
                                                           from_logits=True)
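# A minimal sketch of attaching an optimizer to this loss, as described above
# (the choice of 'adam' here is an assumption, not necessarily what the
# original script uses):
model.compile(optimizer='adam', loss=loss)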
Example #8
nn_callback.write(
    'loss: %s\nfilters: %i\nepochs: %i\nlr: %e\ndropout_rate: %.2f\nbatch_size: %i\n'
    % (loss, filters, epochs, lr, dropout_rate, batch_size),
    logname,
    reset=True)

train_batches = float(len(f_train)) / float(batch_size)
valid_batches = float(len(f_valid)) / float(batch_size)

print('batch_size:', batch_size, '(train: %.3f, valid: %.3f)' % (train_batches,
                                                                 valid_batches))

# Use Adam optimizer
opt = Adam(lr=lr)

# Load and compile NN model
model = nn_model.build_model(filters, dropout_rate)
model.compile(loss=loss, optimizer=opt)

# start training
# 2 files will be created
# train.log - where loss function values are stored
# model_parameters.hdf - where best parameter values for the NN are stored
model.fit(x=f_train,
          y=g_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=verbose,
          callbacks=[nn_callback.MyCallback(save_path)],
          validation_data=(f_valid, g_valid))
Example #9
    train_Y = {'policy_out': probabilities, 'value_out': y_values}
    # test_Y = {'policy_out':testing_probs, 'value_out':testing_values}

    return train_X, train_Y


def calculate_probabilities(visits):
    # Total visit count per example, used to normalize each board.
    normalize_sums = visits.sum(axis=1).sum(axis=1)
    # Flatten each (H, W) visit grid into a vector of length H*W.
    reshaped = visits.reshape((visits.shape[0], visits.shape[1] * visits.shape[2]))

    # Divide every example by its total so its entries sum to 1.
    normalized = reshaped / normalize_sums[:, None]

    probabilities = normalized.reshape((visits.shape[0], visits.shape[1] * visits.shape[2]))

    return probabilities


def calculate_values(moves, values):
    y_values = np.array([value[move[0]][move[1]] for move, value in zip(moves, values)])
    return y_values


train_X, train_Y = load_data('hex_data.npz')
model = build_model()
history = model.fit(train_X, train_Y, verbose=1, validation_split=0.2, epochs=25, shuffle=True)

# loss, accuracy = model.evaluate(test_X, test_Y, verbose = 1)
# print("accuracy: {}%".format(accuracy*100))

model.save('new_supervised_zero.h5')