def main(depth, train_mode):
    start = time.time()
    model = create_network(depth, ARTIST_NAME=ARTIST_NAME, TRAIN_MODE=train_mode)
    # Point this at the file containing the lyrics you want to train on.
    text_file = "ostr_lyrics.txt"

    if train_mode:
        bars = split_lyrics_file(text_file)
    else:
        bars = generate_lyrics(text_file)
        logger.debug("Generated lyrics:")
        logger.debug(bars)

    rhyme_list = prepare_list_of_all_rhymes(bars)
    if train_mode:
        x_data, y_data = build_dataset(bars, rhyme_list)
        train(x_data, y_data, model)
        logger.debug("Execution time in sec: {}".format(time.time() - start))
    else:
        vectors = compose_rap(bars, rhyme_list, text_file, model)
        rap = vectors_into_song(vectors, bars, rhyme_list)
        # f = open(FILE_WITH_GENERATED_LYRICS, "w")
        # for bar in rap:
        #     f.write(bar)
        #     f.write("\n")
        logger.debug("Execution time in sec: {}".format(time.time() - start))
        return rap
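For completeness, a hedged sketch of how this entry point might be driven; the depth value and the TRAIN_MODE flag here are illustrative assumptions, not part of the original project.

if __name__ == "__main__":
    TRAIN_MODE = False  # True: train on the lyrics file; False: generate a rap
    rap = main(depth=4, train_mode=TRAIN_MODE)
    if rap:  # main() only returns lyrics in generation mode
        print("\n".join(rap))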
Example #2
def test_xor():
    # XOR truth table: four input pairs and their expected outputs.
    X = np.array(([1, 1], [0, 1], [0, 0], [1, 0]), dtype=float)
    y = np.array(([0], [1], [0], [1]), dtype=float)

    NN = Neural_Network()
    train(NN, X, y)

    # Query the trained network on each XOR input pattern.
    for X in np.array(([1, 1], [0, 1], [0, 0], [1, 0]), dtype=float):
        yHat = NN.forward(X)
        print('estimate for {}: {}'.format(X, yHat))
Example #3
def run_training_session(all_data_x, all_data_y, model_save_name, fnames, lead):
    """Train and evaluate a feed-forward net on a single ECG lead."""
    cfg.current_lead = lead
    cfg.model_save_name = "lead" + str(lead) + model_save_name

    data_x = all_data_x.copy()[:, :, (lead,)]
    data_y = all_data_y.copy()

    print(data_x.shape)

    x_train, y_train, x_val, y_val, x_test, y_test = nn.prepare_train_val_data(
        data_x, 
        data_y, 
        cfg.tvt_split, 
        split_on = cfg.split_on,
        patient_ids = [dgen.filename_info(f, "ID") for f in fnames]
    )

    cfg.train_size = x_train.shape[0]
    cfg.validation_size = x_val.shape[0]
    cfg.test_size = x_test.shape[0]

    model = nn.ffnet((cfg.nn_input_size, ))
    nn.train(
        model, x_train, y_train, x_val, y_val, 
        batch_size = cfg.training_batch_size, 
        epochs = cfg.epochs
    )

    r = nn.eval(model, x_test, y_test, batch_size = cfg.evaluation_batch_size)
    if cfg.verbosity:
        print(
            "loss\t\t", r[0],
            "\naccuracy\t", r[1],
            "\nprecision\t", r[2],
            "\nrecall\t\t", r[3],
            "\nROC-AUC\t\t", r[4],
            "\nPR-AUC\t\t", r[5],
            "\nF1-score\t", r[6],
        )

    # prediction_dict = nn.get_ecg_predictions(model, data_x, fnames)
    # af_r, acc = nn.find_best_af_ratio(prediction_dict)
    # af_r_f.write(str(af_r) + "\n")

    # print(af_r, acc)

    # act_r = nn.evaluate_model(ecg_data_x, ecg_data_y, ecg_fnames, model)

    if cfg.logging:
        write_log(lead, model, r)
Example #4
def predict_nn(data, lr, num_epoch, lamb):
    """Train an autoencoder on the resampled training matrix and return its
    raw predictions for each (user, question) pair in `data`."""
    predictions = []
    zero_train_matrix, train_matrix, valid_data, test_data = load_data()
    zero_train_matrix, train_matrix = resample_tensor(
        [zero_train_matrix, train_matrix])
    nn_sample = torch.FloatTensor(train_matrix)
    nn_model = AutoEncoder(nn_sample.shape[1], 9)  # second argument: hidden-layer width (assumption)
    train(nn_model, lr, lamb, nn_sample, zero_train_matrix, data, num_epoch)

    for i in range(len(data["is_correct"])):
        uid = data["user_id"][i]
        qid = data["question_id"][i]
        inputs = Variable(zero_train_matrix[uid]).unsqueeze(0)
        outputs = nn_model(inputs)
        pred = outputs[0][qid]
        predictions.append(pred)
    return predictions
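A minimal sketch of how these raw predictions might be scored against the labels in `data`; the 0.5 threshold and the dict layout are assumptions carried over from the snippet, not part of the original example.

def evaluate_predictions(data, predictions, threshold=0.5):
    # Count predictions whose thresholded value matches the recorded label.
    correct = 0
    for i, pred in enumerate(predictions):
        if (pred.item() >= threshold) == bool(data["is_correct"][i]):
            correct += 1
    return correct / len(predictions)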
Example #5
def train(self, normalized_train_data, normalized_test_data):
    self.loss, self.accuracy = wnn.train(
        normalized_train_data,
        normalized_test_data,
        dense_layers=self.dense_layer,
        neurons_per_layer=self.neurons_per_layer,
        activation_per_layer=self.activation_per_layer,
        optimizer=self.optimizer,
        epochs=self.epochs,
        output_activation=self.output_activation,
    )
Example #6
def test_train_ocr():
    a, b, c, c_to_recognized = alphabet()
    inputLayerSize = len(a[0])
    hiddenLayerSize = 3 * inputLayerSize
    outputLayerSize = 1

    NN = Neural_Network(inputLayerSize=inputLayerSize,
                        hiddenLayerSize=hiddenLayerSize,
                        outputLayerSize=outputLayerSize)
    X = np.array((a[0], b[0], c[0]), dtype=float)
    y = np.array((a[1], b[1], c[1]), dtype=float)

    train(NN, X, y)

    X = np.array((c[0]), dtype=float)
    yHat = NN.forward(X)
    print('estimate for good C: {}'.format(yHat))
    X = np.array((c_to_recognized), dtype=float)
    yHat = NN.forward(X)
    print('estimate for bad C: {}'.format(yHat))
Example #7
def one_hot_encode(Y):
    n_labels = Y.shape[0]
    result = np.zeros((n_labels, 2))
    for i in range(n_labels):
        result[i][Y[i]] = 1
    return result
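For reference, a quick demonstration of one_hot_encode on a small label column (expected output shown as a comment):

labels = np.array([[0], [1], [1]])
print(one_hot_encode(labels))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]]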


x1, x2, y = np.loadtxt('non_linearly_separable.txt', skiprows=1, unpack=True)
X_train = X_test = np.column_stack((x1, x2))
Y_train_unencoded = Y_test = y.astype(int).reshape(-1, 1)
Y_train = one_hot_encode(Y_train_unencoded)
w1, w2 = nn.train(X_train, Y_train,
                  X_test, Y_test,
                  n_hidden_nodes=HIDDEN_NODES,
                  epochs=100000,
                  batch_size=X_train.shape[0],
                  lr=0.1)


# Generate a mesh over one-dimensional data
# (The mesh() and plot_boundary() functionality were inspired by the
# documentation of the BSD-licensed scikit-learn library.)
def mesh(values):
    # Evenly spaced samples over the value range, padded by 5% on each side.
    value_range = values.max() - values.min()
    padding_percent = 5
    padding = value_range * padding_percent * 0.01
    resolution = 1000
    interval = (value_range + 2 * padding) / resolution
    return np.arange(values.min() - padding, values.max() + padding, interval)
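A short sketch of how mesh() is commonly combined with np.meshgrid to build the grid behind a decision-boundary plot; the classify argument is a placeholder for whatever prediction function the plot uses, not part of the original code.

def boundary_grid(x1, x2, classify):
    # classify: any function mapping an (n, 2) array to n labels (assumed).
    xx, yy = np.meshgrid(mesh(x1), mesh(x2))
    zz = classify(np.column_stack((xx.ravel(), yy.ravel()))).reshape(xx.shape)
    return xx, yy, zz  # suitable for e.g. plt.contourf(xx, yy, zz)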
Example #8
def one_hot_encode(Y):
    n_labels = Y.shape[0]
    result = np.zeros((n_labels, 2))
    for i in range(n_labels):
        result[i][Y[i]] = 1
    return result


# Uncomment one of the next three lines to decide which dataset to load
# x1, x2, y = np.loadtxt('linearly_separable.txt', skiprows=1, unpack=True)
x1, x2, y = np.loadtxt('non_linearly_separable.txt', skiprows=1, unpack=True)
# x1, x2, y = np.loadtxt('circles.txt', skiprows=1, unpack=True)

# Train classifier
X_train = X_test = np.column_stack((x1, x2))
Y_train_unencoded = Y_test = y.astype(int).reshape(-1, 1)
Y_train = one_hot_encode(Y_train_unencoded)
w1, w2 = nn.train(X_train, Y_train,
                  X_test, Y_test,
                  n_hidden_nodes=10, iterations=100000, lr=0.3)

# Plot the axes
sns.set(rc={"axes.facecolor": "white", "figure.facecolor": "white"})
ax = plt.figure().add_subplot(projection="3d")
ax.set_zticks([0, 0.5, 1])
ax.set_xlabel("Input A", labelpad=15, fontsize=30)
ax.set_ylabel("Input B", labelpad=15, fontsize=30)
ax.set_zlabel("ŷ", labelpad=5, fontsize=30)

# Plot the data points
blue_squares = X_train[(Y_train_unencoded == 0).flatten()]
ax.scatter(blue_squares[:, 0], blue_squares[:, 1], 0, c='b', marker='s')
green_triangles = X_train[(Y_train_unencoded == 1).flatten()]
ax.scatter(green_triangles[:, 0], green_triangles[:, 1], 1, c='g', marker='^')
Example #9
    "\n--------------------------------  Weight Guess module----------------------------------------\n"
)
print(
    '## Training the individual atomic NNs to get a suitable starting point for actual training  ##\n\n'
)


def train(nn, a, e_ref, learning_rate):
    """A simple training function to train the atomic NN"""
    output = nn.forward_prop(a)
    w1, w2, w3, _, _, _ = nn.backward_prop(a, output, e_ref)
    nn.NN_optimize(w1, w2, w3, 0, 0, 0, learning_rate)


for i in range(10000):
    train(nn_Ti_1a, Ti_input, Ti_output, learning_rate=l_r)

predicted_energy = nn_Ti_1a.forward_prop(Ti_input)
print('Reference =', Ti_output, '-------------', 'Predicted = ',
      predicted_energy)

for i in range(10000):
    train(nn_O_1a, O_input, O_output, learning_rate=l_r)

predicted_energy2 = nn_O_1a.forward_prop(O_input)
print('Reference =', O_output, '-------------', 'Predicted = ',
      predicted_energy2)

Ti_weights = {
    'w1': nn_Ti_1a.weights1,
    'w2': nn_Ti_1a.weights2,
Example #10
def create_index(data, index):
    neural_network = data['data']
    index['data'] = nn.train(neural_network, data['tf'], data['idf'])
    printjson(index, index['current'])
Example #11
    y_train = training_values[:, 13]
    y_test = test_values[:, 13]

    x_train = training_values[:, 0:taken_columns]
    x_test = test_values[:, 0:taken_columns]

    nn.INPUT_LAYER_SIZE = taken_columns
    nn.HIDDEN_LAYERS = 1
    nn.HIDDEN_LAYER_SIZE = 15
    nn.OUTPUT_LAYER_SIZE = 1

    weights = nn.init_weights()
    biases = nn.init_bias()

    alpha = 3
    [weights, biases, cost_history] = nn.train(x_train, y_train, weights,
                                               biases, alpha, 50000)

    correct_predictions = 0

    # Count training examples whose thresholded prediction matches the label.
    for x, y in zip(x_train, y_train):
        pred = nn.feed_forward(x, weights, biases)
        if y == 1 and pred.sum() > 0.5:
            correct_predictions += 1
        elif y == 0 and pred.sum() <= 0.5:
            correct_predictions += 1

    print(
        str(correct_predictions) + " correct predictions from " +
        str(len(training_values)) + " train values")

    correct_predictions = 0
Example #12
# Compare the network's accuracy on MNIST with and without standardization.

import neural_network as nn
import mnist as normal
import mnist_standardized as standardized

print("Regular MNIST:")
nn.train(normal.X_train,
         normal.Y_train,
         normal.X_validation,
         normal.Y_validation,
         n_hidden_nodes=200,
         epochs=2,
         batch_size=60,
         lr=0.1)

print("Standardized MNIST:")
nn.train(standardized.X_train,
         standardized.Y_train,
         standardized.X_validation,
         standardized.Y_validation,
         n_hidden_nodes=200,
         epochs=2,
         batch_size=60,
         lr=0.1)
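For context, a hedged sketch of the kind of standardization the second module presumably applies: z-scoring the inputs with statistics computed on the training set only. The real mnist_standardized module may differ in detail.

import numpy as np

def standardize(X_train, X_other):
    # Use training-set statistics for both splits to avoid leakage.
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0) + 1e-8  # guard against zero-variance pixels
    return (X_train - mean) / std, (X_other - mean) / std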
Example #13
obj_linear_regression = linear_regression(train_input, train_output, learning_rate, max_steps, C=0.0)
linear_regression_weights = obj_linear_regression.weights
test_loss = mse(prediction=np.dot(test_input1, linear_regression_weights)[:, 0], target=test_output) / 100
test_loss1 = rmse(prediction=np.dot(test_input1, linear_regression_weights)[:, 0], target=test_output) / 100
print(f'\nMSE test loss in Linear Regression is \t {test_loss}')
print(f'RMSE test loss in Linear Regression is \t {test_loss1}\n')


# Neural Network
nn_max_epochs = 1000
nn_batch_size = 128
nn_learning_rate = 5e-8
num_layers = 1
num_units = 32
lamda = 0.00002
network = neural_network(train_input, num_layers, num_units)
optimizer = optimizer(nn_learning_rate)
train(network, optimizer, lamda, nn_batch_size, nn_max_epochs, train_input, train_output)
yout = network(test_input)
nn_test_loss = mse(yout, test_output) / 100000
nn_test_loss1 = rmse(yout, test_output) / 10000
nn_cross_entropy = cross_entrophy(yout, test_output)
print(f'\nMSE test loss in Neural Network is \t {nn_test_loss}')
print(f'RMSE test loss in Neural Network is \t {nn_test_loss1}')
print(f'Cross-entropy loss in Neural Network is \t {nn_cross_entropy}')
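The mse and rmse helpers are defined outside this snippet; for reference, the standard formulas they presumably implement (the project's own versions may scale differently, as the divisions above suggest):

import numpy as np

def mse(prediction, target):
    return float(np.mean((np.asarray(prediction) - np.asarray(target)) ** 2))

def rmse(prediction, target):
    return float(np.sqrt(mse(prediction, target)))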
Example #14
import neural_network as nn
import mnist_standardized as data

nn.train(data.X_train, data.Y_train, data.X_test, data.Y_test,
         n_hidden_nodes=100, epochs=10, batch_size=256, lr=1)
Example #15
#     plt.plot(X_pred[i])
#     plt.savefig(name)

x_event_lda = []
x_event_neural = []

print()
print("If you want to load model from file: 1")
print("If you want to train new model     : 0")
print()
model_load = input("Load model? 1/0: ")
if (model_load == '1'):
    config.model = load_model('save_models/mymodel_5.h5')
else:
    if (model_load == '0'):
        neural_network.train(x, y)
    else:
        print("Invalid option")

for i in range(data_predicting_count):
    x_event_lda.append(lda.solve(x, y, x_pred[i]))
    x_event_neural.append(neural_network.solve(x_pred[i]))

for i in range(data_predicting_count):
    print()
    print("##########################################")
    print()

    if i < config.instruction_files_to_pred:

        print(i + 1, ".) Expected solve: ", true_prediction[i])
Example #16
def train_temperature():
    neural_network.train(layers=4, debug=False)
    result = neural_network.kfold_test(5)
    return '{0}'.format(result)
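neural_network.kfold_test is external to this snippet; as a rough sketch of what a 5-fold evaluation like kfold_test(5) usually does (using scikit-learn's KFold purely for illustration, with caller-supplied train and eval functions):

from sklearn.model_selection import KFold
import numpy as np

def kfold_accuracy(X, y, train_fn, eval_fn, k=5):
    # Train on k-1 folds, score on the held-out fold, average the scores.
    scores = []
    for train_idx, test_idx in KFold(n_splits=k, shuffle=True).split(X):
        model = train_fn(X[train_idx], y[train_idx])
        scores.append(eval_fn(model, X[test_idx], y[test_idx]))
    return float(np.mean(scores))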
Example #17
words = []
classes = []
documents = []
ignore_words = ['?']

# create our training data
training = []
output = []

data.data_actualidad()
data.data_food()
data.data_games()
data.data_hobbie()

data.init_data(words, classes, documents, ignore_words)
data.init_training_data(training, output, words, classes, documents)

X = np.array(training)
y = np.array(output)

start_time = time.time()
hidden_neurons = 20
alpha = 0.1
epochs = 100000
dropout = False
dropout_percent = 0.2

nn.train(words, classes, X, y, hidden_neurons, alpha, epochs, dropout,
         dropout_percent)

elapsed_time = time.time() - start_time
print("processing time:", elapsed_time, "seconds")
Example #18
    x_train, y_train, x_val, y_val, x_test, y_test = nn.prepare_train_val_data(
        data_x,
        data_y,
        cfg.tvt_split,
        split_on=cfg.split_on,
        patient_ids=[dgen.filename_info(f, "ID") for f in fnames])

    cfg.train_size = x_train.shape[0]
    cfg.validation_size = x_val.shape[0]
    cfg.test_size = x_test.shape[0]

    model = ffnet((cfg.nn_input_size, ))
    nn.train(model,
             x_train,
             y_train,
             x_val,
             y_val,
             batch_size=cfg.training_batch_size,
             epochs=cfg.epochs,
             save=cfg.save_on_train)

    # Mean training waveform for each class (y == 0: "sine", y == 1: "af").
    x_sine = np.array([x for i, x in enumerate(x_train) if y_train[i] == 0])
    x_af = np.array([x for i, x in enumerate(x_train) if y_train[i] == 1])
    x_sine = np.mean(x_sine, axis=0)
    x_af = np.mean(x_af, axis=0)

    a1 = get_activations(model,
                         np.array([x_sine]),
                         print_shape_only=True,
                         layer_name='attention_vec')[0].flatten()

    a2 = get_activations(model,
                         np.array([x_af]),
                         print_shape_only=True,
                         layer_name='attention_vec')[0].flatten()