예제 #1
0
    def __init__(self, train):
        """Build the GRU graph and wrap it in a MultiRNNModel.

        Parameters
        ----------
        train : bool
            When True, read the training and testing datasets, convert
            them to the RNN input format, and hand them to the model.
            When False, build the graph without any data (e.g. for
            restoring from a checkpoint / inference).
        """
        # All hyper-parameters come from the shared `env` configuration.
        dataset_number = env.DATASET_NUMBER
        num_dataset_to_use = env.NUM_DATASET_TO_USE
        chords_in_vector = env.CHORDS_IN_BAR * dataset_number
        chord_length = env.CHORD_LENGTH

        num_layers = env.NUM_LAYERS
        num_hidden = env.NUM_HIDDEN
        batch_size = env.BATCH_SIZE
        epoch = env.EPOCH
        dropout_pb = env.DROPOUT_PB

        # Unique scope per configuration so several graphs can coexist
        # in the same TF default graph without variable-name collisions.
        scope_name = "{}{}{}{}".format(
            dataset_number, num_dataset_to_use, chords_in_vector, chord_length)
        with tf.variable_scope(scope_name):

            # [batch size, time steps (chords), features per step].
            data = tf.placeholder(tf.float64, [None, chords_in_vector, chord_length])
            target = tf.placeholder(tf.float64, [None, 2])

            cell = tf.nn.rnn_cell.GRUCell(num_hidden)

            reader = Reader()

            if train:
                reader.read_training_data(dataset_number, num_dataset_to_use)
                reader.read_testing_data(dataset_number, num_dataset_to_use)
                # Make sure the data has the correct format for the RNN.
                reader.convert_to_rnn_format(chords_in_vector, chord_length)

                self.model = MultiRNNModel(
                    cell, data, target, train, batch_size, epoch, dropout_pb,
                    num_hidden, num_layers,
                    reader.training_attributes, reader.training_labels,
                    reader.testing_attributes, reader.testing_labels)
            else:
                # Inference path: no datasets are loaded; the model is
                # constructed without attribute/label tensors.
                self.model = MultiRNNModel(
                    cell, data, target, train, batch_size, epoch, dropout_pb,
                    num_hidden, num_layers)
예제 #2
0
from pprint import pprint
from random import randint

import numpy as np

from reader import Reader

# Readable float output for any printed arrays.
np.set_printoptions(precision=6, suppress=True)

# GRU

# Dataset selection: which preprocessed dataset to load and how many
# vectors of it to use.
dataset_number = 8
num_dataset_to_use = 10000

reader = Reader()
reader.read_training_data(dataset_number, num_dataset_to_use)
reader.read_testing_data(dataset_number, num_dataset_to_use)

# SVM hyper-parameters (regularization strength and RBF kernel width).
C = 1.0
gamma = 0.1

model = SVMModel(C, gamma)

model.train(reader.training_attributes, reader.training_labels)

# NOTE(review): model.test is treated as returning an accuracy fraction
# in [0, 1] (the output below multiplies by 100 and labels it
# "Accuracy"), although the originals were named *_error — confirm
# against SVMModel.test.
train_accuracy = model.test(reader.training_attributes, reader.training_labels)

test_accuracy = model.test(reader.testing_attributes, reader.testing_labels)

print("Train Accuracy {:2.2f}%".format(train_accuracy * 100))
print("Test Accuracy {:2.2f}%".format(test_accuracy * 100))
예제 #3
0
#####################################################################

import csv
import cv2
import numpy as np
import time
import smtplib
from email.mime.text import MIMEText
from reader import Reader

SENDING_EMAIL = True

for dataset_number in [1, 4, 8]:
    reader = Reader()
    reader.read_testing_data(dataset_number, 30000)
    for num_dataset_to_use in [100, 1000, 10000, 15000, 20000, 25000, 30000, np.inf]:
        reader.read_training_data(dataset_number, num_dataset_to_use)
        print("Loaded dataset %s, %s vectors" % (dataset_number, num_dataset_to_use))

        start = time.time()

        trainingset_size = len(reader.training_attributes)
        testingset_size = len(reader.testing_attributes)
        dataset_size = trainingset_size + testingset_size
        feature_vector_length = len(reader.training_attributes[0])

        ############ Perform Training -- Random Forest

        # define random forest object