Example #1
0
    def generate_from_dnn(data_dir, seql, generate_x_blocks, hidden_dim):
        """Generate new FFT blocks from a trained DNN.

        For every ``<name>_x.npy`` array under ``<data_dir>/fft``, builds the
        current model, loads matching weights from ``<data_dir>/weights`` when
        present, then iteratively predicts ``generate_x_blocks`` new blocks and
        saves them as a numpy array under ``<data_dir>/gen``.

        Parameters:
            data_dir: root directory containing 'fft', 'weights' and 'gen'.
            seql: sequence length — number of blocks fed to the model per step.
            generate_x_blocks: number of output blocks to generate per file.
            hidden_dim: hidden-layer size passed to nntools.get_current_model.
        """
        fft_glob = os.path.join(data_dir, 'fft', '*.npy')

        weights_dir = os.path.join(data_dir, 'weights')
        gen_dir = os.path.join(data_dir, 'gen')

        datatools.ensure_dir_exists(weights_dir)
        datatools.ensure_dir_exists(gen_dir)

        # Strip the _x/_y suffixes so each base name appears exactly once.
        # (builtin set() replaces the removed Python 2 sets.Set)
        filenames = {g.replace('_x.npy', '').replace('_y.npy', '')
                     for g in glob.glob(fft_glob)}

        for f in filenames:
            X_train = np.load(f + '_x.npy')
            filename = os.path.basename(f)  # portable, unlike split('/')
            weight_file = os.path.join(weights_dir, filename + '.hdf5')
            trained_file_location = os.path.join(gen_dir, filename)

            write_flush("# Generating for '{}'\n\n".format(filename))
            write_flush("-- Preparing data...")
            # Rolling buffer: seeded with the first seql real blocks, with room
            # appended for the generated continuation.
            output = np.zeros((generate_x_blocks + seql, X_train.shape[1], X_train.shape[2]))
            output = np.append(X_train[0:seql], output, axis=0)
            fft_output = np.zeros((generate_x_blocks, X_train.shape[2]))
            write_flush("finished.\n")

            write_flush("-- Building dnn...")
            model = nntools.get_current_model(X_train.shape[2], hidden_dim)
            write_flush("finished.\n")

            write_flush("-- Loading weights...")
            if os.path.exists(weight_file):
                model.load_weights(weight_file)
            write_flush("finished.\n")

            write_flush("-- Generating...")
            for i in range(fft_output.shape[0]):
                write_flush("\r-- Generating... ({}/{})".format(i, fft_output.shape[0]))

                next_val = model.predict(output[i:i + seql])

                # Feed the predicted sequence back into the rolling buffer.
                # NOTE(review): row index i+seql+1 (not i+seql) looks like an
                # off-by-one but is preserved from the original — confirm.
                for k in range(0, seql - 1):
                    for x in range(0, output.shape[2]):
                        output[i + seql + 1][k][x] = next_val[k][x]

                # The last predicted block is the generated output for step i.
                # Hoisted out of the k-loop: the original rewrote this value
                # identically seql-1 times (and skipped it when seql == 1,
                # leaving fft_output all zeros).
                for x in range(0, output.shape[2]):
                    fft_output[i][x] = next_val[seql - 1][x]

            write_flush("\r-- Generating... finished.\n")
            write_flush("-- Saving numpy array...")
            np.save(trained_file_location, fft_output)
            write_flush("finished.\n\n")
Example #2
0
    def train_dnn(data_dir, epochs_per_round, max_training_iterations, hidden_dim):
        """Train the current DNN on every FFT example pair in ``data_dir``.

        For every ``<name>_x.npy`` / ``<name>_y.npy`` pair under
        ``<data_dir>/fft``, builds the current model, resumes from the best
        checkpoint in ``<data_dir>/weights`` when one exists, and trains in
        rounds of ``epochs_per_round`` epochs until ``max_training_iterations``
        total epochs have been run, checkpointing the best weights each round.

        Parameters:
            data_dir: root directory containing the 'fft' data subdirectory.
            epochs_per_round: epochs per fit() call; must be positive.
            max_training_iterations: total epoch budget per file.
            hidden_dim: hidden-layer size passed to nntools.get_current_model.

        Raises:
            ValueError: if epochs_per_round is not positive (the original
                looped forever in that case).
        """
        if epochs_per_round <= 0:
            raise ValueError('epochs_per_round must be positive')

        fft_glob = os.path.join(data_dir, 'fft', '*.npy')

        weights_dir = os.path.join(data_dir, 'weights')
        datatools.ensure_dir_exists(weights_dir)

        # Strip the _x/_y suffixes so each base name appears exactly once.
        # (builtin set() replaces the removed Python 2 sets.Set)
        filenames = {g.replace('_x.npy', '').replace('_y.npy', '')
                     for g in glob.glob(fft_glob)}

        for f in filenames:
            X_train = np.load(f + '_x.npy')
            y_train = np.load(f + '_y.npy')
            filename = os.path.basename(f)  # portable, unlike split('/')
            weight_file = os.path.join(weights_dir, filename + '.hdf5')

            write_flush('# Training on \'{}\'\n\n'.format(filename))
            write_flush('-- Building dnn...')
            model = nntools.get_current_model(X_train.shape[2], hidden_dim)
            write_flush('finished.       \n')
            # Persist only the best (lowest validation loss) weights.
            checkpointer = ModelCheckpoint(filepath=weight_file, verbose=0, save_best_only=True)

            i = 0
            write_flush('-- Training model... ({}/{})'.format(i, max_training_iterations))
            while i < max_training_iterations:
                # Resume from the best checkpoint so each round continues
                # from the best weights seen so far, not the last fit state.
                if os.path.exists(weight_file):
                    model.load_weights(weight_file)

                # NOTE(review): nb_epoch is the legacy Keras 1.x kwarg; rename
                # to epochs=... only after confirming the Keras version in use.
                model.fit(X_train, y_train, nb_epoch=epochs_per_round, verbose=0, validation_split=0.1, callbacks=[checkpointer])

                i += epochs_per_round
                write_flush('\r-- Training model... ({}/{})'.format(i, max_training_iterations))

            write_flush('\r-- Training model...finished.\n')