# ---- Beispiel #1 (score: 0) ----
        # decay
        if iter_n % args['decay_after'] == 0:
            model.decay_learning_rate()
            print 'New learning rate:', model.lr

        if SAVE_TO_DISK:
            with open(args['log_path'], 'a+') as f:
                spamwriter = csv.writer(f)
                spamwriter.writerow([
                    new_loss, l2_train, l2_test, l2_valid, mean_std_pred,
                    std_std_pred, log_valid, mean_std_class, std_std_class
                ])


if __name__ == '__main__':
    args, data_iter, test_iter, valid_data = parser.get_parse('train')

    # import model class
    module = __import__('models.' + args['method_name'])
    method_class = getattr(getattr(module, args['method_name']),
                           args['method_name'])

    model = method_class(args)
    if args['debug']:
        __print_model(model)
    if args['load_path'] != None:
        print 'Load model ...', args['load_path']
        model.load(args['load_path'])

    SAVE_TO_DISK = not args['debug']
    train(model, data_iter, test_iter, valid_data, args)
# ---- Beispiel #2 (score: 0) ----
            for i in range(self.periods):
                for x, y in iter1:
                    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
                        x, y, test_size=self.cv_splits)
                    self.model.fit([x_train, y_train],
                                   [x_train, y_train, x_train, y_train],
                                   shuffle=True,
                                   epochs=self.epochs,
                                   batch_size=self.batch_size,
                                   validation_data=([x_test, y_test], [
                                       x_test, y_test, x_test, y_test
                                   ]),
                                   callbacks=[self.history])

                y_test_decoded = self.translator.predict(x_test[:1])
                image.plot_batch_1D(y_test[:1], y_test_decoded)
                # self.model.save_weights(self.load_path, overwrite=True)
                iter1, iter2 = tee(iter2)

            # data_iterator = iter2

        # model_vars = [NAME, self.latent_dim, self.timesteps, self.batch_size]
        # embedding_plotter.see_embedding(self.encoder, data_iterator, model_vars)
        # self.history.record(self.log_path, model_vars)


if __name__ == '__main__':
    # Entry point: build the LSTM unit from parsed CLI config and train it.
    iterator, cfg = parser.get_parse(NAME)
    unit = LSTM_UNIT(cfg)
    unit.run(iterator)
# ---- Beispiel #3 (score: 0) ----
            data_iterator = iter2

        # metric_baselines.compare(self)
        # metrics.gen_long_sequence(valid_data, self)

        # embedding_plotter.see_hierarchical_embedding(self.encoder, self.decoder, data_iterator, valid_data, model_vars, self.label_dim)
        # iter1, iter2 = tee(data_iterator)
        # metrics.validate(valid_data, self)

        #nn = NN.Forward_NN({'input_dim':self.latent_dim, 'output_dim':self.latent_dim, 'mode':'sample'})
        #nn.run(None)
        # metrics.plot_metrics(self, data_iterator, valid_data)
        # association_evaluation.plot_best_distance_function(self, valid_data, data_iterator)
        # association_evaluation.eval_generation(self, valid_data, data_iterator)
        # association_evaluation.eval_center(self, valid_data, 'sitting')
        # association_evaluation.transfer_motion(self, valid_data, 'sitting', 'walking', data_iterator)
        # association_evaluation.plot_transfer_motion(self, '../new_out/transfer_motion-sitting-to-greeting-scores.npy')
        # association_evaluation.plot_transfer_motion(self, '../new_out/transfer_motion-sitting-to-walking-scores.npy')
        # association_evaluation.eval_generation_from_label(self, data_iterator)
        # association_evaluation.plot_add(self, data_iterator)
        # metrics.plot_metrics_labels(self, data_iterator, valid_data)
        # metric_baselines.compare_label_embedding(self, nn, data_iterator)
        # association_evaluation.eval_distance(self, valid_data)
        # evaluate.eval_pattern_reconstruction(self.encoder, self.decoder, iter2)


if __name__ == '__main__':
    # Entry point: parse args (with labels) and run the hierarchical LSTM.
    train_iter, held_out, cfg = parser.get_parse(NAME, labels=True)
    model = R_H_LSTM(cfg)
    model.run(train_iter, held_out)
# ---- Beispiel #4 (score: 0) ----
        if self.trained:
            self.model.load_weights(self.load_path)
            # blah

        else:
            iter1, iter2 = tee(data_iterator)
            for i in range(self.periods):
                for x, y in iter1:
                    norm_y = y / np.pi / 2
                    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
                        x, norm_y, test_size=self.cv_splits)
                    self.model.fit(x_train,
                                   y_train,
                                   shuffle=True,
                                   epochs=self.epochs,
                                   batch_size=self.batch_size,
                                   validation_data=(x_test, y_test))

                    y_test_decoded = self.model.predict(x_test[:1])
                    image.plot_batch_1D(y_test[:1], y_test_decoded)
                    # self.model.save_weights(self.load_path, overwrite=True)

                iter1, iter2 = tee(iter2)


if __name__ == '__main__':
    # Entry point: build the stacked LSTM from the 'LSTM_AE' config and train.
    iterator, cfg = parser.get_parse('LSTM_AE')
    autoencoder = Stacked_LSTM(cfg)
    autoencoder.run(iterator)
# ---- Beispiel #5 (score: 0) ----
                    #score_name = score_name + '%s:%2f_' %(method, error[basename][method]['euler'][-1])

                    #poses[k+2] = self.recover(model_pred[:1])

                    # error[method]['z'] = np.mean([np.linalg.norm(new_enc[i] - enc[i,-1]) for i in range(_N)])
                    # print error[method]['z']

                    # for i in range(_N):
                    # 	pose_err = metrics.pose_seq_error(gtp_x[i], model_pred[i,:,:self.euler_start], cumulative=True)
                    # 	error[method]['pose'] = error[method]['pose'] + np.array(pose_err)
                    # error[method]['pose'] = error[method]['pose']/_N
                    # print error[method]['pose']
                    # error[method]['pose'] = error[method]['pose'].tolist()

                #poses = np.concatenate(poses, axis=0)
                #image.plot_fk_from_euler(poses, title='%s_gt_agt_%s'%(basename, score_name), image_dir='../new_out/')

            with open(
                    '../new_out/zero_velocity_validation-testset-mseMartinez.json',
                    'wb') as result_file:
                json.dump(error, result_file)

            #with open('../new_out/%s_t%d_l%d_opt-%s_validation-testset-mseMartinez.json'%(NAME, self.timesteps, self.latent_dim, self.loss_opt_str), 'wb') as result_file:
            #	json.dump(error, result_file)


if __name__ == '__main__':
    # Entry point: parse training iterator + validation split, then run.
    train_iter, held_out, cfg = parser.get_parse(NAME)
    model = H_euler_RNN_R(cfg)
    model.run(train_iter, held_out)
# ---- Beispiel #6 (score: 0) ----
		opt = RMSprop(lr=self.lr)
		self.autoencoder.compile(optimizer=opt, loss=self.loss)

	# def run(self, data_iterator):
	# 	self.load()
	# 	if not self.trained:
	# 		# from keras.utils import plot_model
	# 		# plot_model(self.autoencoder, to_file='model.png')
	# 		for x in data_iterator:
	# 			x_data = self.alter_label(x)
	# 			x_train, x_test, y_train, y_test = cross_validation.train_test_split(x_data, x_data, test_size=self.cv_splits)
	# 			y_train = self.alter_y(y_train)
	# 			y_test = self.alter_y(y_test)
	# 			print x_train.shape, x_test.shape, y_train.shape, y_test.shape
	# 			#from utils import image
 #                #xyz = translate__.batch_expmap2xyz(y_train[:5,:5], self)
 #                #image.plot_poses(xyz)

	# 			history = self.autoencoder.fit(x_train, y_train,
	# 						shuffle=True,
	# 						epochs=self.epochs,
	# 						batch_size=self.batch_size,
	# 						validation_data=(x_test, y_test))

	# 			self.post_train_step(history.history['loss'][0], x_test)

if __name__ == '__main__':
	# Entry point: parse generator/test split and run the recurrent model.
	generator, holdout, cfg = parser.get_parse(MODEL_NAME, HAS_LABELS)
	model = R_RNN(cfg, HAS_LABELS)
	model.run(generator, holdout, HAS_LABELS)
import time
import os
import h5py
import numpy as np
from tqdm import tqdm
from utils.parser import get_parse
from utils.testdataset import configdataset
from utils.general import get_data_root, htime
from utils.evaluate import compute_map_and_print

# Parse command-line options; the code below reads args.datasets and args.features.
args = get_parse()

# Comma-separated list of evaluation dataset names.
datasets = args.datasets.split(',')

# Precomputed feature vectors, keyed by image basename (see get_features below).
# NOTE(review): the handle stays open for the whole run and is never closed.
h5 = h5py.File(args.features, 'r')
keys = list(h5.keys())


def get_features(images):
    """Fetch the stored feature vector for every image path in *images*.

    Each path is keyed into the global ``h5`` file by its basename with
    everything after the first '.' stripped. Returns the vectors stacked
    into a single numpy array; progress is shown via tqdm.
    """
    gathered = [
        np.array(h5[os.path.basename(path).split('.')[0]])
        for path in tqdm(images)
    ]
    return np.asarray(gathered)


for dataset in datasets:
    start = time.time()
    cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
    images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]