Example #1
 def load_validation_data(self, load_path):
     y = [None] * 15
     i = 0
     for basename in metric_baselines.iter_actions():
         y[i] = np.load(load_path + 'euler/' + basename +
                        '_cond.npy')[:, -self.timesteps:]
         i += 1
     y = np.concatenate(y, axis=0)
     y, x = self.__alter_parameterization(y)
     return y, x
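
A minimal standalone sketch of what this loader assumes and does: one <action>_cond.npy array of shape (batch, frames, dims) per action under <load_path>/euler/, of which the last timesteps frames are kept and all 15 actions stacked along the batch axis. iter_actions comes from metric_baselines in the original; the helper name below is hypothetical.

import numpy as np

def load_cond_sequences(load_path, actions, timesteps):
    # Keep the last `timesteps` frames of each action's conditioning
    # sequence, then stack all actions along the batch axis.
    seqs = [np.load(load_path + 'euler/' + a + '_cond.npy')[:, -timesteps:]
            for a in actions]
    return np.concatenate(seqs, axis=0)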
Example #2
	def load_validation_data(self, load_path):
		y = [None]*15
		i = 0
		cut = (self.predict_hierarchies[0]+1)*self.partial_ts
		for basename in metric_baselines.iter_actions():
			cond = np.load(load_path + 'euler/' + basename + '_cond.npy')[:,-self.timesteps:]
			y[i] = np.zeros(cond.shape)
			y[i][:,:cut] = cond[:,-cut:]
			y[i][:,cut:] = np.load(load_path + 'euler/' + basename + '_gt.npy')[:,:self.timesteps-cut]
			i += 1
		y = np.concatenate(y, axis=0)
		print y.shape
		y, x = self.__alter_parameterization(y)
		return y, x
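
The splice above is the subtle part: the first cut frames of each window are the tail of the conditioning sequence, and the rest are the head of the ground-truth continuation. A toy run with hypothetical shapes makes the index arithmetic concrete:

import numpy as np

cond = np.arange(10).reshape(1, 10)    # conditioning frames 0..9
gt = np.arange(10, 20).reshape(1, 10)  # continuation frames 10..19
cut, timesteps = 4, 8

y = np.zeros((1, timesteps), dtype=int)
y[:, :cut] = cond[:, -cut:]            # tail of cond: 6 7 8 9
y[:, cut:] = gt[:, :timesteps - cut]   # head of gt:   10 11 12 13
print(y)                               # [[ 6  7  8  9 10 11 12 13]]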
Example #3
	def load_validation_data(self, load_path):
		x = [None]*15
		y = [None]*15
		i = 0
		for basename in metric_baselines.iter_actions():
			x[i] = np.load(load_path + 'euler/' + basename + '_cond.npy')[:,-self.timesteps:]
			y[i] = np.load(load_path + 'euler/' + basename + '_gt.npy')[:,:self.timesteps_out]
			i += 1
		x = np.concatenate(x, axis=0)
		y = np.concatenate(y, axis=0)
		# image.plot_fk_from_euler(y[:2,10:20], title='test')
		# image.plot_fk_from_euler(y[2:4,10:20], title='test')

		_,x = self.__alter_parameterization(x)
		y = wrap_angle(y)
		return y, x
Example #4
    def run(self, data_iterator, valid_data):
        model_vars = [NAME, self.latent_dim, self.timesteps, self.batch_size]
        self.get_ignored_dims()

        used_idx = [
            6, 7, 8, 9, 12, 13, 14, 15, 21, 22, 23, 24, 27, 28, 29, 30, 36, 37,
            38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 51, 52, 53, 54, 55, 56, 57,
            60, 61, 62, 75, 76, 77, 78, 79, 80, 81, 84, 85, 86
        ]

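        # euler_error below recovers angles from the network's
        # parameterization, takes the per-frame L2 norm of the wrapped
        # angle differences over the evaluated joint dimensions (used_idx),
        # and averages over the batch, giving one error per timestep.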
        def euler_error(yTrue, yPred):
            yPred = self.__recover_parameterization(yPred)[:, :, used_idx]
            error = np.square(wrap_angle(yTrue[:, :, used_idx]) - yPred)
            error = np.sum(error, -1)
            error = np.sqrt(error)
            return np.mean(error, 0)

        def wrap_angle(rad):
            return (rad + np.pi) % (2 * np.pi) - np.pi
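        # Worked example: wrap_angle maps any angle into [-pi, pi), e.g.
        # wrap_angle(3.5 * np.pi) == -0.5 * np.pi; wrap_angle(-np.pi) == -np.pi.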

        if not self.load():
            # from keras.utils import plot_model
            # plot_model(self.autoencoder, to_file='model.png')
            loss = 10000
            iter1, iter2 = tee(data_iterator)
            for i in range(self.periods):
                for x_data, y_data in iter1:
                    # x_data, y_data = self.__alter_label(x, y)
                    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(
                        x_data, y_data, test_size=self.cv_splits)
                    _y_train = self.__alter_parameterization(y_train_)
                    _y_test = self.__alter_parameterization(y_test_)
                    y_train = self.__alter_y(_y_train)
                    y_test = self.__alter_y(_y_test)
                    # print np.sum(y_train[:,0,-self.label_dim:], axis=0)
                    history = self.autoencoder.fit(_y_train,
                                                   y_train,
                                                   shuffle=True,
                                                   epochs=self.epochs,
                                                   batch_size=self.batch_size,
                                                   validation_data=(_y_test,
                                                                    y_test))

                    new_loss = np.mean(history.history['loss'])
                    if new_loss < loss:
                        loss = new_loss
                        print 'Saved model - ', loss
                        # y_test_decoded = self.autoencoder.predict(x_test[:1])
                        # y_test_decoded = np.reshape(y_test_decoded, (len(self.hierarchies), self.timesteps, -1))
                        # image.plot_poses(x_test[:1,:,:-self.label_dim], y_test_decoded[:,:,:-self.label_dim])
                        # image.plot_hierarchies(y_test_orig[:,:,:-self.label_dim], y_test_decoded[:,:,:-self.label_dim])
                        self.autoencoder.save_weights(self.save_path,
                                                      overwrite=True)
                    rand_idx = np.random.choice(x_test.shape[0],
                                                25,
                                                replace=False)
                    #metrics.validate(x_test[rand_idx], self, self.log_path, history.history['loss'])
                    y_test_pred = self.encoder.predict(_y_test[rand_idx])[:,
                                                                          -1]
                    y_test_pred = self.decoder.predict(y_test_pred)
                    mse_ = np.mean(
                        np.square(y_test[rand_idx, -self.timesteps:] -
                                  y_test_pred))

                    y_test_gt = y_test_[rand_idx]
                    mse = euler_error(y_test_gt, y_test_pred)
                    y_test_pred = self.__recover_parameterization(y_test_pred)
                    mae = np.mean(
                        np.abs(
                            np.arctan2(np.sin(y_test_gt - y_test_pred),
                                       np.cos(y_test_gt - y_test_pred))))
                    wrap_mae = np.mean(
                        np.abs(
                            wrap_angle(y_test_gt[:, :, used_idx]) -
                            y_test_pred[:, :, used_idx]))
                    print 'MSE_Sin_Cos', mse_
                    print 'MAE', mae
                    print 'Wrap_MAE', wrap_mae
                    print 'MSE', mse

                    with open(
                            '../new_out/%s_t%d_l%d_log.csv' %
                        (NAME, self.timesteps, self.latent_dim), 'a+') as f:
                        spamwriter = csv.writer(f)
                        spamwriter.writerow([
                            new_loss, mse_, mae, wrap_mae, mse, LEARNING_RATE
                        ])

                    del x_train, x_test, y_train, y_test, y_train_, y_test_
                iter1, iter2 = tee(iter2)

            data_iterator = iter2
        else:
            # load embedding
            embedding = []
            for _, y in data_iterator:
                y = self.__alter_parameterization(y)
                e = self.encoder.predict(y)
                if len(embedding) == 0:
                    embedding = e[:, self.hierarchies]
                else:
                    embedding = np.concatenate(
                        (embedding, e[:, self.hierarchies]), axis=0)
                break
            embedding = np.array(embedding)
            print 'emb', embedding.shape
            mean_diff, diff = metrics.get_embedding_diffs(
                embedding[:, 1], embedding[:, 0])

            load_path = '../human_motion_pred/baselines/euler/'
            cut = self.hierarchies[0]
            methods = ['closest_partial', 'mean-100', 'add']
            _N = 8
            pred_n = self.hierarchies[1] - cut

            error = {m: np.zeros((15, pred_n)) for m in methods}
            a_n = 0
            for basename in metric_baselines.iter_actions():
                print basename, '================='

                cond = np.zeros((_N, self.timesteps, self.input_dim))
                cond[:, -cut - 1:] = self.__alter_parameterization(
                    np.load(load_path + basename + '_cond.npy')[:, -cut - 1:])
                # pd = np.load(load_path + basename + '_pred.npy')
                gtp = np.load(load_path + basename + '_gt.npy')[:, :pred_n]

                enc = self.encoder.predict(cond)[:, cut]

                # autoencoding error
                autoenc = self.decoder.predict(enc)[:, :cut + 1]
                print euler_error(cond[:, :cut + 1], autoenc)

                for method in methods:
                    new_enc = np.zeros(enc.shape)
                    for i in range(_N):
                        if method == 'closest_partial':
                            new_enc[i] = metrics.closest_partial_index(
                                embedding[:, 0], enc[i])
                        elif method == 'mean-100':
                            new_enc[i] = metrics.closest_mean(embedding[:, 1],
                                                              enc[i],
                                                              n=100)
                        elif method == 'add':
                            new_enc[i] = enc[i] + mean_diff

                    model_pred = self.decoder.predict(new_enc)[:, cut + 1:]
                    error[method][a_n] = euler_error(gtp, model_pred)
                    #print method
                    #print error[method][a_n]

                a_n += 1
            print 'total ================='
            for method in methods:
                print np.mean(error[method], 0)
                error[method] = error[method].tolist()

            with open(
                    '../new_out/%s_t%d_l%d_compared.json' %
                (NAME, self.timesteps, self.latent_dim), 'wb') as result_file:
                json.dump(error, result_file)
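
The 'add' method above rests on a single latent-space assumption: the average offset between full-sequence and partial-sequence embeddings acts as a constant completion vector. A self-contained sketch of that step (the helper name is illustrative; metrics.get_embedding_diffs in the original presumably returns this mean together with the per-sample diffs):

import numpy as np

def mean_offset_predict(embedding, enc):
    # embedding[:, 0]: partial-sequence encodings
    # embedding[:, 1]: matching full-sequence encodings
    diff = embedding[:, 1] - embedding[:, 0]  # per-sample completion offsets
    mean_diff = diff.mean(axis=0)             # the 'add' vector
    return enc + mean_diff                    # predicted full-sequence encoding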
Example #5
    def run(self, data_iterator, valid_data):
        load_path = '../human_motion_pred/baselines/'
        # model_vars = [NAME, self.latent_dim, self.timesteps, self.batch_size]
        if not self.load():
            test_data_y, test_data_x = self.load_validation_data(load_path)
            test_data_y = wrap_angle(test_data_y)
            print self.loss_opt_str

            # from keras.utils import plot_model
            # plot_model(self.autoencoder, to_file='model.png')
            loss = 10000
            iter1, iter2 = tee(data_iterator)
            for i in range(self.periods):
                for x, _ in iter1:
                    #image.plot_fk_from_euler(x[:3], title='test')
                    y, x = self.__alter_parameterization(x)
                    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
                        x, x, test_size=self.cv_splits)
                    y_train = self.__alter_y(y_train)
                    y_test = self.__alter_y(y_test)

                    history = self.autoencoder.fit(x_train,
                                                   y_train,
                                                   shuffle=True,
                                                   epochs=self.epochs,
                                                   batch_size=self.batch_size,
                                                   validation_data=(x_test,
                                                                    y_test))
                    # callbacks=[tbCallBack])

                    print history.history['loss']
                    new_loss = np.mean(history.history['loss'])
                    if new_loss < loss:
                        self.autoencoder.save_weights(self.save_path,
                                                      overwrite=True)
                        loss = new_loss
                        print 'Saved model - ', loss

                    rand_idx = np.random.choice(x.shape[0],
                                                5000,
                                                replace=False)
                    # y_test_pred = self.encoder.predict(x[rand_idx])[:,-1]
                    # y_test_pred = self.decoder.predict(y_test_pred)
                    # #y_gt = x_test[rand_idx]
                    # y_test_pred = self.unormalize_angle(y_test_pred)
                    y_gt = wrap_angle(y[rand_idx])

                    # mae = np.mean(np.abs(y_gt-y_test_pred))
                    # mse = self.euler_error(y_gt, y_test_pred)
                    mse = self.validate_autoencoding(x[rand_idx], y_gt)
                    mse_test = self.validate_autoencoding(
                        test_data_x, test_data_y)
                    add_std, mse_pred = self.validate_prediction(
                        x[rand_idx], test_data_x, test_data_y)

                    # print 'MAE', mae
                    add_mean_std, add_std_std = np.mean(add_std), np.std(
                        add_std)
                    print 'STD', add_mean_std, add_std_std
                    print 'MSE', np.mean(mse)
                    print 'MSE TEST', np.mean(mse_test)
                    print 'MSE PRED', mse_pred[[-9, -7, -3, -1]]

                    with open(
                            '../new_out/%s_t%d_l%d_%s_log.csv' %
                        (NAME, self.timesteps, self.latent_dim,
                         self.loss_opt_str), 'a+') as f:
                        spamwriter = csv.writer(f)
                        spamwriter.writerow([
                            new_loss, mse, mse_test, mse_pred, add_mean_std,
                            add_std_std, self.loss_opt_str
                        ])

                iter1, iter2 = tee(iter2)

            data_iterator = iter2
        else:
            # load embedding
            embedding = []
            i = 0
            for x, y in data_iterator:
                y, norm_y = self.__alter_parameterization(x)
                e = self.encoder.predict(norm_y)
                #y_test_pred = self.decoder.predict(e[:,-1])
                #y_test_pred = self.unormalize_angle(y_test_pred)
                #y_gt = wrap_angle(y)
                #print self.euler_error(y_gt, y_test_pred)
                #np.save('../data/embedding/t40-l1024-euler-nadam-meanAbs-lr0.0001/emb_%d.npy'%i, e[:,self.predict_hierarchies])
                #i += 1
                #print i
                #continue
                if len(embedding) == 0:
                    embedding = e[:, self.predict_hierarchies]
                else:
                    embedding = np.concatenate(
                        (embedding, e[:, self.predict_hierarchies]), axis=0)
                #break
            #return
            embedding = np.array(embedding)
            print 'emb', embedding.shape
            mean_diff, diff = metrics.get_embedding_diffs(
                embedding[:, 1], embedding[:, 0])
            print 'std', np.std(diff)

            _N = 8
            #methods = ['closest', 'closest_partial', 'mean-5', 'add', 'fn']
            methods = ['0v']
            nn = NN.Forward_NN({
                'input_dim': self.latent_dim,
                'output_dim': self.latent_dim,
                'mode': 'sample'
            })
            nn.run(None)
            #nn = None

            cut_e = self.predict_hierarchies[0]
            cut_x = self.hierarchies[0] + 1
            pred_n = self.hierarchies[1] - cut_x + 1
            error = {
                act: {m: {'euler': None} for m in methods}
                for act in metric_baselines.iter_actions()
            }

            # a_n = 0
            for basename in metric_baselines.iter_actions():
                print basename, '================='

                # x, y = valid_data
                # x = np.zeros((_N, self.timesteps, 96))
                # x[:,:cut_x+1] = np.load(load_path + 'xyz/' + basename + '_cond.npy')[:,-cut_x-1:]

                y = np.zeros((_N, self.timesteps, 99))
                y[:, :cut_x] = np.load(load_path + 'euler/' + basename +
                                       '_cond.npy')[:, -cut_x:]

                # y = np.load(load_path + 'euler/' + basename + '_cond.npy')[:,-25:]
                # gtp_x = np.load(load_path + 'xyz/' + basename + '_gt.npy')[:,:pred_n][:,:,self.used_xyz_idx]

                gtp_y_orig = np.load(load_path + 'euler/' + basename +
                                     '_gt.npy')[:, :pred_n]
                #print np.mean(np.abs(y[:,cut_x-2] - y[:,cut_x-1])), np.mean(np.abs(y[:,cut_x-1] - gtp_y[:,0]))
                #y[:,cut_x:] = gtp_y
                #image.plot_fk_from_euler(gtp_y[:4])
                #image.plot_fk_from_euler( wrap_angle(gtp_y[:4]))
                gtp_y = wrap_angle(gtp_y_orig[:, :, self.used_euler_idx])

                # rand_idx = np.random.choice(x.shape[0], _N, replace=False)
                # x, y, xy = self.__merge_n_reparameterize(x[rand_idx],y[rand_idx], True)

                y, norm_y = self.__alter_parameterization(y)
                y = wrap_angle(y)
                enc = self.encoder.predict(norm_y)
                partial_enc = enc[:, cut_e]
                #y = wrap_angle(y[:_N])
                #partial_enc = e[:_N, cut_e]

                # autoencoding error for partial seq
                #dec = self.decoder.predict(enc[:,-1])
                #dec = self.unormalize_angle(dec)
                #print self.euler_error(y, dec)
                #image.plot_poses_euler(x[:2,:cut+1], dec[:2,:,:self.euler_start], title='autoencoding', image_dir='../new_out/')

                fn_pred = nn.model.predict(partial_enc)
                #poses = [None]*(len(methods)+2)
                #poses[1] = self.recover(gtp_y[0:1])
                #poses[0] = gtp_y_orig[0:1]
                #score_name = ''

                for k, method in enumerate(methods):
                    new_enc = np.zeros(partial_enc.shape)
                    for i in tqdm(range(_N)):
                        if method == 'closest_partial':
                            idx = metrics.closest_partial_index(
                                embedding[:, 0], partial_enc[i])
                            new_enc[i] = embedding[idx, 1]
                        elif 'mean' in method:
                            n = int(method.split('-')[1])
                            new_enc[i] = metrics.closest_mean(embedding[:, 1],
                                                              partial_enc[i],
                                                              n=n)
                        elif method == 'add':
                            new_enc[i] = partial_enc[i] + mean_diff
                        elif method == 'closest':
                            new_enc[i] = metrics.closest(
                                embedding[:, 1], partial_enc[i])
                        elif method == 'fn':
                            new_enc[i] = fn_pred[i]

                    model_pred = None
                    if method == '0v':
                        model_pred = np.zeros(gtp_y.shape)
                        for i in range(pred_n):
                            model_pred[:, i] = y[:, cut_x - 1]
                    else:
                        model_pred = self.decoder.predict(new_enc)[:, cut_x:]
                        model_pred = self.unormalize_angle(model_pred)

                    error[basename][method]['euler'] = self.euler_error(
                        gtp_y, model_pred)  #y[:, -pred_n:], model_pred)
                    print method
                    print error[basename][method]['euler'][[1, 3, 7, 9]]
                    error[basename][method]['euler'] = error[basename][method][
                        'euler'].tolist()
                    #score_name = score_name + '%s:%2f_' %(method, error[basename][method]['euler'][-1])

                    #poses[k+2] = self.recover(model_pred[:1])

                    # error[method]['z'] = np.mean([np.linalg.norm(new_enc[i] - enc[i,-1]) for i in range(_N)])
                    # print error[method]['z']

                    # for i in range(_N):
                    # 	pose_err = metrics.pose_seq_error(gtp_x[i], model_pred[i,:,:self.euler_start], cumulative=True)
                    # 	error[method]['pose'] = error[method]['pose'] + np.array(pose_err)
                    # error[method]['pose'] = error[method]['pose']/_N
                    # print error[method]['pose']
                    # error[method]['pose'] = error[method]['pose'].tolist()

                #poses = np.concatenate(poses, axis=0)
                #image.plot_fk_from_euler(poses, title='%s_gt_agt_%s'%(basename, score_name), image_dir='../new_out/')

            with open(
                    '../new_out/zero_velocity_validation-testset-mseMartinez.json',
                    'wb') as result_file:
                json.dump(error, result_file)
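
The '0v' branch above is the standard zero-velocity baseline: every predicted frame is a copy of the last observed pose. The per-frame loop can be written as one vectorized line (illustrative helper):

import numpy as np

def zero_velocity_baseline(observed, pred_n):
    # (batch, t_obs, dims) -> (batch, pred_n, dims): repeat the last frame.
    return np.repeat(observed[:, -1:], pred_n, axis=1)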
Example #6
	def run(self, data_iterator, valid_data):
		# model_vars = [NAME, self.latent_dim, self.timesteps, self.batch_size]
		if not self.load():
			# from keras.utils import plot_model
			# plot_model(self.autoencoder, to_file='model.png')
			loss = 10000
			iter1, iter2 = tee(data_iterator)
			for i in range(self.periods):
				for x, y in iter1:
					x = self.__merge_n_reparameterize(x,y)
					x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, x, test_size=self.cv_splits)
					y_train = self.__alter_y(y_train)
					y_test = self.__alter_y(y_test)
					history = self.autoencoder.fit(x_train, y_train,
								shuffle=True,
								epochs=self.epochs,
								batch_size=self.batch_size,
								validation_data=(x_test, y_test))
								# callbacks=[tbCallBack])

					print history.history['loss']
					new_loss = np.mean(history.history['loss'])
					if new_loss < loss:
						self.autoencoder.save_weights(self.save_path, overwrite=True)
						loss = new_loss
						print 'Saved model - ', loss

					rand_idx = np.random.choice(x.shape[0], 25, replace=False)
					y_test_pred = self.encoder.predict(x[rand_idx])[:,-1]
					y_test_pred = self.decoder.predict(y_test_pred)[:,:,self.euler_start:]

					y_test_pred = self.unormalize_angle(y_test_pred)
					y_gt = wrap_angle(y[rand_idx][:,:,self.used_euler_idx])

					mae = np.mean(np.abs(y_gt-y_test_pred))
					mse = self.euler_error(y_gt, y_test_pred)

					print 'MAE', mae
					print 'MSE', mse

					with open('../new_out/%s_t%d_l%d_log.csv'%(NAME, self.timesteps, self.latent_dim), 'a+') as f:
						spamwriter = csv.writer(f)
						spamwriter.writerow([new_loss, mae, mse, L_RATE])


				iter1, iter2 = tee(iter2)

			data_iterator = iter2
		else:
			# load embedding
			embedding = []
			for x,y in data_iterator:
				x = self.__merge_n_reparameterize(x,y)
				e = self.encoder.predict(x)
				if len(embedding) == 0:
					embedding = e[:, self.predict_hierarchies]
				else:
					embedding = np.concatenate((embedding, e[:,self.predict_hierarchies]), axis=0)
				break
			embedding = np.array(embedding)
			print 'emb', embedding.shape
			mean_diff, diff = metrics.get_embedding_diffs(embedding[:,1], embedding[:,0])

			_N = 8
			methods = ['closest', 'closest_partial', 'add']
			cut_e = self.predict_hierarchies[0]
			cut_x = self.hierarchies[0]
			pred_n = self.hierarchies[1]-cut_x

			# a_n = 0
			load_path = '../human_motion_pred/baselines/'
			for basename in metric_baselines.iter_actions():
				print basename, '================='

				error = {m: {'euler': None,
						# 'z': None
						'pose': np.zeros(pred_n)}  for m in methods}

				# x, y = valid_data
				x = np.zeros((_N, self.timesteps, 96))
				x[:,:cut_x+1] = np.load(load_path + 'xyz/' + basename + '_cond.npy')[:,-cut_x-1:]
				y = np.zeros((_N, self.timesteps, 99))
				y[:,:cut_x+1] = np.load(load_path + 'euler/' + basename + '_cond.npy')[:,-cut_x-1:]
				gtp_x = np.load(load_path + 'xyz/' + basename + '_gt.npy')[:,:pred_n][:,:,self.used_xyz_idx]
				gtp_y = np.load(load_path + 'euler/' + basename + '_gt.npy')[:,:pred_n][:,:,self.used_euler_idx]

				# rand_idx = np.random.choice(x.shape[0], _N, replace=False)
				# x, y, xy = self.__merge_n_reparameterize(x[rand_idx],y[rand_idx], True)
				x, y, xy = self.__merge_n_reparameterize(x,y, True)
				# y = unormalize_angle(y)
				enc = self.encoder.predict(xy)
				partial_enc = enc[:,cut_e]

				# autoencoding error for partial seq
				dec = self.decoder.predict(partial_enc)[:,:cut_x+1]
				dec_euler = unormalize_angle(dec[:,:,self.euler_start:])
				print self.euler_error(y[:,:cut_x+1], dec_euler)
				#image.plot_poses_euler(x[:2,:cut+1], dec[:2,:,:self.euler_start], title='autoencoding', image_dir='../new_out/')

				for method in methods:
					new_enc = np.zeros(partial_enc.shape)
					for i in tqdm(range(_N)):
						if method == 'closest_partial':
							idx = metrics.closest_partial_index(embedding[:,0], partial_enc[i])
							new_enc[i] = embedding[idx,1]
						elif 'mean' in method:
							n = int(method.split('-')[1])
							new_enc[i] = metrics.closest_mean(embedding[:,1], partial_enc[i], n=n)
						elif method == 'add':
							new_enc[i] = partial_enc[i]+mean_diff
						elif method == 'closest':
							new_enc[i] = metrics.closest(embedding[:,1], partial_enc[i])

					model_pred = self.decoder.predict(new_enc)[:,cut_x+1:]
					model_pred_euler = unormalize_angle(model_pred[:,:,self.euler_start:])
					# error[method]['euler'] = self.euler_error(y[:,cut_x+1:], model_pred_euler)
					error[method]['euler'] = self.euler_error(gtp_y, model_pred_euler)
					print method
					print error[method]['euler']
					error[method]['euler'] = error[method]['euler'].tolist()

					#image.plot_poses_euler(gtp_x[:2], model_pred[:2,:,:self.euler_start], title=method, image_dir='../new_out/')

					# error[method]['z'] = np.mean([np.linalg.norm(new_enc[i] - enc[i,-1]) for i in range(_N)])
					# print error[method]['z']

					for i in range(_N):
						pose_err = metrics.pose_seq_error(gtp_x[i], model_pred[i,:,:self.euler_start], cumulative=True)
						error[method]['pose'] = error[method]['pose'] + np.array(pose_err)
					error[method]['pose'] = error[method]['pose']/_N
					print error[method]['pose']
					error[method]['pose'] = error[method]['pose'].tolist()
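
For the retrieval-based methods above, metrics.closest_partial_index is assumed to be a plain Euclidean nearest-neighbour lookup over the stored partial embeddings, whose index then selects the paired full-sequence embedding. A sketch under that assumption (not the library's documented contract):

import numpy as np

def closest_partial_index(partial_embeddings, query):
    # Index of the stored partial embedding nearest (L2) to the query.
    return int(np.argmin(np.linalg.norm(partial_embeddings - query, axis=-1)))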
Example #7
	def run(self, data_iterator, valid_data):
		load_path = '../human_motion_pred/baselines/'
		if not self.load():
			test_data_y, test_data_x = self.load_validation_data(load_path)
			print self.loss_opt_str
			# from keras.utils import plot_model
			# plot_model(self.autoencoder, to_file='model.png')
			loss = 10000
			iter1, iter2 = tee(data_iterator)
			for i in range(self.periods):
				for x, _ in iter1:
					#image.plot_fk_from_euler(x[:3], title='test')
					x_orig, x = self.__alter_parameterization(x)
					x, y = self.__alter_data(x)
					_, y_orig = self.__alter_data(x_orig)
					x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=self.cv_splits)

					history = self.autoencoder.fit(x_train, y_train,
								shuffle=True,
								epochs=self.epochs,
								batch_size=self.batch_size,
								validation_data=(x_test, y_test))

					print history.history['loss']
					new_loss = np.mean(history.history['loss'])
					if new_loss < loss:
						self.autoencoder.save_weights(self.save_path, overwrite=True)
						loss = new_loss
						print 'Saved model - ', loss

					rand_idx = np.random.choice(x.shape[0], 100, replace=False)
					y_gt = wrap_angle(y_orig[rand_idx])

					mse = self.validate(x[rand_idx], y_gt)
					mse_test = self.validate(test_data_x, test_data_y)
					mse_pred = self.validate(test_data_x, test_data_y)  # same call as mse_test above

					print 'MSE', np.mean(mse)
					print 'MSE TEST', np.mean(mse_test)
					print 'MSE PRED', mse_pred[-10:]

					with open('../new_out/%s_t%d_l%d_%s_log.csv'%(NAME, self.timesteps, self.latent_dim, self.loss_opt_str), 'a+') as f:
						spamwriter = csv.writer(f)
						spamwriter.writerow([new_loss, mse, mse_test, mse_pred, self.loss_opt_str])

				iter1, iter2 = tee(iter2)

			data_iterator = iter2
		else:
			_N = 8
			error = {act: 0 for act in metric_baselines.iter_actions()}

			# a_n = 0
			for basename in metric_baselines.iter_actions():
				print basename, '================='

				x = np.load(load_path + 'euler/' + basename + '_cond.npy')[:,-self.timesteps:]
				y = np.load(load_path + 'euler/' + basename + '_gt.npy')[:,:self.timesteps_out]
				_,x = self.__alter_parameterization(x)
				y = wrap_angle(y)

				# test_data_x/test_data_y exist only in the training branch above;
				# validate the per-action sequences loaded here instead.
				error[basename] = self.validate(x, y).tolist()
				print error[basename]

				#image.plot_poses_euler(gtp_x[:2], model_pred[:2,:,:self.euler_start], title=method, image_dir='../new_out/')

				# error[method]['z'] = np.mean([np.linalg.norm(new_enc[i] - enc[i,-1]) for i in range(_N)])
				# print error[method]['z']

				# for i in range(_N):
				# 	pose_err = metrics.pose_seq_error(gtp_x[i], model_pred[i,:,:self.euler_start], cumulative=True)
				# 	error[method]['pose'] = error[method]['pose'] + np.array(pose_err)
				# error[method]['pose'] = error[method]['pose']/_N
				# print error[method]['pose']
				# error[method]['pose'] = error[method]['pose'].tolist()


			with open('../new_out/%s_t%d_l%d_opt-%s_validation-testset-mseMartinez.json'%(NAME, self.timesteps, self.latent_dim, self.loss_opt_str), 'wb') as result_file:
				json.dump(error, result_file)
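
Finally, every training loop above reuses a one-shot generator across periods via itertools.tee. A stripped-down version of the pattern; note that tee buffers each batch the leading copy consumes until the lagging copy catches up, so a full period of data is held in memory:

from itertools import tee

def run_periods(data_iterator, periods, train_step):
    iter1, iter2 = tee(data_iterator)
    for _ in range(periods):
        for batch in iter1:             # consume one copy this period
            train_step(batch)
        iter1, iter2 = tee(iter2)       # re-split the buffered copy for the next pass
    return iter2                        # hand back a still-usable iterator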