def eval_center(model, action_data, action_name, n=200, n_comp=1000, cut=-1):
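	# Measure how "central" the mean of an action's sequences is: encode all of
	# `action_data` up to hierarchy level `cut`, decode the latent mean, and compare
	# it (plus the raw-space mean and a precomputed label-generated center) against
	# n random pseudo-center sequences, over n_comp random comparison sequences.
	# Scores are saved and the raw/latent centers are animated.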
	LABEL_GEN_CENTERS = '../new_out/L_RNN-t30-l400/generate_from_labels/eval_generation_from_label-gen_poses-L_GRU.npy'
	if cut == -1:
		cut = model.hierarchies[-1]

	encoded = metrics.__get_latent_reps(model.encoder, action_data, model.MODEL_CODE, n=cut)
	center_a = np.array([np.mean(encoded, axis=0)])
	center_a = metrics.__get_decoded_reps(model.decoder, center_a, model.MODEL_CODE)[0]
	center_raw = np.mean(action_data, axis=0)
	pseudo_center_idxs = np.random.choice(action_data.shape[0], n, replace=False)
	comp_idxs = np.random.choice(action_data.shape[0], min(n_comp, action_data.shape[0]), replace=False)
	scores = np.array([[1000.0]*(n+3)]*n_comp)
	print scores.shape

	if model.MODEL_CODE == metrics.L_LSTM:
		center_a = center_a[:,:-model.label_dim]
		center_raw = center_raw[:,:-model.label_dim]
		action_data = action_data[:,:,:-model.label_dim]

	center_from_label = np.load(LABEL_GEN_CENTERS)[model.labels[action_name]]

	for l, i in enumerate(tqdm(comp_idxs)):
		for k, j in enumerate(pseudo_center_idxs):
			scores[l][k] = metrics.__pose_seq_error(action_data[i], action_data[j])
		scores[l][-3] = metrics.__pose_seq_error(action_data[i], center_from_label)
		scores[l][-2] = metrics.__pose_seq_error(action_data[i], center_a)
		scores[l][-1] = metrics.__pose_seq_error(action_data[i], center_raw)

	__save_score(scores, model, 'eval_center')
	print 'animating...'
	import fk_animate
	fk_animate.animate_motion(center_raw, 'center raw', '../new_out/center_raw_animate-%s-t%d-l%d'%(model.NAME, model.timesteps, model.latent_dim))
	fk_animate.animate_motion(center_a, 'center latent', '../new_out/center_latent_animate-%s-t%d-l%d'%(model.NAME, model.timesteps, model.latent_dim))
def animate_results(from_path,
                    predict_path,
                    predict_name,
                    baseline='1',
                    baseline_name='Residual sup. (MA)',
                    ground_truth='2'):
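    # For each action, load the model's predictions (one .npy per action under
    # predict_path) plus the ground truth and a baseline prediction from from_path,
    # compute pose-sequence errors, and render side-by-side comparison animations
    # whose filenames encode the baseline/prediction error ratio.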
    for basename in iter_actions(from_path):
        pds = np.load(predict_path + basename + '.npy')
        for i in range(_N):
            print basename, i
            gt = np.load(from_path + LOAD_PATH + basename + '_0-%d.npy' % i)
            gtp = np.load(from_path + LOAD_PATH + basename + '_%s-%d.npy' %
                          (ground_truth, i))
            bpd = np.load(from_path + LOAD_PATH + basename + '_%s-%d.npy' %
                          (baseline, i))

            pd_score = metrics.__pose_seq_error(pds[i], gtp[:pds.shape[1]])
            bl_score = metrics.__pose_seq_error(bpd[:pds.shape[1]],
                                                gtp[:pds.shape[1]])

            score = bl_score / pd_score

            fk_animate.animate_compare(
                gt, gtp[:pds.shape[1]], pds[i], predict_name,
                bpd[:pds.shape[1]], baseline_name,
                predict_path + '%6.2f-p%6.2f-b%6.2f-images-%s-%d-%s' %
                (score, pd_score, bl_score, basename, i,
                 predict_name.replace('.', '').replace('/', '-').replace(
                     ' ', '-')))
def plot_results_npy(from_path, npy_files_dirs, method_names):
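    # For each action, plot the cumulative prediction error over time for every
    # method's predictions (npy_files_dirs[i] + action.npy) against the ground-truth
    # continuations; the '_1' baseline is plotted once as 'Base', and the later half
    # of the methods is drawn dashed. One figure is saved per action.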
    for basename in iter_actions(from_path):
        for i in range(len(method_names)):
            pd = np.load(npy_files_dirs[i] + basename + '.npy')
            score = [None] * _N
            score_ = [None] * _N
            t = pd.shape[1]

            for j in range(_N):
                gt = np.load(from_path + LOAD_PATH + basename +
                             '_2-%d.npy' % j)[:t]
                score[j] = metrics.__pose_seq_error(pd[j], gt, cumulative=True)
                if i == 0:
                    pd_b = np.load(from_path + LOAD_PATH + basename +
                                   '_1-%d.npy' % j)[:t]
                    score_[j] = metrics.__pose_seq_error(pd_b,
                                                         gt,
                                                         cumulative=True)

            if i == 0:
                plt.plot(range(1, t + 1),
                         np.mean(score_, axis=0),
                         linestyle=':',
                         linewidth=3,
                         label='Base')  #Residual sup. (MA)')
            if i > len(method_names) / 2 - 1:
                plt.plot(range(1, t + 1),
                         np.mean(score, axis=0),
                         label=method_names[i],
                         linestyle='--')
            else:
                plt.plot(range(1, t + 1),
                         np.mean(score, axis=0),
                         label=method_names[i])

        plt.legend()
        plt.xlabel('time-steps')
        plt.ylabel('error')
        plt.title(basename)
        plt.savefig('../../new_out/L-RNN-%s.png' % (basename))
        plt.close()
def compare_embedding(model, data_iterator):
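    # Predict the remainder of a sequence by shifting its partial-sequence encoding
    # with the mean latent difference between the top two hierarchy levels, then
    # decoding. Errors are compared against the '_1' baseline predictions, and the
    # decoded poses are plotted and saved per action.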
    import image
    embedding = metrics.get_embedding(model,
                                      data_iterator,
                                      subspace=model.hierarchies[-2:])
    mean_diff, diff = metrics.get_embedding_diffs(embedding[1], embedding[0])
    std_diff = np.std(diff, axis=0)
    cut = model.hierarchies[-2] + 1
    pred_n = model.timesteps - cut
    for basename in iter_actions():
        print basename
        n = 8
        pose_ref = np.zeros((n, model.timesteps, model.input_dim))
        pose_pred_bl = np.zeros((n, model.timesteps - cut, model.input_dim))
        pose_gt = np.zeros((n, model.timesteps - cut, model.input_dim))

        for i in tqdm(range(n)):
            gt = np.load(LOAD_PATH + basename + '_0-%d.npy' % i)
            pd = np.load(LOAD_PATH + basename + '_1-%d.npy' % i)
            gtp = np.load(LOAD_PATH + basename + '_2-%d.npy' % i)

            pose_ref[i, :cut] = gt[-cut:]
            pose_pred_bl[i] = pd[:pred_n]
            pose_gt[i] = gtp[:pred_n]

        new_enc = model.encoder.predict(pose_ref)[:, cut - 1] + mean_diff
        pose_pred = model.decoder.predict(new_enc)[:, :pred_n]
        error_bl = [
            metrics.__pose_seq_error(pose_gt[i], pose_pred_bl[i])
            for i in range(n)
        ]
        error = [
            metrics.__pose_seq_error(pose_gt[i], pose_pred[i])
            for i in range(n)
        ]
        print np.mean(error), np.mean(error_bl)
        image.plot_poses(pose_pred, title='rnn')
        image.plot_poses(pose_pred_bl, title='baseline')
        image.plot_poses(pose_gt, title='gt')
        np.save('../new_out/HRNN-%s.npy' % basename, pose_pred)
def eval_nearest_neighbor(validation_data,
                          training_data,
                          n_valid=250,
                          n_random=1000,
                          n_input=15):
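    # Nearest-neighbour baseline: for n_valid random validation sequences, search
    # randomly sampled training sequences for the closest match on the first n_input
    # frames, take that match's remaining frames as the prediction, and plot the
    # mean cumulative error of the predictions over time.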
    error_score = [1000] * n_valid
    error_x = [None] * n_valid
    idxs = np.random.choice(len(validation_data), n_valid, replace=False)
    for xs, _ in training_data:
        idx_comp = np.random.choice(xs.shape[0],
                                    min(n_random, xs.shape[0]),
                                    replace=False)
        for i, idx in enumerate(tqdm(idxs)):
            for x in xs[idx_comp]:
                score = metrics.__pose_seq_error(
                    x[:n_input], validation_data[idx, :n_input])
                if score < error_score[i]:
                    error_score[i] = score
                    error_x[i] = np.copy(x[n_input:])

    error = [None] * n_valid
    error_ = [None] * n_valid
    for i, idx in enumerate(idxs):
        error[i] = metrics.__pose_seq_error(error_x[i],
                                            validation_data[idx, n_input:],
                                            cumulative=True)
        # fk_animate.animate_compare(gt[basename][i], gtp[basename][i],
        # 	error_x[basename][i], 'Nearest Neighbor (1/%d)'%(DATA_ITER_SIZE/RANDOM_N),
        # 	pd[basename][i], 'Residual sup. (MA)', from_path+LOAD_PATH+'images/')

    _err = np.mean(error, axis=0)
    plt.plot(range(1, _err.shape[0] + 1), _err)
    plt.xlabel('time-steps')
    plt.ylabel('error')
    plt.title('Nearest Neighbor (1/10)')
    plt.savefig('../new_out/nn-random-sampled-v%d-r%d.png' %
                (n_valid, n_random))
    plt.close()
def transfer_motion(model, action_data, from_motion_name, to_motion_name, data_iterator, n=10, n_comp=1000, cut=-1):
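	# Motion transfer in latent space: encode n random sequences of the source action and
	# shift each code by the (target - source) label representation, once using the
	# precomputed label latents (LABEL_GEN_Z) and once using re-encoded label-generated
	# center poses. The results are decoded, animated, and scored by the minimum pose
	# error against randomly sampled sequences of every label.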
	LABEL_GEN_Z = '../new_out/L_RNN-t30-l400/generate_from_labels/eval_generation_from_label-gen_z-L_GRU.npy'
	LABEL_GEN_CENTERS = '../new_out/L_RNN-t30-l400/generate_from_labels/eval_generation_from_label-gen_poses-L_GRU.npy'
	if cut == -1:
		cut = model.hierarchies[-1]

	action_data = action_data[np.random.choice(action_data.shape[0], n, replace=False)]
	z_actions = metrics.__get_latent_reps(model.encoder, action_data, model.MODEL_CODE, n=cut)

	z_labels = np.load(LABEL_GEN_Z)[[model.labels[from_motion_name], model.labels[to_motion_name]]]
	z_infered = z_actions - z_labels[0] + z_labels[1]
	action_from_z = metrics.__get_decoded_reps(model.decoder, z_infered, model.MODEL_CODE)
	# action_normalized = metrics.__get_decoded_reps(model.decoder, z_infered/np.linalg.norm(z_infered), model.MODEL_CODE)

	center_from_label = np.zeros((2, model.timesteps, model.input_dim))
	center_from_label[:,:,:-model.label_dim] = np.load(LABEL_GEN_CENTERS)[[model.labels[from_motion_name], model.labels[to_motion_name]]]
	center_from_label[0,:,-model.label_dim+model.labels[from_motion_name]] = 1
	center_from_label[1,:,-model.label_dim+model.labels[to_motion_name]] = 1
	z_labels = metrics.__get_latent_reps(model.encoder, center_from_label, model.MODEL_CODE, n=cut)
	z_infered = z_actions - z_labels[0] + z_labels[1]
	action_from_pose = metrics.__get_decoded_reps(model.decoder, z_infered, model.MODEL_CODE)

	print 'animating...'
	import fk_animate
	save_path = '../new_out/transfer_motion-%s-to-%s-'%(from_motion_name, to_motion_name)
	for i in range(z_actions.shape[0]):
		fk_animate.animate_motion([action_data[i], action_from_z[i], action_from_pose[i]], [from_motion_name, to_motion_name, to_motion_name+'(+name)'], save_path+str(i))
		# fk_animate.animate_motion([action_data[i], action_from_z[i], action_normalized[i]], [from_motion_name, to_motion_name, to_motion_name+'(norm)'], save_path+str(i)+'-')

	# one independent row per sequence ([[...]]*n would alias the same inner list n times)
	scores = [[1000.0]*len(model.labels) for _ in range(n)]
	count = 0
	for xs,_ in data_iterator:
		x_idx = np.random.choice(xs.shape[0], min(n_comp, xs.shape[0]), replace=False)
		for x in tqdm(xs[x_idx]):
			for i, z in enumerate(action_from_z):
				s = metrics.__pose_seq_error(z[:,:-model.label_dim], x[:,:-model.label_dim])
				label_idx = np.argmax(x[0, -model.label_dim:])  # index of the one-hot label channel
				if scores[i][label_idx] > s:
					scores[i][label_idx] = s
		del xs
		print count
		count += 1

	print scores
	np.save(save_path+'scores.npy', scores)
def eval_generation(model, action_data, data_iterator, n=20, n_comp=1000, cut=-1):
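	# Generate new sequences by averaging the latent codes of random pairs of sequences
	# and decoding the result, then score each generated sequence by its minimum pose
	# error against randomly sampled sequences of every label.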
	if cut == -1:
		cut = model.hierarchies[-1]
	ind_rand = np.random.choice(action_data.shape[0], n, replace=False)
	n = n // 2
	encoded = metrics.__get_latent_reps(model.encoder, action_data[ind_rand], model.MODEL_CODE, n=cut)
	encoded = np.array([(encoded[i] + encoded[i+n])/2 for i in range(n)])
	action_data = metrics.__get_decoded_reps(model.decoder, encoded, model.MODEL_CODE)
	# one independent row per generated sequence ([[...]]*n would alias the same inner list)
	scores = [[1000.0]*len(model.labels) for _ in range(n)]
	count = 0
	for xs,_ in data_iterator:
		x_idx = np.random.choice(xs.shape[0], min(n_comp, xs.shape[0]), replace=False)
		for x in tqdm(xs[x_idx]):
			for i, z in enumerate(action_data):
				s = metrics.__pose_seq_error(z[:,:-model.label_dim], x[:,:-model.label_dim])
				label_idx = np.argmax(x[0, -model.label_dim:])  # index of the one-hot label channel
				if scores[i][label_idx] > s:
					scores[i][label_idx] = s
		del xs
		print count
		count += 1

	__save_score(scores, model, 'eval_generation')
def plot_best_distance_function(model, data, data_iterator, nn, n=50):
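	# Compare candidate latent-space distance functions (see metrics.__dist_name__):
	# for each sample, plot distance-to-target versus error for the closest embedding
	# matches, the partial-sequence encoding ('dec-part') and the `nn` model's
	# prediction ('FN'), in both raw and latent space, and print the average errors.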
	import image  # used by plot_poses below; imported locally, as in the other examples
	idx = np.random.choice(data.shape[0], n, replace=False)
	enc = metrics.__get_latent_reps(model.encoder, data[idx], model.MODEL_CODE, n=model.hierarchies)
	nn_pred_z = nn.model.predict(enc[:,-2])
	N = 3
	colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]

	avg_error_raw = np.zeros(N)
	avg_error_lat = np.zeros(N)

	errors = np.zeros(n+2)
	dists = np.zeros(n+2)
	zs = np.zeros((N,model.latent_dim))
	poses_plot = np.zeros((N+3, model.timesteps, model.input_dim-model.label_dim))

	emb = metrics.get_label_embedding(model, data_iterator, subspaces=model.hierarchies)
	cut = model.hierarchies[-2]+1
	# new_e = np.zeros((N,n,model.latent_dim))
	for j in tqdm(range(n)):
		z_ref = enc[j,-2]
		z_true = enc[j,-1]
		p_enc_dec = metrics.__get_decoded_reps(model.decoder, np.array([z_ref, z_true, nn_pred_z[j]]), model.MODEL_CODE)
		poses_plot[:3] = p_enc_dec[:,:,:-model.label_dim]

		for i in range(N):
			dist_name = metrics.__dist_name__(i)
			ls = emb[:,-1]
			weights, w_i = metrics.__get_weights(ls, z_ref, mode=i)
			zs[i] = ls[w_i[0]]

			preds = metrics.__get_decoded_reps(model.decoder, ls[w_i[:n]], model.MODEL_CODE)
			poses_plot[3+i] = preds[0,:,:-model.label_dim]
			for k in range(n):
				errors[k] = metrics.__pose_seq_error(preds[k,:,:-model.label_dim],data[idx[j],:,:-model.label_dim])
				dists[k] = metrics.__distance__(ls[w_i[k]], z_true, mode=i)

			errors[-2] = metrics.__pose_seq_error(p_enc_dec[0,:cut,:-model.label_dim],data[idx[j],:cut,:-model.label_dim])
			dists[-2] = metrics.__distance__(z_ref, z_true, mode=i)
			errors[-1] = metrics.__pose_seq_error(p_enc_dec[-1,:,:-model.label_dim],data[idx[j],:,:-model.label_dim])
			dists[-1] = metrics.__distance__(nn_pred_z[j], z_true, mode=i)

			plt.scatter(dists, errors, label=dist_name, s=30, c=colors[i])
			if i == N-1:
				plt.scatter(dists[:1], errors[:1], c='black', alpha=0.3, s=100, label='closest')
				plt.scatter(dists[-1:], errors[-1:], c=colors[-1], alpha=0.3, s=100, label='FN')
				plt.scatter(dists[-2:-1], errors[-2:-1], c='red', alpha=0.3, s=100, label='dec-part')
			else:
				plt.scatter(dists[:1], errors[:1], c='black', alpha=0.3, s=100)
				plt.scatter(dists[-1:], errors[-1:], c=colors[-1], alpha=0.3, s=100)
				plt.scatter(dists[-2:-1], errors[-2:-1], c='red', alpha=0.3, s=100)
			avg_error_raw[i] += errors[0]

		error = metrics.__pose_seq_error(p_enc_dec[1,:,:-model.label_dim],data[idx[j],:,:-model.label_dim])
		plt.scatter([0], [error], label='dec-comp', c=colors[3])

		plt.legend()
		plt.xlabel('distance to comp. seq. rep.')
		plt.ylabel('error in raw space')
		plt.title('distance vs error in raw space (sample %d)'%(j))
		plt.savefig('../new_out/__plot_best_distance_function_%d-1.png'%(j))
		plt.close()

		for i in range(N):
			dist_name = metrics.__dist_name__(i)
			error = [metrics.__latent_error(z, z_true) for z in [zs[i], nn_pred_z[j], z_true]]
			dist = [metrics.__distance__(z, z_ref, mode=i) for z in [zs[i], nn_pred_z[j], z_true]]
			plt.scatter(dist, error, label=dist_name, s=30, c=colors[i])
			if i == N-1:
				plt.scatter(dist[-1:], error[-1:], c='black', alpha=0.3, s=100, label='true')
				plt.scatter(dist[-2:-1], error[-2:-1], c=colors[-1], alpha=0.3, s=100, label='FN')
			else:
				plt.scatter(dist[-1:], error[-1:], c='black', alpha=0.3, s=100)
				plt.scatter(dist[-2:-1], error[-2:-1], c=colors[-1], alpha=0.3, s=100)
			avg_error_lat[i] += error[0]

		plt.legend()
		plt.xlabel('distance to part. seq. rep.')
		plt.ylabel('error in latent space')
		plt.title('distance vs error in latent space(sample %d)'%(j))
		plt.savefig('../new_out/__plot_best_distance_function_%d-2.png'%(j))
		plt.close()

		image.plot_poses([data[idx[j],:,:-model.label_dim]], poses_plot, title='part-comp-l2-l1-cos (sample %d)'%j, image_dir='../new_out/')

	tot = N*n
	print 'Avg error in raw space'
	print avg_error_raw / tot
	print 'Avg error in latent space'
	print avg_error_lat / tot
def compare_raw_closest(from_path, data_iterator):
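    # Nearest-neighbour comparison written to CSV: for every action and sample, scan
    # randomly sampled training sequences for the closest match on the first _N_INPUT
    # frames, take its continuation as the prediction, save it, and write the cumulative
    # errors of the NN prediction and the '_1' baseline to nn_15_results.csv.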
    import csv

    def load__(i, cut=0):
        if cut > 0:
            return {
                basename: [
                    np.load(from_path + LOAD_PATH + basename + '_%d-%d.npy' %
                            (i, j))[:cut] for j in range(_N)
                ]
                for basename in iter_actions(from_path)
            }
        else:
            return {
                basename: [
                    np.load(from_path + LOAD_PATH + basename + '_%d-%d.npy' %
                            (i, j)) for j in range(_N)
                ]
                for basename in iter_actions(from_path)
            }

    with open('../../results/nn_15_results.csv', 'wb') as csvfile:
        spamwriter = csv.writer(csvfile)
        # iter1, iter2 = tee(data_iterator)
        error_score = {
            basename: [10000] * _N
            for basename in iter_actions(from_path)
        }
        error_x = {
            basename: [None] * _N
            for basename in iter_actions(from_path)
        }
        gt = load__(0)
        gtp = load__(2, _N_PRED)
        pd = load__(1, _N_PRED)
        iterations = 0

        for xs, _ in data_iterator:
            idx = np.random.choice(xs.shape[0],
                                   min(RANDOM_N, xs.shape[0]),
                                   replace=False)
            for x in tqdm(xs[idx]):
                for basename in iter_actions(from_path):
                    for i in range(_N):
                        score = metrics.__pose_seq_error(
                            x[:_N_INPUT], gt[basename][i][-_N_INPUT:])
                        if score < error_score[basename][i]:
                            error_score[basename][i] = score
                            error_x[basename][i] = np.copy(x[_N_INPUT:])

            del xs
            iterations += 1
            print iterations
            # break

        for basename in iter_actions(from_path):
            error = [None] * _N
            error_ = [None] * _N
            for i in range(_N):
                error[i] = metrics.__pose_seq_error(error_x[basename][i],
                                                    gtp[basename][i],
                                                    cumulative=True)
                error_[i] = metrics.__pose_seq_error(pd[basename][i],
                                                     gtp[basename][i],
                                                     cumulative=True)
                np.save(from_path + LOAD_PATH + basename + '_nn_15-%d.npy' % i,
                        error_x[basename][i])
                # fk_animate.animate_compare(gt[basename][i], gtp[basename][i],
                # 	error_x[basename][i], 'Nearest Neighbor (1/%d)'%(DATA_ITER_SIZE/RANDOM_N),
                # 	pd[basename][i], 'Residual sup. (MA)', from_path+LOAD_PATH+'images/')

            print basename
            _err = np.mean(error, axis=0)
            print 'nearest neighbor'
            print _err
            spamwriter.writerow([
                basename,
                'Nearest nei. (1/%d)' % (DATA_ITER_SIZE / RANDOM_N)
            ] + _err.tolist())
            _err = np.mean(error_, axis=0)
            print 'baseline error'
            print np.mean(error_, axis=0)
            spamwriter.writerow([basename, 'Residual sup. (MA)'] +
                                _err.tolist())
def compare(model, data_iterator):
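    # For each hierarchy cut, complete partial sequences via metrics.__get_consecutive_add
    # on the embedding, measure per-time-step errors against the ground truth alongside
    # the '_1' baseline and simple heuristics (zero velocity, 2- and 4-frame averages),
    # save qualitative pose plots into low/high-error folders, and plot per-action and
    # overall error curves.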
    import image
    h = model.timesteps
    embedding = metrics.get_embedding(model, data_iterator,
                                      model.timesteps - 1)
    # methods = ['Closest', 'Mean-45', 'Mean-75', 'Mean-100', 'Random-5000', 'Random-10000', 'Multi']
    methods = [
        'Add', 'Add-Closest', 'Add-Mean-30', 'Add-Mean-45', 'Add-Mean-75'
    ]
    # methods = ['Closest', 'Mean-30', 'Mean-45', 'Random-5000', 'Random-10000']
    for k, cut in enumerate(model.hierarchies[-2:-1]):
        # k = (h-cut)/3
        errors = {}
        errors_ = {}
        for basename in iter_actions():
            errors[basename] = []
            errors_[basename] = []
            print basename
            for i in tqdm(range(8)):
                gt = np.load(LOAD_PATH + basename + '_0-%d.npy' % i)
                pd = np.load(LOAD_PATH + basename + '_1-%d.npy' % i)
                gtp = np.load(LOAD_PATH + basename + '_2-%d.npy' % i)
                if model.MODEL_CODE == metrics.HL_LSTM:
                    l = np.zeros((gt.shape[0], model.label_dim))
                    l[:, model.labels[basename]] = 1
                    gt = np.concatenate((gt, l), axis=1)

                # pose = metrics.__get_next_half(embedding, gt[-10:], encoder, decoder, h, model_name)
                # poses = metrics.__get_consecutive(embedding, gt[-h:], model, cut+1, k)
                # poses = metrics.__get_consecutive_multi(embedding, gt[-h:], model, cut+1, k)
                poses = metrics.__get_consecutive_add(embedding, gt[-h:],
                                                      model, cut + 1)
                ground_truth = metrics.__autoencode(model.autoencoder,
                                                    np.array([gtp[:h]]),
                                                    model.MODEL_CODE)
                ground_truth = np.reshape(
                    ground_truth, (-1, model.timesteps, model.input_dim))[-1]
                # pose_ref = poses[0]
                # poses = poses[1:]
                n = poses.shape[1]
                # metrics.__get_embedding_path(gt, encoder, decoder, h, model_name)
                if len(errors[basename]) == 0:
                    errors[basename] = np.zeros((8, poses.shape[0], n))
                    errors_[basename] = np.zeros((8, 4, n))
                for j in range(n):
                    # use a separate index (m) so the outer hierarchy index k is not clobbered
                    for m, p in enumerate(poses):
                        errors[basename][i, m, j] = metrics.__pose_seq_error(
                            gtp[:j + 1], p[:j + 1])
                    errors_[basename][i, 3, j] = metrics.__pose_seq_error(
                        gtp[:j + 1], pd[:j + 1])

                errors_[basename][i, 0, :] = metrics.__zeros_velocity_error(
                    gt[-n:], gtp[:n])[:]
                errors_[basename][i, 1, :] = metrics.__average_2_error(
                    gt[-n:], gtp[:n])[:]
                errors_[basename][i, 2, :] = metrics.__average_4_error(
                    gt[-n:], gtp[:n])[:]

                # image.plot_poses([gt[-h:], gtp[:h]], [pose[:h], pd[:h]])
                best_error = np.min(errors[basename][i, :, -1])
                b_error = errors_[basename][i, -1, -1]
                image_dir = '../results/t30-l200/low_error/refined-add/'
                if best_error > errors_[basename][i, -1, -1]:
                    image_dir = '../results/t30-l200/high_error/refined-add/'

                image.plot_poses(
                    [gt[-n:], gtp[:n], ground_truth[:n]],
                    np.concatenate([poses, [pd[:n]]], axis=0),
                    title='%6.3f-%6.3f (%s - C, M, R-5000-10000, B) %d-%d' %
                    (b_error, best_error / b_error, basename, cut, k),
                    image_dir=image_dir)
            # print basename, mean_err[[1,3,7,9,-1]]
            # if basename in ['walking', 'eating', 'smoking', 'discussion']:
            x = np.arange(n) + 1
            for m, method in enumerate(methods):
                mean_err = np.mean(errors[basename][:, m, :], axis=0)
                plt.plot(x, mean_err, label=method)
            for m, method in enumerate(
                ['0 velocity', 'avg-2', 'avg-4', 'baseline']):
                mean_err_ = np.mean(errors_[basename][:, m, :], axis=0)
                plt.plot(x, mean_err_, '--', label=method)

            plt.legend()
            plt.title('%s-%d' % (basename, cut))
            # plt.show()
            plt.savefig('../results/t30-l200/graphs/refined-add/%s-%d-%d.png' %
                        (basename, cut, k))
            plt.close()

        errors = np.reshape(np.array(errors.values()), (-1, poses.shape[0], n))
        errors_ = np.reshape(np.array(errors_.values()), (-1, 4, n))
        for m, method in enumerate(methods):
            mean_err = np.mean(errors[:, m, :], axis=0)
            plt.plot(x, mean_err, label=method)
        for m, method in enumerate(
            ['0 velocity', 'avg-2', 'avg-4', 'baseline']):
            mean_err_ = np.mean(errors_[:, m, :], axis=0)
            plt.plot(x, mean_err_, '--', label=method)

        plt.legend()
        plt.title('Total-%d' % (cut))
        # plt.show()
        plt.savefig('../results/t30-l200/graphs/refined-add/total-%d-%d.png' %
                    (cut, k))
        plt.close()
def compare_label_embedding(model, nn, data_iterator, with_label=True):
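    # Label-conditioned completion: build partial sequences (optionally with the action's
    # one-hot label set), encode them, swap in the full-sequence embedding of the closest
    # partial-sequence match, decode the continuation, compute its error against the '_1'
    # baseline, and save the predicted poses per action.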
    import image
    embedding = metrics.get_label_embedding(
        model, data_iterator,
        subspaces=model.hierarchies[-2:])  #, without_label_only=True)
    print len(embedding)
    mean_diff, diff = metrics.get_embedding_diffs(embedding[:, 1],
                                                  embedding[:, 0])
    cut = model.hierarchies[-2] + 1
    pred_n = model.timesteps - cut
    for basename in iter_actions():
        print basename
        pose_ref = np.zeros((_N, model.timesteps, model.input_dim))
        pose_pred_bl = np.zeros(
            (_N, pred_n, model.input_dim - model.label_dim))
        pose_gt = np.zeros((_N, pred_n, model.input_dim - model.label_dim))

        for i in tqdm(range(_N)):
            gt = np.load(LOAD_PATH + basename + '_0-%d.npy' % i)
            pd = np.load(LOAD_PATH + basename + '_1-%d.npy' % i)
            gtp = np.load(LOAD_PATH + basename + '_2-%d.npy' % i)

            pose_ref[i, :cut, :-model.label_dim] = gt[-cut:]
            pose_pred_bl[i] = pd[:pred_n]
            pose_gt[i] = gtp[:pred_n]

            # pose_ref[i,cut:,:-model.label_dim] = gtp[:pred_n]

        if with_label:
            print model.labels[basename]
            pose_ref[:, :, -model.label_dim + model.labels[basename]] = 1

        enc = model.encoder.predict(pose_ref)[:, cut - 1]
        new_enc_partial = np.zeros(enc.shape)
        for i in tqdm(range(_N)):
            new_e_idx = metrics.__closest_partial_index(
                embedding[:, 0], enc[i])
            #new_enc_partial[i] = metrics.__closest(embedding[:,1], enc[i])
            new_enc_partial[i] = embedding[new_e_idx, 1]
        print enc.shape, pose_ref.shape, new_enc_partial.shape
        pose_pred_from_part = model.decoder.predict(
            new_enc_partial)[:, -pred_n:, :-model.label_dim]

        #new_enc = nn.model.predict(enc)
        #new_enc = enc + mean_diff
        # # pose_pred = model.decoder.predict(new_enc)
        #pose_pred = model.decoder.predict(new_enc)[:,-pred_n:,:-model.label_dim]
        error_bl = [
            metrics.__pose_seq_error(pose_gt[i], pose_pred_bl[i])
            for i in range(_N)
        ]
        error_part = [
            metrics.__pose_seq_error(pose_gt[i], pose_pred_from_part[i])
            for i in range(_N)
        ]
        #error = [metrics.__pose_seq_error(pose_gt[i], pose_pred[i]) for i in range(_N)]
        #print error
        #print error_part
        #print error_bl
        # image.plot_poses(pose_pred, title='rnn', image_dir='../new_out/')
        # image.plot_poses(pose_pred_bl, title='baseline', image_dir='../new_out/')
        # image.plot_poses(pose_gt, title='gt', image_dir='../new_out/')
        #np.save('../new_out/R-RNN-t25-l512/npy/RRNN-add-nl-%s.npy'%basename, pose_pred)
        np.save('../new_out/t25-l512-r/LRNN-r-%s.npy' % basename,
                pose_pred_from_part)