コード例 #1
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def go_up_hierarchy_convexhall(embedding,
                               convex_hall,
                               validation_data,
                               encoder,
                               decoder,
                               cut=1):
    """Plot decoded pose matches blended from the convex-hall embedding.

    For 10 random validation sequences: encode the sequence, take the latent
    vector at index ``cut``, blend the 30 convex-hall points nearest to it
    (inverse-distance weighting) into a new latent vector, then decode and
    plot both the reference and the blended vector.
    """
    import image
    for _ in tqdm(range(10)):
        pick = np.random.randint(len(validation_data))
        latent = encoder.predict(validation_data[pick:pick + 1])
        partial = np.copy(validation_data[pick])
        partial[cut + 1:] = 0  # blank everything past the observed cut
        z_ref = latent[0, cut]
        dists = [np.linalg.norm(hull_pt - z_ref) for hull_pt in convex_hall]
        nearest = np.argsort(dists)[:30]
        # inverse-distance weighted average of the 30 closest hall points
        blended = (np.sum([convex_hall[d] / dists[d] for d in nearest], axis=0)
                   / np.sum([1.0 / dists[d] for d in nearest]))
        decoded = decoder.predict(np.array([z_ref, blended]))
        image.plot_poses(
            [partial, validation_data[pick]],
            decoded,
            title='Pattern matching (convex hall) (prediction in bold)')
コード例 #2
0
ファイル: metrics.py プロジェクト: ytixu/DNNdumps
def gen_long_sequence(embedding, validation_data, model, l_n=60, numb=10):
	"""Generate long pose sequences by chaining half-window predictions.

	Picks ``numb`` random start indices from ``validation_data`` and, for
	each, repeatedly predicts the next half model-window via
	``__get_consecutive`` until ``l_n`` frames are covered.  The overlapping
	segments are summed into a merged sequence, plotted against the ground
	truth, and the mean/std pose error is printed.

	NOTE(review): Python 2 code -- ``h/2`` and ``l_n/cut`` rely on integer
	division and ``print`` is a statement.
	"""
	import image
	h = model.timesteps
	cut = h/2  # prediction stride: half a model window
	idxs = np.random.randint(0, len(validation_data)-l_n, numb)
	# embedding subspace of the deepest (length h-1) representations
	ls = __get_subspace(embedding, h-1, model.MODEL_CODE)
	err = np.zeros(numb)
	for i, n in enumerate(tqdm(idxs)):
		true_pose = np.reshape(validation_data[n:n+l_n,0], (1, l_n, -1))
		# one row per predicted segment; the last row holds the merged result
		poses = np.zeros((l_n/cut, l_n, true_pose.shape[-1]))
		current_pose = validation_data[n]
		poses[0, :h] = current_pose
		for j in range(l_n/cut-2):
			current_pose = __get_consecutive(ls, current_pose, model, cut)
			poses[j+1,(j+1)*cut:(j+3)*cut] = np.copy(current_pose)
			# poses[j,j+1:j+1+h] = current_pose[0]

		# merge segments; interior frames are covered twice, so halve them
		poses[-1] = np.sum(poses, axis=0)
		poses[-1, h/2:l_n-h/2] = poses[-1, h/2:l_n-h/2]/2
		err[i] = __pose_error(true_pose[0], poses[-1])
		if model.MODEL_CODE == HL_LSTM:
			# strip the action-label channels before plotting
			image.plot_poses(true_pose[:,:,:-model.label_dim], poses[:,:,:-model.label_dim], title='Pattern matching (long) (prediction in bold)')
		else:
			image.plot_poses(true_pose, poses, title='Pattern matching (long) (prediction in bold)')

	print np.mean(err), np.std(err)
コード例 #3
0
ファイル: metrics.py プロジェクト: ytixu/DNNdumps
def gen_random(embedding, validation_data, encoder, decoder, h, model_name, numb=10):
	"""Plot random-perturbation pattern matches for ``numb`` validation samples.

	Two plots per sample: the first varies the hierarchy cut level with a
	fixed number of random draws; the second stays at the deepest level and
	varies the number of random draws.
	"""
	import image
	sample_ids = np.random.randint(0, len(validation_data), numb)
	latents = __get_latent_reps(encoder, validation_data[sample_ids], model_name)
	half = h // 2  # same as Python 2 h/2 for integer h
	for i, n in enumerate(tqdm(sample_ids)):
		partial = np.copy(validation_data[n])
		partial[half:] = 0
		z_ref = latents[i, half - 1]
		# --- vary the hierarchy cut level ---
		candidates = []
		for cut in __cuts():
			subspace = __get_subspace(embedding, cut, model_name)
			candidates.append(__random(subspace, z_ref, 10000))
		decoded = decoder.predict(np.array(candidates))
		image.plot_poses([partial, validation_data[n]], decoded, title='Pattern matching (random) (prediction in bold)')

		# --- vary the number of random draws at the deepest level ---
		subspace = __get_subspace(embedding, h - 1, model_name)
		weights, w_i = __get_weights(subspace, z_ref)
		candidates = [__random(subspace, z_ref, draws, weights, w_i)
			      for draws in __rn()[:-1]]
		decoded = decoder.predict(np.array(candidates))
		image.plot_poses([partial, validation_data[n]], decoded, title='Pattern matching (random) (prediction in bold)')
コード例 #4
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def k_mean_clusters(embedding, decoder, n=10):
    """Decode and plot the centers of a k-means clustering of the embedding.

    Clusters 2000 randomly chosen embedding vectors into ``n`` groups and
    plots the decoded cluster centers five at a time.
    """
    import image
    from sklearn.cluster import KMeans
    sample = np.random.choice(len(embedding), 2000)
    fitted = KMeans(n_clusters=n, random_state=0).fit(embedding[sample])
    decoded = decoder.predict(fitted.cluster_centers_)
    for start in range(0, (len(decoded) // 5) * 5, 5):
        image.plot_poses(decoded[start:start + 5])
コード例 #5
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def k_mean(embedding, decoder, n=8):
    """Plot decoded poses for the embedding points nearest each k-means center.

    K-means centers generally fall between real embedding points, so each
    center is snapped to the closest actual embedding vector before decoding.

    Improvement: the nearest-vector search used a Python list comprehension
    of per-row ``np.linalg.norm`` calls (O(k*n) Python-level calls); it is
    now a single vectorized norm per center.  Assumes ``embedding`` is a 2-D
    array, which ``KMeans.fit`` already requires.
    """
    from sklearn.cluster import KMeans
    import image
    kmeans = KMeans(n_clusters=n, random_state=0).fit(embedding)
    zs_pred = kmeans.cluster_centers_
    # snap each center to its nearest embedding vector (vectorized argmin)
    zs = np.array([
        embedding[np.argmin(np.linalg.norm(embedding - z, axis=1))]
        for z in zs_pred
    ])
    t_poses = decoder.predict(zs)
    print(t_poses.shape)  # single-arg print: identical output in Py2 and Py3
    image.plot_poses(t_poses)
コード例 #6
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def go_up_hierarchy_enc(encoder, decoder, data_iterator, h):
    """Decode progressively truncated encodings of one random sample per batch.

    For each batch, zero out the tail of one random sequence at every length
    from ``h`` down to 1, encode each truncation, decode the final latent
    vectors and plot them against the original sequence.
    """
    import image
    for batch_x, _batch_y in data_iterator:
        pick = np.random.randint(len(batch_x))
        truncated = np.copy(batch_x[pick:pick + 1])
        latents = []
        for length in range(h, 0, -1):
            print(truncated.shape)  # single-arg print: same output in Py2/Py3
            truncated[:, length:] = 0  # progressively blank the tail
            latents.append(encoder.predict(truncated)[0, -1])
        decoded = decoder.predict(np.array(latents))
        image.plot_poses(decoded, batch_x[pick:pick + 1])
コード例 #7
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def go_up_hierarchy_sim(embedding, random_data, encoder, decoder, h, cut=1):
    """Plot closest-match latent vectors up the hierarchy for one sample.

    Encodes one random sequence from ``random_data``, zeroes the frames past
    ``cut``, and for every other hierarchy level appends the embedding vector
    closest to the reference latent; the collected vectors are decoded and
    plotted.

    Bug fix: the body referred to an undefined name ``validation_data``
    (the parameter is ``random_data``), so every call raised NameError.
    """
    import image
    n = np.random.randint(len(random_data))
    enc = encoder.predict(random_data[n:n + 1])
    pose = np.copy(random_data[n])
    pose[cut + 1:] = 0  # keep only the observed prefix
    z_ref = enc[0, cut]
    zs = [z_ref]
    for i in range(1, h, 2):
        e = embedding[:, i]
        weights = [np.linalg.norm(e[j] - z_ref) for j in range(len(e))]
        zs.append(e[np.argmin(weights)])  # single closest neighbour
    p_poses = decoder.predict(np.array(zs))
    image.plot_poses([pose, random_data[n]],
                     p_poses,
                     title='Pattern matching (best) (prediction in bold)')
コード例 #8
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def plot_centers(decoder):
    """Load precomputed cluster-center poses from disk and plot them in fives.

    The ``decoder`` argument is unused here; the ``centers_orig_0`` file
    already contains pose data rather than latent vectors.
    """
    import image
    centers = np.load('../data/src/model_anal/centers_orig_0.npy')
    full_groups = len(centers) // 5
    for g in range(full_groups):
        start = g * 5
        image.plot_poses(centers[start:start + 5])
コード例 #9
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def plot_communities(decoder):
    """Decode and plot community embedding vectors found on disk.

    Scans the ``community_*_emb_vector_<std>std.npy`` files, decodes each
    batch of embedding vectors and plots the resulting poses five at a
    time, tagging each image with the community id parsed from the file
    name.
    """
    import image
    import glob
    for std in [2]:
        for path in glob.glob(
                '../data/src/model_anal/H_LSTM_l100_t10/communities/community_*_emb_vector_%dstd.npy'
                % (std)):
            tag = '_gen_%s_%s_' % (str(std), path.split('_')[-4])
            decoded = decoder.predict(np.load(path))
            for g in range(len(decoded) // 5):
                image.plot_poses(decoded[g * 5:(g + 1) * 5], args=tag)
コード例 #10
0
ファイル: metrics.py プロジェクト: ytixu/DNNdumps
def go_up_hierarchy(embedding, validation_data, encoder, decoder, h, model_name, cut=1, l_n=10, numb=10, method=CLOSEST, nn=10000):
	"""Match partial sequences up the hierarchy with a selectable strategy.

	For ``numb`` random validation sequences: encode the sequence, take the
	latent vector at level ``cut`` and, for every other hierarchy level,
	pick a matching embedding vector using ``method`` (CLOSEST, MEAN, or
	random sampling with ``nn`` draws); decode and plot the collection.

	Fix: the hierarchy-level loop reused the name ``i`` of the enclosing
	sample loop, shadowing it; renamed to ``level``.  Dead ``new_e = None``
	initializer removed (every branch assigns it).
	"""
	import image
	idxs = np.random.randint(0, len(validation_data)-l_n, numb)
	for i, n in enumerate(tqdm(idxs)):
		enc = __get_latent_reps(encoder, np.array(validation_data[n:n+1]), model_name)
		pose = np.copy(validation_data[n])
		pose[cut+1:] = 0  # blank frames past the observed cut
		z_ref = enc[0,cut]
		zs = [z_ref]
		for level in range(1, h, 2):
			e = __get_subspace(embedding, level, model_name)
			if method == CLOSEST:
				new_e = __closest(e, z_ref)
			elif method == MEAN:
				new_e = __mean(e, z_ref)
			else:
				new_e = __random(e, z_ref, nn)
			zs.append(new_e)
		p_poses = decoder.predict(np.array(zs))
		image.plot_poses([pose, validation_data[n]], p_poses, title='Pattern matching (best) (prediction in bold)')
コード例 #11
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def interpolate(embedding, encoder, decoder, l=8):
    import image
    n = np.random.choice(len(embedding), 2)
    dist = (embedding[n[1]] - embedding[n[0]]) / l
    zs = [embedding[n[0]]]
    for i in range(l):
        zs.append(zs[0] + i * dist)
    zs.append(embedding[n[1]])
    zs = np.array(zs)
    t_poses = decoder.predict(zs)
    # image.plot_poses(t_poses)

    x1 = np.concatenate([t_poses[:, 0], t_poses[-1, 1:]], axis=0)[::2]
    x2 = np.concatenate([t_poses[0, :], t_poses[1:, -1]], axis=0)[::2]
    x3 = np.array([t_poses[i, i] for i in range(10)])

    print x1.shape, x2.shape, x3.shape
    x_poses = np.array([x1, x2, x3])
    zs_pred = encoder.predict(x_poses)
    p_poses = decoder.predict(zs_pred[:, -1])
    image.plot_poses(t_poses, p_poses, title="Interpolation")
コード例 #12
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def go_up_hierarchy_vl(embedding, validation_data, encoder, decoder, h, q=4):
    """Compare closest-match vs distance-weighted matching at every cut.

    For ``q`` random validation sequences: builds all ``h`` truncations of
    the sequence, encodes them, then for each cut compares two matching
    strategies over the odd hierarchy levels -- the single closest embedding
    vector, and an inverse-distance-weighted blend of the 30 closest.

    NOTE(review): the subspace here is indexed ``embedding[i]`` whereas
    sibling functions use ``embedding[:, i]`` -- confirm which embedding
    layout this caller passes in.
    """
    import image
    for _ in range(q):
        n = np.random.randint(len(validation_data))
        # h copies of the sequence; copy k is zeroed after frame k
        pose = np.reshape(np.repeat(validation_data[n], h, axis=0), (h, h, -1))
        for i in range(h):
            pose[i, i + 1:] = 0
        enc = encoder.predict(pose)
        p_poses = decoder.predict(enc)
        image.plot_poses(pose[-1:], p_poses, title='')

        for cut in range(h - 1):
            z_ref = enc[cut]  # latent of the sequence truncated at `cut`
            zs_sim = [z_ref, enc[-1]]   # closest-match candidates
            zs_dist = [z_ref, enc[-1]]  # distance-weighted candidates
            for i in range(1, h, 2):
                e = embedding[i]
                weights = [np.linalg.norm(e[j] - z_ref) for j in range(len(e))]
                zs_sim.append(e[np.argmin(weights)])

                # inverse-distance-weighted mean of the 30 nearest vectors
                w_i = np.argsort(weights)[:30]
                new_e = np.sum([e[d] / weights[d]
                                for d in w_i], axis=0) / np.sum(
                                    [1.0 / weights[d] for d in w_i])
                zs_dist.append(new_e)

            sim_poses = decoder.predict(np.array(zs_sim))
            dist_poses = decoder.predict(np.array(zs_dist))

            image.plot_poses(sim_poses[:2],
                             sim_poses[2:],
                             title='Pattern matching (closest)')
            image.plot_poses(dist_poses[:2],
                             dist_poses[2:],
                             title='Pattern matching (mean)')
コード例 #13
0
ファイル: embedding_plotter.py プロジェクト: ytixu/DNNdumps
def go_up_hierarchy_dist(embedding,
                         validation_data,
                         encoder,
                         decoder,
                         h,
                         cut=1):
    """Plot inverse-distance-weighted matches up the embedding hierarchy.

    Encodes one random validation sequence, zeroes the frames past ``cut``,
    and for every other hierarchy level blends the 30 embedding vectors
    nearest the reference latent (inverse-distance weighting); the collected
    vectors are then decoded and plotted.
    """
    import image
    pick = np.random.randint(len(validation_data))
    latent = encoder.predict(validation_data[pick:pick + 1])
    partial = np.copy(validation_data[pick])
    partial[cut + 1:] = 0  # keep only the observed prefix
    z_ref = latent[0, cut]
    collected = [z_ref]
    for level in range(1, h, 2):
        subspace = embedding[:, level]
        dists = [np.linalg.norm(vec - z_ref) for vec in subspace]
        nearest = np.argsort(dists)[:30]
        # inverse-distance weighted average of the 30 nearest vectors
        blended = (np.sum([subspace[d] / dists[d] for d in nearest], axis=0)
                   / np.sum([1.0 / dists[d] for d in nearest]))
        collected.append(blended)
    decoded = decoder.predict(np.array(collected))
    image.plot_poses([partial, validation_data[pick]],
                     decoded,
                     title='Pattern matching (mean) (prediction in bold)')
コード例 #14
0
def compare_embedding(model, data_iterator):
    """Evaluate prediction via a mean latent offset against a saved baseline.

    Estimates the mean latent-space difference between the two deepest
    hierarchy levels, then for each action loads 8 saved ground-truth /
    baseline-prediction / future files, predicts the future frames by adding
    the mean offset to the encoded partial sequence, prints both errors,
    plots the sequences, and saves the predictions.

    NOTE(review): Python 2 code (``print`` statements); ``std_diff`` is
    computed but never used.
    """
    import image
    embedding = metrics.get_embedding(model,
                                      data_iterator,
                                      subspace=model.hierarchies[-2:])
    # mean offset from partial-level to complete-level representations
    mean_diff, diff = metrics.get_embedding_diffs(embedding[1], embedding[0])
    std_diff = np.std(diff, axis=0)
    cut = model.hierarchies[-2] + 1
    pred_n = model.timesteps - cut  # number of frames to predict
    for basename in iter_actions():
        print basename
        n = 8
        pose_ref = np.zeros((n, model.timesteps, model.input_dim))
        pose_pred_bl = np.zeros((n, model.timesteps - cut, model.input_dim))
        pose_gt = np.zeros((n, model.timesteps - cut, model.input_dim))

        for i in tqdm(range(n)):
            # _0: observed past, _1: baseline prediction, _2: future ground truth
            gt = np.load(LOAD_PATH + basename + '_0-%d.npy' % i)
            pd = np.load(LOAD_PATH + basename + '_1-%d.npy' % i)
            gtp = np.load(LOAD_PATH + basename + '_2-%d.npy' % i)

            pose_ref[i, :cut] = gt[-cut:]
            pose_pred_bl[i] = pd[:pred_n]
            pose_gt[i] = gtp[:pred_n]

        # shift the partial-sequence encoding by the mean offset, then decode
        new_enc = model.encoder.predict(pose_ref)[:, cut - 1] + mean_diff
        pose_pred = model.decoder.predict(new_enc)[:, :pred_n]
        error_bl = [
            metrics.__pose_seq_error(pose_gt[i], pose_pred_bl[i])
            for i in range(n)
        ]
        error = [
            metrics.__pose_seq_error(pose_gt[i], pose_pred[i])
            for i in range(n)
        ]
        print np.mean(error), np.mean(error_bl)
        image.plot_poses(pose_pred, title='rnn')
        image.plot_poses(pose_pred_bl, title='baseline')
        image.plot_poses(pose_gt, title='gt')
        np.save('../new_out/HRNN-%s.npy' % basename, pose_pred)
コード例 #15
0
def plot_best_distance_function(model, data, data_iterator, nn, n=50):
	idx = np.random.choice(data.shape[0], n, replace=False)
	enc = metrics.__get_latent_reps(model.encoder, data[idx], model.MODEL_CODE, n=model.hierarchies)
	nn_pred_z = nn.model.predict(enc[:,-2])
	N = 3
	colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]

	avg_error_raw = np.zeros(N)
	avg_error_lat = np.zeros(N)

	errors = np.zeros(n+2)
	dists = np.zeros(n+2)
	zs = np.zeros((N,model.latent_dim))
	poses_plot = np.zeros((N+3, model.timesteps, model.input_dim-model.label_dim))

	emb = metrics.get_label_embedding(model, data_iterator, subspaces=model.hierarchies)
	cut = model.hierarchies[-2]+1
	# new_e = np.zeros((N,n,model.latent_dim))
	for j in tqdm(range(n)):
		z_ref = enc[j,-2]
		z_true = enc[j,-1]
		p_enc_dec = metrics.__get_decoded_reps(model.decoder, np.array([z_ref, z_true, nn_pred_z[j]]), model.MODEL_CODE)
		poses_plot[:3] = p_enc_dec[:,:,:-model.label_dim]

		for i in range(N):
			dist_name = metrics.__dist_name__(i)
			ls = emb[:,-1]
			weights, w_i = metrics.__get_weights(ls, z_ref, mode=i)
			zs[i] = ls[w_i[0]]

			preds = metrics.__get_decoded_reps(model.decoder, ls[w_i[:n]], model.MODEL_CODE)
			poses_plot[3+i] = preds[0,:,:-model.label_dim]
			for k in range(n):
				errors[k] = metrics.__pose_seq_error(preds[k,:,:-model.label_dim],data[idx[j],:,:-model.label_dim])
				dists[k] = metrics.__distance__(ls[w_i[k]], z_true, mode=i)

			errors[-2] = metrics.__pose_seq_error(p_enc_dec[0,:cut,:-model.label_dim],data[idx[j],:cut,:-model.label_dim])
			dists[-2] = metrics.__distance__(z_ref, z_true, mode=i)
			errors[-1] = metrics.__pose_seq_error(p_enc_dec[-1,:,:-model.label_dim],data[idx[j],:,:-model.label_dim])
			dists[-1] = metrics.__distance__(nn_pred_z[j], z_true, mode=i)

			plt.scatter(dists, errors, label=dist_name, s=30, c=colors[i])
			if i == N-1:
				plt.scatter(dists[:1], errors[:1], c='black', alpha='0.3', s=100, label='closest')
				plt.scatter(dists[-1:], errors[-1:], c=colors[-1], alpha='0.3', s=100, label='FN')
				plt.scatter(dists[-2:-1], errors[-2:-1], c='red', alpha='0.3', s=100, label='dec-part')
			else:
				plt.scatter(dists[:1], errors[:1], c='black', alpha='0.3', s=100)
				plt.scatter(dists[-1:], errors[-1:], c=colors[-1], alpha='0.3', s=100)
				plt.scatter(dists[-2:-1], errors[-2:-1], c='red', alpha='0.3', s=100)
			avg_error_raw[i] += errors[0]

		error = metrics.__pose_seq_error(p_enc_dec[1,:,:-model.label_dim],data[idx[j],:,:-model.label_dim])
		plt.scatter([0], [error], label='dec-comp', c=colors[3])

		plt.legend()
		plt.xlabel('distance to comp. seq. rep.')
		plt.ylabel('error in raw space')
		plt.title('distance vs error in raw space (sample %d)'%(j))
                plt.savefig('../new_out/__plot_best_distance_function_%d-1.png'%(j))
                plt.close()

		for i in range(N):
			dist_name = metrics.__dist_name__(i)
			error = [metrics.__latent_error(z, z_true) for z in [zs[i], nn_pred_z[j], z_true]]
			dist = [metrics.__distance__(z, z_ref, mode=i) for z in [zs[i], nn_pred_z[j], z_true]]
			plt.scatter(dist, error, label=dist_name, s=30, c=colors[i])
			if i == N-1:
				plt.scatter(dist[-1:], error[-1:], c='black', alpha='0.3', s=100, label='true')
				plt.scatter(dist[-2:-1], error[-2:-1], c=colors[-1], alpha='0.3', s=100, label='FN')
			else:
				plt.scatter(dist[-1:], error[-1:], c='black', alpha='0.3', s=100)
				plt.scatter(dist[-2:-1], error[-2:-1], c=colors[-1], alpha='0.3', s=100)
			avg_error_lat[i] += error[0]

		plt.legend()
		plt.xlabel('distance to part. seq. rep.')
		plt.ylabel('error in latent space')
		plt.title('distance vs error in latent space(sample %d)'%(j))
		plt.savefig('../new_out/__plot_best_distance_function_%d-2.png'%(j))
		plt.close()

		image.plot_poses([data[idx[j],:,:-model.label_dim]], poses_plot, title='part-comp-l2-l1-cos (sample %d)'%j, image_dir='../new_out/')

	tot = N*n
	print 'Avg error in raw space'
	print avg_error_raw / tot
	print 'Avg error in latent space'
	print avg_error_lat / tot
コード例 #16
0
def compare(model, data_iterator):
    import image
    h = model.timesteps
    embedding = metrics.get_embedding(model, data_iterator,
                                      model.timesteps - 1)
    # methods = ['Closest', 'Mean-45', 'Mean-75', 'Mean-100', 'Random-5000', 'Random-10000', 'Multi']
    methods = [
        'Add', 'Add-Closest', 'Add-Mean-30', 'Add-Mean-45', 'Add-Mean-75'
    ]
    # methods = ['Closest', 'Mean-30', 'Mean-45', 'Random-5000', 'Random-10000']
    for k, cut in enumerate(model.hierarchies[-2:-1]):
        # k = (h-cut)/3
        errors = {}
        errors_ = {}
        for basename in iter_actions():
            errors[basename] = []
            errors_[basename] = []
            print basename
            for i in tqdm(range(8)):
                gt = np.load(LOAD_PATH + basename + '_0-%d.npy' % i)
                pd = np.load(LOAD_PATH + basename + '_1-%d.npy' % i)
                gtp = np.load(LOAD_PATH + basename + '_2-%d.npy' % i)
                if model.MODEL_CODE == metrics.HL_LSTM:
                    l = np.zeros((gt.shape[0], model.label_dim))
                    l[:, model.labels[basename]] = 1
                    gt = np.concatenate((gt, l), axis=1)

                # pose = metrics.__get_next_half(embedding, gt[-10:], encoder, decoder, h, model_name)
                # poses = metrics.__get_consecutive(embedding, gt[-h:], model, cut+1, k)
                # poses = metrics.__get_consecutive_multi(embedding, gt[-h:], model, cut+1, k)
                poses = metrics.__get_consecutive_add(embedding, gt[-h:],
                                                      model, cut + 1)
                ground_truth = metrics.__autoencode(model.autoencoder,
                                                    np.array([gtp[:h]]),
                                                    model.MODEL_CODE)
                ground_truth = np.reshape(
                    ground_truth, (-1, model.timesteps, model.input_dim))[-1]
                # pose_ref = poses[0]
                # poses = poses[1:]
                n = poses.shape[1]
                # metrics.__get_embedding_path(gt, encoder, decoder, h, model_name)
                if len(errors[basename]) == 0:
                    errors[basename] = np.zeros((8, poses.shape[0], n))
                    errors_[basename] = np.zeros((8, 4, n))
                for j in range(n):
                    for k, p in enumerate(poses):
                        errors[basename][i, k, j] = metrics.__pose_seq_error(
                            gtp[:j + 1], p[:j + 1])
                    errors_[basename][i, 3, j] = metrics.__pose_seq_error(
                        gtp[:j + 1], pd[:j + 1])

                errors_[basename][i, 0, :] = metrics.__zeros_velocity_error(
                    gt[-n:], gtp[:n])[:]
                errors_[basename][i, 1, :] = metrics.__average_2_error(
                    gt[-n:], gtp[:n])[:]
                errors_[basename][i, 2, :] = metrics.__average_4_error(
                    gt[-n:], gtp[:n])[:]

                # image.plot_poses([gt[-h:], gtp[:h]], [pose[:h], pd[:h]])
                best_error = np.min(errors[basename][i, :, -1])
                b_error = errors_[basename][i, -1, -1]
                image_dir = '../results/t30-l200/low_error/refined-add/'
                if best_error > errors_[basename][i, -1, -1]:
                    image_dir = '../results/t30-l200/high_error/refined-add/'

                image.plot_poses(
                    [gt[-n:], gtp[:n], ground_truth[:n]],
                    np.concatenate([poses, [pd[:n]]], axis=0),
                    title='%6.3f-%6.3f (%s - C, M, R-5000-10000, B) %d-%d' %
                    (b_error, best_error / b_error, basename, cut, k),
                    image_dir=image_dir)
            # print basename, mean_err[[1,3,7,9,-1]]
            # if basename in ['walking', 'eating', 'smoking', 'discussion']:
            x = np.arange(n) + 1
            for k, method in enumerate(methods):
                mean_err = np.mean(errors[basename][:, k, :], axis=0)
                plt.plot(x, mean_err, label=method)
            for k, method in enumerate(
                ['0 velocity', 'avg-2', 'avg-4', 'baseline']):
                mean_err_ = np.mean(errors_[basename][:, k, :], axis=0)
                plt.plot(x, mean_err_, '--', label=method)

            plt.legend()
            plt.title('%s-%d' % (basename, cut))
            # plt.show()
            plt.savefig('../results/t30-l200/graphs/refined-add/%s-%d-%d.png' %
                        (basename, cut, k))
            plt.close()

        errors = np.reshape(np.array(errors.values()), (-1, poses.shape[0], n))
        errors_ = np.reshape(np.array(errors_.values()), (-1, 4, n))
        for k, method in enumerate(methods):
            mean_err = np.mean(errors[:, k, :], axis=0)
            plt.plot(x, mean_err, label=method)
        for k, method in enumerate(
            ['0 velocity', 'avg-2', 'avg-4', 'baseline']):
            mean_err_ = np.mean(errors_[:, k, :], axis=0)
            plt.plot(x, mean_err_, '--', label=method)

        plt.legend()
        plt.title('Total-%d' % (cut))
        # plt.show()
        plt.savefig('../results/t30-l200/graphs/refined-add/total-%d-%d.png' %
                    (cut, k))
        plt.close()