Example #1
def main():
    training_images, training_labels, testing_images, testing_labels = get_data(CIFAR10_FOLDER)

    (training_set, training_labels,
     testing_set, testing_labels) = subsample_data(training_images, training_labels,
                                                   testing_images, testing_labels,
                                                   training_num=5000, testing_num=500)
    normalize_data(training_set, testing_set)
    cross_validate_all_classifiers(training_set, training_labels,
                                   testing_set, testing_labels)
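
Note: in this example `normalize_data` mutates the arrays in place (its return value is ignored). A minimal sketch consistent with that usage, assuming float arrays and mean-image subtraction computed on the training split only (hypothetical, not the original helper):

import numpy as np

def normalize_data(training_set, testing_set):
    # Assumes float arrays. Subtract the per-pixel mean of the training
    # split from both splits, modifying them in place.
    mean_image = training_set.mean(axis=0)
    training_set -= mean_image
    testing_set -= mean_image
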
Example #2
    def read_all_data(self, actions, data_dir, one_hot=False):
        """
        Loads data for training/testing and normalizes it.
        
        Args
        actions: list of strings (actions) to load
        data_dir: directory to load the data from
        one_hot: whether to use one-hot encoding per action
        Returns
        train_set: dictionary with normalized training data
        test_set: dictionary with test data
        data_mean: d-long vector with the mean of the training data
        data_std: d-long vector with the standard dev of the training data
        dim_to_ignore: dimensions that are not used because stdev is too small
        dim_to_use: dimensions that we are actually using in the model
        """

        train_subject_ids = [1, 6, 7, 8, 9, 11]
        test_subject_ids = [5]

        train_set, complete_train = data_utils.load_data(
            data_dir, train_subject_ids, actions, one_hot)
        test_set, complete_test = data_utils.load_data(data_dir,
                                                       test_subject_ids,
                                                       actions, one_hot)

        # Compute normalization stats
        data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(
            complete_train)

        # Normalize -- subtract mean, divide by stdev
        train_set = data_utils.normalize_data(train_set, data_mean, data_std,
                                              dim_to_use, actions, one_hot)
        test_set = data_utils.normalize_data(test_set, data_mean, data_std,
                                             dim_to_use, actions, one_hot)
        print("done reading data.")

        self.train_set = train_set
        self.test_set = test_set

        self.data_mean = data_mean
        self.data_std = data_std

        self.dim_to_ignore = dim_to_ignore
        self.dim_to_use = dim_to_use

        self.train_keys = list(self.train_set.keys())
Example #3
def read_all_data(actions=walking_lst,
                  seq_length_in=50,
                  seq_length_out=25,
                  data_dir="./data/h3.6m/dataset",
                  one_hot=True):
    """
  Loads data for training/testing and normalizes it.

  Args
    actions: list of strings (actions) to load
    seq_length_in: number of frames to use in the burn-in sequence
    seq_length_out: number of frames to use in the output sequence
    data_dir: directory to load the data from
    one_hot: whether to use one-hot encoding per action
  Returns
    train_set: dictionary with normalized training data
    test_set: dictionary with test data
    data_mean: d-long vector with the mean of the training data
    data_std: d-long vector with the standard dev of the training data
    dim_to_ignore: dimensions that are not used because stdev is too small
    dim_to_use: dimensions that we are actually using in the model
  """

    # === Read training data ===
    print("Reading training data (seq_len_in: {0}, seq_len_out {1}).".format(
        seq_length_in, seq_length_out))

    train_subject_ids = [1, 6, 7, 8, 9, 11]
    test_subject_ids = [5]

    train_set, complete_train = data_utils.load_data(data_dir,
                                                     train_subject_ids,
                                                     actions, one_hot)
    test_set, complete_test = data_utils.load_data(data_dir, test_subject_ids,
                                                   actions, one_hot)

    # Compute normalization stats
    data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(
        complete_train)

    # Normalize -- subtract mean, divide by stdev
    train_set = data_utils.normalize_data(train_set, data_mean, data_std,
                                          dim_to_use, actions, one_hot)
    test_set = data_utils.normalize_data(test_set, data_mean, data_std,
                                         dim_to_use, actions, one_hot)
    print("done reading data.")

    return train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use
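
For reference, the comments above describe the normalization as "subtract mean, divide by stdev" while ignoring dimensions whose standard deviation is too small. A simplified, hypothetical sketch of that pattern for dictionary-keyed sequences (the real data_utils functions also handle the per-action one-hot suffix):

import numpy as np

def normalization_stats(complete_data, eps=1e-4):
    # Mean/std over all frames; near-constant dimensions are flagged to be ignored.
    data_mean = complete_data.mean(axis=0)
    data_std = complete_data.std(axis=0)
    dim_to_ignore = np.where(data_std < eps)[0]
    dim_to_use = np.where(data_std >= eps)[0]
    data_std[dim_to_ignore] = 1.0  # avoid division by zero
    return data_mean, data_std, dim_to_ignore, dim_to_use

def normalize_data(sequences, data_mean, data_std, dim_to_use):
    # Z-score each sequence and keep only the informative dimensions.
    return {key: ((seq - data_mean) / data_std)[:, dim_to_use]
            for key, seq in sequences.items()}
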
Example #4
def perform_noisy_actions(train_set_noisy, train_set_2d_org, camera_frame,
                          rcams, predict_14, percent):
    #validate(train_set, train_set)
    print('Performing noisy actions now..')
    train_set_3d_noisy = addNoiseTo3D(train_set_noisy, percent)

    # Got the 3D data; now project it to the 2D cameras
    train_set_2d_noisy = du.project_to_cameras(train_set_3d_noisy, rcams)

    # Merge the (possibly augmented) 2D data into the original set
    train_set_2d_org.update(train_set_2d_noisy)

    # Compute normalization statistics.
    complete_train = copy.deepcopy(np.vstack(train_set_2d_org.values()))
    data_mean, data_std, dim_to_ignore, dim_to_use = du.normalization_stats(
        complete_train, dim=2)

    # Divide every dimension independently
    train_set_2d_org = du.normalize_data(train_set_2d_org, data_mean, data_std,
                                         dim_to_use)
    print("For noisy, mean :")
    print(data_mean)
    print("For noisy, std :")
    print(data_std)
    print('noisy actions done..')
    return train_set_2d_org, data_mean, data_std
Example #5
    def create_task(self, ind_task, x_tr, y_tr, x_te, y_te):

        # Select only the classes that belong to this task
        class_min, class_max, i_tr = self.select_index(ind_task, y_tr)
        _, _, i_te = self.select_index(ind_task, y_te)

        i_tr, i_va = self.get_valid_ind(i_tr)

        x_tr = normalize_data(self.dataset, x_tr)
        x_te = normalize_data(self.dataset, x_te)

        x_tr_t = self.transformation(ind_task, x_tr[i_tr])
        x_va_t = self.transformation(ind_task, x_tr[i_va])
        x_te_t = self.transformation(ind_task, x_te[i_te])

        y_tr_t = self.label_transformation(ind_task, y_tr[i_tr])
        y_va_t = self.label_transformation(ind_task, y_tr[i_va])
        y_te_t = self.label_transformation(ind_task, y_te[i_te])

        return class_min, class_max, x_tr_t, y_tr_t, x_va_t, y_va_t, x_te_t, y_te_t
Example #6
def predict_on_stocks(array: numpy.ndarray, model_path: str, interval: str,
                      stock_path: str):
    scaler = StandardScaler()
    open_data, close_data = init_data(array)

    open_data, close_data = normalize_data(open_data, close_data, scaler)

    (x_train, y_train, x_test, y_test) = split_data(open_data, close_data)
    (x_train, y_train) = shuffle_data(x_train, y_train)

    (model, checkpoint_callback) = create_model(model_path)
    model.fit(x_train,
              y_train,
              validation_data=(x_test, y_test),
              batch_size=64,
              epochs=EPOCHS,
              callbacks=[checkpoint_callback])

    # test_model(model, x_test, y_test, scaler, interval)  # uncomment to evaluate the model on the test set

    dump(scaler, f'{model_path}/std_scaler.bin', compress=True)
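
The normalize_data helper here receives a scikit-learn StandardScaler, and the fitted scaler is later serialized with dump() so inference code can reuse the same transform. A plausible minimal implementation (an assumption; the helper itself is not shown):

import numpy as np
from sklearn.preprocessing import StandardScaler

def normalize_data(open_data, close_data, scaler: StandardScaler):
    # Fit the scaler on the two series jointly, then return them scaled.
    stacked = np.column_stack((np.ravel(open_data), np.ravel(close_data)))
    scaled = scaler.fit_transform(stacked)
    return scaled[:, 0:1], scaled[:, 1:2]
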
Example #7
def process_images(names, out_loc, mean=None, sd=None):
    print('Names: ', names)
    dataset = NORBDataset(dataset_root='/dfs/scratch1/thomasat/datasets/norb', names=names)

    Xs = []
    Ys = []

    print('Dataset names: ', dataset.data.keys())

    for name in names:
        X, Y = process_data(dataset.data[name])
        print('X,Y shape: ', X.shape, Y.shape)
        Xs.append(X)
        Ys.append(Y)

    X = np.vstack(Xs)
    Y = np.vstack(Ys)

    # Shuffle
    idx = np.arange(0, X.shape[0])  
    np.random.shuffle(idx)
    X = X[idx,:]
    Y = Y[idx,:]

    if mean is None and sd is None:
        X, mean, sd  = normalize_data(X)
        print('X, Y: ', X.shape, Y.shape)
    else:
        X = apply_normalization(X,mean,sd)

    # Save
    data_dict = {'X': X, 'Y': Y}

    pkl.dump(data_dict, open(out_loc, 'wb'), protocol=2)

    return mean,sd
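
Examples #7 and #10 compute normalization statistics on the training split and reuse them on held-out data. A minimal sketch of what normalize_data / apply_normalization likely do (per-feature z-scoring; hypothetical simplification):

import numpy as np

def normalize_data(X, eps=1e-8):
    # Per-feature z-scoring; return the stats so they can be reused later.
    mean = X.mean(axis=0)
    sd = X.std(axis=0) + eps  # guard against constant features
    return (X - mean) / sd, mean, sd

def apply_normalization(X, mean, sd):
    # Apply previously computed training statistics to held-out data.
    return (X - mean) / sd
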
Example #8
def get_testing_batch():
    while True:
        for sequence in test_loader:
            batch = data_utils.normalize_data(opt, dtype, sequence)
            yield batch
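
A generator like this is typically created once and then consumed with next(), so each evaluation step pulls a freshly normalized batch. Illustrative usage only:

# Pull normalized test batches from the infinite generator.
testing_batch_generator = get_testing_batch()
for step in range(10):
    test_x = next(testing_batch_generator)
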
Example #9
    def preprocess(self):
        self.mappings = enumerate_strings(self.df)
        self.df = normalize_data(self.df, self.target)
        self.df.head()
Example #10
    enc = OneHotEncoder()
    Y = enc.fit_transform(Y).todense()
    return X, Y


train_loc = '/dfs/scratch1/thomasat/datasets/convex/convex_train.amat'
test_loc = '/dfs/scratch1/thomasat/datasets/convex/50k/convex_test.amat'
train_out = '/dfs/scratch1/thomasat/datasets/convex/train_normalized'
test_out = '/dfs/scratch1/thomasat/datasets/convex/test_normalized'

train_data = np.genfromtxt(train_loc)
train_X, train_Y = process_data(train_data)

test_data = np.genfromtxt(test_loc)
test_X, test_Y = process_data(test_data)

# Normalize
train_X, mean, sd = normalize_data(train_X)
test_X = apply_normalization(test_X, mean, sd)

# Save
print('test_X, test_Y shape: ', test_X.shape, test_Y.shape)
print('train_X, train_Y shape: ', train_X.shape, train_Y.shape)
train = {'X': train_X, 'Y': train_Y}
test = {'X': test_X, 'Y': test_Y}

pkl.dump(train, open(train_out, 'wb'), protocol=2)
pkl.dump(test, open(test_out, 'wb'), protocol=2)
print('Saved train to: ', train_out)
print('Saved test to: ', test_out)
Example #11
#!/usr/bin/env python

import data_utils as a1
import numpy as np
import matplotlib.pyplot as plt

(countries, features, values) = a1.load_unicef_data()

targets = values[:,1]
x = values[:,7:]
x = a1.normalize_data(x)

N_TRAIN = 100
x_train = x[0:N_TRAIN,:]
x_test = x[N_TRAIN:,:]
t_train = targets[0:N_TRAIN]
t_test = targets[N_TRAIN:]


train_err = dict()
test_err = dict()
for i in range(1,7):
    (w,train_err[i]) = a1.linear_regression(x_train,t_train,'polynomial',degree = i)
    (t_est,test_err[i]) = a1.evaluate_regression(x_test,t_test,w,'polynomial',degree = i)

# Produce a plot of results.
plt.plot(list(train_err.keys()), list(train_err.values()))
plt.plot(list(test_err.keys()), list(test_err.values()))
plt.ylabel('RMS')
plt.legend(['Training error','Test error'])
plt.title('Fit with polynomials, no regularization')
Example #12
import pandas as pd
import warnings
import os

# suppress warnings
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# read data
data = pd.read_csv("prices-split-adjusted.csv", index_col=0)
data['adj close'] = data.close
data.drop(['close'], axis=1, inplace=True)
data = data[data.symbol == 'GOOG']
data.drop(['symbol'], axis=1, inplace=True)

# normalize data
df = normalize_data(data)

# load data
window = 22
x_train, y_train, x_test, y_test = load_data(df, seq_len=window)

# create model and fit dataset
model = lstm_rnn_model([5, window, 1])
model.fit(x_train,
          y_train,
          batch_size=512,
          epochs=90,
          validation_split=0.1,
          verbose=1)

diff = []
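
normalize_data is not shown in this snippet; a common choice for a price DataFrame like this one (an assumption, not necessarily what the original code does) is min-max scaling every column to [0, 1] while keeping the index and column names:

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

def normalize_data(df: pd.DataFrame) -> pd.DataFrame:
    # Scale each column to [0, 1]; preserve the original index and columns.
    scaler = MinMaxScaler()
    return pd.DataFrame(scaler.fit_transform(df.values),
                        columns=df.columns, index=df.index)
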
Example #13
import data_utils
from viz import Ax3DPose

data_dir = './data/h36m/dataset'

train_subject_ids = [1, 6, 7, 8, 9, 11]
test_subject_ids = [5]
actions = [
    "walking", "eating", "smoking", "discussion", "directions", "greeting",
    "phoning", "posing", "purchases", "sitting", "sittingdown", "takingphoto",
    "waiting", "walkingdog", "walkingtogether"
]
one_hot = True

train_set, complete_train = data_utils.load_data(data_dir, train_subject_ids,
                                                 actions, one_hot)
test_set, complete_test = data_utils.load_data(data_dir, test_subject_ids,
                                               actions, one_hot)

data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(
    complete_train)

train_set = data_utils.normalize_data(train_set, data_mean, data_std,
                                      dim_to_use, actions, one_hot)
test_set = data_utils.normalize_data(test_set, data_mean, data_std, dim_to_use,
                                     actions, one_hot)

print("done reading data.")

print(train_set[(1, "walking", 1, "even")])
Example #14
for file_name in sorted(os.listdir(args.input_dir)):
    file_path = os.path.abspath(os.path.join(args.input_dir, file_name))
    # Extract the cell_id from the file name.
    cell_id = get_base_file_name(file_name)

    # Load or generate synthetic level set data
    # if params_dict['genData']:
    if False:
        # TODO: data = genSphereLevelSet(DEFAULT_MLE_SPHERE_PARAM_DICT, bounding_box, param_dict, nLevelSet)
        meanR = [0, 0, 0]
        # TODO: save(PrjCtrl.inputFileLevelSetDataM,'data')
    else:
        data = data_utils.load_raw3d_data(file_path)
        log_fh.write("First 5 data points before normalization: {}\n".format(
            data[:5]))
        data, meanR = data_utils.normalize_data(data, log_fh)
        log_fh.write(
            "\nFirst 5 data points after normalization: {}\n\n".format(
                data[:5]))
        log_fh.write("mean radius {}".format(meanR))
        # TODO: nLevelSet = data.shape[0]

    # Set summary parameters
    param_ss = param_ss.ParamSS(data.shape[0], meanR)

    # Starting value for parameters
    if param_dict['randomStart']:
        mles_param_dict = mle_sphere.mle_sphere(data, cell_id, param_dict,
                                                log_fh)
    else:
        mles_param_dict = DEFAULT_MLE_SPHERE_PARAM_DICT
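
Here normalize_data returns both the normalized points and meanR, which is logged as a "mean radius" but defaults to [0, 0, 0] in the synthetic branch, so it is probably the centroid that gets subtracted. A hypothetical sketch under that reading (not the original helper):

import numpy as np

def normalize_data(data, log_fh=None):
    # data: (N, 3) level-set points. Subtract the centroid and return it as meanR.
    meanR = data.mean(axis=0)
    if log_fh is not None:
        log_fh.write("centroid (meanR): {}\n".format(meanR))
    return data - meanR, meanR
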
Example #15
    def sample(self, input_):
        """Sample predictions for srnn's seeds"""
        actions = ["posing"]

        if True:
            # === Create the model ===
            # print("Creating %d layers of %d units." % (args.num_layers, args.size))
            # sampling     = True
            # model = create_model(actions, sampling)
            if not self.args.use_cpu:
                self.model = self.model.cuda()
            # print("Model created")

            # Load all the data
            # train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
            #   actions, args.seq_length_in, args.seq_length_out, args.data_dir, not args.omit_one_hot )

            #"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
            nactions = len(actions)
            subjects = [5]
            testData = {}

            tmp = input_
            # start = time.time()
            for subj in subjects:
                for action_idx in np.arange(len(actions)):

                    action = actions[action_idx]

                    for subact in [1]:  # subactions

                        action_sequence = tmp

                        n, d = action_sequence.shape
                        even_list = range(0, n, 2)

                        if not self.args.omit_one_hot:
                            # Add a one-hot encoding at the end of the representation
                            the_sequence = np.zeros(
                                (len(even_list), d + nactions), dtype=float)
                            the_sequence[:, 0:d] = action_sequence[even_list, :]
                            the_sequence[:, d + action_idx] = 1
                            testData[(subj, action, subact, 'even')] = the_sequence
                        else:
                            testData[(subj, action, subact, 'even')] = action_sequence[even_list, :]

            test_set = data_utils.normalize_data(testData, self.data_mean,
                                                 self.data_std,
                                                 self.dim_to_use, actions,
                                                 not self.args.omit_one_hot)
            #"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"

            # === Read and denormalize the gt with srnn's seeds, as we'll need them
            # many times for evaluation in Euler Angles ===

            #srnn_gts_expmap = self.get_srnn_gts( actions, self.model, test_set, self.data_mean,
            #self.data_std, self.dim_to_ignore, not self.args.omit_one_hot, to_euler=False )
            # srnn_gts_expmap is the ground truth!!
            # print(data_mean.shape)
            # print(data_std.shape)
            # # srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
            #                           data_std, dim_to_ignore, not args.omit_one_hot )

            # Clean and create a new h5 file of samples
            # SAMPLES_FNAME = 'samples.h5'
            # try:
            #   os.remove( SAMPLES_FNAME )
            # except OSError:
            #   pass

            # Predict and save for each action
            for action in actions:

                # Make prediction with srnn' seeds
                encoder_inputs, decoder_inputs = self.model.get_batch_srnn(
                    test_set, action, False)
                # print("shape:",encoder_inputs.shape)
                # print(decoder_inputs.shape)
                encoder_inputs = torch.from_numpy(encoder_inputs).float()
                decoder_inputs = torch.from_numpy(decoder_inputs).float()
                #decoder_outputs = torch.from_numpy(decoder_outputs).float()
                if not self.args.use_cpu:
                    encoder_inputs = encoder_inputs.cuda()
                    decoder_inputs = decoder_inputs.cuda()
                    #decoder_outputs = decoder_outputs.cuda()
                encoder_inputs = Variable(encoder_inputs)
                decoder_inputs = Variable(decoder_inputs)
                #decoder_outputs = Variable(decoder_outputs)

                srnn_poses = self.model(encoder_inputs, decoder_inputs)
                # print(encoder_inputs.shape)
                # print(decoder_inputs.shape)
                #srnn_loss = (srnn_poses - decoder_outputs)**2
                #srnn_loss.cpu().data.numpy()
                #srnn_loss = srnn_loss.mean()

                srnn_poses = srnn_poses.cpu().data.numpy()
                srnn_poses = srnn_poses.transpose([1, 0, 2])

                #srnn_loss = srnn_loss.cpu().data.numpy()
                # denormalizes too
                srnn_pred_expmap = data_utils.revert_output_format(
                    srnn_poses, self.data_mean, self.data_std,
                    self.dim_to_ignore, actions, not self.args.omit_one_hot)

        return srnn_pred_expmap[0]
Example #16
def main(_):
    actions_all = data_utils.define_actions("All")
    actions = data_utils.define_actions("Discussion")

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)
    train_set_3d = data_utils.remove_first_frame(train_set_3d)
    test_set_3d = data_utils.remove_first_frame(test_set_3d)
    train_root_positions = data_utils.remove_first_frame(train_root_positions)
    test_root_positions = data_utils.remove_first_frame(test_root_positions)
    print("Finished Read 3D Data")

    # train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions_all, FLAGS.data_dir)
    # train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.transform_to_2d_biframe_prediction(train_set_2d,
    #                                                                                                                                        test_set_2d,
    #                                                                                                                                        data_mean_2d,
    #                                                                                                                                        data_std_2d,
    #                                                                                                                                        dim_to_ignore_2d,
    #                                                                                                                                        dim_to_use_2d)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
        actions_all, FLAGS.data_dir, rcams)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.transform_to_2d_biframe_prediction(
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d,
        dim_to_use_2d)

    SH_TO_GT_PERM = np.array(
        [SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])
    assert np.all(SH_TO_GT_PERM == np.array(
        [6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))

    test_set = {}

    manipulation_dir = os.path.dirname(FLAGS.data_dir)
    manipulation_dir = os.path.dirname(manipulation_dir)
    manipulation_dir += '/manipulation_video/'
    manipulation_folders = glob.glob(manipulation_dir + '*')

    subj = 1
    action = 'manipulation-video'
    for folder in manipulation_folders:
        seqname = os.path.basename(folder)
        with h5py.File(folder + '/' + seqname + '.h5', 'r') as h5f:
            poses = h5f['poses'][:]

            # Permute the loaded data to make it compatible with H36M
            poses = poses[:, SH_TO_GT_PERM, :]

            # Reshape into n x (32*2) matrix
            poses = np.reshape(poses, [poses.shape[0], -1])
            poses_final = np.zeros([poses.shape[0], len(H36M_NAMES) * 2])

            dim_to_use_x = np.where(
                np.array([x != '' and x != 'Neck/Nose'
                          for x in H36M_NAMES]))[0] * 2
            dim_to_use_y = dim_to_use_x + 1

            dim_to_use = np.zeros(len(SH_NAMES) * 2, dtype=np.int32)
            dim_to_use[0::2] = dim_to_use_x
            dim_to_use[1::2] = dim_to_use_y
            poses_final[:, dim_to_use] = poses

            print(seqname, poses_final.shape)
            poses_final[poses_final == 0.] = 0.1
            test_set[(subj, action, seqname)] = poses_final

    test_set = data_utils.uni_frame_to_bi_frame(test_set)
    test_set_2d = data_utils.normalize_data(test_set, data_mean_2d,
                                            data_std_2d, dim_to_use_2d)
    for key in test_set.keys():
        test_set[key] = test_set[key][0::2, :]

    dim_to_use_12_manipulation_joints = np.array([
        3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 51,
        52, 53, 54, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83
    ])

    print("Finished Normalize Manipualtion Videos")
    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        batch_size = FLAGS.batch_size  # Initial code used 64*2
        model = predict_3dpose_biframe.create_model(sess, actions_all,
                                                    batch_size)
        print("Model loaded")

        j = 0
        for key2d in test_set_2d.keys():

            (subj, b, fname) = key2d
            # if fname !=  specific_seqname + '.h5':
            #     continue
            print("Subject: {}, action: {}, fname: {}".format(subj, b, fname))

            enc_in = test_set_2d[key2d]
            n2d, _ = enc_in.shape

            # Split into about-same-size batches
            enc_in = np.array_split(enc_in, n2d // 1)
            all_poses_3d = []

            for bidx in range(len(enc_in)):

                # Dropout probability 0 (keep probability 1) for sampling
                dp = 1.0
                anything = np.zeros((enc_in[bidx].shape[0], 48))
                _, _, poses3d = model.step(sess,
                                           enc_in[bidx],
                                           anything,
                                           dp,
                                           isTraining=False)

                # Denormalize
                enc_in[bidx] = data_utils.unNormalizeData(
                    enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                all_poses_3d.append(poses3d)

            # Put all the poses together
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])

            enc_in, poses3d = map(np.vstack, [enc_in, poses3d])

            poses3d_12_manipulation = poses3d[:, dim_to_use_12_manipulation_joints]

            annotated_images = glob.glob(manipulation_dir + fname +
                                         '/info/*.xml')
            annotated_images = sorted(annotated_images)

            # 1080p	= 1,920 x 1,080
            fig = plt.figure(j, figsize=(10, 10))
            gs1 = gridspec.GridSpec(3, 3)
            gs1.update(wspace=-0, hspace=0.1)  # set the spacing between axes.
            plt.axis('off')

            subplot_idx = 1
            nsamples = 3
            for i in np.arange(nsamples):
                # Plot 2d Detection
                ax1 = plt.subplot(gs1[subplot_idx - 1])
                img = mpimg.imread(
                    manipulation_dir + fname + '/skeleton_cropped/' +
                    os.path.basename(annotated_images[i]).split('_')[0] +
                    '.jpg')
                ax1.imshow(img)

                # Plot 2d pose
                ax2 = plt.subplot(gs1[subplot_idx])
                # p2d = enc_in[i,:]
                # viz.show2Dpose( p2d, ax2 )
                # ax2.invert_yaxis()
                ax2.imshow(img)

                # Plot 3d predictions
                # First compute the Procrustes alignment and print the error
                gt = getJ3dPosFromXML(annotated_images[i])
                A = poses3d_12_manipulation[i, :].reshape(gt.shape)
                _, Z, T, b, c = procrustes.compute_similarity_transform(
                    gt, A, compute_optimal_scale=True)
                sqerr = np.sqrt(np.sum((gt - (b * A.dot(T)) - c)**2, axis=1))
                print("{0} - {1} - Mean Error (mm) : {2}".format(
                    fname, os.path.basename(annotated_images[i]),
                    np.mean(sqerr)))

                ax3 = plt.subplot(gs1[subplot_idx + 1], projection='3d')
                temp = poses3d[i, :].reshape((32, 3))
                temp = c + temp.dot(T)  #Do not scale
                # p3d = temp.reshape((1, 96))
                p3d = poses3d[i, :]
                viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")
                ax3.invert_zaxis()
                ax3.invert_yaxis()

                subplot_idx = subplot_idx + 3

            plt.show()
            j += 1
print("Normalized: " + str(NORMALIZED))
print("Center Data: " + str(CENTER_DATA))

if (".h5" in TEST_FILE):
    TEST_DATA, TEST_LABELS = data_utils.load_h5(TEST_FILE)
else:
    TEST_DATA, TEST_LABELS = data_utils.load_data(TEST_FILE,
                                                  NUM_POINT,
                                                  with_bg_pl=WITH_BG)

if (CENTER_DATA):
    TEST_DATA = data_utils.center_data(TEST_DATA)

if (NORMALIZED):
    TEST_DATA = data_utils.normalize_data(TEST_DATA)


def log_string(out_str):
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)


def corrupt(batch_data):
    output = []
    for i in range(batch_data.shape[0]):
        pc = batch_data[i, :, :]
        pc = pc[pc[:, 2] > THRESH, :]
        output.append(pc)
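
For point-cloud inputs like these, center_data and normalize_data commonly mean translating each cloud to the origin and scaling it into the unit sphere; a sketch under that assumption (not taken from the original repo):

import numpy as np

def center_data(pcs):
    # pcs: (B, N, 3) batch of point clouds; subtract each cloud's centroid.
    return pcs - pcs.mean(axis=1, keepdims=True)

def normalize_data(pcs):
    # Scale each cloud so its farthest point lies on the unit sphere.
    furthest = np.linalg.norm(pcs, axis=2, keepdims=True).max(axis=1, keepdims=True)
    return pcs / furthest
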
Example #18
def read_all_data(actions, data_dir, one_hot, GT=False):
    """
  Loads data for training/testing and normalizes it.

  Args

  Returns

  """

    # === Read training data ===
    print("Reading training data")

    #####
    #need to add one-hot vector to each frame later
    #####
    for i, action in enumerate(actions):
        action_seq = np.load(data_dir + '/' + action + '.npz')['seq']
        action_seq_len = np.load(data_dir + '/' + action + '.npz')['seq_len']
        all_seq = np.concatenate(
            (all_seq, action_seq), axis=0) if i > 0 else action_seq
        all_seq_len = np.concatenate((all_seq_len, action_seq_len), axis=0) \
                      if i > 0 else action_seq_len
    for i in range(len(all_seq)):
        if all_seq[i, 0, 0, 0, 0] > all_seq[i, 1, 0, 0, 0]:
            all_seq[i, :, :, :, 1] = -all_seq[i, :, :, :, 1]
    """
  all_seq: [data_index, skeleton_num, frame_len, joints, xyz]
  """
    all_seq = data_utils.rotate_data(all_seq)

    shape = all_seq.shape
    all_seq = np.reshape(all_seq, [shape[0], shape[1], shape[2], -1])

    np.savez('val.npz', seq=all_seq)

    from sklearn.model_selection import train_test_split
    train_seq, test_seq, train_seq_len, test_seq_len = train_test_split(
        all_seq, all_seq_len, test_size=0.25, random_state=1)
    if GT:
        return test_seq
    for i, (seq, seq_len) in enumerate(zip(train_seq, train_seq_len)):
        try:
            complete_seq = np.concatenate((complete_seq, seq[:, :int(seq_len)]), axis=1) if i > 0 \
                           else seq[:, :int(seq_len)]
        except TypeError:
            import pdb
            pdb.set_trace()

    complete_real_person, complete_character = complete_seq
    '''
  #for data_visualization
  np.savez('all_data_mirror.npz',
           real_person=complete_real_person,
           character=complete_character,
           ground_truth=complete_character,
           loss=0)
  sys.exit()
  '''
    # Compute normalization stats
    rp_stats = data_utils.normalization_stats(complete_real_person,
                                              FLAGS.dim_to_compressed)
    ch_stats = data_utils.normalization_stats(complete_character,
                                              FLAGS.dim_to_compressed)

    # Normalize -- subtract mean, divide by stdev
    train_shape = train_seq.shape
    test_shape = test_seq.shape
    if rp_stats[4] is not None:
        normalized_train_seq = np.zeros([
            train_shape[0], train_shape[1], train_shape[2],
            rp_stats[4].get_params()['n_components']
        ])
        normalized_test_seq = np.zeros([
            test_shape[0], test_shape[1], test_shape[2],
            rp_stats[4].get_params()['n_components']
        ])
    else:
        normalized_train_seq = np.zeros(train_shape)
        normalized_test_seq = np.zeros(test_shape)
    normalized_train_seq[:, 0] = data_utils.normalize_data(
        train_seq[:, 0], rp_stats, actions, one_hot)
    normalized_train_seq[:, 1] = data_utils.normalize_data(
        train_seq[:, 1], ch_stats, actions, one_hot)
    normalized_test_seq[:, 0] = data_utils.normalize_data(
        test_seq[:, 0], rp_stats, actions, one_hot)
    normalized_test_seq[:, 1] = data_utils.normalize_data(
        test_seq[:, 1], ch_stats, actions, one_hot)

    print("done reading data.")

    return normalized_train_seq, normalized_test_seq, train_seq_len, test_seq_len, rp_stats, ch_stats, \
      max(all_seq_len)