def load_test_set(self):
        """Load the held-out test rollouts and cache their features and labels.

        Scans ``cfg.BC_HELD_OUT`` for rollout directories, unpickles each
        ``rollout.p``, splits it into grasp points via
        ``cfg.break_up_rollouts``, extracts conv features from the depth image
        (when ``cfg.USE_DEPTH``) or the color image otherwise, and appends a
        ``{'c_img', 'label', 'features'}`` dict to ``self.test_labels``.
        One entry per grasp point is also appended to ``self.test_data_path``.
        """
        self.test_labels = []
        # NOTE(review): resetting train_data_path inside the *test* loader
        # looks like a copy/paste leftover from load_rollouts -- confirm
        # before removing.
        self.train_data_path = []
        self.test_data_path = []

        rollouts = glob.glob(os.path.join(self.cfg.BC_HELD_OUT, '*_*'))

        for rollout_p in rollouts:
            # Pickles must be opened in binary mode ('rb'); a context manager
            # guarantees the file handle is closed promptly.
            with open(os.path.join(rollout_p, 'rollout.p'), 'rb') as f:
                rollout = pickle.load(f)

            grasp_rollout = self.cfg.break_up_rollouts(rollout)

            for grasp_point in grasp_rollout:
                if self.cfg.USE_DEPTH:
                    # Convert the raw depth map into the network's image
                    # format before feature extraction.
                    grasp_point[0] = datum_to_net_dim(grasp_point[0])
                    features = self.yc.extract_conv_features(
                        grasp_point[0]['d_img'])
                else:
                    features = self.yc.extract_conv_features(
                        grasp_point[0]['c_img'])

                label = self.cfg.compute_label(grasp_point[0])
                self.test_labels.append({
                    'c_img': grasp_point[0]['c_img'],
                    'label': label,
                    'features': features,
                })
                self.test_data_path.append(rollout_p)
    def load_rollouts(self):
        """Load training rollouts, augment each datum, and cache features/labels.

        Scans ``self.rollout_path`` for rollout directories, unpickles each
        ``rollout.p``, splits it into grasp points, augments every datum via
        ``augment_data``, extracts conv features, and appends a
        ``{'c_img', 'label', 'features'}`` dict to ``self.train_labels``.
        One entry per grasp point is appended to ``self.train_data_path``.
        """
        self.train_labels = []
        # NOTE(review): resetting test_data_path inside the *train* loader
        # mirrors load_test_set's reset of train_data_path -- looks like a
        # copy/paste leftover; confirm before removing.
        self.train_data_path = []
        self.test_data_path = []

        rollouts = glob.glob(os.path.join(self.rollout_path, '*_*'))

        for rollout_p in rollouts:
            # Pickles must be opened in binary mode ('rb'); a context manager
            # guarantees the file handle is closed promptly.
            with open(os.path.join(rollout_p, 'rollout.p'), 'rb') as f:
                rollout = pickle.load(f)

            grasp_rollout = self.cfg.break_up_rollouts(rollout)
            for grasp_point in grasp_rollout:
                for data in grasp_point:
                    if self.cfg.USE_DEPTH:
                        # Convert the raw depth map to the network's image
                        # format before augmentation.
                        data = datum_to_net_dim(data)

                    # Each datum expands into several augmented copies.
                    for datum_a in augment_data(data, self.cfg.USE_DEPTH):
                        # NOTE(review): features always come from 'c_img'
                        # here, unlike load_test_set which reads 'd_img' when
                        # USE_DEPTH; presumably augment_data places the depth
                        # image under 'c_img' -- TODO confirm.
                        features = self.yc.extract_conv_features(
                            datum_a['c_img'])
                        label = self.cfg.compute_label(datum_a)
                        self.train_labels.append({
                            'c_img': datum_a['c_img'],
                            'label': label,
                            'features': features,
                        })

                self.train_data_path.append(rollout_p)
    # Fragment of a larger loop (enclosing scope not shown): `datum`,
    # `num_successes`, and `ROBOT` come from outside this excerpt.
    print("keys: {}".format(datum.keys()))
    print("datum['class']: {} (_their_ format, 1=success)".format(
        datum['class']))

    # Sanity-check the expected schema of _their_ data before converting.
    assert 'c_img' in datum and 'd_img' in datum \
        and 'class' in datum and 'type' in datum
    assert datum['type'] == 'success'
    assert datum['c_img'].shape == (480, 640, 3)
    assert datum['d_img'].shape == (480, 640)

    # Need to process it because it's their data.
    # Depth images may contain NaNs; zero them in place before conversion.
    if np.isnan(np.sum(datum['d_img'])):
        print("patching NaNs to zero ...")
        cv2.patchNaNs(datum['d_img'], 0.0)
    assert not np.isnan(np.sum(datum['d_img']))
    # Converts d_img to a 3-channel image format (shape asserted below).
    datum = datum_to_net_dim(datum, robot=ROBOT)

    # NOTE Don't forget to change their labeling !!!!!!
    assert datum['d_img'].shape == (480, 640, 3)
    # (Historical label-swapping logic, kept for the record:)
    #if datum['class'] == 0:
    #    datum['class'] = 1
    #    num_failures += 1
    #elif datum['class'] == 1:
    #    datum['class'] = 0
    #    num_successes += 1
    #else:
    #    raise ValueError(datum['class'])

    # UPDATE UPDATE: OK, their three failure cases here were actually successes...
    datum['class'] = 0
    num_successes += 1
# Exemple #4
# 0
    # Fragment of a per-rollout loop (enclosing scope not shown): `data`,
    # `rnum`, `losses`, `analyze`, and `FLIP` come from outside this excerpt.
    g_in_rollout = 0
    s_in_rollout = 0

    for (d_idx, datum) in enumerate(data):
        # Ignore the first thing which is the 'starting' points.
        if type(datum) == list:
            continue
        print("\ncurrently on item {} in this rollout, out of {}:".format(
            d_idx, len(data)))
        print('type:   {}'.format(datum['type']))
        print('side:   {}'.format(datum['side']))
        print('class:  {}'.format(datum['class']))
        print('pose:   {}'.format(datum['pose']))

        # All this does is modify the datum['d_img'] key; it leaves datum['c_img'] alone.
        datum_to_net_dim(datum)
        # Copy so the flip below does not mutate the stored rollout images.
        c_img = (datum['c_img']).copy()
        d_img = (datum['d_img']).copy()
        pose = datum['pose']

        # Grasping. For these, overlay the actual pose to the image (red circle, black border).
        if datum['type'] == 'grasp':
            loss = analyze(rnum, g_in_rollout, c_img, d_img, pose)
            losses.append(loss)
            if FLIP:
                # Horizontal flip (flipCode=1); mirror the pose's x coord too.
                c_img = cv2.flip(c_img.copy(), 1)
                d_img = cv2.flip(d_img.copy(), 1)
                h, w, channel = c_img.shape
                pose[0] = w - pose[0]
                # NOTE(review): the excerpt is truncated mid-call below.
                f_loss = analyze(rnum,
                                 g_in_rollout,
# Exemple #5
# 0
        # Fragment of a loop over pickle files (enclosing scope not shown):
        # `ff`, `pkl_f`, `ROLLOUTS`, and `num_skipped` come from outside this
        # excerpt.
        number = str(ff.split('_')[-1])
        file_path = os.path.join(ROLLOUTS, pkl_f)
        datum = pickle.load(open(file_path, 'rb'))
        assert type(datum) is dict

        # Debug and accumulate statistics for plotting later.
        print("\nOn data: {}, number {}".format(file_path, number))
        print("    data['pose']: {}".format(np.array(datum['pose'])))
        print("    data['type']: {}".format(np.array(datum['type'])))
        num = str(number).zfill(3)
        assert datum['c_img'].shape == (480, 640, 3)
        assert datum['d_img'].shape == (480, 640)
        cv2.patchNaNs(datum['d_img'], 0)  # NOTE the patching!

        # As usual, datum to net dim must be done before data augmentation.
        datum = datum_to_net_dim(datum, robot='Fetch')  # NOTE robot!
        assert datum['d_img'].shape == (480, 640, 3)
        assert 'c_img' in datum.keys() and 'd_img' in datum.keys(
        ) and 'pose' in datum.keys()

        # SKIP points that have pose[0] (i.e., x value) less than some threshold.
        if datum['pose'][0] <= 30:
            print("    NOTE: skipping this due to pose: {}".format(
                datum['pose']))
            num_skipped += 1
            continue
        # Only grasp-type data points are kept.
        if datum['type'] != 'grasp':
            print("    NOTE: skipping, type: {}".format(datum['type']))
            num_skipped += 1
            continue