Example #1
0
def select_data(dataset, keys):
    """
    Filter *keys* down to the subset matching the configured camera and
    noise selection.

    Args:
        dataset: Dataset object exposing ``measurements`` (float data) and
            ``meta_data`` (row names as byte strings in column 0).
        keys: Iterable of integer sample indices to filter.

    Returns:
        list: The subset of ``keys`` selected by ``g_conf.DATA_USED`` and,
        when ``g_conf.USE_NOISE_DATA`` is false, restricted to noise-free
        samples.

    Raises:
        ValueError: If ``g_conf.DATA_USED`` is not 'central', 'sides'
            or 'all'.
    """
    # Map each DATA_USED mode to the camera label group it keeps.
    # 'central' keeps camera label 1; 'sides' keeps labels 0 and 2.
    camera_labels = {'central': [[1]], 'sides': [[0, 2]]}

    if g_conf.DATA_USED in camera_labels:
        camera_names = \
            dataset.measurements[np.where(dataset.meta_data[:, 0] == b'camera'), :][0][0]
        keys = splitter.label_split(camera_names, keys,
                                    camera_labels[g_conf.DATA_USED])[0]
    elif g_conf.DATA_USED != 'all':
        raise ValueError(" Invalid data used keyname")

    if not g_conf.USE_NOISE_DATA:
        # A sample is a "noise" frame when the recorded steering differs
        # from the noise-injected steering; keep only noise-free indices.
        steerings = dataset.measurements[np.where(
            dataset.meta_data[:, 0] == b'steer'), :][0][0]
        steerings_noise = dataset.measurements[np.where(
            dataset.meta_data[:, 0] == b'steer_noise'), :][0][0]
        noise_vec = steerings[:] != steerings_noise[:]
        non_noise_data = splitter.label_split(noise_vec, keys, [[0]])
        keys = list(set(non_noise_data[0]).intersection(set(keys)))

    return keys
Example #2
0
    def test_real_data(self):
        """Smoke-test CoILSampler on the real test dataset.

        Splits samples by control label, then by steering value, and
        iterates a BatchSampler over the nested splits, printing each batch.
        """
        dataset = CoILDataset(self.root_test_dir)

        steerings = dataset.measurements[0, :]
        print(dataset.meta_data)
        # TODO: read meta data and turn into a coool dictionary ?
        # meta_data row names are byte strings (see the b'...' comparisons
        # elsewhere in this file), so compare against b'control' — a plain
        # 'control' never matches.
        print(np.where(dataset.meta_data[:, 0] == b'control'))
        labels = dataset.measurements[24, :]

        print(np.unique(labels))

        keys = range(0, len(steerings))

        splitted_labels = splitter.label_split(
            labels, keys, g_conf.param.INPUT.LABELS_DIVISION)

        # Second level of splitting: within each label bucket, bucket the
        # samples by steering value.
        splitted_steer_labels = []
        for keys in splitted_labels:
            splitter_steer = splitter.float_split(
                steerings, keys, g_conf.param.INPUT.STEERING_DIVISION)

            splitted_steer_labels.append(splitter_steer)

        sampler = CoILSampler(splitted_steer_labels)

        for i in BatchSampler(sampler, 120, False):
            print(i)
    def test_split_sequence(self):
        """Split generated float data by label, then by steering division,
        printing the average value of every steering bucket."""
        measurements = self.generate_float_data()
        labels = self.generate_label_data()

        g_conf.param.MISC.NUMBER_IMAGES_SEQUENCE = 20
        keys = range(0, measurements.shape[0])
        splitted_labels = splitter.label_split(
            labels, keys, g_conf.param.INPUT.LABELS_DIVISION)

        # Second splitting level: bucket each label group by steering value.
        splitted_steer_labels = []
        for keys in splitted_labels:
            splitter_steer = splitter.float_split(
                measurements, keys, g_conf.param.INPUT.STEERING_DIVISION)

            # Print the mean of every bucket. The monotonicity assertion
            # (assertLess between consecutive averages) is intentionally
            # left disabled here; see test_split_real_data for the checked
            # variant.
            for bucket in splitter_steer:
                bucket_total = 0
                for key in bucket:
                    bucket_total += measurements[key]
                print(bucket_total / len(bucket))

            splitted_steer_labels.append(splitter_steer)
    def test_split_real_data(self):
        """Split real dataset samples by control label, then by steering.

        Asserts that the steering buckets produced by ``float_split`` are
        ordered by strictly increasing average steering value.
        """
        root_test_dir = 'testing/unit_tests/data'

        dataset = CoILDataset(root_test_dir)
        steerings = dataset.measurements[0, :]
        print(dataset.meta_data)
        # TODO: read meta data and turn into a coool dictionary ?
        # meta_data row names are byte strings (the other tests compare
        # against b'control' / b'camera'), so the comparison must use
        # b'control' — a plain 'control' never matches and would select
        # an empty label row.
        print(np.where(dataset.meta_data[:, 0] == b'control'))
        labels = dataset.measurements[np.where(
            dataset.meta_data[:, 0] == b'control'), :]
        print(labels)

        print(np.unique(labels))

        keys = range(0, len(steerings))

        splitted_labels = splitter.label_split(
            labels[0][0], keys, g_conf.param.INPUT.LABELS_DIVISION)

        print(splitted_labels)
        # Second level of splitting: bucket each label group by steering.
        splitted_steer_labels = []
        for keys in splitted_labels:
            splitter_steer = splitter.float_split(
                steerings, keys, g_conf.param.INPUT.STEERING_DIVISION)

            print(splitter_steer)

            avg_previous = None
            for i in range(0, len(splitter_steer)):
                sum_now = 0
                for key in splitter_steer[i]:
                    sum_now += steerings[key]

                avg_now = sum_now / len(splitter_steer[i])
                # Buckets must be sorted by increasing mean steering.
                if i > 0:
                    self.assertLess(avg_previous, avg_now)

                avg_previous = avg_now

            splitted_steer_labels.append(splitter_steer)
Example #5
0
    def test_split_real_data(self):
        """Split a real recorded dataset by control label, then by steering.

        Asserts that consecutive steering buckets have strictly increasing
        average steering values.
        """
        root_test_dir = '/home/felipe/Datasets/CVPR02Noise/SeqTrain'

        print('SPLITING REAL DATA !')
        dataset = CoILDataset(root_test_dir)
        steerings = dataset.measurements[0, :]

        print(dataset.meta_data)
        print(dataset.meta_data[:, 0])
        # Locate the 'control' row once and reuse it for the selection.
        control_rows = np.where(dataset.meta_data[:, 0] == b'control')
        print(" Where is control ", control_rows)
        labels = dataset.measurements[control_rows, :]

        keys = range(0, len(steerings))

        print(labels)
        splitted_labels = splitter.label_split(labels[0][0], keys,
                                               g_conf.LABELS_DIVISION)

        # Second splitting level: per-label steering buckets.
        splitted_steer_labels = []
        for keys in splitted_labels:
            splitter_steer = splitter.float_split(steerings, keys,
                                                  g_conf.STEERING_DIVISION)

            print(splitter_steer)

            previous_avg = None
            for index, bucket in enumerate(splitter_steer):
                bucket_sum = 0
                for key in bucket:
                    bucket_sum += steerings[key]

                current_avg = bucket_sum / len(bucket)
                # Each bucket's mean steering must exceed the previous one.
                if index > 0:
                    self.assertLess(previous_avg, current_avg)

                previous_avg = current_avg

            splitted_steer_labels.append(splitter_steer)
    def test_real_data_central_sampler(self):
        """Sample only central-camera frames and dump them as PNG images.

        Verifies that every index produced by ``control_steer_split``
        belongs to the central-camera subset, then iterates a DataLoader
        restricted to that subset and saves each decoded RGB frame under
        ``_images/``.
        """
        # Create the output directory; exist_ok avoids the former bare
        # `except: pass`, which also silently swallowed permission errors.
        os.makedirs('_images', exist_ok=True)
        augmenter = Augmenter(g_conf.AUGMENTATION)

        dataset = CoILDataset('/home/felipe/Datasets/1HoursW1-3-6-8',
                              augmenter)

        g_conf.NUMBER_IMAGES_SEQUENCE = 1
        g_conf.SEQUENCE_STRIDE = 1
        g_conf.NUMBER_ITERATIONS = 1200
        g_conf.BATCH_SIZE = 120

        steerings = dataset.measurements[0, :]

        # TODO: read meta data and turn into a coool dictionary ?
        labels = dataset.measurements[24, :]

        print(np.unique(labels))

        print('position of camera',
              np.where(dataset.meta_data[:, 0] == b'camera'))

        camera_names = dataset.measurements[np.where(
            dataset.meta_data[:, 0] == b'camera'), :][0][0]
        print(" Camera names ")
        print(camera_names)

        keys = range(0, len(steerings) - g_conf.NUMBER_IMAGES_SEQUENCE)

        # Camera label 0 is presumably the central camera here — the test
        # name suggests it; confirm against the dataset's label encoding.
        one_camera_data = splitter.label_split(camera_names, keys, [[0]])

        splitted_steer_labels = splitter.control_steer_split(
            dataset.measurements, dataset.meta_data, one_camera_data[0])

        # Every index in the nested split must come from the selected
        # camera subset; build the set once for O(1) membership tests
        # instead of repeated O(n) list scans.
        central_keys = set(one_camera_data[0])
        for split_1 in splitted_steer_labels:
            for split_2 in split_1:
                for split_3 in split_2:
                    if split_3 not in central_keys:
                        raise ValueError("not one camera")

        sampler = SubsetSampler(one_camera_data[0])

        count = 0

        data_loader = torch.utils.data.DataLoader(dataset,
                                                  sampler=sampler,
                                                  batch_size=120,
                                                  num_workers=12,
                                                  pin_memory=True)

        for data in data_loader:

            image, measurements = data

            print(image['rgb'].shape)
            for i in range(120):
                name = '_images/' + str(count) + '.png'
                image_to_save = transforms.ToPILImage()(
                    image['rgb'][i][0].cpu())
                image_to_save.save(name)

                count += 1