Example #1
    def run(self, img_arr: np.ndarray, other_arr: np.ndarray = None) \
            -> Tuple[Union[float, np.ndarray], ...]:
        """
        Donkeycar parts interface to run the part in the loop.

        :param img_arr:     uint8 [0,255] numpy array with image data
        :param other_arr:   numpy array of additional data to be used in the
                            pilot, like IMU array for the IMU model or a
                            state vector in the Behavioural model
        :return:            tuple of (angle, throttle)
        """
        norm_arr = normalize_image(img_arr)
        x_t = process_image(norm_arr)

        if self.s_t is None:
            # first frame: build the state by stacking 4 copies of the image
            self.s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
            # in Keras, need to reshape to add the batch dimension
            self.s_t = self.s_t.reshape(1, self.s_t.shape[0],
                                        self.s_t.shape[1],
                                        self.s_t.shape[2])  # 1x80x80x4
        else:
            # rolling window: prepend the new frame, drop the oldest
            x_t = x_t.reshape(1, x_t.shape[0], x_t.shape[1], 1)  # 1x80x80x1
            self.s_t = np.append(x_t, self.s_t[:, :, :, :3],
                                 axis=3)  # 1x80x80x4

        # the model predicts the angle; throttle is fixed at 0.5
        return self.inference(self.s_t, other_arr), 0.5
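
The four-frame state stack above gives a single-image model a short motion history, as used in DQN-style pilots. A minimal, self-contained sketch of the same rolling-window logic (the 80x80 frame size and the push_frame helper are illustrative, not part of Donkeycar):

    import numpy as np

    def push_frame(state, frame):
        """Keep a rolling window of the 4 most recent frames as 1xHxWx4."""
        if state is None:
            # first call: repeat the frame 4 times and add a batch dimension
            return np.stack((frame,) * 4, axis=2)[np.newaxis, ...]
        # the newest frame goes into slot 0, the oldest (slot 3) is dropped
        frame = frame.reshape(1, *frame.shape, 1)
        return np.append(frame, state[:, :, :, :3], axis=3)

    state = None
    for _ in range(5):
        state = push_frame(state, np.random.rand(80, 80))
    assert state.shape == (1, 80, 80, 4)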
Example #2
    def _transform_record(self, record):
        for key, value in record.items():
            if key == 'cam/image_array' and isinstance(value, str):
                image_path = os.path.join(record['_image_base_path'], value)
                image = load_image_arr(image_path, self.config)
                record[key] = normalize_image(image)

        return record
Example #3
 def image_processor(self, img_arr):
     """ Transformes the images and augments if in training. Then
         normalizes it. """
     img_arr = self.transformation.run(img_arr)
     if self.is_train:
         img_arr = self.augmentation.run(img_arr)
     norm_img = normalize_image(img_arr)
     return norm_img
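
The order of operations here matters: the transformation runs on the raw uint8 image, augmentation is applied only during training, and normalization comes last so the model always receives float input. A standalone sketch of the same flow (crop_top and jitter_brightness are hypothetical stand-ins for the transformation and augmentation parts):

    import numpy as np

    def crop_top(img, rows=20):
        # stand-in for self.transformation.run
        return img[rows:, :, :]

    def jitter_brightness(img):
        # stand-in for self.augmentation.run
        return np.clip(img.astype(np.int16) + 10, 0, 255).astype(np.uint8)

    def normalize_image(img):
        return img.astype(np.float32) / 255.0

    def image_processor(img, is_train):
        img = crop_top(img)
        if is_train:
            img = jitter_brightness(img)  # augmentation only during training
        return normalize_image(img)

    out = image_processor(np.zeros((120, 160, 3), dtype=np.uint8), is_train=True)
    assert out.dtype == np.float32 and out.shape == (100, 160, 3)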
Example #4
def test_training_pipeline(config: Config, model_type: str, car_dir: str,
                           train_filter: Callable[[TubRecord], bool]) -> None:
    """
    Testing consistency of the model interfaces and data used in training
    pipeline.

    :param config:                  donkey config
    :param model_type:              test specification of model type
    :param car_dir:             car directory containing the tub (car_dir/tub)
    :param train_filter:            filter for records
    :return:                        None
    """
    config.TRAIN_FILTER = train_filter
    kl = get_model_by_type(model_type, config)
    tub_dir = os.path.join(car_dir, 'tub')
    # don't shuffle so we can identify data for testing
    dataset = TubDataset(config, [tub_dir], shuffle=False)
    training_records, validation_records = dataset.train_test_split()
    seq = BatchSequence(kl, config, training_records, True)
    data_train = seq.create_tf_data()
    num_whole_batches = len(training_records) // config.BATCH_SIZE
    # collect all whole batches into one list
    tf_batch = list(data_train.take(num_whole_batches).as_numpy_iterator())
    it = iter(training_records)
    for xy_batch in tf_batch:
        # extract x and y values from the records; asymmetric in x and y
        # because x requires image manipulations
        batch_records = [next(it) for _ in range(config.BATCH_SIZE)]
        records_x = [
            kl.x_translate(normalize_image(kl.x_transform(r)))
            for r in batch_records
        ]
        records_y = [kl.y_translate(kl.y_transform(r)) for r in batch_records]
        # from here all checks are symmetrical between x and y
        for batch, o_type, records \
                in zip(xy_batch, kl.output_types(), (records_x, records_y)):
            # check the batch dictionaries have the expected keys
            assert batch.keys() == o_type.keys(), \
                "batch keys need to match the model's output types"
            # convert record values into arrays of batch size
            values = defaultdict(list)
            for r in records:
                for k, v in r.items():
                    values[k].append(v)
            # now convert lists of floats or numpy arrays into numpy arrays
            np_dict = {k: np.array(v) for k, v in values.items()}
            # compare record values with values from tf.data
            for k, v in batch.items():
                assert np.isclose(v, np_dict[k]).all()
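
The middle of the loop is a collation step: a list of per-record dicts is turned into one dict of batched numpy arrays, mirroring the shape of what tf.data yields. Isolated as a sketch (the 'angle'/'throttle' keys are illustrative):

    from collections import defaultdict
    import numpy as np

    def collate(records):
        """[{'angle': a, 'throttle': t}, ...] -> {'angle': arr, 'throttle': arr}"""
        values = defaultdict(list)
        for r in records:
            for k, v in r.items():
                values[k].append(v)
        return {k: np.array(v) for k, v in values.items()}

    batch = collate([{'angle': 0.1, 'throttle': 0.5},
                     {'angle': -0.2, 'throttle': 0.4}])
    assert batch['angle'].shape == (2,)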
Example #5
    def run(self, img_arr: np.ndarray, other_arr: np.ndarray = None) \
            -> Tuple[Union[float, np.ndarray], ...]:
        """
        Donkeycar parts interface to run the part in the loop.

        :param img_arr:     uint8 [0,255] numpy array with image data
        :param other_arr:   numpy array of additional data to be used in the
                            pilot, like IMU array for the IMU model or a
                            state vector in the Behavioural model
        :return:            tuple of (angle, throttle)
        """
        norm_arr = normalize_image(img_arr)
        return self.inference(norm_arr, other_arr)
Example #6
 def evaluate(self, record: TubRecord,
              augmentation: 'ImageAugmentation' = None) \
         -> Tuple[Union[float, np.ndarray], ...]:
     # extract model input from record
     x0 = self.x_transform(record)
     x1 = x0[0] if isinstance(x0, tuple) else x0
     # apply augmentation to training data only
     x2 = augmentation.augment(x1) if augmentation else x1
     # normalise image, assume other input data comes already normalised
     x3 = normalize_image(x2)
     if isinstance(x0, tuple):
         return self.inference(x3, *x0[1:])
     else:
         return self.inference(x3, None)
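
The tuple handling around x0 recurs in the next example too: x_transform may return either a bare image or an (image, other_data) tuple, and only the image slot gets normalized. A sketch isolating that dispatch (prepare is a hypothetical helper, not Donkeycar API):

    import numpy as np

    def normalize_image(img):
        return img.astype(np.float32) / 255.0

    def prepare(x0):
        """Normalize the image slot of x0, whether x0 is a bare image
        or an (image, other_data) tuple as produced by x_transform."""
        img = x0[0] if isinstance(x0, tuple) else x0
        img = normalize_image(img)
        return (img,) + x0[1:] if isinstance(x0, tuple) else (img, None)

    img = np.zeros((120, 160, 3), dtype=np.uint8)
    assert prepare(img)[1] is None
    assert prepare((img, np.ones(6)))[1].shape == (6,)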
Example #7
 def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:
     """ Extracting x from record for training"""
     # this transforms the record into x for training the model to x,y
     x0 = self.model.x_transform(record)
     # for multiple input tensors the return value here is a tuple
     # where the image is in first slot otherwise x0 is the image
     x1 = x0[0] if isinstance(x0, tuple) else x0
     # apply augmentation to training data only
     x2 = self.augmentation.augment(x1) if self.is_train else x1
     # normalise image, assume other input data comes already normalised
     x3 = normalize_image(x2)
     # fill normalised image back into tuple if necessary
     x4 = (x3, ) + x0[1:] if isinstance(x0, tuple) else x3
     # convert tuple to dictionary which is understood by tf.data
     x5 = self.model.x_translate(x4)
     return x5
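
The final x_translate step maps the tuple onto a dict keyed by input tensor names, which is the structure tf.data and multi-input Keras models expect. A hypothetical two-input sketch (the 'img_in'/'imu_in' names are made up for illustration):

    import numpy as np

    def x_translate(x):
        """Map an (image, imu) tuple to the dict form tf.data expects."""
        if isinstance(x, tuple):
            img, imu = x
            return {'img_in': img, 'imu_in': imu}
        return {'img_in': x}

    batch = x_translate((np.zeros((120, 160, 3), np.float32), np.zeros(6)))
    assert set(batch) == {'img_in', 'imu_in'}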