Example #1
def test_recreating_tub(tub):
    """ Recreating a Tub should restore it to working state """
    assert tub.get_num_records() == 10
    assert tub.current_ix == 10
    assert tub.get_last_ix() == 9
    path = tub.path
    tub = None  # drop the reference; the tub is reopened from its path below

    inputs = ['cam/image_array', 'angle', 'throttle']
    types = ['image_array', 'float', 'float']
    t = Tub(path, inputs=inputs, types=types)
    assert t.get_num_records() == 10
    assert t.current_ix == 10
    assert t.get_last_ix() == 9
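
For context, a fixture along these lines would produce the state the assertions above expect. This is a sketch, not the project's actual fixture: the put_record() writer, the import path, and the tmpdir layout are assumptions based on the Tub API used in the test.

import numpy as np
import pytest
from donkeycar.parts.datastore import Tub  # import path is an assumption

@pytest.fixture
def tub(tmpdir):
    inputs = ['cam/image_array', 'angle', 'throttle']
    types = ['image_array', 'float', 'float']
    t = Tub(str(tmpdir.join('test_tub')), inputs=inputs, types=types)
    # write the 10 records the assertions above expect (indices 0..9)
    for _ in range(10):
        t.put_record({
            'cam/image_array': np.zeros((120, 160, 3), dtype=np.uint8),
            'angle': 0.0,
            'throttle': 0.0,
        })
    return t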
Example #2
class MakeMovie(BaseCommand):

    def parse_args(self, args):
        parser = argparse.ArgumentParser(prog='makemovie')
        parser.add_argument('--tub', help='The tub to make movie from')
        parser.add_argument('--out', default='tub_movie.mp4', help='The movie filename to create. default: tub_movie.mp4')
        parser.add_argument('--config', default='./config.py', help='location of config file to use. default: ./config.py')
        parsed_args = parser.parse_args(args)
        return parsed_args, parser

    def run(self, args):
        """
        Load the images from a tub and create a movie from them.
        Movie
        """
        import moviepy.editor as mpy

        args, parser = self.parse_args(args)

        if args.tub is None:
            parser.print_help()
            return

        conf = os.path.expanduser(args.config)

        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        try:
            cfg = dk.load_config(conf)
        except Exception as e:
            print("Exception while loading config from", conf)
            print(e)
            return

        self.tub = Tub(args.tub)
        self.num_rec = self.tub.get_num_records()
        self.iRec = 0

        print('making movie', args.out, 'from', self.num_rec, 'images')
        clip = mpy.VideoClip(self.make_frame,
                             duration=(self.num_rec // cfg.DRIVE_LOOP_HZ) - 1)
        clip.write_videofile(args.out, fps=cfg.DRIVE_LOOP_HZ)

        print('done')

    def make_frame(self, t):
        """
        Callback to return an image from from our tub records.
        This is called from the VideoClip as it references a time.
        We don't use t to reference the frame, but instead increment
        a frame counter. This assumes sequential access.
        """
        self.iRec = self.iRec + 1

        if self.iRec >= self.num_rec - 1:
            return None

        rec = self.tub.get_record(self.iRec)
        image = rec['cam/image_array']

        return image  # returns an 8-bit RGB array
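
The frame-callback pattern above is all moviepy needs: VideoClip calls make_frame(t) with the clip time in seconds and expects an HxWx3 uint8 array back. A minimal, self-contained sketch of that contract (the gradient image and output filename are made up):

import numpy as np
import moviepy.editor as mpy

def make_frame(t):
    # ignore t's exact value, as the command above does, and just
    # return a frame; here a gray level that cycles once per second
    shade = int(255 * (t % 1.0))
    return np.full((120, 160, 3), shade, dtype=np.uint8)

clip = mpy.VideoClip(make_frame, duration=2)
clip.write_videofile('demo.mp4', fps=20)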
Example #3
class MakeMovie(BaseCommand):
    def parse_args(self, args):
        parser = argparse.ArgumentParser(prog='makemovie')
        parser.add_argument('--tub', help='The tub to make movie from')
        parser.add_argument(
            '--out',
            default='tub_movie.mp4',
            help='The movie filename to create. default: tub_movie.mp4')
        parser.add_argument(
            '--config',
            default='./config.py',
            help='location of config file to use. default: ./config.py')
        parser.add_argument('--model',
                            default='None',
                            help='the model to use to show control outputs')
        parser.add_argument('--model_type',
                            default='categorical',
                            help='the model type to load')
        parser.add_argument(
            '--salient',
            action="store_true",
            help='should we overlay salient map showing activations')
        parser.add_argument('--start',
                            type=int,
                            default=1,
                            help='first frame to process')
        parser.add_argument('--end',
                            type=int,
                            default=-1,
                            help='last frame to process')
        parser.add_argument('--scale',
                            type=int,
                            default=2,
                            help='make image frame output larger by X mult')
        parsed_args = parser.parse_args(args)
        return parsed_args, parser

    def run(self, args):
        '''
        Load the images from a tub and create a movie from them.
        '''
        import moviepy.editor as mpy

        args, parser = self.parse_args(args)

        if args.tub is None:
            parser.print_help()
            return

        if args.salient:
            # Imported here, TF is only required when --salient is used,
            # and the TF context stays available to the callbacks below
            # that compute the salient mask.
            from keras import backend as K
            import tensorflow as tf
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

        conf = os.path.expanduser(args.config)

        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        try:
            cfg = dk.load_config(conf)
        except Exception as e:
            print("Exception while loading config from", conf)
            print(e)
            return

        self.tub = Tub(args.tub)
        self.num_rec = self.tub.get_num_records()

        if args.start == 1:
            self.start = self.tub.get_index(shuffled=False)[0]
        else:
            self.start = args.start

        if args.end != -1:
            self.end = args.end
        else:
            self.end = self.num_rec - self.start

        self.num_rec = self.end - self.start

        self.iRec = args.start
        self.scale = args.scale
        self.keras_part = None
        self.convolution_part = None
        if args.model != "None":
            self.keras_part = get_model_by_type(args.model_type, cfg=cfg)
            self.keras_part.load(args.model)
            self.keras_part.compile()
            if args.salient:
                self.init_salient(self.keras_part.model)

                # This function is nested so the conditional TF import above
                # stays in scope for the callback. Done this way, we avoid
                # importing inside the function body, which would trigger a
                # new CUDA device allocation on each call.
                def compute_visualisation_mask(img):
                    #from https://github.com/ermolenkodev/keras-salient-object-visualisation

                    activations = self.functor([np.array([img])])
                    activations = [
                        np.reshape(
                            img, (1, img.shape[0], img.shape[1], img.shape[2]))
                    ] + activations
                    upscaled_activation = np.ones((3, 6))
                    for layer in [5, 4, 3, 2, 1]:
                        averaged_activation = np.mean(
                            activations[layer],
                            axis=3).squeeze(axis=0) * upscaled_activation
                        output_shape = (activations[layer - 1].shape[1],
                                        activations[layer - 1].shape[2])
                        x = tf.constant(
                            np.reshape(averaged_activation,
                                       (1, averaged_activation.shape[0],
                                        averaged_activation.shape[1], 1)),
                            tf.float32)
                        conv = tf.nn.conv2d_transpose(
                            x,
                            self.layers_kernels[layer],
                            output_shape=(1, output_shape[0], output_shape[1],
                                          1),
                            strides=self.layers_strides[layer],
                            padding='VALID')
                        with tf.Session() as session:
                            result = session.run(conv)
                        upscaled_activation = np.reshape(result, output_shape)
                    final_visualisation_mask = upscaled_activation
                    return (final_visualisation_mask -
                            np.min(final_visualisation_mask)) / (
                                np.max(final_visualisation_mask) -
                                np.min(final_visualisation_mask))

                self.compute_visualisation_mask = compute_visualisation_mask

        print('making movie', args.out, 'from', self.num_rec, 'images')
        clip = mpy.VideoClip(self.make_frame,
                             duration=(self.num_rec // cfg.DRIVE_LOOP_HZ) - 1)
        clip.write_videofile(args.out, fps=cfg.DRIVE_LOOP_HZ)

        print('done')

    def draw_model_prediction(self, record, img):
        '''
        query the model for its prediction, draw the user input and the predictions
        as green and blue lines on the image
        '''
        if self.keras_part is None:
            return

        import cv2

        user_angle = float(record["user/angle"])
        user_throttle = float(record["user/throttle"])
        pilot_angle, pilot_throttle = self.keras_part.run(img)

        # angle and throttle are normalized to [-1, 1]; scale to degrees
        # and pixel lengths for drawing
        a1 = user_angle * 45.0
        l1 = user_throttle * 3.0 * 80.0
        a2 = pilot_angle * 45.0
        l2 = pilot_throttle * 3.0 * 80.0

        p1 = (74, 119)
        p2 = (84, 119)
        p11 = tuple(
            (int(p1[0] + l1 * math.cos((a1 + 270.0) * math.pi / 180.0)),
             int(p1[1] + l1 * math.sin((a1 + 270.0) * math.pi / 180.0))))
        p22 = tuple(
            (int(p2[0] + l2 * math.cos((a2 + 270.0) * math.pi / 180.0)),
             int(p2[1] + l2 * math.sin((a2 + 270.0) * math.pi / 180.0))))
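
        # Worked example (hypothetical values): user_angle = 0.5 and
        # user_throttle = 0.25 give a1 = 22.5 deg and l1 = 60 px, so
        # p11 = (int(74 + 60*cos(292.5 deg)), int(119 + 60*sin(292.5 deg)))
        #     ~= (96, 63), a short line leaning up and to the right.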

        cv2.line(img, p1, p11, (0, 255, 0), 2)  # user input in green
        cv2.line(img, p2, p22, (0, 0, 255), 2)  # pilot prediction in blue

    def init_salient(self, model):
        #from https://github.com/ermolenkodev/keras-salient-object-visualisation
        from keras.layers import Input, Convolution2D
        from keras.models import Model

        img_in = Input(shape=(120, 160, 3), name='img_in')
        x = img_in
        x = Convolution2D(24, (5, 5),
                          strides=(2, 2),
                          activation='relu',
                          name='conv1')(x)
        x = Convolution2D(32, (5, 5),
                          strides=(2, 2),
                          activation='relu',
                          name='conv2')(x)
        x = Convolution2D(64, (5, 5),
                          strides=(2, 2),
                          activation='relu',
                          name='conv3')(x)
        x = Convolution2D(64, (3, 3),
                          strides=(2, 2),
                          activation='relu',
                          name='conv4')(x)
        conv_5 = Convolution2D(64, (3, 3),
                               strides=(1, 1),
                               activation='relu',
                               name='conv5')(x)
        self.convolution_part = Model(inputs=[img_in], outputs=[conv_5])

        # copy the trained conv weights into the truncated model
        for layer_num in ('1', '2', '3', '4', '5'):
            self.convolution_part.get_layer('conv' + layer_num).set_weights(
                model.get_layer('conv2d_' + layer_num).get_weights())

        from keras import backend as K
        import tensorflow as tf
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

        self.inp = self.convolution_part.input  # input placeholder
        self.outputs = [
            layer.output for layer in self.convolution_part.layers[1:]
        ]  # all layer outputs
        self.functor = K.function([self.inp], self.outputs)

        # all-ones kernels used to spread each activation back over the
        # receptive field of the corresponding forward conv layer
        kernel_3x3 = tf.constant(np.ones((3, 3, 1, 1)), tf.float32)
        kernel_5x5 = tf.constant(np.ones((5, 5, 1, 1)), tf.float32)

        # kernel sizes mirror the forward convolutions: conv4/conv5 used
        # 3x3 kernels, conv1-conv3 used 5x5
        self.layers_kernels = {
            5: kernel_3x3,
            4: kernel_3x3,
            3: kernel_5x5,
            2: kernel_5x5,
            1: kernel_5x5
        }

        # strides mirror the forward convolutions: conv5 used stride 1,
        # all earlier conv layers used stride 2 (NHWC layout)
        self.layers_strides = {
            5: [1, 1, 1, 1],
            4: [1, 2, 2, 1],
            3: [1, 2, 2, 1],
            2: [1, 2, 2, 1],
            1: [1, 2, 2, 1]
        }

    def draw_salient(self, img):
        #from https://github.com/ermolenkodev/keras-salient-object-visualisation
        import cv2
        alpha = 0.004
        beta = 1.0 - alpha

        # stack the single-channel mask into three channels so it can be
        # blended with the RGB image
        salient_mask = self.compute_visualisation_mask(img)
        salient_mask_stacked = np.dstack((salient_mask, salient_mask))
        salient_mask_stacked = np.dstack((salient_mask_stacked, salient_mask))
        blend = cv2.addWeighted(img.astype('float32'), alpha,
                                salient_mask_stacked, beta, 0.0)
        return blend

    def make_frame(self, t):
        '''
        Callback to return an image from our tub records.
        This is called from the VideoClip as it references a time.
        We don't use t to reference the frame, but instead increment
        a frame counter. This assumes sequential access.
        '''

        if self.iRec >= self.end:
            return None

        rec = None

        while rec is None and self.iRec < self.end:
            try:
                rec = self.tub.get_record(self.iRec)
            except Exception as e:
                print(e)
                print("Failed to get image for frame", self.iRec)
                self.iRec = self.iRec + 1
                rec = None

        if rec is None:
            # every remaining record failed to load
            return None

        image = rec['cam/image_array']

        if self.convolution_part:
            # draw_salient returns a float blend in roughly [0, 1];
            # scale back to 8-bit for drawing and video output
            image = self.draw_salient(image)
            image = image * 255
            image = image.astype('uint8')

        self.draw_model_prediction(rec, image)

        if self.scale != 1:
            import cv2
            h, w, d = image.shape
            dsize = (w * self.scale, h * self.scale)
            image = cv2.resize(image,
                               dsize=dsize,
                               interpolation=cv2.INTER_CUBIC)

        self.iRec = self.iRec + 1

        return image  # returns an 8-bit RGB array
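
The heart of compute_visualisation_mask above is upscaling an activation map with tf.nn.conv2d_transpose and an all-ones kernel. A standalone sketch of that one step, written for eager TF2 rather than the TF1 Session style used above (the shapes are made up):

import numpy as np
import tensorflow as tf

# a fake 3x6 activation map, NHWC layout with a single channel
activation = np.random.rand(1, 3, 6, 1).astype(np.float32)
# all-ones 3x3 kernel, shape (H, W, out_channels, in_channels)
kernel = np.ones((3, 3, 1, 1), dtype=np.float32)

# VALID transposed conv with stride 1 grows each side by kernel - 1:
# (3 - 1) * 1 + 3 = 5 rows, (6 - 1) * 1 + 3 = 8 columns
upscaled = tf.nn.conv2d_transpose(activation, kernel,
                                  output_shape=(1, 5, 8, 1),
                                  strides=[1, 1, 1, 1],
                                  padding='VALID')
print(upscaled.shape)  # (1, 5, 8, 1)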