Example #1
def car_dir(tmpdir_factory, base_config, imu_fields) -> str:
    """ Creating car dir with sub dirs and extracting tub """
    car_dir = tmpdir_factory.mktemp('mycar')
    os.mkdir(os.path.join(car_dir, 'models'))
    # extract tub.tar.gz into car_dir/tub
    this_dir = os.path.dirname(os.path.abspath(__file__))
    with tarfile.open(os.path.join(this_dir, 'tub', 'tub.tar.gz')) as file:
        file.extractall(car_dir)
    # now create a second tub with additional imu data
    tub_dir = os.path.join(car_dir, 'tub')
    tub = Tub(base_path=tub_dir)
    full_dir = os.path.join(car_dir, 'tub_full')
    tub_full = Tub(base_path=full_dir,
                   inputs=tub.manifest.inputs + imu_fields +
                   ['behavior/one_hot_state_array', 'localizer/location'],
                   types=tub.manifest.types + ['float'] * 6 + ['list', 'int'])
    count = 0
    for record in tub:
        t = TubRecord(base_config, tub.base_path, record)
        img = t.image()
        record['cam/image_array'] = img
        for field in imu_fields:
            record[field] = np.random.rand()
        # add behavioural input
        bhv = [1., 0.] if count < len(tub) // 2 else [0., 1.]
        record["behavior/one_hot_state_array"] = bhv
        record['localizer/location'] = 3 * count // len(tub)
        tub_full.write_record(record)
        count += 1
    return car_dir
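
A minimal sketch of how a test might read back the fixture's enlarged tub, reusing only calls that appear above (Tub, TubRecord, TubRecord.image()). The test name is invented and the import paths are assumptions following the usual donkeycar layout; treat it as an illustration, not part of the original fixture.

# Hedged sketch, not part of the original fixture; import paths are assumed.
import os
from donkeycar.parts.tub_v2 import Tub
from donkeycar.pipeline.types import TubRecord

def test_read_back_full_tub(car_dir, base_config):
    # open the tub written by the fixture above and rebuild TubRecord objects
    tub_full = Tub(base_path=os.path.join(car_dir, 'tub_full'))
    assert len(tub_full) > 0
    for record in tub_full:
        t = TubRecord(base_config, tub_full.base_path, record)
        assert t.image() is not None   # lazily loads the image referenced by the record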
Example #2
class TestTub(unittest.TestCase):
    def setUp(self):
        self._path = tempfile.mkdtemp()
        inputs = ['input']
        types = ['int']
        self.tub = Tub(self._path, inputs, types)

    def test_basic_tub_operations(self):
        entries = list(self.tub)
        self.assertEqual(len(entries), 0)
        write_count = 10
        delete_indexes = [0, 8]

        records = [{'input': i} for i in range(write_count)]
        for record in records:
            self.tub.write_record(record)

        for index in delete_indexes:
            self.tub.delete_records(index)

        count = 0
        for record in self.tub:
            print('Record %s' % (record))
            count += 1

        self.assertEqual(count, (write_count - len(delete_indexes)))
        self.assertEqual(len(self.tub), (write_count - len(delete_indexes)))

    def tearDown(self):
        shutil.rmtree(self._path)
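
The same write/delete/iterate cycle can be shown outside unittest. The sketch below is a standalone variant of the test above, using only calls that appear in it; the Tub import path is taken from Example #18.

# Hedged standalone sketch of the cycle exercised by test_basic_tub_operations.
import shutil
import tempfile
from donkeycar.parts.tub_v2 import Tub

path = tempfile.mkdtemp()
tub = Tub(path, ['input'], ['int'])
for i in range(10):
    tub.write_record({'input': i})
tub.delete_records(0)      # single index, as in the test above
print(len(tub))            # expected: 9
tub.close()
shutil.rmtree(path)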
Example #3
    def run(self, args, parser):
        '''
        Load the images from a tub and create a movie from them.
        Movie
        '''

        if args.tub is None:
            print("ERR>> --tub argument missing.")
            parser.print_help()
            return

        conf = os.path.expanduser(args.config)
        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        self.cfg = dk.load_config(conf)

        if args.type is None and args.model is not None:
            args.type = self.cfg.DEFAULT_MODEL_TYPE
            print("Model type not provided. Using default model type from config file")

        if args.salient:
            if args.model is None:
                print("ERR>> salient visualization requires a model. Pass with the --model arg.")
                parser.print_help()

            if args.type not in ['linear', 'categorical']:
                print("Model type {} is not supported. Only linear or categorical is supported for salient visualization".format(args.type))
                parser.print_help()
                return

        self.tub = Tub(args.tub)

        start = args.start
        self.end_index = args.end if args.end != -1 else len(self.tub)
        num_frames = self.end_index - start

        # Move to the correct offset
        self.current = 0
        self.iterator = self.tub.__iter__()
        while self.current < start:
            self.iterator.next()
            self.current += 1

        self.scale = args.scale
        self.keras_part = None
        self.do_salient = False
        self.user = args.draw_user_input
        if args.model is not None:
            self.keras_part = get_model_by_type(args.type, cfg=self.cfg)
            self.keras_part.load(args.model)
            if args.salient:
                self.do_salient = self.init_salient(self.keras_part.model)

        print('making movie', args.out, 'from', num_frames, 'images')
        clip = mpy.VideoClip(self.make_frame, duration=((num_frames - 1) / self.cfg.DRIVE_LOOP_HZ))
        clip.write_videofile(args.out, fps=self.cfg.DRIVE_LOOP_HZ)
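
run() only reads a handful of attributes from args, so it can also be driven programmatically. The sketch below builds an argparse.Namespace by hand; the attribute names mirror the args.* fields used above, while the values and the MakeMovie class name (see Example #22) are assumptions.

# Hedged sketch: calling run() with a hand-built namespace instead of a CLI.
import argparse

args = argparse.Namespace(
    tub='data/tub',                 # existing tub directory (hypothetical)
    config='~/mycar/config.py',     # config location (hypothetical)
    type=None, model=None, salient=False,
    start=0, end=-1, scale=2,
    draw_user_input=True,
    out='tub_movie.mp4',
)
parser = argparse.ArgumentParser()
MakeMovie().run(args, parser)       # MakeMovie as defined in Example #22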
Example #4
def convert_to_tub_v2(paths, output_path):
    """
    Convert from old-style tubs to the new tub v2 format

    :param paths:               legacy tub paths
    :param output_path:         new tub output path
    :return:                    None
    """
    empty_record = {'__empty__': True}
    if type(paths) is str:
        paths = [paths]
    legacy_tubs = [LegacyTub(path) for path in paths]
    print(f'Total number of tubs: {len(legacy_tubs)}')

    for legacy_tub in legacy_tubs:
        # add input and type for empty records recording
        inputs = legacy_tub.inputs + ['__empty__']
        types = legacy_tub.types + ['boolean']
        output_tub = Tub(output_path, inputs, types,
                         list(legacy_tub.meta.items()))
        record_paths = legacy_tub.gather_records()
        bar = IncrementalBar('Converting', max=len(record_paths))
        previous_index = None
        for record_path in record_paths:
            try:
                contents = Path(record_path).read_text()
                record = json.loads(contents)
                image_path = record['cam/image_array']
                ms = record['milliseconds']
                current_index = int(image_path.split('_')[0])
                image_path = os.path.join(legacy_tub.path, image_path)
                image_data = Image.open(image_path)
                record['cam/image_array'] = image_data
                # first record or they are continuous, just append
                if not previous_index or current_index == previous_index + 1:
                    output_tub.write_record(record, ms)
                    previous_index = current_index
                # otherwise fill the gap with empty records
                else:
                    # Skipping over previous record here because it has
                    # already been written.
                    previous_index += 1
                    # Adding empty record nodes, and marking them deleted
                    # until the next valid record.
                    delete_list = []
                    while previous_index < current_index:
                        idx = output_tub.manifest.current_index
                        output_tub.write_record(empty_record, ms)
                        delete_list.append(idx)
                        previous_index += 1
                    output_tub.delete_records(delete_list)
                bar.next()
            except Exception as exception:
                print(f'Ignoring record path {record_path}\n', exception)
                traceback.print_exc()
        # writing session id into manifest metadata
        output_tub.close()
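
A short usage sketch for the converter above; both paths are hypothetical. A single string also works, since the function wraps it in a list itself.

# Hedged usage sketch; the paths below are hypothetical.
convert_to_tub_v2(['/data/legacy_tub_1', '/data/legacy_tub_2'], '/data/tub_v2')

# A single path string is also accepted because the function wraps it in a list.
convert_to_tub_v2('/data/legacy_tub_3', '/data/tub_v2_single')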
Example #5
    def clips_of_tub(self, tub_path):
        tub = Tub(tub_path)

        clips = []
        for record in tub:
            index = record['_index']
            images_relative_path = os.path.join(Tub.images(),
                                                record['cam/image_array'])
            record['cam/image_array'] = images_relative_path
            clips.append(record)

        return [clips]
Example #6
def benchmark():
    # Change to a non-SSD storage path
    path = Path('/media/rahulrav/Cruzer/benchmark')

    # Recreate paths
    if os.path.exists(path.absolute().as_posix()):
        shutil.rmtree(path)

    inputs = ['input']
    types = ['int']
    tub = Tub(path.as_posix(), inputs, types, max_catalog_len=1000)
    write_count = 1000
    for i in range(write_count):
        record = {'input': i}
        tub.write_record(record)

    deletions = np.random.randint(0, write_count, 100)
    for index in deletions:
        index = int(index)
        tub.delete_record(index)
 
    for record in tub:
        print('Record %s' % record)

    tub.close()
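
If the goal is to compare storage backends, the benchmark can be timed with the standard library. timed_benchmark below is a hypothetical wrapper, not part of the original code.

# Hedged sketch: timing the benchmark above with the standard library.
import time

def timed_benchmark():
    start = time.perf_counter()
    benchmark()
    elapsed = time.perf_counter() - start
    print(f'benchmark() finished in {elapsed:.2f}s')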
Example #7
    def __init__(self, path):
        DriveFormat.__init__(self)

        if not os.path.exists(path):
            raise IOError(
                "Tubv2Format directory does not exist: {}".format(path))
        if not os.path.isdir(path):
            raise IOError(
                "Tubv2Format path is not a directory: {}".format(path))

        self.path = path
        self.tub = Tub(path, read_only=False)
        self.meta = self.tub.manifest.metadata  # Bug. tub.metadata doesn't get updated with info from disc
        self.deleted_indexes = self.tub.manifest.deleted_indexes
        print(f"Deleted: {self.deleted_indexes}")
        self.edit_list = set()
        self.shape = None
Example #8
 def __init__(self, tub_paths, test_size=0.2, shuffle=True):
     self.tub_paths = tub_paths
     self.test_size = test_size
     self.shuffle = shuffle
     self.tubs = [
         Tub(tub_path, read_only=True) for tub_path in self.tub_paths
     ]
     self.records = list()
Example #9
def dataframe_from_tubs(tubs):
    dfs = []
    for tub in tubs:
        df = pd.DataFrame(tub)
        name = Path(tub.base_path).name
        pref = os.path.join(tub.base_path, Tub.images()) + "/"
        df["cam/image_array"] = pref + df["cam/image_array"]
        dfs.append(df)
        #print( f"Tub {name}: {df['user/throttle'].min()} - {df['user/throttle'].max()}" )
    return pd.concat(dfs)
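
A hedged usage sketch: the function expects already-opened Tub objects, so the caller typically opens them read-only first, as in Examples #8 and #10. The tub paths below are hypothetical.

# Hedged usage sketch; the tub paths are hypothetical.
from donkeycar.parts.tub_v2 import Tub

tubs = [Tub(p, read_only=True) for p in ['data/tub_1', 'data/tub_2']]
df = dataframe_from_tubs(tubs)
print(len(df), df['cam/image_array'].iloc[0])   # full image path after the prefix join above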
Example #10
 def __init__(self,
              config: Config,
              tub_paths: List[str],
              shuffle: bool = True) -> None:
     self.config = config
     self.tub_paths = tub_paths
     self.shuffle = shuffle
     self.tubs: List[Tub] = [
         Tub(tub_path, read_only=True) for tub_path in self.tub_paths
     ]
     self.records: List[TubRecord] = list()
Example #11
    def post(self, tub_id):
        tub_path = os.path.join(self.data_path, tub_id)
        tub = Tub(tub_path)
        old_clips = self.clips_of_tub(tub_path)
        new_clips = tornado.escape.json_decode(self.request.body)

        import itertools
        old_frames = list(itertools.chain(*old_clips))
        old_indexes = set()
        for frame in old_frames:
            old_indexes.add(frame['_index'])

        new_frames = list(itertools.chain(*new_clips['clips']))
        new_indexes = set()
        for frame in new_frames:
            new_indexes.add(frame['_index'])

        frames_to_delete = [
            index for index in old_indexes if index not in new_indexes
        ]
        tub.delete_records(frames_to_delete)
Example #12
 def __init__(self,
              config: Config,
              tub_paths: List[str],
              shuffle: bool = True) -> None:
     self.config = config
     self.tub_paths = tub_paths
     self.shuffle = shuffle
     self.tubs: List[Tub] = [
         Tub(tub_path, read_only=True) for tub_path in self.tub_paths
     ]
     self.records: List[TubRecord] = list()
     self.train_filter = getattr(config, 'TRAIN_FILTER', None)
Example #13
    def update_tub(self, reload=False):
        if not self.base_path or not self.app.config_loader.config:
            return
        if not os.path.exists(os.path.join(self.base_path, 'manifest.json')):
            self.app.update_status(
                f'Path {self.base_path} is not a valid tub.')
            return
        try:
            self.tub = Tub(self.base_path)
        except Exception as e:
            self.app.update_status(f'Failed loading tub: {str(e)}')
            return

        # Use filter, this defines the function
        def select(underlying):
            if self.app.tub_manipulator.filter_expression is None:
                return True
            else:
                record = TubRecord(self.app.config_loader.config,
                                   self.tub.base_path, underlying)
                res = eval(self.app.tub_manipulator.filter_expression)
                return res

        self.records = [
            TubRecord(self.app.config_loader.config, self.tub.base_path,
                      record) for record in self.tub if select(record)
        ]
        self.len = len(self.records)
        self.state.i = 0
        self.label.config(text=self.base_path)
        if self.len > 0:
            self.state.record = self.records[self.state.i]
            # update app components, manipulator, slider and plot
            self.app.tub_manipulator.set_lr(is_l=True)
            self.app.tub_manipulator.set_lr(is_l=False)
            # clear bars for new tub only but not for reloading existing tub
            if not reload:
                self.app.data_panel.clear()
            self.app.slider.slider.configure(to=self.len - 1)
            # update graph
            self.app.data_plot.update_dataframe_from_tub()
            msg = f'Loaded tub {self.base_path} with {self.len} records'
        else:
            msg = f'No records in tub {self.base_path}'
        if self.app.tub_manipulator.record_filter:
            msg += f' using filter {self.app.tub_manipulator.record_filter}'
        self.app.update_status(msg)
Example #14
    def update_tub(self, event=None):
        if not self.file_path:
            return False
        # If config not yet loaded return
        cfg = tub_screen().ids.config_manager.config
        if not cfg:
            return False
        # At least check if there is a manifest file in the tub path
        if not os.path.exists(os.path.join(self.file_path, 'manifest.json')):
            tub_screen().status(f'Path {self.file_path} is not a valid tub.')
            return False
        try:
            self.tub = Tub(self.file_path)
        except Exception as e:
            tub_screen().status(f'Failed loading tub: {str(e)}')
            return False
        # Check if filter is set in tub screen
        expression = tub_screen().ids.tub_filter.filter_expression

        # Use filter, this defines the function
        def select(underlying):
            if not expression:
                return True
            else:
                try:
                    record = TubRecord(cfg, self.tub.base_path, underlying)
                    res = eval(expression)
                    return res
                except KeyError as err:
                    Logger.error(f'Filter: {err}')
                    return True

        self.records = [TubRecord(cfg, self.tub.base_path, record)
                        for record in self.tub if select(record)]
        self.len = len(self.records)
        if self.len > 0:
            tub_screen().index = 0
            tub_screen().ids.data_plot.update_dataframe_from_tub()
            msg = f'Loaded tub {self.file_path} with {self.len} records'
        else:
            msg = f'No records in tub {self.file_path}'
        if expression:
            msg += f' using filter {tub_screen().ids.tub_filter.record_filter}'
        tub_screen().status(msg)
        return True
Example #15
def tubs_from_directory(tub_dir, verbose=False):
    """ Load all tubs in the given directory """
    tubs = []
    count = 0
    root_path = Path(tub_dir)
    for item in root_path.iterdir():
        if item.is_dir():
            try:
                t = Tub(str(item), read_only=True)
                count += len(t)
            except FileNotFoundError as ex:
                continue
            except ValueError as ex:
                # In case the catalog file is empty
                continue
            tubs.append(t)
    if verbose:
        print(f"Loaded {count} records.")

    return tubs
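
Usage is a one-liner; the sketch below assumes a hypothetical data directory laid out with one tub per sub-directory.

# Hedged usage sketch; 'data' is a hypothetical directory with one tub per sub-dir.
tubs = tubs_from_directory('data', verbose=True)
total = sum(len(t) for t in tubs)
print(f'{len(tubs)} tubs, {total} records in total')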
Example #16
def tubs_from_filelist(file_list, verbose=False):
    """ Load all tubs listed in all files in file_list """
    tub_dirs = preprocessFileList(file_list)
    tubs = []
    count = 0
    root_path = Path("data")
    for item in tub_dirs:
        if Path(item).is_dir():
            try:
                t = Tub(str(item), read_only=True)
            except FileNotFoundError as ex:
                continue
            except ValueError as ex:
                # In case the catalog file is empty
                continue
            tubs.append(t)
            count += len(t)
    if verbose:
        print(f"Loaded {count} records.")

    return tubs
Example #17
    def __init__(self, config: Any, tub_paths: List[str], transform=None):
        """Create a PyTorch Lightning Data Module to contain all data loading logic

        Args:
            config (object): the configuration information
            tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
                                   Each tub path corresponds to another training run.
            transform (function, optional): a transform to apply to the data
        """
        super().__init__()

        self.config = config
        self.tub_paths = tub_paths

        # Handle the transforms
        if transform:
            self.transform = transform
        else:
            self.transform = get_default_transform()

        self.tubs: List[Tub] = [
            Tub(tub_path, read_only=True) for tub_path in self.tub_paths
        ]
        self.records: List[TubRecord] = []
Example #18
    def plot_predictions(self, cfg, tub_paths, model_path, limit, model_type):
        '''
        Plot model predictions for angle and throttle against data from tubs.

        '''
        import matplotlib.pyplot as plt
        import pandas as pd

        model_path = os.path.expanduser(model_path)
        model = dk.utils.get_model_by_type(model_type, cfg)
        # This just gets us the text for the plot title:
        if model_type is None:
            model_type = cfg.DEFAULT_MODEL_TYPE
        model.load(model_path)

        user_angles = []
        user_throttles = []
        pilot_angles = []
        pilot_throttles = []

        from donkeycar.parts.tub_v2 import Tub
        from pathlib import Path

        base_path = Path(os.path.expanduser(tub_paths)).absolute().as_posix()
        tub = Tub(base_path)
        records = list(tub)
        records = records[:limit]
        bar = IncrementalBar('Inferencing', max=len(records))

        for record in records:
            img_filename = os.path.join(base_path, Tub.images(),
                                        record['cam/image_array'])
            img = load_image(img_filename, cfg)
            user_angle = float(record["user/angle"])
            user_throttle = float(record["user/throttle"])
            pilot_angle, pilot_throttle = model.run(img)

            user_angles.append(user_angle)
            user_throttles.append(user_throttle)
            pilot_angles.append(pilot_angle)
            pilot_throttles.append(pilot_throttle)
            bar.next()

        angles_df = pd.DataFrame({
            'user_angle': user_angles,
            'pilot_angle': pilot_angles
        })
        throttles_df = pd.DataFrame({
            'user_throttle': user_throttles,
            'pilot_throttle': pilot_throttles
        })

        fig = plt.figure()

        title = "Model Predictions\nTubs: " + tub_paths + "\nModel: " + model_path + "\nType: " + model_type
        fig.suptitle(title)

        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)

        angles_df.plot(ax=ax1)
        throttles_df.plot(ax=ax2)

        ax1.legend(loc=4)
        ax2.legend(loc=4)

        plt.savefig(model_path + '_pred.png')
        plt.show()
Example #19
 def setUp(self):
     self._path = tempfile.mkdtemp()
     inputs = ['input']
     types = ['int']
     self.tub = Tub(self._path, inputs, types)
Example #20
 def setUpClass(cls) -> None:
     cls._path = tempfile.mkdtemp()
     inputs = ['input']
     types = ['int']
     cls.tub = Tub(cls._path, inputs, types)
Example #21
class Tubv2Format(DriveFormat):
    """ A class to represent a DonkeyCar Tub v2 on disc.

        Current assumptions:
            Tub records are 1 indexed and sequential with no gaps.
            We only care about editing steering and throttle.
            Steering and throttle should be clipped to -1/1.
    """
    def __init__(self, path):
        DriveFormat.__init__(self)

        if not os.path.exists(path):
            raise IOError(
                "Tubv2Format directory does not exist: {}".format(path))
        if not os.path.isdir(path):
            raise IOError(
                "Tubv2Format path is not a directory: {}".format(path))

        self.path = path
        self.tub = Tub(path, read_only=False)
        self.meta = self.tub.manifest.metadata  # Bug. tub.metadata doesn't get updated with info from disc
        self.deleted_indexes = self.tub.manifest.deleted_indexes
        print(f"Deleted: {self.deleted_indexes}")
        self.edit_list = set()
        self.shape = None

    def _load(self, path, image_norm=True, progress=None):

        records = {}
        indexes = []
        # Store images separately so we can easily write changed records back to the tub
        images = []
        total = len(self.tub)
        for idx, rec in enumerate(self.tub):
            img_path = os.path.join(self.path, self.tub.images(),
                                    rec['cam/image_array'])
            try:
                img = Image.open(img_path)
                img_arr = np.asarray(img)
                if self.shape is None:
                    self.shape = img_arr.shape
            except Exception as ex:
                print(f"Failed to load image: {img_path}")
                print(f"   Exception: {ex}")
            records[idx] = rec
            indexes.append(idx)
            images.append(img_arr)
            progress(idx, total)
        self.records = records
        self.indexes = indexes
        self.images = images

    def load(self, progress=None):
        self._load(self.path, progress=progress)
        self.setClean()

    def update_line(self, line_num, new_rec):
        contents = json.dumps(new_rec, allow_nan=False, sort_keys=True)
        if contents[-1] == NEWLINE:
            line = contents
        else:
            line = f'{contents}{NEWLINE}'
        self.tub.manifest.current_catalog.seekable.update_line(
            line_num + 1, line)

    def save(self):
        if self.isClean():
            return

        self.tub.manifest.deleted_indexes = self.deleted_indexes

        for ix in self.edit_list:
            rec = self.records[ix]
            self.update_line(ix, rec)

        self.tub.manifest._update_catalog_metadata(update=True)
        self.edit_list.clear()
        self.setClean()

    def count(self):
        return len(self.records)

    def imageForIndex(self, index):
        idx = self.indexes[index]
        img = self.images[idx]
        if self.isIndexDeleted(index):
            # This grayed out image ends up looking ugly, can't figure out why
            tmp = img.mean(axis=-1, dtype=img.dtype, keepdims=False)
            tmp = np.repeat(tmp[:, :, np.newaxis], 3, axis=2)
            return tmp
        return img

    def get_angle_throttle(self, json_data):
        angle = float(json_data['user/angle'])
        throttle = float(json_data["user/throttle"])

        # If non-valid user entries and we have pilot data (e.g. AI), use that instead.
        if (0.0 == angle) and (0.0 == throttle):
            if "pilot/angle" in json_data:
                pa = json_data['pilot/angle']
                if pa is not None:
                    angle = float(pa)
            if "pilot/throttle" in json_data:
                pt = json_data['pilot/throttle']
                if pt is not None:
                    throttle = float(pt)

        return angle, throttle

    def actionForIndex(self, index):
        idx = self.indexes[index]
        rec = self.records[idx]
        angle, throttle = self.get_angle_throttle(rec)
        return [angle, throttle]

    def setActionForIndex(self, new_action, index):
        idx = self.indexes[index]
        rec = self.records[idx]
        angle, throttle = self.get_angle_throttle(rec)
        old_action = [angle, throttle]
        if not np.array_equal(old_action, new_action):
            if (rec["user/angle"] != new_action[0]) or (rec["user/throttle"] !=
                                                        new_action[1]):
                # Save the original values if not already done
                if "orig/angle" not in rec:
                    rec["orig/angle"] = rec["user/angle"]
                if "orig/throttle" not in rec:
                    rec["orig/throttle"] = rec["user/throttle"]

                rec["user/angle"] = new_action[0]
                rec["user/throttle"] = new_action[1]
                self.edit_list.add(idx)
                self.setDirty()

    def actionForKey(self, keybind, oldAction=None):
        oldAction = copy.copy(oldAction)
        if keybind == 'w':
            oldAction[1] += 0.1
        elif keybind == 'x':
            oldAction[1] -= 0.1
        elif keybind == 'a':
            oldAction[0] -= 0.1
        elif keybind == 'd':
            oldAction[0] += 0.1
        elif keybind == 's':
            oldAction[0] = 0.0
            oldAction[1] = 0.0
        else:
            return None
        return np.clip(oldAction, -1.0, 1.0)

    def deleteIndex(self, index):
        if index >= 0 and index < self.count():
            index += 1
            if index in self.deleted_indexes:
                self.deleted_indexes.remove(index)
            else:
                self.deleted_indexes.add(index)
            self.setDirty()

    def isIndexDeleted(self, index):
        if index >= 0 and index < self.count():
            index += 1
            return index in self.deleted_indexes
        return False

    def metaString(self):
        #{"inputs": ["cam/image_array", "user/angle", "user/throttle", "user/mode"], "start": 1550950724.8622544, "types": ["image_array", "float", "float", "str"]}
        ret = ""
        for k, v in self.meta.items():
            ret += "{}: {}\n".format(k, v)
        return ret

    def actionStats(self):
        stats = defaultdict(int)
        if self.count() > 0:
            actions = []
            for i in range(self.count()):
                act = self.actionForIndex(i)
                actions.append(act)
            stats["Min"] = np.min(actions)
            stats["Max"] = np.max(actions)
            stats["Mean"] = np.mean(actions)
            stats["StdDev"] = np.std(actions)
        return stats

    def supportsAuxData(self):
        return False

    def getAuxMeta(self):
        return None

    def addAuxData(self, meta):
        return None

    def auxDataAtIndex(self, auxName, index):
        return None

    def setAuxDataAtIndex(self, auxName, auxData, index):
        return False

    @classmethod
    def canOpenFile(cls, path):
        if not os.path.exists(path):
            return False
        if not os.path.isdir(path):
            return False

        meta_file = os.path.join(path, "manifest.json")
        if not os.path.exists(meta_file):
            return False

        return True

    @staticmethod
    def defaultInputTypes():
        return [{
            "name": "Images",
            "type": "numpy image",
            "shape": (120, 160, 3)
        }]

    def inputTypes(self):
        res = Tubv2Format.defaultInputTypes()
        if self.shape is not None:
            res[0]["shape"] = self.shape
        return res

    @staticmethod
    def defaultOutputTypes():
        return [{
            "name": "Actions",
            "type": "continuous",
            "range": (-1.0, 1.0)
        }]

    def outputTypes(self):
        res = []
        for act in ["user/angle", "user/throttle"]:
            display_name = act.split("/")[1]
            res.append({
                "name": display_name,
                "type": "continuous",
                "range": (-1.0, 1.0)
            })
        return res
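
A hedged sketch driving the class above end to end, using only methods it defines (canOpenFile, load, count, actionForIndex, deleteIndex, save). The tub path is hypothetical; the progress callback signature follows the _load(idx, total) call, and since _load invokes the callback unconditionally a no-op lambda is passed rather than None.

# Hedged sketch; '/data/tub_v2' is a hypothetical tub directory.
path = '/data/tub_v2'
if Tubv2Format.canOpenFile(path):
    fmt = Tubv2Format(path)
    fmt.load(progress=lambda idx, total: None)   # no-op callback; _load calls it per record
    print(f'{fmt.count()} records, first action: {fmt.actionForIndex(0)}')
    fmt.deleteIndex(0)     # toggles the deleted flag for the first record
    fmt.save()             # persists deleted indexes and any edited lines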
Example #22
class MakeMovie(object):
    def run(self, args, parser):
        '''
        Load the images from a tub and create a movie from them.
        Movie
        '''
        global cfg

        if args.tub is None:
            print("ERR>> --tub argument missing.")
            parser.print_help()
            return

        conf = os.path.expanduser(args.config)
        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        cfg = dk.load_config(conf)

        if args.type is None and args.model is not None:
            args.type = cfg.DEFAULT_MODEL_TYPE
            print(
                "Model type not provided. Using default model type from config file"
            )

        if args.salient:
            if args.model is None:
                print(
                    "ERR>> salient visualization requires a model. Pass with the --model arg."
                )
                parser.print_help()

            #if args.type not in ['linear', 'categorical']:
            #    print("Model type {} is not supported. Only linear or categorical is supported for salient visualization".format(args.type))
            #    parser.print_help()
            #    return

        self.tub = Tub(args.tub)

        start = args.start
        self.end_index = args.end if args.end != -1 else len(self.tub)
        num_frames = self.end_index - start

        # Move to the correct offset
        self.current = 0
        self.iterator = self.tub.__iter__()
        while self.current < start:
            self.iterator.next()
            self.current += 1

        self.scale = args.scale
        self.keras_part = None
        self.do_salient = False
        self.user = args.draw_user_input
        self.pilot_angle = 0.0
        self.pilot_throttle = 0.0
        self.pilot_score = 1.0  # used for color intensity
        self.user_angle = 0.0
        self.user_throttle = 0.0
        self.control_score = 0.25  # used for control size
        self.flag_test_pilot_angle = 1
        self.flag_test_pilot_throttle = 1
        self.flag_test_user_angle = 1
        self.flag_test_user_throttle = 1
        self.throttle_circle_pilot_angle = 0
        self.throttle_circle_user_angle = 0
        self.is_test = False
        self.last_pilot_throttle = 0.0  # used for color transparency
        self.last_user_throttle = 0.0  # used for color transparency
        self.pilot_throttle_trans = 1.0  # used for color transparency
        self.user_throttle_trans = 1.0  # used for color transparency
        self.pilot_throttle_trans_rate = 0.25  # used for color transparency
        self.user_throttle_trans_rate = 0.25  # used for color transparency

        if args.model is not None:
            self.keras_part = get_model_by_type(args.type, cfg=cfg)
            self.keras_part.load(args.model)
            if args.salient:
                self.do_salient = self.init_salient(
                    self.keras_part.interpreter.model)

        print('making movie', args.out, 'from', num_frames, 'images')
        clip = mpy.VideoClip(self.make_frame,
                             duration=((num_frames - 1) / cfg.DRIVE_LOOP_HZ))
        clip.write_videofile(args.out, fps=cfg.DRIVE_LOOP_HZ)

    @staticmethod
    def draw_line_into_image(angle, throttle, is_pred, img, color):
        """
        is_pred:
            True: from draw_model_prediction()
            False: from draw_user_input()
        """

        height, width, _ = img.shape
        mid_h = height // 2
        length = height // 4
        a1 = angle * 45.0
        l1 = throttle * length
        mid_w = width // 2 + (-1 if is_pred else +1)

        p1 = tuple((mid_w - 2, mid_h - 1))
        p11 = tuple((int(p1[0] + l1 * math.cos((a1 + 270.0) * DEG_TO_RAD)),
                     int(p1[1] + l1 * math.sin((a1 + 270.0) * DEG_TO_RAD))))

        cv2.line(img, p1, p11, color, 2)

    def get_user_input(self, record, img):
        """
        Get the user input from record
        """
        if self.is_test:
            self.user_angle_a = float(record["user/angle"])
            self.user_throttle_a = float(record["user/throttle"])
        else:
            self.user_angle = float(record["user/angle"])
            self.user_throttle = float(record["user/throttle"])

    def get_model_prediction(self, img, salient_image):
        """
        Get the pilot input from model prediction
        """
        if self.keras_part is None:
            return

        #expected = tuple(self.keras_part.get_input_shape()[1:])
        expected = self.keras_part.interpreter.model.inputs[0].shape[1:]
        actual = img.shape

        # if model expects grey-scale but got rgb, convert
        if expected[2] == 1 and actual[2] == 3:
            # normalize image before grey conversion
            grey_img = rgb2gray(img)
            actual = grey_img.shape
            img = grey_img.reshape(grey_img.shape + (1, ))

        if expected != actual:
            print(f"expected input dim {expected} didn't match actual dim "
                  f"{actual}")
            return

        if self.is_test:
            self.pilot_angle_a, self.pilot_throttle_a = self.keras_part.run(
                img)
        else:
            self.pilot_angle, self.pilot_throttle = self.keras_part.run(img)

    def draw_steering_distribution(self, img, salient_image):
        """
        query the model for its prediction, draw the distribution of
        steering choices
        """
        from donkeycar.parts.keras import KerasCategorical

        if self.keras_part is None or type(
                self.keras_part) is not KerasCategorical:
            return

        pred_img = normalize_image(img)
        pred_img = pred_img.reshape((1, ) + pred_img.shape)
        angle_binned, _ = self.keras_part.interpreter.predict(pred_img)

        x = 4
        dx = 4
        y = cfg.IMAGE_H - 4
        iArgMax = np.argmax(angle_binned)
        for i in range(15):
            p1 = (x, y)
            p2 = (x, y - int(angle_binned[0][i] * 100.0))
            if i == iArgMax:
                cv2.line(salient_image, p1, p2, (255, 0, 0), 2)
            else:
                cv2.line(salient_image, p1, p2, (200, 200, 200), 2)
            x += dx

    def init_salient(self, model):
        # Utility to search for layer index by name.
        # Alternatively we can specify this as -1 since it corresponds to the last layer.
        model.summary()
        self.output_names = []

        for i, layer in enumerate(model.layers):
            if "dropout" not in layer.name.lower(
            ) and "out" in layer.name.lower():
                self.output_names += [layer.name]

        if len(self.output_names) == 0:
            print(
                "Failed to find the model layer named with 'out'. Skipping salient."
            )
            return False

        print("####################")
        print("Visualizing activations on layer:", *self.output_names)
        print("####################")

        # Create Saliency object.
        # If `clone` is True (default), the `model` will be cloned,
        # so the `model` instance will NOT be modified, but it takes more machine resources.
        self.saliency = Saliency(model,
                                 model_modifier=self.model_modifier,
                                 clone=False)
        # Create GradCAM++ object; just replace the class name with "GradcamPlusPlus"
        self.gradcampp = GradcamPlusPlus(model,
                                         model_modifier=self.model_modifier,
                                         clone=False)

        return True

    def draw_gradcam_pp(self, img):

        x = preprocess_input(img, mode='tf')

        # Generate heatmap with GradCAM++
        salient_map = self.gradcampp(
            self.loss,
            x,
            penultimate_layer=-1,  # model.layers number
        )

        return self.draw_mask(img, salient_map)

    def draw_salient(self, img):
        # https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
        x = preprocess_input(img, mode='tf')

        # Generate a saliency map with smoothing that reduces noise by adding noise
        salient_map = self.saliency(self.loss, x)
        return self.draw_mask(img, salient_map)

    def draw_mask(self, img, salient_map):
        if salient_map[0].size != cfg.IMAGE_W * cfg.IMAGE_H:
            print("salient size failed.")
            return

        salient_map = salient_map[0]
        salient_map = salient_map * 255
        salient_mask = cv2.cvtColor(salient_map, cv2.COLOR_GRAY2RGB)
        salient = cv2.applyColorMap(salient_map.astype('uint8'),
                                    cv2.COLORMAP_JET)
        salient = cv2.applyColorMap(salient, cv2.COLOR_BGR2RGB)
        salient = cv2.bitwise_and(salient, salient_mask.astype('uint8'))

        blend = cv2.addWeighted(img, 1.0, salient, 1.0, 0.0)

        return blend

    def make_frame(self, t):
        '''
        Callback to return an image from our tub records.
        This is called from the VideoClip as it references a time.
        We don't use t to reference the frame, but instead increment
        a frame counter. This assumes sequential access.
        '''

        if self.current >= self.end_index:
            return None

        rec = self.iterator.next()
        img_path = os.path.join(self.tub.images_base_path,
                                rec['cam/image_array'])
        camera_image = img_to_arr(Image.open(img_path))

        salient_image = None
        if self.do_salient:
            salient_image = self.draw_salient(camera_image)
            #salient_image = self.draw_gradcam_pp(camera_image)
            salient_image = salient_image.astype('uint8')
        if salient_image is None:
            salient_image = camera_image
        # resize
        if self.scale != 1:
            h, w, d = salient_image.shape
            dsize = (w * self.scale, h * self.scale)
            salient_image = cv2.resize(salient_image,
                                       dsize=dsize,
                                       interpolation=cv2.INTER_CUBIC)

        # draw control
        if self.keras_part is not None:
            self.get_model_prediction(camera_image, salient_image)
            self.draw_steering_distribution(camera_image, salient_image)
        if self.user: self.get_user_input(rec, salient_image)
        salient_image = self.draw_control_into_image(salient_image)
        """
        # left upper text
        display_str = []
        display_str.append(f"pilot_angle: {self.pilot_angle:.2f}")
        display_str.append(f"pilot_throttle: {self.pilot_throttle:.2f}")
        self.draw_text(salient_image, display_str)
        """

        self.current += 1
        # returns an 8-bit RGB array
        return salient_image

    def model_modifier(self, m):
        m.layers[-1].activation = tf.keras.activations.linear

    def loss(self, output):
        return (output[0])

    def draw_control_into_image(self, img):
        """
        test
        self.pilot_score = np.random.default_rng().uniform(low=0.2, high=1.0)
        self.pilot_throttle = np.random.default_rng().uniform(low=-1.0, high=1.0)
        self.pilot_angle = np.random.default_rng().uniform(low=-1.0, high=1.0)
        """
        if self.is_test:
            if self.pilot_angle >= 1.0:
                self.flag_test_pilot_angle = -0.1
            elif self.pilot_angle <= -1.0:
                self.flag_test_pilot_angle = +0.1
            self.pilot_angle += self.flag_test_pilot_angle

            if self.pilot_throttle >= 1.0:
                self.flag_test_pilot_throttle = -0.01
            elif self.pilot_throttle <= -1.0:
                self.flag_test_pilot_throttle = +0.01
            self.pilot_throttle += self.flag_test_pilot_throttle

            if self.user_angle >= 1.0:
                self.flag_test_user_angle = -0.05
            elif self.user_angle <= -1.0:
                self.flag_test_user_angle = +0.05
            self.user_angle += self.flag_test_user_angle

            if self.user_throttle >= 1.0:
                self.flag_test_user_throttle = -0.02
            elif self.user_throttle <= -1.0:
                self.flag_test_user_throttle = +0.02
            self.user_throttle += self.flag_test_user_throttle

            #self.control_score = abs(self.pilot_throttle/4*3) + 0.25

        height, width, _ = img.shape
        y = height // 2
        x = width // 2

        # prepare ellipse mask
        r_base = 6.0
        r_mask = int(height // r_base)
        white = np.ones_like(img) * 255
        ellipse = np.zeros_like(img)

        r_pilot = int(height // r_base + (height // 3.1) * self.control_score)
        r_user = int(height // r_base + (height // 6.2) * self.control_score)

        pilot_trans, user_trans = self.trans_make()

        # draw pilot control
        green = (0, int(255 * pilot_trans), 0)  # green for reverse
        blue = (0, 0, int(255 * pilot_trans))  # blue for reverse
        self.draw_ellipse(self.pilot_angle, self.pilot_throttle, x, y, r_pilot,
                          ellipse, green, blue)

        pilot_circle_mask = cv2.circle(
            white, (int(x), int(y)),
            int(r_user + (height // 10) * self.control_score), (0, 0, 0),
            -1).astype('uint8')
        # pilot mask
        ellipse = cv2.bitwise_and(ellipse, pilot_circle_mask)

        # draw user control
        green = (0, int(255 * user_trans), 0)  # green for reverse
        blue = (0, 0, int(255 * user_trans))  # blue for reverse
        orange = (255, 69, 0)  # orange for reverse
        self.draw_ellipse(self.user_angle, self.user_throttle, x, y, r_user,
                          ellipse, green, blue)
        white = np.ones_like(img) * 255

        user_circle_mask = cv2.circle(
            white, (int(x), int(y)),
            int(r_mask + (height // 10) * self.control_score), (0, 0, 0),
            -1).astype('uint8')
        # user mask
        ellipse = cv2.bitwise_and(ellipse, user_circle_mask)

        # draw circle
        color1 = (0, 0, 215)
        color2 = (0, 25, 78)
        color3 = (0, 255, 215)
        cv2.circle(ellipse, (int(x), int(y)), int(r_pilot), color1, 1)
        cv2.circle(ellipse, (int(x), int(y)), int(r_user), color2, 1)
        cv2.circle(ellipse, (int(x), int(y)), int(r_mask), color3, 1)

        # draw dot circle
        ellipse = self.draw_dot_circle2(ellipse, x, y,
                                        r_pilot + 1 + self.scale // 4,
                                        1 + self.scale // 4, (0, 255, 218), -1,
                                        4 * self.scale, self.pilot_throttle,
                                        True)
        ellipse = self.draw_dot_circle2(ellipse, x, y,
                                        r_user + 1 + self.scale // 4,
                                        1 + self.scale // 4, (0, 255, 218), -1,
                                        4 * self.scale, self.user_throttle,
                                        False)

        # draw speed meter
        #self.draw_analog_meter(ellipse, self.pilot_speed)
        self.draw_analog_direction_meter(ellipse, self.pilot_angle,
                                         self.pilot_throttle)
        ellipse = self.draw_digital_meter(ellipse,
                                          r_mask - (2 + self.scale // 4),
                                          self.pilot_throttle, 18 * self.scale,
                                          pilot_trans)
        ellipse = self.draw_digital_meter(
            ellipse, r_mask - (2 + self.scale // 4) * 2 -
            (width // 10) * self.control_score, self.user_throttle,
            12 * self.scale, user_trans)

        #self.draw_analog_meter(ellipse, -0.75)
        #print(f"r_pilot: {r_pilot}")
        # draw stripe circle
        if self.pilot_throttle >= 0:
            flag_pilot_throttle = 1
            add_pilot_deg = 180
        else:
            flag_pilot_throttle = -1
            add_pilot_deg = 0
        if self.user_throttle >= 0:
            flag_user_throttle = 1
            add_user_deg = 180
        else:
            flag_user_throttle = -1
            #add_user_deg = 0
            add_user_deg = 180
        """
        p1 = (x,y-flag_pilot_throttle*r_pilot)
        p2 = (x,y+flag_pilot_throttle*y//10)
        red = (255,0,0)
        pts = self.points_rotation([p1,p2], center=(x,y), degrees=flag_pilot_throttle*self.pilot_angle*90)
        cv2.line(ellipse, pts[0], pts[1], red, 2)
        cv2.line(ellipse, p1, p2, red, 2)
        """
        red = (255, 0, 0)
        #self.draw_arc_center_line(ellipse, x,y,r_pilot+1+(height//40),r_user+(height//20)*self.control_score, red, 2, degrees=flag_pilot_throttle*self.pilot_angle*90+add_pilot_deg)
        self.draw_arc_center_line(
            ellipse,
            x,
            y,
            r_user + 1 + (height // 40),
            r_mask + (height // 20) * self.control_score,
            red,
            2,
            degrees=flag_user_throttle * self.user_angle * 90 + add_user_deg)

        # left upper text
        """
        display_str = []
        display_str.append(f"x: {x}")
        display_str.append(f"y: {y}")
        display_str.append(f"r_pilot: {r_pilot}")
        display_str.append(f"angle: {self.pilot_angle:.2f}")
        display_str.append(f"throttle: {self.pilot_throttle:.2f}")
        self.draw_text(ellipse, display_str)
        """

        # blur
        if self.scale <= 3:
            ellipse = cv2.GaussianBlur(ellipse, (3, 3), 0)
        else:
            ellipse = cv2.GaussianBlur(ellipse, (5, 5), 0)

        self.last_pilot_throttle = self.pilot_throttle
        self.last_user_throttle = self.user_throttle

        return cv2.addWeighted(img, 1.0, ellipse, 1.0, 0.0)

    def trans_make(self):
        if self.pilot_throttle != self.last_pilot_throttle:
            if self.pilot_throttle_trans >= 0.8:
                self.pilot_throttle_trans_rate = -0.25
            elif self.pilot_throttle_trans <= 0.3:
                self.pilot_throttle_trans_rate = +0.25
            self.pilot_throttle_trans += self.pilot_throttle_trans_rate
        else:
            self.pilot_throttle_trans = 1.0
        if self.user_throttle != self.last_user_throttle:
            if self.user_throttle_trans >= 0.8:
                self.user_throttle_trans_rate = -0.25
            elif self.user_throttle_trans <= 0.3:
                self.user_throttle_trans_rate = +0.25
            self.user_throttle_trans += self.user_throttle_trans_rate
        else:
            self.user_throttle_trans = 1.0
        return self.pilot_throttle_trans, self.user_throttle_trans

    def draw_analog_meter(self, img, value):
        height, width, _ = img.shape
        y = height // 2
        x = width // 2

        r_base = 6.0
        r_pilot = int(height // r_base + (height // 3.1) * self.control_score)
        red = (255, 0, 0)

        p1 = (x, y - r_pilot)
        p2 = (x, y + y // 10)
        pts = self.points_rotation([p1, p2],
                                   center=(x, y),
                                   degrees=value * 180 - 90)
        cv2.line(img, tuple(pts[0]), tuple(pts[1]), red, 2)

    def draw_analog_direction_meter(self, img, angle, throttle):
        height, width, _ = img.shape
        y = height // 2
        x = width // 2

        r_base = 6.0
        r_pilot = int(height // r_base + (height // 3.1) * self.control_score)
        red = (255, 0, 0)

        p1 = (x, y - r_pilot)
        p2 = (x, y + y // 10)
        if throttle >= 0:
            flag_throttle = 1
            add_deg = 0
        else:
            flag_throttle = -1
            #add_deg = 180
            add_deg = 0

        pts = self.points_rotation([p1, p2],
                                   center=(x, y),
                                   degrees=flag_throttle * angle * 90 +
                                   add_deg)
        cv2.line(img, tuple(pts[0]), tuple(pts[1]), red, 2)

    def draw_arc_center_line(self, img, x, y, r1, r2, color, thickness,
                             degrees):
        base_point1 = np.array([0, r1])
        base_point2 = np.array([0, r2])
        rot = self.rot(degrees)
        p1 = np.dot(
            rot,
            base_point1)  # NEVER DO .astype('uint8'). x,y is more than 255.
        p2 = np.dot(rot, base_point2)
        cv2.line(img, (int(p1[0] + x), int(p1[1] + y)),
                 (int(p2[0] + x), int(p2[1] + y)), color, thickness)

    def color_make(self, value):
        """
        Rainbow color maker.
        value: -1.0 to 1.0

        abs(value) 0.0: blue
        abs(value) 0.5: green
        abs(value) 1.0: red
        """
        value = abs(value)
        if value > 1:
            value = 1

        c = int(255 * value)
        c1 = int(255 * (value * 2 - 0.5))
        c05 = int(255 * value * 2)
        if c > 255:
            c = 255
        elif c < 0:
            c = 0
        if c1 > 255:
            c1 = 255
        elif c1 < 0:
            c1 = 0
        if c05 > 255:
            c05 = 255
        elif c05 < 0:
            c05 = 0

        if 0 <= value and value < 0.5:
            color = (0, c05, 255 - c05)  # blue -> green
        elif 0.5 <= value and value <= 1.0:
            color = (c1, c05 - c1, 0)  # green -> red
        elif 1.0 < value:
            color = (255, 0, 0)  # red

        return color

    def draw_digital_meter(self, img, r_mask, value, num, trans):
        """
        Rainbow digital throttle meter.
        img: image to draw
        r_mask: circumferential radius
        value: -1.0 to 1.0
        num: number of boxes
        trans: transparency factor applied to the boxes while the value is changing
        """
        if num > 36:
            num = 36
        height, width, _ = img.shape
        y = height // 2
        x = width // 2
        h = (height // 20) * self.control_score
        w = (width // 10) * self.control_score
        base_points = np.array([[r_mask, h / 2], [r_mask, -h / 2],
                                [-w + r_mask, -h / 2], [-w + r_mask, h / 2]])
        dot_angle = 360.0 / num
        center = np.atleast_2d((x, y))
        start_angle = 145

        mask_img = np.zeros_like(img)
        meter_img = np.zeros_like(img)
        self.draw_digital_mask(value, x, y, r_mask, mask_img)

        for i in range(0, num):
            deg = i * dot_angle + start_angle
            box = self.points_rotation(base_points, center=(0, 0), degrees=deg)
            box = ((box.T + center.T).T)
            if i == 0:
                color = (0, 0, 255 * trans)
            else:
                if value >= 0:
                    color = self.color_make(i * dot_angle / 270)
                else:
                    color = self.color_make((360 - i * dot_angle) / 270)
                color = (color[0] * trans, color[1] * trans, color[2] * trans)
            cv2.fillPoly(meter_img, np.array([box], dtype=np.int32), color)

        meter_img = cv2.bitwise_and(meter_img.astype('uint8'),
                                    mask_img.astype('uint8'))
        img = cv2.bitwise_or(img, meter_img.astype('uint8'))
        return img

    def draw_dot_circle(self, img, x, y, r, cr, color, thickness, num):
        """
        Draw outer dot circle.
        """
        if num > 36:
            num = 36
        base_point = np.array([r, 0])
        dot_angle = 360.0 / num
        for i in range(0, num):
            deg = i * dot_angle
            rot = self.rot(deg)
            rotated = np.dot(rot, base_point)
            img = cv2.circle(img, (int(rotated[0] + x), int(rotated[1] + y)),
                             cr, color, thickness).astype('uint8')
        return img

    def draw_dot_circle2(self, img, x, y, r, cr, color, thickness, num,
                         throttle, is_pilot):
        """
        Draw outer dot circle.
        This circle rotates according to the throttle.
        """
        if num > 36:
            num = 36
        dot_angle = 360.0 / num
        rot_spd = throttle * dot_angle
        if rot_spd > dot_angle / 2.0:  # rotation speed limit
            rot_spd = dot_angle / 2.0
        elif rot_spd < -dot_angle / 2.0:
            rot_spd = -dot_angle / 2.0
        if is_pilot:
            self.throttle_circle_pilot_angle += rot_spd
            start_angle = self.throttle_circle_pilot_angle
        else:
            self.throttle_circle_user_angle += rot_spd
            start_angle = self.throttle_circle_user_angle
        rot = self.rot(start_angle)
        base_point = np.dot(rot, np.array([r, 0]))
        throttle = abs(throttle)
        for i in range(0, num):
            deg = i * dot_angle
            rot = self.rot(deg)
            rotated = np.dot(rot, base_point)

            color = self.color_make(throttle)
            img = cv2.circle(img, (int(rotated[0] + x), int(rotated[1] + y)),
                             cr, color, thickness).astype('uint8')
            #print(f'i: {i}, deg: {deg}, (x,y): ({int(rotated[0]+x),int(rotated[1]+y)})')
        return img

    def rot(self, degrees):
        rad = np.deg2rad(degrees)
        return np.array([[np.cos(rad), -np.sin(rad)],
                         [np.sin(rad), np.cos(rad)]])

    def points_rotation_test(self, pts, center=(0, 0), degrees=0):
        """
        the same as points_rotation()
        """
        res = None
        for pt in pts:
            base_point = np.array([pt[0] - center[0], pt[1] - center[1]])
            rot = self.rot(degrees)
            p = np.dot(rot, base_point)
            p = (int(p[0] + center[0]), int(p[1] + center[1]))
            if res is None:
                res = p
            else:
                res = np.vstack((res, p))
        return res

    def points_rotation(self, pts, center=(0, 0), degrees=0):
        """
        https://stackoverflow.com/questions/34372480/rotate-point-about-another-point-in-degrees-python
        This function rotates the polygon coordinates.
        pts: 2D polygon coordinates 
        center: Center coordinates of rotation
        degrees: angle
        """
        R = self.rot(degrees)
        o = np.atleast_2d(center)
        pts = np.atleast_2d(pts)
        return np.squeeze((R @ (pts.T - o.T) + o.T).T).astype(
            'int32'
        )  # 'int16' or more; x,y can be larger than 255, and cv2.fillPoly requires int32

    def draw_ellipse(self, angle, throttle, x, y, r, img, color,
                     reverse_color):
        """
        Draw the outer arc.
        """
        angle_deg = int(throttle * 270)
        if throttle < 0:
            center_deg = +90 + abs(angle_deg / 2)
            color = reverse_color
        else:
            center_deg = -90 - abs(angle_deg / 2)

        if throttle < 0:
            angle = angle * -1.0
        rotate_deg = int(angle * 90) + center_deg

        cv2.ellipse(img, (int(x), int(y)), (int(r), int(r)), rotate_deg, 0,
                    angle_deg, color, -1)

    def draw_digital_mask(self, throttle, x, y, r, img):
        """
        throttle: -1.0 to 1.0
        x: x coordinate in the center of the image
        y: y coordinate in the center of the image
        r: radius of arc
        img: mask image
        """
        angle_deg = int(throttle * 270)  # ellipse angle
        rotate_deg = 145  # ellipse start position
        color = (255, 255, 255)
        cv2.ellipse(img, (int(x), int(y)), (int(r), int(r)), rotate_deg, 0,
                    angle_deg, color, -1)

    def draw_text(self, img, display_str=[]):
        """ STATISTICS FONT """
        fontScale = img.shape[0] / 1000.0
        if fontScale < 0.4:
            fontScale = 0.4
        fontThickness = 1 + int(fontScale)
        fontFace = cv2.FONT_HERSHEY_SIMPLEX

        max_text_width = 0
        max_text_height = 0
        """ DRAW BLACK BOX AND TEXT """
        [(text_width, text_height),
         baseLine] = cv2.getTextSize(text=display_str[0],
                                     fontFace=fontFace,
                                     fontScale=fontScale,
                                     thickness=fontThickness)
        x_left = int(baseLine)
        y_top = int(baseLine)
        for i in range(len(display_str)):
            [(text_width, text_height),
             baseLine] = cv2.getTextSize(text=display_str[i],
                                         fontFace=fontFace,
                                         fontScale=fontScale,
                                         thickness=fontThickness)
            if max_text_width < text_width:
                max_text_width = text_width
            if max_text_height < text_height:
                max_text_height = text_height
        """ DRAW BLACK BOX """
        cv2.rectangle(
            img, (x_left - 2, int(y_top)),
            (int(x_left + max_text_width + 2),
             int(y_top + len(display_str) * max_text_height * 1.2 + baseLine)),
            color=(0, 0, 0),
            thickness=-1)
        """ DRAW FPS, TEXT """
        for i in range(len(display_str)):
            cv2.putText(img,
                        display_str[i],
                        org=(x_left, y_top + int(max_text_height * 1.2 +
                                                 (max_text_height * 1.2 * i))),
                        fontFace=fontFace,
                        fontScale=fontScale,
                        thickness=fontThickness,
                        color=(77, 255, 9))
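
The draw_text helper above follows a common OpenCV pattern: measure each string with cv2.getTextSize, paint a filled black rectangle behind the block, then putText each line. A minimal standalone sketch of that pattern (file names and values are illustrative, not taken from the original example):

import cv2
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)   # stand-in camera frame
lines = ['fps: 20.0', 'throttle: 0.45']
font, scale, thick = cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1

# measure the widest and tallest line so the background box fits all of them
sizes = [cv2.getTextSize(s, font, scale, thick)[0] for s in lines]
box_w = max(w for w, h in sizes) + 4
line_h = int(max(h for w, h in sizes) * 1.2)

cv2.rectangle(frame, (0, 0), (box_w, line_h * len(lines) + 4), (0, 0, 0), -1)
for i, s in enumerate(lines):
    cv2.putText(frame, s, (2, line_h * (i + 1)), font, scale, (77, 255, 9), thick)
cv2.imwrite('overlay_demo.jpg', frame)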
Example No. 23
0
class MakeMovie(object):
    def __init__(self):
        self.deg_to_rad = math.pi / 180.0

    def run(self, args, parser):
        '''
        Load the images from a tub and create a movie from them.
        '''

        if args.tub is None:
            print("ERR>> --tub argument missing.")
            parser.print_help()
            return

        conf = os.path.expanduser(args.config)
        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        self.cfg = dk.load_config(conf)

        if args.type is None and args.model is not None:
            args.type = self.cfg.DEFAULT_MODEL_TYPE
            print(
                "Model type not provided. Using default model type from config file"
            )

        if args.salient:
            if args.model is None:
                print("ERR>> salient visualization requires a model. "
                      "Pass with the --model arg.")
                parser.print_help()
                return

            if args.type not in ['linear', 'categorical']:
                print(
                    "Model type {} is not supported. Only linear or categorical is supported for salient visualization"
                    .format(args.type))
                parser.print_help()
                return

        self.tub = Tub(args.tub)

        start = args.start
        self.end_index = args.end if args.end != -1 else len(self.tub)
        num_frames = self.end_index - start

        # Move to the correct offset
        self.current = 0
        self.iterator = iter(self.tub)
        while self.current < start:
            next(self.iterator)
            self.current += 1

        self.scale = args.scale
        self.keras_part = None
        self.do_salient = False
        self.user = args.draw_user_input
        if args.model is not None:
            self.keras_part = get_model_by_type(args.type, cfg=self.cfg)
            self.keras_part.load(args.model)
            if args.salient:
                self.do_salient = self.init_salient(self.keras_part.model)

        print('making movie', args.out, 'from', num_frames, 'images')
        clip = mpy.VideoClip(self.make_frame,
                             duration=((num_frames - 1) /
                                       self.cfg.DRIVE_LOOP_HZ))
        clip.write_videofile(args.out, fps=self.cfg.DRIVE_LOOP_HZ)
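    # Hedged invocation sketch (paths and file names are placeholders; this is
    # inferred only from the args consumed above, not from the CLI wiring):
    #   donkey makemovie --tub ./data/tub --out tub_movie.mp4 \
    #       --config ./config.py --model ./models/pilot.h5 --type linear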

    def draw_user_input(self, record, img):
        '''
        Draw the user input as a green line on the image
        '''

        import cv2

        user_angle = float(record["user/angle"])
        user_throttle = float(record["user/throttle"])

        height, width, _ = img.shape

        length = height
        a1 = user_angle * 45.0
        l1 = user_throttle * length

        mid = width // 2 - 1

        p1 = (mid - 2, height - 1)
        p11 = (int(p1[0] + l1 * math.cos((a1 + 270.0) * self.deg_to_rad)),
               int(p1[1] + l1 * math.sin((a1 + 270.0) * self.deg_to_rad)))

        # user is green, pilot is blue
        cv2.line(img, p1, p11, (0, 255, 0), 2)
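    # Geometry note (added): with angle=0 and throttle=1.0 the green line runs
    # straight up from just left of the bottom-centre pixel for the full image
    # height; the 45-degree factor fans the line left or right as the user steers.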

    def draw_model_prediction(self, img):
        """
        query the model for it's prediction, draw the predictions
        as a blue line on the image
        """
        if self.keras_part is None:
            return

        import cv2

        expected = tuple(self.keras_part.get_input_shape()[1:])
        actual = img.shape

        # if the model expects grey-scale but the image is rgb, convert it
        if expected[2] == 1 and actual[2] == 3:
            # convert to grey-scale to match the expected input depth
            grey_img = rgb2gray(img)
            actual = grey_img.shape
            img = grey_img.reshape(grey_img.shape + (1, ))

        if expected != actual:
            print("expected input dim", expected, "didn't match actual dim",
                  actual)
            return

        pilot_angle, pilot_throttle = self.keras_part.run(img)
        height, width, _ = img.shape

        length = height
        a2 = pilot_angle * 45.0
        l2 = pilot_throttle * length

        mid = width // 2 - 1

        p2 = (mid + 2, height - 1)
        p22 = (int(p2[0] + l2 * math.cos((a2 + 270.0) * self.deg_to_rad)),
               int(p2[1] + l2 * math.sin((a2 + 270.0) * self.deg_to_rad)))

        # user is green, pilot is blue
        cv2.line(img, p2, p22, (0, 0, 255), 2)

    def draw_steering_distribution(self, img):
        '''
        Query the model for its prediction and draw the distribution
        of steering choices.
        '''
        from donkeycar.parts.keras import KerasCategorical

        if self.keras_part is None or type(
                self.keras_part) is not KerasCategorical:
            return

        import cv2

        pred_img = normalize_image(img)
        pred_img = pred_img.reshape((1, ) + pred_img.shape)
        angle_binned, _ = self.keras_part.model.predict(pred_img)

        x = 4
        dx = 4
        y = 120 - 4
        iArgMax = np.argmax(angle_binned)
        for i in range(15):
            p1 = (x, y)
            p2 = (x, y - int(angle_binned[0][i] * 100.0))
            if i == iArgMax:
                cv2.line(img, p1, p2, (255, 0, 0), 2)
            else:
                cv2.line(img, p1, p2, (200, 200, 200), 2)
            x += dx

    def init_salient(self, model):
        # Utility to search for layer index by name.
        # Alternatively we can specify this as -1 since it corresponds to the last layer.
        first_output_name = None
        for i, layer in enumerate(model.layers):
            if (first_output_name is None
                    and "dropout" not in layer.name.lower()
                    and "out" in layer.name.lower()):
                first_output_name = layer.name
                layer_idx = i

        if first_output_name is None:
            print(
                "Failed to find the model layer named with 'out'. Skipping salient."
            )
            return False

        print("####################")
        print("Visualizing activations on layer:", first_output_name)
        print("####################")

        # ensure we have linear activation
        model.layers[layer_idx].activation = activations.linear
        # build salient model and optimizer
        sal_model = utils.apply_modifications(model)
        modifier_fn = get('guided')
        sal_model_mod = modifier_fn(sal_model)
        losses = [(ActivationMaximization(sal_model_mod.layers[layer_idx],
                                          None), -1)]
        self.opt = Optimizer(sal_model_mod.input, losses, norm_grads=False)
        return True
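    # The salient-map setup above relies on the keras-vis package; the imports
    # are not shown in this excerpt but are presumably along the lines of
    # (an assumption, not verified against the original file):
    #   from tensorflow.python.keras import activations
    #   from vis.utils import utils
    #   from vis.losses import ActivationMaximization
    #   from vis.optimizer import Optimizer
    #   from vis.backprop_modifiers import get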

    def compute_visualisation_mask(self, img):
        grad_modifier = 'absolute'
        grads = self.opt.minimize(seed_input=img,
                                  max_iter=1,
                                  grad_modifier=grad_modifier,
                                  verbose=False)[1]
        channel_idx = 1 if K.image_data_format() == 'channels_first' else -1
        grads = np.max(grads, axis=channel_idx)
        res = utils.normalize(grads)[0]
        return res

    def draw_salient(self, img):
        import cv2
        alpha = 0.004
        beta = 1.0 - alpha
        expected = self.keras_part.model.inputs[0].shape[1:]
        actual = img.shape

        # check input depth and convert to grey to match expected model input
        if expected[2] == 1 and actual[2] == 3:
            grey_img = rgb2gray(img)
            img = grey_img.reshape(grey_img.shape + (1, ))

        norm_img = normalize_image(img)
        salient_mask = self.compute_visualisation_mask(norm_img)
        z = np.zeros_like(salient_mask)
        salient_mask_stacked = np.dstack((z, z))
        salient_mask_stacked = np.dstack((salient_mask_stacked, salient_mask))
        blend = cv2.addWeighted(img.astype('float32'), alpha,
                                salient_mask_stacked, beta, 0.0)
        return blend

    def make_frame(self, t):
        '''
        Callback to return an image from our tub records.
        This is called from the VideoClip as it references a time.
        We don't use t to reference the frame, but instead increment
        a frame counter. This assumes sequential access.
        '''

        if self.current >= self.end_index:
            return None

        rec = next(self.iterator)
        img_path = os.path.join(self.tub.images_base_path,
                                rec['cam/image_array'])
        image = img_to_arr(Image.open(img_path))

        if self.do_salient:
            image = self.draw_salient(image)
            image = image * 255
            image = image.astype('uint8')

        if self.user:
            self.draw_user_input(rec, image)
        if self.keras_part is not None:
            self.draw_model_prediction(image)
            self.draw_steering_distribution(image)

        if self.scale != 1:
            import cv2
            h, w, d = image.shape
            dsize = (w * self.scale, h * self.scale)
            image = cv2.resize(image,
                               dsize=dsize,
                               interpolation=cv2.INTER_CUBIC)

        self.current += 1
        # returns a 8-bit RGB array
        return image
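    # A minimal, self-contained sketch (an illustration of the pattern, not
    # taken from the original) of the make_frame/VideoClip contract this class
    # relies on:
    #   import numpy as np
    #   import moviepy.editor as mpy
    #   FPS = 20
    #   frames = [np.full((120, 160, 3), i % 255, dtype=np.uint8)
    #             for i in range(100)]
    #   def make_frame(t):
    #       # map the clip time back to a frame index, clamping at the end
    #       return frames[min(int(t * FPS), len(frames) - 1)]
    #   clip = mpy.VideoClip(make_frame, duration=(len(frames) - 1) / FPS)
    #   clip.write_videofile('demo.mp4', fps=FPS)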