Example #1
    def post_process(self, out_folder_base):

        output_parent_folder = out_folder_base
        tools.check_dir(output_parent_folder)
        output_folder = os.path.join(
            output_parent_folder,
            self.__class__.__name__)
        tools.check_dir(output_folder)

        # image enhancement: linearly stretch pixel values to [0, 255]
        data = copy.deepcopy(self.output_data)
        min_value = data.min()
        max_value = data.max()
        if max_value == min_value:
            ratio = 0  # constant image, avoid division by zero
        else:
            ratio = 255 / (max_value - min_value)
        h = self.param['height']
        w = self.param['width']
        # vectorized rescale replaces the original per-pixel loop
        data[:h, :w] = (data[:h, :w] - min_value) * ratio

        # write image
        out_filename = os.path.join(
            output_folder,
            self.metadata[0] + '.png')
        image.save_img(data, fname=out_filename)
        log_info = '[IrConverter] save image to: {}'.format(out_filename)
        print(log_info)
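
Note: every example on this page calls tools.check_dir before writing into a
folder. A minimal sketch of what such a helper presumably does (an assumption;
the actual dyda implementation may differ):

import os

def check_dir(dirpath):
    """Create dirpath (including missing parents) if it does not exist."""
    if not os.path.isdir(dirpath):
        os.makedirs(dirpath, exist_ok=True)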
Example #2
    def post_process(self):
        if self.svfolder_path:
            if not os.path.isdir(self.svfolder_path):
                tools.check_dir(self.svfolder_path)
            elif self.lab_flag:
                while self.query_flag:
                    print('\nA folder already exists at the '
                          'save folder path...')
                    ans = input("Choose an action\n"
                                "1: don't save, 2: overwrite? ")
                    if ans == '1':
                        print('Exit the post_process...')
                        self.save_flag = False
                        self.query_flag = False
                        break
                    elif ans == '2':
                        shutil.rmtree(self.svfolder_path)
                        tools.check_dir(self.svfolder_path)
                        self.query_flag = False
                        break
            else:
                self.save_flag = True

            if self.save_flag:
                basename = os.path.basename(self.img_path)
                json_name = tools.replace_extension(basename, 'json')
                o_json_path = os.path.join(self.svfolder_path, json_name)
                data.write_json(self.results, o_json_path)
                restore_ordered_json(o_json_path)
        else:
            print('Please set the saving folder first!')
            sys.exit(0)
Example #3
    def main_process(self):
        """ main_process of CreateSymbolicLinkTask """

        if "output_folder" in self.param.keys():
            tmpdir = self.param["output_folder"]
        else:
            tmpdir = tempfile.mkdtemp()
        self.results = tmpdir
        tools.check_dir(tmpdir)
        self.logger.warning("Training data is linked to %s" % tmpdir)

        image_paths = self.input_data[0]
        labels = self.input_data[1]

        if len(image_paths) != len(labels):
            self.terminate_flag = True
            self.logger.error("Lengths of labels and data do not match")
            return False

        for label in set(labels):
            if not isinstance(label, str):
                label = str(label)
            label_folder = os.path.join(tmpdir, label)
            tools.check_dir(label_folder)

        for input_path, label in zip(image_paths, labels):
            if not isinstance(label, str):
                label = str(label)
            fname = os.path.basename(input_path)
            output_path = os.path.join(tmpdir, label, fname)
            os.symlink(input_path, output_path)

        return True
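
A standalone sketch of the same technique: one subfolder per label, each image
symlinked under its label's folder. The input lists and paths here are
illustrative, not taken from the original code:

import os
import tempfile

image_paths = ['/data/cat1.jpg', '/data/dog1.jpg']  # hypothetical inputs
labels = ['cat', 'dog']

tmpdir = tempfile.mkdtemp()
for path, label in zip(image_paths, labels):
    label_folder = os.path.join(tmpdir, str(label))
    os.makedirs(label_folder, exist_ok=True)
    # note: the symlink dangles if the source image does not exist
    os.symlink(path, os.path.join(label_folder, os.path.basename(path)))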
Example #4
def convert_all_to_png(folder,
                       size=None,
                       keyword=None,
                       outd=None,
                       suffix='png'):
    """Convert the image to png
    @param folder: image folder to look for

    Keyword arguments:
    suffix -- suffix of the image to be converted
    size   --size of the output image
    keyword -- keyword of the image files to be converted
    outd   -- output folder for the converted file

    """

    image_paths = find_images(dir_path=folder, keyword=keyword)
    fail_paths = []

    if outd is None:
        outd = folder
    else:
        tools.check_dir(outd)

    for img_path in image_paths:
        img = read_img(img_path, log=False)
        if img is None or img.shape[0] == 0:
            print('dyda_utils: Error: %s cannot be read' % img_path)
            fail_paths.append(img_path)
            continue
        # apply the documented size option, which the original body ignored
        if size is not None:
            img = resize_img(img, size)
        _fname = os.path.basename(img_path).split('.')
        _fname[-1] = suffix
        fname = '.'.join(_fname)
        fname = os.path.join(outd, fname)
        save_img(img, fname=fname)
    return fail_paths
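
A hypothetical call, converting every image whose file name contains 'jpg'
under ./photos into 128x128 PNG files written to ./png_out (all paths are
illustrative):

convert_all_to_png('./photos', size=(128, 128), keyword='jpg',
                   outd='./png_out')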
Example #5
    def cp_weights(self):
        """ copy weight files to snapshot_folder """

        print("Copying weights...")
        cp_folder = os.path.join(self.snapshot_folder, "weights")
        tools.check_dir(cp_folder)
        for fname in self.weight_files:
            ori_path = os.path.join("/tmp", fname)
            if tools.check_exist(ori_path, log=False):
                destination = os.path.join(cp_folder, fname)
                try:
                    copyfile(ori_path, destination)
                except OSError:
                    self.logger.error("Failed to copy %s" % ori_path)
            else:
                self.logger.error("%s does not exist, exiting dyda."
                                  % ori_path)
                sys.exit(0)

        # FIXME: if there are more TF classifiers in the future
        classifier = "ClassifierInceptionv3"
        if self.param["architecture"].find('mobilenet') >= 0:
            classifier = "ClassifierMobileNet"
        if classifier in self.config.keys():
            new_config = self.config[classifier]
            new_config["model_file"] = os.path.join(cp_folder,
                                                    "output_graph.pb")
            new_config["label_file"] = os.path.join(cp_folder,
                                                    "output_labels.txt")
            new_config_path = os.path.join(cp_folder, "dyda.config.learner")
            tools.write_json({classifier: new_config}, new_config_path)
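
For reference, the dyda.config.learner file written above would have roughly
this shape, shown as a Python dict (reconstructed from the code; the paths are
placeholders, and any other keys of the existing classifier config are carried
over unchanged):

{
    "ClassifierInceptionv3": {
        "model_file": "<cp_folder>/output_graph.pb",
        "label_file": "<cp_folder>/output_labels.txt"
    }
}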
Example #6
    def post_process(self):
        if self.sv_flag:
            tools.check_dir(self.sv_folder)
            for data in self.output_data:
                sv_path = os.path.join(self.sv_folder, data["filename"])
                sv_path = tools.replace_extension(sv_path, '.json')
                tools.write_json(data, sv_path)
                dict_tools.restore_ordered_json(sv_path)
Example #7
    def __init__(self, pipeline_config_path, dyda_config_path="",
                 force_run_skip=False, parent_result_folder="",
                 verbosity=-1, lab_flag=False):
        """ __init__ of TrainerBase """

        self.logger = logging.getLogger('pipeline')
        formatter1 = logging.Formatter(
            '[pipeline] %(levelname)s %(message)s'
        )
        log_level = logging.WARNING
        if verbosity == 1:
            log_level = logging.INFO
        elif verbosity >= 2:
            log_level = logging.DEBUG
        console1 = logging.StreamHandler()
        console1.setFormatter(formatter1)
        self.logger.setLevel(log_level)
        self.logger.addHandler(console1)
        self.log_level = log_level
        self.force_run_skip = force_run_skip

        self.lab_flag = lab_flag
        if parent_result_folder == "":
            cwd = os.getcwd()
            parent_result_folder = os.path.join(cwd, 'results')
        self.logger.info('Saving output to %s' % parent_result_folder)
        tools.check_dir(parent_result_folder)
        self.parent_result_folder = parent_result_folder

        self.pipeline_config = tools.parse_json(pipeline_config_path, 'utf-8')

        if tools.check_exist(dyda_config_path):
            self.dyda_cfg_path = dyda_config_path
        elif "dyda_config" in self.pipeline_config.keys():
            self.dyda_cfg_path = self.pipeline_config["dyda_config"]
        else:
            self.logger.warning(
                "No valid dyda config found by Pipeline, use default."
            )
            self.dyda_cfg_path = ""

        self.pipeline = OrderedDict({})
        self.define_pipeline()
        # Output is only set if a component specifies output_type.
        # If multiple components specify output_type, the last one wins.
        self.output = None

        self.trigger_level = "L3"
        if "trigger_level" in self.pipeline_config.keys():
            if self.pipeline_config["trigger_level"] in ["L1", "L2", "L3"]:
                self.trigger_level = self.pipeline_config["trigger_level"]
                self.logger.info(
                    'Changing trigger level to %s' % self.trigger_level
                )
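
A hypothetical instantiation (the class name and config paths are assumptions,
not taken from the original source):

pipeline = Pipeline('configs/pipeline.config',
                    dyda_config_path='configs/dyda.config',
                    verbosity=1,
                    lab_flag=False)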
Example #8
    def check_snapshot_folder(self):
        """ Check if self.snapshot_folder is properly set """

        if self.snapshot_folder == "":
            self.snapshot_folder = os.getcwd()
            self.snapshot_folder = os.path.join(
                self.snapshot_folder, self.class_name
            )
            self.base_logger.info(
                "Creating snapshot folder at %s." % self.snapshot_folder
            )
        tools.check_dir(self.snapshot_folder)
Example #9
    def post_process(self):
        output_parent_folder = self.lab_output_folder
        tools.check_dir(output_parent_folder)
        output_folder = os.path.join(output_parent_folder,
                                     self.__class__.__name__)
        tools.check_dir(output_folder)

        for result in self.result_list:
            out_filename = os.path.join(
                output_folder,
                tools.replace_extension(result['filename'], 'json'))
            data.write_json(result, out_filename)
Example #10
    def main_process(self):
        """ Main function of dyda component. """

        # all input_data should come from the same source and
        # with the same height and width
        tools.check_dir(self.snapshot_folder)
        self.out_path = os.path.join(self.snapshot_folder, self.filename)
        height, width, layers = self.input_data[0].shape

        size = (width, height)
        out = cv2.VideoWriter(self.out_path, cv2.VideoWriter_fourcc(*'DIVX'),
                              15, size)
        for frame in self.input_data:
            out.write(frame)
        out.release()

        self.results = {"output_file": self.out_path}
Example #11
    def post_process(self):
        if self.svfolder_path:
            if not os.path.isdir(self.svfolder_path):
                tools.check_dir(self.svfolder_path)
            if self.save_flag:
                for i, out_meta in enumerate(
                        self.results['additional_info']):
                    name = out_meta['filename']
                    out_meta['folder'] = os.path.abspath(self.svfolder_path)
                    json_name = tools.replace_extension(name, 'json')
                    json_path = os.path.join(self.svfolder_path, json_name)
                    img_path = os.path.join(self.svfolder_path, name)
                    data.write_json(out_meta, json_path)
                    image.save_img(self.output_data[i], img_path)
        else:
            print('Please set the saving folder first!')
            sys.exit(0)
Example #12
    def __init__(self, dyda_config_path=''):
        """ __init__ of LearnerTFClassifier """

        super(LearnerYOLO, self).__init__(dyda_config_path=dyda_config_path)
        self.set_param(self.class_name)
        self.classes = self.param["labels"]
        self.overwrite = True
        if "overwrite" in self.param.keys():
            self.overwrite = self.param["overwrite"]
        if "output_folder" in self.param.keys():
            tmpdir = self.param["output_folder"]
        else:
            tmpdir = tempfile.mkdtemp()
        self.output_file = self.param["output_path"]
        self.label_folder = os.path.join(tmpdir, "labels")
        self.img_folder = os.path.join(tmpdir, "JPEGImages")
        for folder in [tmpdir, self.label_folder, self.img_folder]:
            tools.check_dir(folder)
        self.darknet_path = self.param["darknet_path"]
Example #13
    def post_process(self, out_folder_base):

        output_parent_folder = out_folder_base
        tools.check_dir(output_parent_folder)
        output_folder = os.path.join(
            output_parent_folder,
            self.__class__.__name__)
        tools.check_dir(output_folder)

        if 'filename' in self.results.keys():
            # write json
            if self.param['folder'] == "":
                self.results['folder'] = os.path.join(
                    output_parent_folder, 'IrConverter')
            out_filename = os.path.join(
                output_folder,
                tools.replace_extension(self.results['filename'], 'json'))
            tools.write_json(self.results, out_filename)
            log_info = '[TimeScaleShiftConverter] save json to: {}'.format(
                out_filename)
            print(log_info)
Example #14
def read_and_random_crop(fimg, size=None, ratio=0.7, save=False):
    """Read images and do random crops

    @param fimg: input image file name

    Keyword arguments:
    size -- tuple of new size (default None)
    ratio -- used to determin the croped size (default 0.7)

    @return imgs: dictionary of the croped images

    """
    img = read_img(fimg)
    if img is None:
        return img
    nrow = len(img)
    ncol = len(img[0])
    imgs = {}
    imgs['crop_img_lt'] = img[0:int(nrow * ratio), 0:int(ncol * ratio)]
    imgs['crop_img_lb'] = img[int(nrow * (1 - ratio)):nrow,
                              0:int(ncol * ratio)]
    imgs['crop_img_rt'] = img[0:int(nrow * ratio),
                              int(ncol * (1 - ratio)):ncol]
    imgs['crop_img_rb'] = img[int(nrow * (1 - ratio)):nrow,
                              int(ncol * (1 - ratio)):ncol]
    for corner in imgs:
        if size is not None:
            imgs[corner] = resize_img(imgs[corner], size)
        if save:
            dirname = os.path.dirname(fimg)
            dirname = os.path.join(dirname, 'crops')
            tools.check_dir(dirname)

            _fname = os.path.basename(fimg).split('.')
            _fname.insert(-1, '_' + corner + '.')

            fname = ''.join(_fname)
            fname = os.path.join(dirname, fname)
            save_img(imgs[corner], fname=fname)
    return imgs
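
A hypothetical call, cropping the four 70% corners of photo.jpg, resizing each
crop to 224x224, and saving the results to a 'crops' subfolder next to the
source image:

crops = read_and_random_crop('photo.jpg', size=(224, 224), save=True)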
Example #15
    def snapshot_metadata(self, out_name=''):
        """
        snapshot_metadata will save a copy of the current
        TrainerBase.metadata as json output

        :param str out_name: prefix of the output files (default: snapshot)
        :return: None
        :rtype: None
        """

        self.base_logger.debug("Creating snapshot of meta for %s"
                               % self.class_name)
        self.check_snapshot_folder()

        if self.snapshot_hierarchy:
            out_folder = self.create_hierarchy_outf('metadata')
        else:
            out_folder = os.path.join(self.snapshot_folder, 'metadata')

        if out_name == "":
            if isinstance(self.metadata[0], str):
                out_name = self.metadata[0]
            else:
                out_name = "snapshot"
        try:
            tools.check_dir(out_folder)
        except Exception:
            self.base_logger.error('Cannot create folder %s' % out_folder)
            traceback.print_exc(file=sys.stdout)
        try:
            metadata_path = os.path.join(out_folder, out_name + '.json')
            data.write_json(self.metadata, fname=metadata_path)
            self.snapshot_fnames.append(metadata_path)
        except Exception:
            self.base_logger.error('Cannot make snapshot for metadata.')
            traceback.print_exc(file=sys.stdout)
Example #16
    def run(self, external_data, base_name=None,
            external_meta={}, benchmark=False, force_snapshot=False):
        """ Main function to be called by external code

        @param external_data: data passed by the external function, used by
                              component with data_type == "external"

        """

        if benchmark:
            t0 = time.time()
            # two lists tracking time spent building the benchmark DataFrame
            t_benchmark_start = []
            t_benchmark_end = []

            t_benchmark_start.append(time.time())
            # create pandas DataFrame to store benchmark
            pd = importlib.import_module('pandas')
            dfcols = ['total time', 'initial setting', 'run',
                      'output_handling']
            benchmark_data = pd.DataFrame(columns=dfcols)
            t_benchmark_end.append(time.time())

        if base_name is None:
            timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
            base_name = "snapshot_" + timestamp

        self.logger.info("Using %s as base_name" % base_name)

        idx = 0
        previous_comp = ''
        skip_this = False
        skip_following = False

        for name, comp in self.pipeline.items():
            if benchmark:
                t1 = time.time()
            self.logger.debug('running component: %s' % name)

            # if force_run_skip is on, do not skip
            if comp['type'] == 'skip':
                if self.force_run_skip:
                    self.logger.info(
                        'force_run_skip is on, skip components will be run'
                    )
                else:
                    continue

            comp['instance'].reset()

            # if it is the first component
            if idx == 0:
                self.logger.debug('Set base_name as %s' % base_name)
                comp['instance'].metadata[0] = base_name
                comp['instance'].external_metadata = external_meta
                comp['instance'].external_data = external_data

            else:
                comp['instance'].metadata = \
                    self.pipeline[previous_comp]['instance'].metadata
                comp['instance'].external_metadata = external_meta
                comp['instance'].external_data = external_data

            pipeline_status = 0 if skip_following else 1
            comp['instance'].pipeline_status = pipeline_status

            idx = idx + 1
            if skip_this and comp['type'] != 'output_generator':
                self.logger.debug('Skipping the component %s' % name)
                comp['instance'].append_results()
                skip_this = False
                continue

            if skip_following and comp['type'] != 'output_generator':
                self.logger.debug('Skipping the component %s' % name)
                comp['instance'].append_results()
                continue

            input_type = comp['input_type']
            self.logger.debug('Setting up input, type: %s' % input_type)
            if input_type == "use_external_data":
                comp['instance'].input_data = external_data

            elif input_type == "package_external_data":
                comp['instance'].input_data = [external_data]

            elif input_type == "use_external_meta":
                comp['instance'].input_data = external_meta

            elif input_type == "package_external_meta":
                comp['instance'].input_data = [external_meta]

            elif input_type == "append_previous_output":
                comp['instance'].input_data.append(
                    self.pipeline[previous_comp]['instance'].output_data
                )
            elif input_type == "use_metadata":
                comp['instance'].input_data = \
                    copy.deepcopy(comp['instance'].metadata)

            elif input_type == "use_previous_attr":
                attr = comp['additional_info']['attribute']
                if not isinstance(attr, str):
                    self.logger.error(
                        "input_data should be string which match the "
                        "attribute you want to use from previous component"
                    )
                    sys.exit(0)
                comp['instance'].input_data = \
                    getattr(self.pipeline[previous_comp]['instance'], attr)

            elif input_type == "use_previous_output":
                comp['instance'].input_data = \
                    self.pipeline[previous_comp]['instance'].output_data

            elif input_type == "package_previous_output":
                comp['instance'].input_data = \
                    [self.pipeline[previous_comp]['instance'].output_data]

            elif input_type == "append_previous_results":
                comp['instance'].input_data.append(
                    self.pipeline[previous_comp]['instance'].results
                )
            elif input_type == "package_previous_results":
                comp['instance'].input_data = \
                    [self.pipeline[previous_comp]['instance'].results]

            elif input_type == "use_previous_results":
                comp['instance'].input_data = \
                    self.pipeline[previous_comp]['instance'].results

            elif input_type == "use":
                comp['instance'].input_data = external_data
                if len(comp['additional_info']['input_data']) > 1:
                    self.logger.error(
                        "Input type use can only accept one input, "
                        "please check your pipeline config."
                    )
                    sys.exit(0)
                to_use = comp['additional_info']['input_data'][0]
                if to_use[0] == 'external':
                    if to_use[1] == 'data':
                        comp['instance'].input_data = external_data
                    else:
                        comp['instance'].input_data = external_meta
                elif to_use[1] == 'results' or to_use[1] == 'metadata':
                    comp['instance'].input_data = \
                        self.pipeline[to_use[0]]['instance'].results
                elif to_use[1] == 'package_results':
                    comp['instance'].input_data = \
                        [self.pipeline[to_use[0]]['instance'].results]
                elif to_use[1] == 'output_data':
                    comp['instance'].input_data = \
                        self.pipeline[to_use[0]]['instance'].output_data
                else:
                    self.logger.error(
                        '%s %s to_use type is not supported'
                        % (to_use[0], to_use[1])
                    )

            elif input_type == "append":
                if 'additional_info' not in comp.keys():
                    self.logger.error('No additional_info found')
                    sys.exit(0)
                if 'input_data' not in comp['additional_info'].keys():
                    self.logger.error('No input_data section '
                                      'found in additional_info')
                    sys.exit(0)
                for to_append in comp['additional_info']['input_data']:
                    if to_append[0] == 'external':
                        if to_append[1] == 'data':
                            comp['instance'].input_data.append(external_data)
                        else:
                            comp['instance'].input_data.append(external_meta)
                    elif to_append[1] == 'metadata':
                        comp['instance'].append_metadata_to_input(
                            self.pipeline[to_append[0]]['order'],
                            self.pipeline[to_append[0]]['class']
                        )
                    elif to_append[1] == 'results':
                        comp['instance'].input_data.append(
                            self.pipeline[to_append[0]]['instance'].results
                        )
                    elif to_append[1] == 'package_results':
                        comp['instance'].input_data.append(
                            [self.pipeline[to_append[0]]['instance'].results]
                        )
                    elif to_append[1] == 'output_data':
                        comp['instance'].input_data.append(
                            self.pipeline[to_append[0]]['instance'].output_data
                        )
                    else:
                        self.logger.error(
                            '%s %s to_append type is not supported'
                            % (to_append[0], to_append[1])
                        )
            elif input_type == "extend":
                if 'additional_info' not in comp.keys():
                    self.logger.error('No additional_info found')
                    sys.exit(0)
                if 'input_data' not in comp['additional_info'].keys():
                    self.logger.error('No input_data section '
                                      'found in additional_info')
                    sys.exit(0)
                for to_extend in comp['additional_info']['input_data']:
                    if to_extend[0] == 'external':
                        if to_extend[1] == 'data':
                            comp['instance'].input_data.extend(external_data)
                        else:
                            comp['instance'].input_data.extend(external_meta)
                    elif to_extend[1] == 'metadata':
                        comp['instance'].extend_input_with_meta(
                            self.pipeline[to_extend[0]]['order'],
                            self.pipeline[to_extend[0]]['class']
                        )
                    elif to_extend[1] == 'package_results':
                        comp['instance'].input_data.extend(
                            [self.pipeline[to_extend[0]]['instance'].results]
                        )
                    elif to_extend[1] == 'results':
                        comp['instance'].input_data.extend(
                            self.pipeline[to_extend[0]]['instance'].results
                        )
                    elif to_extend[1] == 'output_data':
                        comp['instance'].input_data.extend(
                            self.pipeline[to_extend[0]]['instance'].output_data
                        )
                    else:
                        self.logger.error(
                            '%s %s to_extend type is not supported'
                            % (to_extend[0], to_extend[1])
                        )
                        sys.exit(0)

            elif input_type == "use_meta_pairs":
                if 'additional_info' not in comp.keys():
                    self.logger.error('No additional_info found')
                comp['instance'].input_data = \
                    comp['additional_info']['comp_key_pairs']

            else:
                self.logger.error('No valid input_type found.')

            if benchmark:
                t2 = time.time()
            snapshot_folder = os.path.join(
                self.parent_result_folder, comp['instance'].class_name
            )
            comp['instance'].snapshot_folder = snapshot_folder
            comp['instance'].run()
            if comp['instance'].terminate_flag:
                self.logger.warning(
                    'Component reports terminate_flag, the following normal '
                    'components will be skipped. Pipeline will go straight '
                    'to output_generator.'
                )
                self.output = None
                skip_following = True
                continue

            if benchmark:
                t3 = time.time()

            # selector behavior: if the data fails the selector,
            # skip the next component
            if comp['type'] == 'selector':
                # if the data does not pass selector, e.g. not a key frame
                if not comp['instance'].output_data:
                    skip_this = True

            # gate behavior: if the data fails the gate, skip all
            # following components except the output generator
            elif comp['type'] == 'gate':
                # if the data does not pass gate, e.g. not a key frame
                if not comp['instance'].output_data:
                    skip_following = True

            if 'base_name' in comp.keys():
                if isinstance(comp["base_name"], str):
                    comp['instance'].metadata[0] = comp["base_name"]
                    self.logger.warning(
                        "Changing base_name as %s" % comp["base_name"]
                    )
                else:
                    self.logger.error("Fail to change base_name")

            self.logger.debug('metadata: {}'.format(comp['instance'].metadata))

            # make snapshot if output_type is specified
            if 'output_type' in comp.keys():
                if self.lab_flag is True:
                    self.logger.info('Skipping output step because '
                                     'lab_flag is set as True')
                else:
                    do_snapshot = False
                    '''
                    snapshot == True
                        => always create snapshot
                    force_snapshotable == True
                        => snapshot when force_snapshot is True
                    '''
                    if 'snapshot' in comp.keys() and comp['snapshot'] is True:
                        do_snapshot = True
                    elif ('force_snapshotable' in comp.keys() and
                          comp['force_snapshotable'] is True):
                        if force_snapshot:
                            do_snapshot = True
                    if comp['output_type'] == 'metadata':
                        self.output = comp['instance'].metadata
                        if do_snapshot:
                            comp['instance'].snapshot_metadata()
                            self.logger.debug('Snapshot for metadata of %s'
                                              % comp['instance'].metadata[0])
                    elif comp['output_type'] == 'output_data':
                        self.output = comp['instance'].output_data
                        if do_snapshot:
                            if all(is_pandas_df(i) for i in self.output):
                                comp['instance'].snapshot_output_data(
                                    dtype='DataFrame'
                                )
                            else:
                                comp['instance'].snapshot_output_data()
                            self.logger.debug('Making snapshot for %s'
                                              % comp['instance'].metadata[0])
                    elif comp['output_type'] == 'unpack_results':
                        if len(comp['instance'].results) >= 1:
                            comp['instance'].results = \
                                comp['instance'].results[0]
                            self.output = comp['instance'].results
                        else:
                            self.logger.error(
                                "Unpacking results failed, "
                                "keeping component results unchanged"
                            )
                        if do_snapshot:
                            comp['instance'].snapshot_results()
                            self.logger.debug('Snapshot for results of %s'
                                              % comp['instance'].metadata[0])
                    elif comp['output_type'] == 'results':
                        self.output = comp['instance'].results
                        if do_snapshot:
                            comp['instance'].snapshot_results()
                            self.logger.debug('Snapshot for results of %s'
                                              % comp['instance'].metadata[0])
                    elif comp['output_type'] == 'post_process':
                        self.output = [
                            comp['instance'].results,
                            comp['instance'].metadata,
                            comp['instance'].output_data,
                        ]
                        self.logger.info('Running post_process under '
                                         'lab_flag = %r' % self.lab_flag)
                        comp['instance'].post_process(
                            out_folder_base=self.parent_result_folder
                        )
                    else:
                        self.logger.warning('%s output_type is not supported'
                                            % comp['output_type'])

                comp['output'] = self.output

            if 'print_output' in comp.keys() and comp['print_output'] is True:
                print('[pipeline] printing Pipeline.output of %s (%s)'
                      % (comp['instance'].comp_name,
                         comp['instance'].class_name))
                print(self.output)

            if benchmark:
                t4 = time.time()
                print('[pipeline] Total time used for %s is %.5f seconds'
                      % (comp['instance'].class_name, t4 - t1))
                print(' --- Time used for initial setting is %.5f seconds'
                      % (t2 - t1))
                print(' --- Time used for component run is %.5f seconds'
                      % (t3 - t2))
                print(' --- Time used for output handling is %.5f seconds'
                      % (t4 - t3))

                t_benchmark_start.append(time.time())
                component_benchmark = pd.Series(
                    data=[t4 - t1, t2 - t1,
                          t3 - t2, t4 - t3],
                    index=dfcols)
                benchmark_data.loc[comp['instance'].class_name, :] = \
                    component_benchmark
                t_benchmark_end.append(time.time())

            previous_comp = name

        if benchmark:
            pipeline_time = ((time.time() - t0) -
                             (sum(t_benchmark_end) - sum(t_benchmark_start)))
            print('Total time used for pipeline.run() is %.5f seconds'
                  % pipeline_time)
            benchmark_data.loc['Total time used for pipeline.run()',
                               dfcols[0]] = pipeline_time
            benchmark_data[:] = benchmark_data[:].apply(pd.to_numeric)
            benchmark_folder = os.path.join(self.parent_result_folder,
                                            'benchmark')
            tools.check_dir(benchmark_folder)
            benchmark_path = os.path.join(benchmark_folder, base_name + '.csv')
            benchmark_data.to_csv(path_or_buf=benchmark_path,
                                  float_format='%.5f')
        return True
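
For reference, a component entry using the 'append' input type would look
roughly like this inside self.pipeline (reconstructed from the branches above;
the component and attribute names are illustrative):

component = {
    'type': 'normal',
    'input_type': 'append',
    'additional_info': {
        # each entry is [source_component, attribute]; a source of
        # 'external' pulls from external_data / external_meta instead
        'input_data': [
            ['frame_reader', 'output_data'],
            ['frame_selector', 'results'],
        ],
    },
}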
Example #17
    def snapshot_output_data(self, dtype="image", out_name=''):
        """
        snapshot_data will save a copy of the current TrainerBase.data
        based on the data_type set.

        :param str dtype: data type of output data
        :param str out_name: prefix of the output files (default: snapshot)
        :return: None
        :rtype: None
        """

        self.base_logger.info('Snapshot output data type: %s.' % dtype)
        self.check_snapshot_folder()

        if self.snapshot_hierarchy:
            out_folder = self.create_hierarchy_outf('output_data')
        else:
            out_folder = os.path.join(self.snapshot_folder, 'output_data')

        if out_name == "":
            if isinstance(self.metadata[0], str):
                out_name = self.metadata[0]
            else:
                out_name = "snapshot"
        try:
            tools.check_dir(out_folder)
        except Exception:
            self.base_logger.error('Cannot create folder %s' % out_folder)
            traceback.print_exc(file=sys.stdout)
        try:
            counter = 0
            if not isinstance(self.output_data, list):
                output = [self.output_data]
            else:
                output = self.output_data
            for output_data_ in output:
                if dtype == 'image':
                    _tmp = os.path.join(
                        out_folder, out_name + '_tmp.jpg'
                    )
                    if self.snapshot_with_counter:
                        output_img_path = os.path.join(
                            out_folder, out_name + '_' + str(counter) + '.jpg'
                        )
                    else:
                        output_img_path = os.path.join(
                            out_folder, out_name + '.jpg'
                        )
                    image.save_img(output_data_, fname=_tmp)
                    os.rename(_tmp, output_img_path)
                    self.snapshot_fnames.append(output_img_path)
                elif dtype == 'DataFrame':
                    _tmp = os.path.join(
                        out_folder, out_name + '_tmp.csv'
                    )
                    if self.snapshot_with_counter:
                        output_csv_path = os.path.join(
                            out_folder, out_name + '_' + str(counter) + '.csv'
                        )
                    else:
                        output_csv_path = os.path.join(
                            out_folder, out_name + '.csv'
                        )
                    output_data_.to_csv(_tmp)
                    os.rename(_tmp, output_csv_path)
                    self.snapshot_fnames.append(output_csv_path)
                else:
                    self.base_logger.error('Type %s is not yet supported'
                                           % dtype)
                counter = counter + 1
        except Exception:
            self.base_logger.error('Failed to snapshot output_data '
                                   'and metadata')
            traceback.print_exc(file=sys.stdout)
Example #18
    def snapshot_results(self, out_name=''):
        """
        snapshot_results will save a copy of the current
        TrainerBase.results as json output

        :param str out_name: prefix of the output files (default: snapshot)
        :return: None
        :rtype: None
        """

        self.base_logger.debug("Creating snapshot of results for %s"
                               % self.class_name)
        self.check_snapshot_folder()

        if self.snapshot_hierarchy:
            out_folder = self.create_hierarchy_outf('results')
        else:
            out_folder = os.path.join(self.snapshot_folder, 'results')

        if out_name == "":
            if isinstance(self.metadata[0], str):
                out_name = self.metadata[0]
            else:
                out_name = "snapshot"
        try:
            tools.check_dir(out_folder)
        except Exception:
            self.base_logger.error('Cannot create folder %s' % out_folder)
            traceback.print_exc(file=sys.stdout)
        try:
            if isinstance(self.results, list):
                # snapshot results in only one json
                if "snapshot_results_all" in self.param.keys() \
                        and self.param["snapshot_results_all"] is True:
                    results_path = os.path.join(
                        out_folder, out_name + '.json'
                    )
                    tools.write_json(self.results, fname=results_path)
                    return True
                # snapshot list of results as different json
                for i in range(0, len(self.results)):
                    tmp_path = os.path.join(
                        out_folder, out_name + '_tmp.json'
                    )
                    if self.snapshot_with_counter:
                        results_path = os.path.join(
                            out_folder, out_name + '_' + str(i) + '.json'
                        )
                    else:
                        results_path = os.path.join(
                            out_folder, out_name + '.json'
                        )
                    tools.write_json(self.results[i], fname=tmp_path)
                    os.rename(tmp_path, results_path)
            else:
                results_path = os.path.join(
                    out_folder, out_name + '.json'
                )
                tools.write_json(self.results, fname=results_path)
        except Exception:
            self.base_logger.error('Cannot make snapshot for results.')
            traceback.print_exc(file=sys.stdout)

        return True
Example #19
    def main_process(self):
        """ Main function of dyda component. """

        self.reset_output()

        imgs, lab_dicts = self.input_data
        if not isinstance(imgs, list):
            imgs = [imgs]
            self.package_1 = True
        else:
            self.package_1 = False

        if not isinstance(lab_dicts, list):
            lab_dicts = [lab_dicts]
            self.package_2 = True
        else:
            self.package_2 = False

        if self.package_1 != self.package_2:
            self.logger.warning(
                "Inputs should both be lists, or both not be lists")
            self.terminate_flag = True

        self.package = self.package_1 and self.package_2

        for img, lab_dict in zip(imgs, lab_dicts):
            annotations = lab_dict["annotations"]
            boxes = self.get_bounding_boxes(annotations)
            # face_encodings always returns a list of np.ndarray here
            encodings = face_encodings(img,
                                       known_face_locations=boxes,
                                       num_jitters=self.num_jitters)
            self.output_data.append(encodings)

            if self.save_encodings:

                face_name = self.external_data[0].split('/')[-2]

                # only register one face per frame
                if len(encodings) != 1:
                    self.logger.warning("detect {} faces in {}/{}".format(
                        len(encodings), face_name, self.metadata[0]))
                    break

                # automatically use the folder name as the face name
                # if image's path is like /home/shared/NAME/image1.png
                # use NAME as the face name

                self.names.append(face_name)

                encoding = encodings[0]
                self.known_encodings.append(encoding)

                data = {"encodings": self.known_encodings, "names": self.names}

                if self.external_data[0] == self.last_frame:

                    if self.pickle_path == "":

                        tools.check_dir(self.snapshot_folder)

                        self.pickle_path = os.path.join(
                            self.snapshot_folder, 'encodings.pickle')

                    self.logger.info("saving face encodings to {}".format(
                        self.pickle_path))

                    with open(self.pickle_path, 'wb') as wfp:
                        pickle.dump(data, wfp)

        self.uniform_output()
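
Reading the saved encodings back is symmetric (the path is illustrative):

import pickle

with open('encodings.pickle', 'rb') as rfp:
    data = pickle.load(rfp)
known_encodings, names = data['encodings'], data['names']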