Example #1
def read_csv_to_np(fname):
    """Read CSV file as numpy array

    @param fname: input filename

    @return numpy array

    """
    tools.check_exist(fname)
    content = read_csv(fname=fname, ftype=float)
    return conv_to_np(content)
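A minimal usage sketch, not part of the original source; the CSV path is hypothetical:

# Hypothetical usage of read_csv_to_np; "measurements.csv" is a made-up path.
data = read_csv_to_np("measurements.csv")
print(data.shape, data.dtype)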
Example #2
    def cp_weights(self):
        """ copy weight files to snapshot_folder """

        print("Copying weights...")
        cp_folder = os.path.join(self.snapshot_folder, "weights")
        tools.check_dir(cp_folder)
        for fname in self.weight_files:
            ori_path = os.path.join("/tmp", fname)
            if tools.check_exist(ori_path, log=False):
                destination = os.path.join(cp_folder, fname)
                try:
                    copyfile(ori_path, destination)
                except BaseException:
                    self.logger.error("Fail to copy %s" % ori_path)
            else:
                self.logger.error("%s does not exist, exit dyda." % ori_path)
                sys.exit(0)

        # FIXME: extend this mapping if more TF classifiers are added later
        classifier = "ClassifierInceptionv3"
        if self.param["architecture"].find('mobilenet') >= 0:
            classifier = "ClassifierMobileNet"
        if classifier in self.config.keys():
            new_config = self.config[classifier]
            new_config["model_file"] = os.path.join(cp_folder,
                                                    "output_graph.pb")
            new_config["label_file"] = os.path.join(cp_folder,
                                                    "output_labels.txt")
            new_config_path = os.path.join(cp_folder, "dyda.config.learner")
            tools.write_json({classifier: new_config}, new_config_path)
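For reference, a hedged sketch of what the written dyda.config.learner would roughly contain, assuming snapshot_folder is /tmp/snapshot and the Inceptionv3 branch is taken; any other keys copied from self.config are omitted:

# Approximate content of the JSON written by tools.write_json above
# (paths are hypothetical; extra keys from self.config are not shown).
expected_config = {
    "ClassifierInceptionv3": {
        "model_file": "/tmp/snapshot/weights/output_graph.pb",
        "label_file": "/tmp/snapshot/weights/output_labels.txt",
    }
}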
Example #3
    def _define_legend_args(self, loc, mandarin, size):
        """Define the argument for legend

        @param loc: location of the legend
                    rt   - right top
                    rb   - right bottom
                    lt   - left top
                    lb   - left bottom
                    c/ct - central top
                    cb   - central bottom
        """

        args = {'loc': self.loc_map[loc]}
        font_file = path.join(os.environ['HOME'], '.fonts/noto/',
                              'NotoSansCJKtc-Light.otf')
        if tools.check_exist(font_file) and mandarin:
            chf = font_manager.FontProperties(fname=font_file)
            args["prop"] = chf
        elif type(size) is int:
            args["prop"] = {"size": size}
        if loc == 'rt':
            args['bbox_to_anchor'] = (1.12, 1.0)
        elif loc in ['rb', 'lb', 'lt']:
            args['borderaxespad'] = 1
        elif loc in ['c', 'ct']:
            args['borderaxespad'] = -2.5
        return args
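The returned dict is meant to be unpacked into matplotlib's legend call. A usage sketch, assuming plotter is an instance of the class that defines _define_legend_args and loc_map:

# Hypothetical caller; 'plotter' stands in for the plotting object above.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1], label="example")
ax.legend(**plotter._define_legend_args('rt', mandarin=False, size=12))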
Example #4
def read_img(fimg, size=None, log=True):
    """Access image pixels

    @param fimg: input image file name

    Keyword arguments:
    size -- tuple of new size in (height, width)
    log  -- True to print log if the action fails

    """
    if not tools.check_exist(fimg):
        if log:
            print("[IMAGE] Error %s does not exist" % fimg)
        sys.exit(1)

    if tools.check_ext(fimg, 'gif'):
        img = read_gif(fimg)

    else:
        img = cv2.imread(fimg)

    if img is None:
        if log:
            print("[IMAGE] Error reading file %s" % fimg)
        return img

    if size is not None:
        img = resize_img(img, size)

    return img
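A minimal usage sketch (the file name and target size are hypothetical); read_img returns the image as a numpy array, optionally resized:

# Hypothetical usage; "frame.jpg" is a made-up path.
img = read_img("frame.jpg", size=(224, 224))
if img is not None:
    print(img.shape)  # e.g. (224, 224, 3) for a color image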
Example #5
    def remove(self, folder_to_rm):
        """ This can only remove folders under /tmp """
        if folder_to_rm[:4] != "/tmp":
            self.logger.warning("%s is not under /tmp, pass" % folder_to_rm)
            return
        if tools.check_exist(folder_to_rm):
            self.logger.warning("Removing %s" % folder_to_rm)
            shutil.rmtree(folder_to_rm)
        else:
            self.logger.info("%s does not exist, pass" % folder_to_rm)
Example #6
    def __init__(self, pipeline_config_path, dyda_config_path="",
                 force_run_skip=False, parent_result_folder="",
                 verbosity=-1, lab_flag=False):
        """ __init__ of TrainerBase """

        self.logger = logging.getLogger('pipeline')
        formatter1 = logging.Formatter(
            '[pipeline] %(levelname)s %(message)s'
        )
        log_level = logging.WARNING
        if verbosity == 1:
            log_level = logging.INFO
        elif verbosity >= 2:
            log_level = logging.DEBUG
        console1 = logging.StreamHandler()
        console1.setFormatter(formatter1)
        self.logger.setLevel(log_level)
        self.logger.addHandler(console1)
        self.log_level = log_level
        self.force_run_skip = force_run_skip

        self.lab_flag = lab_flag
        if parent_result_folder == "":
            cwd = os.getcwd()
            parent_result_folder = os.path.join(cwd, 'results')
        self.logger.info('Saving output to %s' % parent_result_folder)
        tools.check_dir(parent_result_folder)
        self.parent_result_folder = parent_result_folder

        self.pipeline_config = tools.parse_json(pipeline_config_path, 'utf-8')

        if tools.check_exist(dyda_config_path):
            self.dyda_cfg_path = dyda_config_path
        elif "dyda_config" in self.pipeline_config.keys():
            self.dyda_cfg_path = self.pipeline_config["dyda_config"]
        else:
            self.logger.warning(
                "No valid dyda config found by Pipeline, use default."
            )
            self.dyda_cfg_path = ""

        self.pipeline = OrderedDict({})
        self.define_pipeline()
        # Output is only set if output_type is specified by a component.
        # If more than one component sets output_type, the last one wins.
        self.output = None

        self.trigger_level = "L3"
        if "trigger_level" in self.pipeline_config.keys():
            if self.pipeline_config["trigger_level"] in ["L1", "L2", "L3"]:
                self.trigger_level = self.pipeline_config["trigger_level"]
                self.logger.info(
                    'Changing trigger level to %s' % self.trigger_level
                )
Example #7
def read_json_to_df(fname, orient='columns', np=False):
    """Read json file as pandas DataFrame

    @param fname: input filename

    Keyword arguments:
    orient -- split/records/index/columns/values (default: 'columns')
    np     -- True to enable direct decoding to numpy arrays (default: False)

    @return pandas DataFrame

    """
    if tools.check_exist(fname):
        return pd.read_json(fname, orient=orient, numpy=np)
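A usage sketch (the file name is hypothetical); note that the function returns None when the file does not exist, so callers may want to check the result:

# Hypothetical usage; "records.json" is a made-up path.
df = read_json_to_df("records.json", orient="records")
if df is not None:
    print(df.head())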
Example #8
    def post_process(self, out_folder_base=""):
        """ define post_process of dyda component """

        if not tools.check_exist(out_folder_base, log=False):
            out_folder_base = os.path.join(os.getcwd(), 'post_process')
        self.base_logger.info('post_process results saved to %s' %
                              out_folder_base)
        tools.dir_check(out_folder_base)
        self.results["output_folder"] = out_folder_base
        self.results["bkg_ref_basename"] = "ref_bkg.png"

        out_filename = os.path.join(self.results["output_folder"],
                                    self.results["bkg_ref_basename"])
        image.save_img(self.normal_image, fname=out_filename)
Example #9
    def post_process(self, out_folder_base=""):
        """
        post_process function will be called in the run function after
        main_process.

        Warning: sample code below may be overwritten by components

        Arguments:
            out_folder_base - parent output folder of post_process results
        """
        if not tools.check_exist(out_folder_base, log=False):
            out_folder_base = os.path.join(os.getcwd(), 'post_process')
        self.base_logger.info('post_process results saved to %s'
                              % out_folder_base)
        tools.dir_check(out_folder_base)
Example #10
    def main_process(self):
        """ Main function called by the external code """

        self.results['total_frames'] = []
        if isinstance(self.input_data, str):
            input_data = [self.input_data]
        else:
            input_data = self.input_data
        for i in range(0, len(input_data)):
            if not self.concat:
                # keep one output list per input video
                self.output_data.append([])
            video_path = input_data[i]
            if not isinstance(video_path, str):
                self.terminate_flag = True
                self.logger.error(
                    "item of input_data should be str of input video path")
                return False
            if not tools.check_exist(video_path):
                self.terminate_flag = True
                self.logger.error(
                    "input video %s does not exist" % video_path)
                return False
            count = 0
            try:
                # FPS selection in this component is a workaround because
                # setting CAP_PROP_FRAME_FRAMES does not work; for details
                # see https://goo.gl/yVimzd and https://goo.gl/GeuwX1
                sel_count = int(30 / self.fps)
                vidcap = cv2.VideoCapture(video_path)
                success, img = vidcap.read()
                success = True
                while success:
                    count += 1
                    if count % sel_count == 0:
                        if self.concat:
                            self.output_data.append(img)
                        else:
                            self.output_data[i].append(img)
                    success, img = vidcap.read()
            except BaseException:
                self.terminate_flag = True
                self.logger.error("Fail to read %ith frame" % count)
                return False

            self.results['data_path'].append(video_path)
            self.results['data_type'] = 'array'
            self.results['total_frames'].append(count)
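To illustrate the frame-sampling arithmetic above: the code assumes a 30 fps source and an fps value no larger than 30, so with self.fps set to 10 every third decoded frame is kept. A standalone sketch of that selection logic:

# Illustration only, not part of the component; assumes a 30 fps source.
fps = 10                           # hypothetical requested sampling rate
sel_count = int(30 / fps)          # -> 3
kept = [n for n in range(1, 13) if n % sel_count == 0]
print(kept)                        # [3, 6, 9, 12]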
Example #11
def _output_pred(input_path):
    """ Output prediction result based on dyda_utils spec https://goo.gl/So46Jw

    @param input_path: File path of the input

    """

    if not tools.check_exist(input_path):
        print('[dyda_utils] ERROR: %s does not exist' % input_path)
        return

    input_file = os.path.basename(input_path)
    folder = os.path.dirname(input_path)
    input_size = image.get_img_info(input_path)[0]

    pred_info = {"filename": input_file, "folder": folder}
    pred_info["size"] = {"width": input_size[0], "height": input_size[1]}
    pred_info["sha256sum"] = tools.get_sha256(input_path)

    return pred_info
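A usage sketch (the image path is hypothetical); the returned dict follows the dyda_utils prediction spec referenced in the docstring:

# Hypothetical usage; "/data/cat.jpg" is a made-up path.
pred = _output_pred("/data/cat.jpg")
if pred is not None:
    print(pred["filename"], pred["size"], pred["sha256sum"][:8])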
Example #12
    def main_process(self):
        """ main process """

        if not os.path.isdir(self.input_data):
            self.logger.error("%s is not a valid folder" % self.input_data)
            self.terminate_flag = True
            return

        if self.overwrite:
            output = open(self.output_file, 'w')
        else:
            if tools.check_exist(self.output_file):
                output = open(self.output_file, 'a')
            else:
                output = open(self.output_file, 'w')
        print("LearnerYOLO: creating %s" % self.output_file)

        check_keys = ["folder", "filename", "annotations"]
        for json_file in tools.find_files(self.input_data, walkin=False):
            try:
                json_content = tools.parse_json(json_file)
            except BaseException:
                self.logger.error("Fail to open %s" % json_file)
                continue
            missing_keys = [k for k in check_keys if k not in json_content]
            if missing_keys:
                self.logger.error("%s is not found in %s" %
                                  (", ".join(missing_keys), json_file))
                # skip this json file if a required key is missing
                continue
            folder = json_content["folder"]
            filename = json_content["filename"]
            # FIXME
            # folder = folder.replace("results", "labeled_data")
            # folder = folder.replace("_tmp", "")

            in_img_path = os.path.join(folder, filename)
            out_img_path = os.path.join(self.img_folder, filename)
            o_file_path = os.path.join(
                self.label_folder,
                tools.remove_extension(filename) + '.txt')
            o_file = open(o_file_path, 'w')

            annos = json_content["annotations"]
            size, pix = image.get_img_info(in_img_path)

            h = float(size[0])
            w = float(size[1])

            for anno in annos:
                X = []
                Y = []
                cls = anno["label"]
                if cls not in self.classes:
                    self.logger.debug("%s is not in the selected class" % cls)
                    continue
                cls_id = self.classes.index(cls)
                X = [anno["left"], anno["right"]]
                Y = [anno["top"], anno["bottom"]]
                bb = self.convert((w, h), X, Y)
                o_file.write(
                    str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

            o_file.close()
            self.logger.info("Link %s to %s" % (in_img_path, out_img_path))
            os.symlink(in_img_path, out_img_path)

            output.write(out_img_path + '\n')
        output.close()

        # FIXME: the darknet env has to be prepared in advance and the
        # classes are currently hard-coded
        train_path = os.path.join(self.darknet_path, "train.txt")
        if train_path != self.output_file:
            copyfile(self.output_file, train_path)
        os.chdir(self.darknet_path)
        cmd = ("./darknet detector train cfg/dt42.data"
               " cfg/tiny-yolo-voc-dt42.cfg darknet.weights.13 -gpus 1")
        self.logger.info("Running %s " % cmd)
        output = subprocess.check_output(["bash", "-c", cmd])
        self.results = {
            "root_directory": self.darknet_path,
            "weight_file": "backup_dt42/yolo-voc-dt42_final.weights",
            "data_file": "cfg/dt42.data",
            "names_file": "data/dt42.names",
            "cfg_file": "cfg/tiny-yolo-voc-dt42.cfg"
        }
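The convert method used to build each label line is not shown here. As a hedged sketch only, a typical Darknet/YOLO conversion from pixel box corners to normalized center/width/height would look like the following; the real self.convert in this component may differ:

# Sketch of a standard Darknet-style conversion (assumption, not the actual
# self.convert): size is (image_width, image_height), X = [left, right],
# Y = [top, bottom]; the result is normalized to [0, 1].
def convert(size, X, Y):
    w, h = size
    x_center = (X[0] + X[1]) / 2.0 / w
    y_center = (Y[0] + Y[1]) / 2.0 / h
    box_w = (X[1] - X[0]) / w
    box_h = (Y[1] - Y[0]) / h
    return (x_center, y_center, box_w, box_h)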