Code Example #1
    def test_main_process(self):

        config_url = 'https://gitlab.com/DT42/galaxy42/dt42-dyda/uploads/'\
            '2f4b1e105fad5d935e83fa8e608c395e/'\
            'dyda.config.TimeScaleShiftConverter'
        dyda_config = lab_tools.pull_json_from_gitlab(config_url)
        input_url = 'https://gitlab.com/DT42/galaxy42/dt42-dyda/uploads/'\
            'a25e50ea3b063d3e57aed402de9d0d25/input_list.json'
        input_list = lab_tools.pull_json_from_gitlab(input_url)
        output_url = 'https://gitlab.com/DT42/galaxy42/dt42-dyda/uploads/'\
            '4e8e4849534e68152507bdf3640d5bd1/output.json'
        output_list = lab_tools.pull_json_from_gitlab(output_url)

        # initialization
        converter_ = TimeScaleShiftConverter(dyda_config)

        for i in range(len(input_list)):

            # run converter
            converter_.reset()
            converter_.input_data.append(
                tools.parse_json(input_list[i]))
            converter_.run()

            # compare results with reference
            ref_data = output_list[i]
            tar_data = converter_.results
            if ref_data != [] and tar_data != []:
                report = dict_comparator.get_diff(ref_data, tar_data)
                self.assertEqual(report['extra_field'], [])
                self.assertEqual(report['missing_field'], [])
                self.assertEqual(report['mismatch_val'], [])
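
The comparisons above rely on dict_comparator.get_diff returning a report dict keyed by extra_field, missing_field, and mismatch_val. The actual dyda_utils implementation is not shown on this page; a minimal sketch that yields the same report shape, for flat dicts only and offered purely as an illustration, could look like:

    def get_diff_sketch(ref, tar):
        """Illustrative stand-in for dict_comparator.get_diff (flat dicts)."""
        ref_keys, tar_keys = set(ref), set(tar)
        return {
            'extra_field': sorted(tar_keys - ref_keys),
            'missing_field': sorted(ref_keys - tar_keys),
            'mismatch_val': [k for k in sorted(ref_keys & tar_keys)
                             if ref[k] != tar[k]],
        }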
Code Example #2
    def __init__(self, pipeline_config_path, dyda_config_path="",
                 force_run_skip=False, parent_result_folder="",
                 verbosity=-1, lab_flag=False):
        """ __init__ of TrainerBase """

        self.logger = logging.getLogger('pipeline')
        formatter1 = logging.Formatter(
            '[pipeline] %(levelname)s %(message)s'
        )
        log_level = logging.WARNING
        if verbosity == 1:
            log_level = logging.INFO
        elif verbosity >= 2:
            log_level = logging.DEBUG
        console1 = logging.StreamHandler()
        console1.setFormatter(formatter1)
        self.logger.setLevel(log_level)
        self.logger.addHandler(console1)
        self.log_level = log_level
        self.force_run_skip = force_run_skip

        self.lab_flag = lab_flag
        if parent_result_folder == "":
            cwd = os.getcwd()
            parent_result_folder = os.path.join(cwd, 'results')
        self.logger.info('Saving output to %s' % parent_result_folder)
        tools.check_dir(parent_result_folder)
        self.parent_result_folder = parent_result_folder

        self.pipeline_config = tools.parse_json(pipeline_config_path, 'utf-8')

        if tools.check_exist(dyda_config_path):
            self.dyda_cfg_path = dyda_config_path
        elif "dyda_config" in self.pipeline_config.keys():
            self.dyda_cfg_path = self.pipeline_config["dyda_config"]
        else:
            self.logger.warning(
                "No valid dyda config found by Pipeline; using default."
            )
            self.dyda_cfg_path = ""

        self.pipeline = OrderedDict({})
        self.define_pipeline()
        # Output is only set if output_type is specified by a component.
        # If multiple components set output_type, the last one wins.
        self.output = None

        self.trigger_level = "L3"
        if "trigger_level" in self.pipeline_config.keys():
            if self.pipeline_config["trigger_level"] in ["L1", "L2", "L3"]:
                self.trigger_level = self.pipeline_config["trigger_level"]
                self.logger.info(
                    'Changing trigger level to %s' % self.trigger_level
                )
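
Given the signature above and the call site in Code Example #13, a hypothetical instantiation of this class would look like the following (the import path is an assumption; only the dt42pl alias is attested by Code Example #13):

    from dyda.pipelines import pipeline as dt42pl  # import path assumed

    pipeline = dt42pl.Pipeline(
        'pipeline.config',     # hypothetical pipeline config path
        dyda_config_path='',   # empty: fall back to "dyda_config" in the config
        verbosity=1,           # INFO-level logging, per the handling above
        lab_flag=False)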
Code Example #3
File: bnpipeline.py Project: victorcasignia/BerryNet
    def __init__(self, config,
                 dyda_config_path='',
                 warmup_size=(480, 640, 3),
                 disable_warmup=False,
                 benchmark=False,
                 verbosity=0):
        self.launcher = BerryNetPipelineLauncher(
            config,
            dyda_config_path=dyda_config_path,
            verbosity=verbosity, benchmark=benchmark)
        self.pipeline_config = tools.parse_json(config)
        if not disable_warmup:
            self.warmup(shape=warmup_size)
Code Example #4
File: launcher.py Project: numbersprotocol/dyda
def read_meta_single(args, logger, full_path):
    ext_meta = []
    if args.read_meta:
        logger.debug('Reading json for producing binary meta')
        if args.repeated_metadata_path == '':
            meta_path = tools.remove_extension(full_path) + '.json'
        else:
            meta_path = args.repeated_metadata_path
        try:
            ext_meta = tools.parse_json(meta_path, 'utf-8')
        except BaseException:
            logger.error('Failed to parse %s' % meta_path)
            sys.exit(1)
    return ext_meta
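
read_meta_single only reads two attributes from args, so it can be exercised standalone; the namespace below is hypothetical and simply mirrors the launcher's CLI flags:

    import logging
    from types import SimpleNamespace

    logger = logging.getLogger('launcher')
    args = SimpleNamespace(read_meta=True, repeated_metadata_path='')
    # expects /data/frame_0001.json to sit next to the binary input
    ext_meta = read_meta_single(args, logger, '/data/frame_0001.bin')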
Code Example #5
def convert2dict(unknowtype, isref):
    o_dict = []
    if isinstance(unknowtype, (list, dict)):
        o_dict = unknowtype
    elif os.path.isfile(unknowtype):
        o_dict = tools.parse_json(unknowtype)
    else:
        print("Not a valid input")
        if isref:
            print("No such file exists at ref_json_path")
        else:
            print("No such file exists at tar_json_path")
    return o_dict
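
Because convert2dict passes already-parsed objects through unchanged and only hits the filesystem for path inputs, both call styles below have the same effect (the file path is hypothetical):

    ref = convert2dict([{'label': 'car'}], isref=True)       # passed through as-is
    tar = convert2dict('/tmp/tar_output.json', isref=False)  # parsed via tools.parse_json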
Code Example #6
    def create_tf_record(self, json_dir, tfrecord_path):

        writer = tf.python_io.TFRecordWriter(tfrecord_path)

        check_keys = ["folder", "filename", "annotations"]
        for json_file in tools.find_files(json_dir, walkin=False):
            try:
                json_content = tools.parse_json(json_file)
            except BaseException:
                print("Failed to open %s" % json_file)
                continue
            # skip the whole file if any required key is missing; a bare
            # `continue` in a per-key loop would only skip that key
            missing = [key for key in check_keys if key not in json_content]
            if missing:
                print("%s is not found in %s" % (", ".join(missing), json_file))
                continue

            tf_example = self.lab_format_to_tf_example(json_content)
            writer.write(tf_example.SerializeToString())
        writer.close()
Code Example #7
    def test_main_process(self):
        """ Main process of unit test. """

        dyda_config_TrackerSimple = {
            "TrackerSimple": {
                'matching_thre': 1000,
                'max_missing_frame': 50
            }
        }
        input_data = tools.parse_json(
            '/home/shared/DT42/test_data/test_object_detection_and_tracking/P2_ref_output.json'
        )
        # initialization
        comp = TrackerSimple(dyda_config_path=dyda_config_TrackerSimple)

        # run component
        for data in input_data:
            comp.reset()
            comp.input_data = data
            comp.run()
Code Example #8
    def test_main_process(self):

        # pull test data from gitlab
        config_url = 'https://gitlab.com/DT42/galaxy42/dt42-dyda/uploads/'\
            '36356e92cad4a608d4c84bba769c0d53/'\
            'dyda.config.DeterminatorCharacter'
        dyda_config = lab_tools.pull_json_from_gitlab(config_url)
        input_url = 'https://gitlab.com/DT42/galaxy42/dt42-dyda/uploads/'\
            '521feb8def6e63d6e187622b171b7233/input_list.json'
        input_list = lab_tools.pull_json_from_gitlab(input_url)
        output_url = 'https://gitlab.com/DT42/galaxy42/dt42-dyda/uploads/'\
            '9f25865b93a133bdec459ace16de8ccd/output_list.json'
        output_list = lab_tools.pull_json_from_gitlab(output_url)

        # initialization
        determinator_ = DeterminatorCharacter(dyda_config_path=dyda_config)
        # run determinator
        for i in range(len(input_list)):

            determinator_.reset()
            determinator_.input_data.append(image.read_img(input_list[i]))
            determinator_.input_data.append(
                tools.parse_json(tools.replace_extension(
                    input_list[i], 'json')))
            determinator_.run()

            # compare results with reference
            if determinator_.results != []:
                ref_data = output_list[i]
                tar_data = determinator_.results
                for j in range(len(ref_data)):
                    for k in range(len(ref_data[j])):
                        report = dict_comparator.get_diff(
                            ref_data[j][k], tar_data[j][k])
                        self.assertEqual(report['extra_field'], [])
                        self.assertEqual(report['missing_field'], [])
                        self.assertEqual(report['mismatch_val'], [])
Code Example #9
    def main_process(self):
        """ define main_process of dyda component """

        # input_data should be full path of the data directory
        if not os.path.isdir(self.input_data):
            self.logger.error("%s is not a valid folder" % self.input_data)
            self.terminate_flag = True
            return
        labels = tools.find_folders(self.input_data)
        self.results["labels"] = labels
        self.logger.info("Training for labels: %s." % ' '.join(labels))
        cmd = "python3 " + self.param["retrain_script_path"]
        cmd = cmd + " --learning_rate=" + str(self.param["learning_rate"])
        cmd = cmd + " --testing_percentage=" + str(self.param["test_perc"])
        cmd = cmd + " --validation_percentage=" + str(self.param["val_perc"])
        cmd = cmd + " --train_batch_size=" + str(self.param["train_batch"])
        cmd = cmd + " --validation_batch_size=" + str(self.param["val_batch"])
        if self.param["aug_lip_left_right"]:
            cmd = cmd + " --flip_left_right True"
        if "aug_random_brightness" in self.param.keys():
            cmd = cmd + " --random_brightness=" + \
                str(self.param["aug_random_brightness"])
        if "aug_random_scale" in self.param.keys():
            cmd = cmd + " --random_scale=" + \
                str(self.param["aug_random_scale"])
        cmd = cmd + " --eval_step_interval=" + str(self.param["eval_step"])
        cmd = cmd + " --how_many_training_steps=" + \
            str(self.param["train_steps"])
        cmd = cmd + " --architecture=" + str(self.param["architecture"])
        cmd = cmd + " --image_dir " + self.input_data
        self.logger.info("Running %s " % cmd)
        output = subprocess.check_output(["bash", "-c", cmd])

        # FIXME: get results directly from retrain.py
        training_results = tools.parse_json("./output.json")
        self.results["training_results"] = training_results
        self.cp_weights()
Code Example #10
    def main_process(self):
        """ main process """

        if not os.path.isdir(self.input_data):
            self.logger.error("%s is not a valid folder" % self.input_data)
            self.terminate_flag = True
            return

        if self.overwrite or not tools.check_exist(self.output_file):
            output = open(self.output_file, 'w')
        else:
            output = open(self.output_file, 'a')
        print("LearnerYOLO: creating %s" % self.output_file)

        check_keys = ["folder", "filename", "annotations"]
        for json_file in tools.find_files(self.input_data, walkin=False):
            try:
                json_content = tools.parse_json(json_file)
            except BaseException:
                self.logger.error("Failed to open %s" % json_file)
                continue
            # skip the whole file if any required key is missing; a bare
            # `continue` in a per-key loop would only skip that key
            missing = [key for key in check_keys if key not in json_content]
            if missing:
                self.logger.error("%s is not found in %s" %
                                  (", ".join(missing), json_file))
                continue
            folder = json_content["folder"]
            filename = json_content["filename"]
            # FIXME
            # folder = folder.replace("results", "labeled_data")
            # folder = folder.replace("_tmp", "")

            in_img_path = os.path.join(folder, filename)
            out_img_path = os.path.join(self.img_folder, filename)
            o_file_path = os.path.join(
                self.label_folder,
                tools.remove_extension(filename) + '.txt')
            o_file = open(o_file_path, 'w')

            annos = json_content["annotations"]
            size, pix = image.get_img_info(in_img_path)

            h = float(size[0])
            w = float(size[1])

            for anno in annos:
                X = []
                Y = []
                cls = anno["label"]
                if cls not in self.classes:
                    self.logger.debug("%s is not in the selected class" % cls)
                    continue
                cls_id = self.classes.index(cls)
                X = [anno["left"], anno["right"]]
                Y = [anno["top"], anno["bottom"]]
                bb = self.convert((w, h), X, Y)
                o_file.write(
                    str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

            o_file.close()
            self.logger.info("Link %s to %s" % (in_img_path, out_img_path))
            os.symlink(in_img_path, out_img_path)

            output.write(out_img_path + '\n')
        output.close()

        # FIXME darknet env has to be well prepared and fix the classes now
        train_path = os.path.join(self.darknet_path, "train.txt")
        if train_path != self.output_file:
            copyfile(self.output_file, train_path)
        os.chdir(self.darknet_path)
        cmd = ("./darknet detector train cfg/dt42.data"
               " cfg/tiny-yolo-voc-dt42.cfg darknet.weights.13 -gpus 1")
        self.logger.info("Running %s " % cmd)
        output = subprocess.check_output(["bash", "-c", cmd])
        self.results = {
            "root_directory": self.darknet_path,
            "weight_file": "backup_dt42/yolo-voc-dt42_final.weights",
            "data_file": "cfg/dt42.data",
            "names_file": "data/dt42.names",
            "cfg_file": "cfg/tiny-yolo-voc-dt42.cfg"
        }
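
The self.convert call above turns absolute [left, right] and [top, bottom] pixel coordinates into the normalized (x_center, y_center, width, height) tuple that YOLO label files expect. The method itself is not shown on this page; a typical implementation of that conversion, offered here as an assumption, is:

    def convert(self, size, X, Y):
        """Normalize pixel bbox coordinates to YOLO (x, y, w, h) fractions."""
        w, h = size
        x_center = (X[0] + X[1]) / 2.0 / w
        y_center = (Y[0] + Y[1]) / 2.0 / h
        box_w = (X[1] - X[0]) / w
        box_h = (Y[1] - Y[0]) / h
        return (x_center, y_center, box_w, box_h)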
Code Example #11
def restore_ordered_json(json_path):
    """Rewrite a JSON file in place with sorted keys and 4-space indent."""
    json_data = tools.parse_json(json_path)
    with open(json_path, 'w') as outfile:
        outfile.write(json.dumps(json_data, indent=4, sort_keys=True))
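
A quick round trip shows the effect (the path is hypothetical):

    import json

    with open('/tmp/example.json', 'w') as f:
        json.dump({'b': 2, 'a': 1}, f)
    restore_ordered_json('/tmp/example.json')
    # /tmp/example.json now reads {"a": 1, "b": 2} with 4-space indentation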
Code Example #12
import cv2
import unittest
from dyda_utils import tools
from dyda_utils import tinycv
from dyda_utils import lab_tools
from dyda_utils import dict_comparator
from dyda.components.image_processor import ExtractNonBlackImageProcessor

input_data = cv2.imread(
    '/home/shared/DT42/test_data/test_ExtractNonBlackImageProcessor/input.jpg')
ref_output = cv2.imread(
    '/home/shared/DT42/test_data/test_ExtractNonBlackImageProcessor/ref_output.bmp'
)
ref_results = tools.parse_json(
    '/home/shared/DT42/test_data/test_ExtractNonBlackImageProcessor/ref_results.json'
)


class TestExtractNonBlackImageProcessor_simple(unittest.TestCase):
    """ Test simple case. """
    def test_main_process(self):
        """ Main process of unit test. """

        # initialization
        comp = ExtractNonBlackImageProcessor()

        # run component
        comp.reset()
        comp.input_data = input_data
        comp.run()

        # compare results with reference, following the pattern of other tests
        report = dict_comparator.get_diff(ref_results, comp.results)
        self.assertEqual(report['extra_field'], [])
        self.assertEqual(report['missing_field'], [])
        self.assertEqual(report['mismatch_val'], [])
Code Example #13
File: launcher.py Project: numbersprotocol/dyda
def main():
    """ main function to run pipeline """

    args = get_args()

    logger = logging.getLogger('launcher')
    log_level = logging.WARNING
    if args.verbosity == 1:
        log_level = logging.INFO
    elif args.verbosity >= 2:
        log_level = logging.DEBUG
    formatter = logging.Formatter('[launcher] %(levelname)s %(message)s')
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    logger.setLevel(log_level)
    logger.addHandler(console)

    logger.info('lab_flag is %r' % args.lab_flag)
    pipeline = dt42pl.Pipeline(args.pipeline_config,
                               dyda_config_path=args.dyda_config_path,
                               parent_result_folder=args.output,
                               verbosity=args.verbosity,
                               lab_flag=args.lab_flag,
                               force_run_skip=args.force_run_skip)

    if args.read_frame:
        fr = frame_reader.FrameReader()
    # looping over input data paths
    logger.info('Running Reader and Selector for frames')
    data_list = args.data_list
    if args.json_list:
        logger.warning('json_list will replace -d/--data_list argument')
        data_list = args.json_list

    force_snapshot = bool(args.force_snapshot)

    bfile_list = False
    if args.direct_input:
        fpaths = data_list
    elif os.path.isfile(data_list):
        if tools.check_ext(data_list, ".json"):
            fpaths = tools.parse_json(data_list, 'utf-8')
        else:
            fpaths = tools.txt_to_list(data_list)
        bfile_list = True
    elif os.path.isdir(data_list):
        fpaths = []
        bfile_list = False
    else:
        logger.error("Something wrong with data_list input, please check")
        sys.exit(0)

    ignore_keys = []
    if len(args.ignore_key) > 1:
        ignore_keys = args.ignore_key.split(',')
    all_pass = False
    if args.check_output:
        logger.debug(args.ref_output)
        if os.path.isdir(args.ref_output):
            fn_list = sorted(
                tools.find_files(dir_path=args.ref_output,
                                 keyword=None,
                                 suffix=('.json'),
                                 walkin=True))
            ref_output = []
            for fn in fn_list:
                ref_output.append(tools.parse_json(fn, 'utf-8'))
        elif os.path.isfile(args.ref_output):
            ref_output = tools.parse_json(args.ref_output, 'utf-8')
        else:
            logger.error("Something wrong with reference output, please check")
            sys.exit(0)
        all_pass = True

    benchmark = bool(args.benchmark)

    if bfile_list and args.loop_over_input and args.multi_channels:
        for fi in range(len(fpaths)):
            ext_data = []
            ext_meta = []
            for ci in range(len(fpaths[fi])):
                full_path = fpaths[fi][ci]
                logger.debug(full_path)
                if args.read_frame:
                    logger.debug('Reading frame for producing binary input')
                    fr.reset()
                    fr.input_data = [full_path]
                    fr.run()
                    ext_data.append(fr.output_data[0])
                else:
                    ext_data.append(full_path)
                ext_meta.append(fpaths[fi][ci])
            ext_meta = read_meta_single(args, logger, full_path)

            pipeline.run(ext_data,
                         external_meta=ext_meta,
                         benchmark=benchmark,
                         force_snapshot=force_snapshot)
            if args.check_output:
                if not isinstance(pipeline.output, list):
                    tar_list = [pipeline.output]
                    ref_list = [ref_output[fi]]
                else:
                    tar_list = pipeline.output
                    ref_list = ref_output[fi]
                for ci, tar_data in enumerate(tar_list):
                    all_pass = check_result(tar_data,
                                            ref_list[ci],
                                            full_path,
                                            all_pass,
                                            ignore_keys=ignore_keys)

    elif bfile_list and args.loop_over_input:
        counter = 0
        wrong = 0
        for fi in range(len(fpaths)):

            counter = counter + 1
            full_path = fpaths[fi]
            logger.debug(full_path)
            base_name = tools.remove_extension(full_path,
                                               return_type='base-only')
            # Assign external data and metadata
            if args.do_not_pack:
                ext_data = full_path
            else:
                ext_data = [full_path]
            if args.read_frame:
                logger.debug('Reading frame for producing binary input')
                fr.reset()
                fr.input_data = [full_path]
                fr.run()
                ext_data = fr.output_data[0]
            ext_meta = read_meta_single(args, logger, full_path)
            pipeline.run(ext_data,
                         base_name=base_name,
                         external_meta=ext_meta,
                         benchmark=benchmark,
                         force_snapshot=force_snapshot)

            if args.check_output:
                all_pass = check_result(pipeline.output,
                                        ref_output[fi],
                                        full_path,
                                        all_pass,
                                        ignore_keys=ignore_keys)
    elif bfile_list:
        ext_meta = []
        if args.read_meta:
            logger.debug('Reading json for producing binary meta')
            for full_path in fpaths:
                if args.repeated_metadata_path == '':
                    meta_path = tools.remove_extension(full_path) + '.json'
                else:
                    meta_path = args.repeated_metadata_path
                try:
                    ext_meta.append(tools.parse_json(meta_path, 'utf-8'))
                except BaseException:
                    logger.error('Failed to parse %s' % meta_path)
                    sys.exit(1)

        pipeline.run(fpaths,
                     external_meta=ext_meta,
                     benchmark=benchmark,
                     force_snapshot=force_snapshot)

        if args.check_output:
            all_pass = check_result(pipeline.output,
                                    ref_output,
                                    fpaths,
                                    all_pass,
                                    ignore_keys=ignore_keys)

    else:
        full_path = data_list
        ext_meta = read_meta_single(args, logger, full_path)
        pipeline.run(full_path,
                     external_meta=ext_meta,
                     benchmark=benchmark,
                     force_snapshot=force_snapshot)
        if args.check_output:
            all_pass = check_result(pipeline.output[0],
                                    ref_output[0],
                                    fpaths,
                                    all_pass,
                                    ignore_keys=ignore_keys)

    if args.check_output is True and all_pass is True:
        print("Pass all test data in input data list.")
    print("Lab pipeline launcher completes successfully")