Example #1
    def from_args(cls, args, options=''):
        """
        Initialize this class from some cli arguments. Used in train, test.
        """
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        if not isinstance(args, tuple):
            args = args.parse_args()

        if args.device is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device
        if args.resume is not None:
            resume = Path(args.resume)
            cfg_fname = resume.parent / 'config.json'
        else:
            msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
            assert args.config is not None, msg_no_cfg
            resume = None
            cfg_fname = Path(args.config)

        config = read_json(cfg_fname)
        if args.config and resume:
            # update new config for fine-tuning
            config.update(read_json(args.config))

        # parse custom cli options into dictionary
        modification = {
            opt.target: getattr(args, _get_opt_name(opt.flags))
            for opt in options
        }
        return cls(config, resume, modification)
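All of these examples rely on a read_json helper (often paired with write_json) that the snippets never define. In the pytorch-template-style repositories these come from, it is typically a thin wrapper around the json module; a minimal sketch, assuming the OrderedDict-preserving variant:

import json
from pathlib import Path
from collections import OrderedDict

def read_json(fname):
    # parse a JSON file, preserving key order
    fname = Path(fname)
    with fname.open('rt') as handle:
        return json.load(handle, object_hook=OrderedDict)

def write_json(content, fname):
    # pretty-print content to a JSON file
    fname = Path(fname)
    with fname.open('wt') as handle:
        json.dump(content, handle, indent=4, sort_keys=False)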
Example #2
 def __init__(self, config):
     super().__init__()
     print("Initializing AudioFeatureExtracor...")
     self.feature_dim = config["audio"]["feature_dim"]
     self.feature_file = config["audio"]["feature_file"]
     self.data = read_json(config["data_file"])
     self.features = read_json(self.feature_file)
     self.missing_tensor = torch.zeros(self.feature_dim)
Example #3
def main(args_parsed):
    o_path = Path(args_parsed.output)
    o_path.mkdir(parents=True, exist_ok=True)

    res = dict()
    res["name"] = args_parsed.name
    res["createTime"] = int(time.time())
    if args_parsed.describe is not None:
        res["description"] = args_parsed.describe
    mods = list()
    for l_mod in args_parsed.model:
        mod_cont = dict()
        conf = l_mod[0]
        path = l_mod[1]
        # if not ensure_exist(conf) or not ensure_exist(path):
        if not ensure_exist(conf):
            continue
        try:
            cf_content = read_json(conf)
            for sec in ("name", "arch", "metrics", "tester"):
                mod_cont[sec] = cf_content[sec]
            mod_cont["path"] = os.path.basename(path)
            # copy model & rename

            mods.append(mod_cont)
        except Exception as e:
            print(str(e))
            continue
    res["models"] = mods
    write_json(res, o_path / (args_parsed.name + ".json"))
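Example #3 expects an already-parsed namespace. Judging from the attributes it reads, a plausible parser setup would be the following (the flag names are hypothetical, inferred from the code):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--output', required=True, help='output directory')
parser.add_argument('--name', required=True, help='name of the result set')
parser.add_argument('--describe', default=None, help='optional description')
# each --model takes a config path and a checkpoint path
parser.add_argument('--model', nargs=2, action='append', default=[],
                    metavar=('CONFIG', 'PATH'))

main(parser.parse_args())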
Example #4
    def from_args(cls, args, options=''):
        """
        Initialize this class from some cli arguments. Used in train, test.
        :param args: Arguments from which to initialize the configuration.
        :param options: additional options to add to the config.
        :return: ConfigParser object configured with the arguments.
        """

        # Update the arguments with the options.
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        if not isinstance(args, tuple):
            args = args.parse_args()

        # Set cuda device.
        if args.device is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        # Set training resume file.
        if args.resume is not None:
            resume = Path(args.resume)
            cfg_fname = resume.parent / 'config.json'
        else:
            msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
            assert args.config is not None, msg_no_cfg
            resume = None
            cfg_fname = Path(args.config)

        config = read_json(cfg_fname)
        if args.config and resume:
            # update new config for fine-tuning
            config.update(read_json(args.config))

        config['debug'] = args.debug

        # parse custom cli options into dictionary
        modification = {
            opt.target: getattr(args, _get_opt_name(opt.flags))
            for opt in options
        }

        ConfigParser.resolve_constants(config)

        return cls(config, resume, modification)
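Examples #1 and #4 treat options as a sequence of objects with flags, type and target fields, and recover each parsed value through _get_opt_name. In pytorch-template-style code these are usually defined roughly as follows (a sketch; the exact source is not shown here):

import collections

# each custom option maps cli flags onto a target key inside the config
CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')

def _get_opt_name(flags):
    # recover the attribute name argparse derives from the long flag
    for flg in flags:
        if flg.startswith('--'):
            return flg.replace('--', '')
    return flags[0].replace('--', '')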
Example #5
 def find_config(self, data_path):
     if self.sensor_resolution is None:
         config = os.path.join(data_path, "dataset_config.json")
         if os.path.exists(config):
             self.config = read_json(config)
             self.data_source = self.config['data_source']
             self.sensor_resolution = self.config["sensor_resolution"]
         else:
             self.data_source = 'unknown'
             self.sensor_resolution = self.infer_resolution()
Example #6
def load_model_for_eval(checkpoint):
    config_file = Path(checkpoint).parent / 'config.json'
    config = read_json(config_file)
    model = get_instance(module_arch, 'arch', config)
    model.summary()
    checkpoint = torch.load(checkpoint, map_location='cpu')
    state_dict = checkpoint['state_dict']
    model.load_state_dict(clean_state_dict(state_dict))
    model.eval()
    return model
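A typical call site for Example #6, assuming a checkpoint produced by the matching trainer (the path is illustrative):

model = load_model_for_eval('saved/models/my_experiment/checkpoint-epoch10.pth')
with torch.no_grad():
    output = model(batch)  # batch: an input tensor prepared by the data loader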
Example #7
    def get_result(self, target):
        if self.f_augment:
            self.pred = self.f_augment(self.pred)
        # load previous result
        if os.path.isfile(target+".json"):
            res = read_json(target+".json")
            if not "meta" in res:
                print("Warning: no meta in exist json file.")
            elif self.meta != res["meta"]:
                print("Warning: meta for the current image is not same as previous result, may result in wrong align.")
        else:
            res = dict()
        if self.type == "Building":
            try:
                cnts = self.get_contours(self.pred)
                bboxs = self.get_bboxs(cnts[0])
                building = self.encoding(bboxs, self.meta, fun_prop=random_height)
            except (RuntimeError, TypeError):
                building = list()
            res["meta"] = self.meta
            res["buildings"] = building
            with open(target+".json", 'w') as f:
                f.write(json.dumps(res))

        elif self.type == "Road":
            res["meta"] = self.meta
            try:
                skel = self.pred
                for _ in range(2):
                    skel = self.get_skeleton(skel)

                    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
                    for _ in range(4):
                        skel = cv2.dilate(skel, kernel, iterations=5)
                        skel = SegmentOutputUtil.connect_line(skel, 10, 1)

                mid_line = self.get_skeleton(skel)
                img = np.zeros(skel.shape).astype(np.uint8)
                img[skel == 255] = [50]
                img[mid_line == 255] = [255]

                alpha = Image.fromarray(skel)
                img = Image.merge('LA', [Image.fromarray(img), alpha])
                img_path = target+"_road.png"
                img.save(img_path)
                res["roadImg"] = img_path

            except (RuntimeError, TypeError):
                res["roadImg"] = ""

            with open(target+".json", 'w') as f:
                f.write(json.dumps(res))

        return target+".json"
Example #8
 def test_draw(self):
     j = read_json("test_res.json")
     img = np.zeros((j["meta"]["h"], j["meta"]["w"], 3), np.uint8)
     for building in j["buildings"]:
         rect = list()
         for coor in building["coordinates"]:
             rect.append([coor["x"], coor["y"]])
         narray = np.array(rect)
         narray *= 10
         narray = narray.astype(int)
         cv2.drawContours(img, [narray], 0, (0, 255, 0), 2)  # green
     cv2.imshow("img", img)
     cv2.waitKey(0)
     cv2.destroyAllWindows()
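From the keys test_draw reads, a test_res.json produced by Example #7 is expected to have roughly this shape (the values below are illustrative):

{
    "meta": {"h": 64, "w": 64},
    "buildings": [
        {"coordinates": [{"x": 1, "y": 2}, {"x": 4, "y": 2},
                         {"x": 4, "y": 5}, {"x": 1, "y": 5}]}
    ]
}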
Example #9
    def __init__(self, args, options='', timestamp=True):
        # parse default and custom cli options
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        args = args.parse_args()

        self.resume = None
        self.cfg_fname = None
        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device
        if args.resume:
            self.resume = Path(args.resume)
            self.cfg_fname = self.resume.parent / 'config.json'
        if args.config:
            self.cfg_fname = Path(args.config)
        msg_no_cfg = ("Configuration file needs to be specified. "
                      "Add '-c config.json', for example.")
        assert self.cfg_fname is not None, msg_no_cfg

        # load config file and apply custom cli options
        config = read_json(self.cfg_fname)
        self.__config = _update_config(config, options, args)
        self.__raw = copy.deepcopy(self.__config)

        # set save_dir where trained model and log will be saved.
        save_dir = Path(
            parse_value(self.config['trainer']['extra_args']['save_dir']))
        timestamp = datetime.now().strftime(
            r'%m%d_%H%M%S') if timestamp else ''

        exper_name = self.config['name']
        self.__save_dir = save_dir / 'models' / exper_name / timestamp
        self.__log_dir = save_dir / 'log' / exper_name / timestamp

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)

        # save updated config file to the checkpoint dir
        if get_global_rank() == 0:
            write_json(self.config, self.save_dir / 'config.json')

        # configure logging module
        setup_logging(self.log_dir)
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
        logger = self.get_logger('config')
        logger.info(f"Experiment name: {exper_name}")
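Example #9 funnels the custom options through _update_config, which is not shown. In pytorch-template it resolves each option's target as a key path into the nested config; a sketch of a three-argument variant matching the call above (newer template versions take (config, modification) instead):

from functools import reduce
from operator import getitem

def _update_config(config, options, args):
    # overwrite config entries with any custom cli values that were set
    for opt in options:
        value = getattr(args, _get_opt_name(opt.flags))
        if value is not None:
            _set_by_path(config, opt.target, value)
    return config

def _set_by_path(tree, keys, value):
    # set a value in a nested dict via a ';'-separated key path
    keys = keys.split(';')
    _get_by_path(tree, keys[:-1])[keys[-1]] = value

def _get_by_path(tree, keys):
    # walk a nested dict along a list of keys
    return reduce(getitem, keys, tree)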
Example #10
def summarise(group_id, log_dir="data/saved/log", model_dir="data/saved/models"):
    seeded_runs = sorted(list(Path(log_dir).glob(f"**/{group_id}/seed-*")))
    print(f"Found a total of {len(seeded_runs)} seed runs in {group_id}")
    msg = f"Found no seeded runs for group_id: {group_id} in {log_dir}"
    assert len(seeded_runs) > 0, msg

    info_logs = OrderedDict()
    for seeded_run in seeded_runs:
        info_log_matches = list(Path(seeded_run).glob("**/info.log"))
        msg = f"expected to find a single info.log file, found {len(info_log_matches)}"
        assert len(info_log_matches) == 1, msg
        info_logs[seeded_run.stem] = info_log_matches[0]

    summary_log = []
    for seeded_run, info_log_path in info_logs.items():
        with open(info_log_path, "r") as f:
            log = f.read().splitlines()
        summary_log.extend(log)
    first_info_log = list(info_logs.values())[0]
    summary_log_name = f"summary-{'_'.join(list(info_logs.keys()))}.json"
    summary_log_path = first_info_log.parent / summary_log_name
    with open(summary_log_path, "w") as f:
        f.write("\n".join(summary_log))
    print(f"Wrote concatenated logs to {summary_log_path}")

    # retrieve the config from the first run
    rel_path = first_info_log.relative_to(log_dir).parent
    config_path = Path(model_dir) / rel_path / "config.json"
    assert config_path.exists(), f"Could not find config at {config_path}"
    config = read_json(config_path)

    logger = logging.getLogger("summary")

    # some care is required with logging to avoid sending all experiment logs
    # to the same file.  We avoid this by essentially resetting the logging utility

    # Remove all handlers associated with the root logger object
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=summary_log_path, level=logging.INFO)
    if not logger.handlers:
        logger.addHandler(logging.StreamHandler())

    log_summary(
        logger=logger,
        log_path=summary_log_path,
        eval_mode=config["eval_mode"],
        fixed_num_epochs=config["trainer"]["epochs"],
    )
Example #11
    def load_conf(self, config):
        self.conf_path = Path(config)
        self.config = read_json(config)
        if "models" not in self.config:
            return False
        for m in self.config["models"]:
            # check
            if not os.path.exists(m["path"]):
                if not (self.conf_path.parents[0] / m["path"]).exists():
                    print("Model", m["name"], "path", m["path"],
                          "is not exist.")
                    # continue
                m["path"] = str(self.conf_path.parents[0] / m["path"])

            self.models[m["name"]] = m
Example #12
def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):
	"""
	Setup logging configuration
	"""
	log_config = Path(log_config)
	if log_config.is_file():
		config = read_json(log_config)
		# modify logging paths based on run config
		for _, handler in config['handlers'].items():
			if 'filename' in handler:
				handler['filename'] = str(save_dir / handler['filename'])

		logging.config.dictConfig(config)
	else:
		print("Warning: logging configuration file is not found in {}.".format(log_config))
		logging.basicConfig(level=default_level)
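setup_logging feeds the parsed JSON straight to logging.config.dictConfig, so the file must follow the dictConfig schema. A minimal logger/logger_config.json compatible with the filename rewriting above might look like this (handler names and format are illustrative):

{
    "version": 1,
    "disable_existing_loggers": false,
    "formatters": {
        "simple": {"format": "%(asctime)s - %(levelname)s - %(message)s"}
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "simple"
        },
        "info_file_handler": {
            "class": "logging.FileHandler",
            "formatter": "simple",
            "filename": "info.log"
        }
    },
    "root": {"level": "INFO", "handlers": ["console", "info_file_handler"]}
}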
Example #13
def summarise(group_id,
              log_dir="data/saved/log",
              model_dir="data/saved/models"):
    seeded_runs = sorted(list(Path(log_dir).glob(f"**/{group_id}/seed-*")))
    print(f"Found a total of {len(seeded_runs)} seed runs in {group_id}")

    info_logs = OrderedDict()
    for seeded_run in seeded_runs:
        info_log_matches = list(Path(seeded_run).glob("**/info.log"))
        msg = f"expected to find a single info.log file, found {len(info_log_matches)}"
        assert len(info_log_matches) == 1, msg
        info_logs[seeded_run.stem] = info_log_matches[0]

    summary_log = []
    for seeded_run, info_log_path in info_logs.items():
        with open(info_log_path, "r") as f:
            log = f.read().splitlines()
        summary_log.extend(log)
    first_info_log = list(info_logs.values())[0]
    summary_log_name = f"summary-{'_'.join(list(info_logs.keys()))}.json"
    summary_log_path = first_info_log.parent / summary_log_name
    with open(summary_log_path, "w") as f:
        f.write("\n".join(summary_log))
    print(f"Wrote summary log to {summary_log_path}")

    # retrieve the config from the first run
    rel_path = first_info_log.relative_to(log_dir).parent
    config_path = Path(model_dir) / rel_path / "config.json"
    assert config_path.exists(), f"Could not find config at {config_path}"
    config = read_json(config_path)

    logger = logging.getLogger("summary")
    logging.basicConfig(filename=summary_log_path, level=logging.INFO)
    logger.addHandler(logging.StreamHandler())

    log_summary(
        logger=logger,
        log_path=summary_log_path,
        eval_mode=config["eval_mode"],
        fixed_num_epochs=config["trainer"]["epochs"],
    )
Example #14
    def init_params(cls, config_fname=None):
        """
        intialize and setup params from config file

        Parameters
        ----------
        config_fname

        Returns
        -------

        """
        if config_fname is None:
            config_fname = os.path.join(Path(__file__).parent.parent, 'config.json')
            logging.info('Using default configuration file: %s', config_fname)

        # parse the config file
        _config = read_json(config_fname)
        # specify the saving directory
        proj_name = _config['project_name']
        save_root_dir = Path(_config['resparams']['save_dir'])
        run_id = datetime.now().strftime(r'%m%d_%H%M%S')
        save_model_dir = save_root_dir / proj_name / 'models'
        save_log_dir = save_root_dir / proj_name / 'logs'

        save_model_dir.mkdir(parents=True, exist_ok=True)
        save_log_dir.mkdir(parents=True, exist_ok=True)

        _config['resparams']['run_id'] = run_id
        _config['resparams']['save_model_dir'] = str(save_model_dir)
        _config['resparams']['save_log_dir'] = str(save_log_dir)

        # setup logging
        setup_logging(save_log_dir, run_id=run_id)

        return cls(_config)
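A call site for Example #14 might look like the following (the class name is hypothetical; init_params is a classmethod):

params = ExperimentParams.init_params()                 # default config.json
params = ExperimentParams.init_params('my_config.json') # explicit config file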
Example #15
def setup_logging(save_dir,
                  log_config='logger/logger_config.json',
                  default_level=logging.INFO):
    """
    设置日志配置
    Setup logging configuration

    :param save_dir: 日志保存目录
    :param log_config: 日志配置文件路径
    :param default_level: 默认日志等级
    :return:
    """

    # 解析配置文件路径
    log_config = Path(log_config)

    # 是个文件
    if log_config.is_file():
        # json.load解析
        config = read_json(log_config)

        # 找到并设置日志保存路径
        for _, handler in config['handlers'].items():
            if 'filename' in handler:
                # 保存路径,文件名
                handler['filename'] = str(save_dir / handler['filename'])

        # config给logger
        logging.config.dictConfig(config)

    # 没找到
    else:
        print("Warning: logging configuration file is not found in {}.".format(
            log_config))
        # 用默认配置,并info等级
        logging.basicConfig(level=default_level)
Example #16
    def __init__(self, args, options='', timestamp=True, slave_mode=False):
        # slave_mode - when calling the config parser from an existing process, we
        # avoid reinitialising the logger and ignore sys.argv when parsing args.

        # parse default and custom cli options
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)

        if slave_mode:
            args = args.parse_args(args=[])
        else:
            args = args.parse_args()

        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        if args.resume and not slave_mode:
            self.resume = Path(args.resume)
            # self.cfg_fname = self.resume.parent / 'config.json'
        else:
            msg_no_cfg = "Config file must be specified"
            assert args.config is not None, msg_no_cfg
            self.resume = None
        self.cfg_fname = Path(args.config)

        # load config file and apply custom cli options
        config = read_json(self.cfg_fname)
        self._config = _update_config(config, options, args)

        if self._config.get("eval_config", False):
            # validate path to evaluation file
            eval_cfg_path = self._config.get("eval_config")
            msg = f"eval_config was specified, but `{eval_cfg_path}` does not exist"
            assert Path(self._config.get("eval_config")).exists(), msg

        # set save_dir where trained model and log will be saved.
        if "trainer" in self.config:
            save_dir = Path(self.config['trainer']['save_dir'])
        else:
            save_dir = Path(self.config['tester']['save_dir'])
        timestamp = datetime.now().strftime(
            r"%Y-%m-%d_%H-%M-%S") if timestamp else ""

        if slave_mode:
            timestamp = f"{timestamp}-eval-worker"

        # We assume that the config files are organised into directories such that
        # each directory has the name of the dataset.
        dataset_name = self.cfg_fname.parent.stem
        exper_name = f"{dataset_name}-{self.cfg_fname.stem}"
        self._save_dir = save_dir / 'models' / exper_name / timestamp
        self._log_dir = save_dir / 'log' / exper_name / timestamp
        self._web_log_dir = save_dir / 'web' / exper_name / timestamp
        self._exper_name = exper_name
        self._args = args

        # if set, remove all previous experiments with the current config
        if vars(args).get("purge_exp_dir", False):
            for dirpath in (self._save_dir, self._log_dir, self._web_log_dir):
                config_dir = dirpath.parent
                existing = list(config_dir.glob("*"))
                print(
                    f"purging {len(existing)} directories from config_dir...")
                tic = time.time()
                os.system(f"rm -rf {config_dir}")
                print(f"Finished purge in {time.time() - tic:.3f}s")

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)

        # save updated config file to the checkpoint dir
        write_json(self.config, self.save_dir / 'config.json')

        # configure logging module
        if not slave_mode:
            self.log_path = setup_logging(self.log_dir)

        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
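Note that the experiment name in Example #16 is derived purely from the config path, which is why the configs must live in per-dataset directories; for instance (hypothetical path):

cfg_fname = Path('configs/msrvtt/baseline.json')
exper_name = f"{cfg_fname.parent.stem}-{cfg_fname.stem}"  # 'msrvtt-baseline'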
Example #17
    def from_args(cls, args, options=''):
        """
        用命令行参数,初始化这个类(训练、测试时)
        Initialize this class from some cli arguments. Used in train, test.
        """

        # 处理options(value)到args(key)

        # 遍历options
        for opt in options:
            # opt,添加到args
            # opt.flags,为单个命令list
            args.add_argument(*opt.flags, default=None, type=opt.type)

        # 若args不是元组
        if not isinstance(args, tuple):
            # parse_args(),转元组
            args = args.parse_args()

        # 用args初始化

        # 初始化计算代理
        if args.device is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        # 初始化config配置文件(重启方式、原始配置方式、命令更新方式)

        # 重启方式(args.resume存在)
        if args.resume is not None:
            # Path
            resume = Path(args.resume)
            # 配置文件路径为:重启点的配置文件
            # 'config.json'
            cfg_fname = resume.parent / 'config.json'
        else:
            # msg:'配置文件需要被指定,请添加'-c rnn_config.json'的命令
            msg_no_cfg = "Configuration file need to be specified. Add '-c rnn_config.json', for example."
            # 保证args.config存在
            assert args.config is not None, msg_no_cfg
            # resume设为none
            resume = None
            # 配置文件路径为:原始配置文件
            cfg_fname = Path(args.config)

        # 读取,配置文件路径(cfg_fname),为json.load
        config = read_json(cfg_fname)
        # 计算代理
        config['device_id'] = args.device
        # 配置文件
        config['config_file_name'] = args.config

        # 命令更新方式(若args.config和resume都存在)(可能这里不是这个意思)
        if args.config and resume:
            # 更新新的配置,来fine-tuning
            # update new config for fine-tuning
            config.update(read_json(args.config))

        # 解析命令行参数进字典
        """
        parse custom cli options into dictionary
        1、遍历options,为opt,
        2、获取opt.flags的所有参数名(key);从args(keys)中,寻找对应,
        3、opt.target(k): opt.flag(v)
        """
        modification = {
            opt.target: getattr(args, _get_opt_name(opt.flags))
            for opt in options
        }

        # 返回这个类对象本身,
        # 可能是保持这个对象,后面继续执行解析新的命令行
        return cls(config, resume, modification)
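Tying the ConfigParser examples together: in these templates the training entry point typically builds the base parser, declares the custom options, and hands both to from_args. A sketch (flag names and targets are illustrative):

import argparse
import collections

if __name__ == '__main__':
    args = argparse.ArgumentParser(description='training entry point')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to the latest checkpoint')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable')

    # custom cli options to modify config values from the command line
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),
        CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size'),
    ]
    config = ConfigParser.from_args(args, options)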