示例#1
0
 def test(self) -> None:
     """Run inference over the test dataloader, save predictions, report FPS.

     Iterates the dataloader under ``torch.no_grad``, applies the label
     transform to each batch of model outputs, writes one ``<name>.png`` per
     sample into ``self._img_save_path``, and prints the overall throughput.
     """
     dataset_len = len(self._dataloader.dataset)
     with torch.no_grad():
         start_time = time.time()
         for item in self._dataloader:
             imgs = item['image'].to(self._device)
             preds = self._model(imgs)
             # Some models return (main_output, aux_outputs, ...); keep the
             # main head only.
             if isinstance(preds, (list, tuple)):
                 preds = preds[0].cpu()
             else:
                 preds = preds.cpu()
             preds = self._label_transform_func(preds)
             for i in range(preds.shape[0]):
                 filename = os.path.splitext(
                     item['filename'][i])[0] + '.png'
                 filepath = pathjoin(self._img_save_path, filename)
                 cv2.imwrite(filepath, preds[i])
         print(f"{ dataset_len / (time.time()-start_time) } FPS")
示例#2
0
 def __call__(self, model_dict, isFinal: bool = False, hooks=False):
     """Persist ``model_dict`` every ``save_interval`` calls, or when final.

     Increments the internal call counter; on a save boundary (or when
     ``isFinal`` is set) the dict is written to
     ``<save_dir>/<base_name>-<n|final>.pth``.  When ``hooks`` is a callable
     it is invoked with the saved path and the interval counter.
     """
     self.__timer += 1
     # Not at a save boundary and not the final call: nothing to do.
     if self.__timer % self.__save_interval != 0 and not isFinal:
         return
     self.__interval_timer += 1
     if isFinal:
         model_ext_name = 'final'
     else:
         model_ext_name = str(self.__interval_timer)
     checkpoint_name = self.__base_model_name + '-' + model_ext_name + '.pth'
     model_save_path = pathjoin(self.__save_dir_path, checkpoint_name)
     torch.save(model_dict, model_save_path)
     if hooks:
         hooks(model_save_path, self.__interval_timer)
示例#3
0
 def __init__(self, save_interval, save_dir_path=None, save_base_name: str = 'model'):
     """Checkpoint saver that writes a model every ``save_interval`` calls.

     When ``save_dir_path`` is omitted, a timestamped directory one level up
     is used.  The target directory is created eagerly.
     """
     self.__timer = 0
     self.__save_interval = save_interval
     if save_dir_path is None:
         # Default to a fresh timestamped directory next to the project root.
         # NOTE(review): '%F' is a glibc strftime extension — may fail on
         # Windows; confirm target platforms.
         save_dir_path = pathjoin(
             '../',
             time.strftime("%F %H-%M-%S", time.localtime()),
         )
     self.__save_dir_path = save_dir_path
     mkdirs(self.__save_dir_path)
     self.__base_model_name = save_base_name
     self.__interval_timer = 0
示例#4
0
 def __call__(self, model: Module, isFinal: bool = False, save_base_name=None, is_add=True):
     """Save ``model.state_dict()`` on save-interval boundaries or when final.

     Parameters
     ----------
     model : Module
         Model whose state dict is serialized.
     isFinal : bool
         Force a save with the ``'final'`` suffix regardless of the counter.
     save_base_name : str, optional
         One-off override for the configured base file name.
     is_add : bool
         When True, advance the internal call counter before checking.
     """
     if is_add:
         self.__timer += 1
     due = self.__timer % self.__save_interval == 0
     if not (due or isFinal):
         return
     self.__interval_timer += 1
     suffix = 'final' if isFinal else str(self.__interval_timer)
     base = self.__base_model_name if save_base_name is None else save_base_name
     model_save_path = pathjoin(self.__save_dir_path, base + '-' + suffix + '.pth')
     torch.save(model.state_dict(), model_save_path)
示例#5
0
文件: tester.py 项目: hb-stone/FC-SOD
 def test(self) -> None:
     """Run inference on the test set and save each prediction as a PNG.

     Iterates the dataloader under ``torch.no_grad``, applies the label
     transform, and writes one ``<name>.png`` per sample into
     ``self._img_save_path`` while updating a progress bar.
     """
     dataset_len = len(self._dataloader.dataset)
     pbar = tqdm(total=dataset_len)
     try:
         with torch.no_grad():
             for item in self._dataloader:
                 imgs = item['image'].to(self._device)
                 preds = self._model(imgs).cpu()
                 preds = self._label_transform_func(preds)
                 for i in range(preds.shape[0]):
                     filename = os.path.splitext(item['filename'][i])[0] + '.png'
                     filepath = pathjoin(self._img_save_path, filename)
                     cv2.imwrite(filepath, preds[i])
                     pbar.update(1)
                     pbar.set_description("Processing %s" % filename)
     finally:
         # Close the bar even on failure so the terminal is left clean
         # (the original leaked the tqdm instance).
         pbar.close()
示例#6
0
def evaluate(config: Configuration) -> None:
    """Compute saliency measures and append them to the experiment reports.

    Runs the configured measures over ``config.EVALUATOR_DIR`` against the
    test ground truth, then records the results twice: as one row appended to
    an ``.xlsx`` workbook stored next to the summary file, and as a markdown
    section appended (under a file lock) to
    ``config.EVALUATOR_SUMMARY_FILE_PATH``.  No-op when evaluation is
    disabled.
    """
    if config.DISABLE_EVAL:
        return
    # USE_GPU is a string flag (or None); any non-None value selects CUDA.
    if config.USE_GPU is not None:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    sal_measure = get_measure(config.EVAL_MEASURE_LIST, config.EVALUATOR_DIR,
                              config.DATASET_TEST_GT_DIR, device)
    measure = OrderedDict()
    measure['setting'] = config.MODEL_SAVE_DIR_NAME
    measure['dataset'] = config.DATASET_NAME
    measure.update(sal_measure)
    # pandas wants equal-length column lists: wrap scalars in a list, and
    # stringify sequence-valued measures so each renders as a single cell.
    for key, item in measure.items():
        if not isinstance(item, Sequence):
            measure[key] = [item]
        else:
            measure[key] = [str(item)]
    table_content = pandas2markdown(pd.DataFrame(measure))
    record_file_dir = os.path.dirname(config.EVALUATOR_SUMMARY_FILE_PATH)
    mkdirs(record_file_dir)

    # Write xlsx report (same base name as the summary file)
    xlsx_filename = os.path.splitext(
        os.path.basename(config.EVALUATOR_SUMMARY_FILE_PATH))[0] + '.xlsx'
    xlsx_filepath = pathjoin(record_file_dir, xlsx_filename)
    # One row of hyper-parameters plus measures for this run.
    data_dict = OrderedDict(
        **{
            'setting': str(config.MODEL_SAVE_DIR_NAME),
            'dataset': str(config.DATASET_NAME),
            'g_lr': str(config.G_LEARNING_RATE),
            'd_lr': str(config.D_LEARNING_RATE),
            'step_size': str(config.STEP_INTERVAL),
            'batch_size': str(config.BATCH_SIZE),
            'crop_size': str(config.CROP_SIZE),
            'partial_data': str(config.PARTIAL_DATA),
            'max_iter': str(config.MAX_ITER),
            'lambda_pred_sal': str(config.LAMBDA_PRED_SAL),
            'lambda_pred_adv': str(config.LAMBDA_PRED_ADV),
            'lambda_semi_sal': str(config.LAMBDA_SEMI_SAL),
            'lambda_semi_adv': str(config.LAMBDA_SEMI_ADV),
        })
    data_dict.update(sal_measure)
    record_dataframe = pd.DataFrame(data_dict, index=[0])
    if not os.path.exists(xlsx_filepath):
        record_dataframe.to_excel(xlsx_filepath, index=False)
    else:
        # Append by re-reading the existing sheet and concatenating the row.
        pd.concat([pd.read_excel(xlsx_filepath), record_dataframe],
                  sort=False).to_excel(xlsx_filepath, index=False)

    title = config.MODEL_SAVE_DIR_NAME
    file_content = (f"\n"
                    f"# Experiment {title}  \n"
                    f"Time:{time.strftime('%Y-%m-%d %X')}  \n"
                    f"Dataset:{config.DATASET_NAME}  \n"
                    f"Test folder:{config.EVALUATOR_DIR}  \n"
                    f"Test index:{' '.join(config.EVAL_MEASURE_LIST)}  \n"
                    f"Command parameters:\n"
                    f"```bash\n"
                    f"{config.CMD_STR}\n"
                    f"```\n"
                    f"\n"
                    f"## Experimental results\n"
                    f"{table_content}\n")
    # The file lock serializes appends from concurrently running experiments.
    with portalocker.Lock(config.EVALUATOR_SUMMARY_FILE_PATH, 'a+', \
                          encoding='utf-8',timeout=600) as f:
        f.write(file_content)
示例#7
0
    def init(self, use_arg_parser=True, *args, **kwargs):
        """Declare configuration defaults, register CLI flags, derive paths.

        Every configuration field is first assigned its default value, then a
        matching ``argparse`` flag is registered for each tunable field.  When
        ``use_arg_parser`` is true the command line is parsed and the parsed
        values overwrite the defaults via ``self.update``.  The tail of the
        method derives dependent values: model-save directories, dataset
        paths (via ``get_dataset_path_by_name``), test-image output paths,
        and the evaluator directory/ground-truth lists.

        Parameters
        ----------
        use_arg_parser : bool
            Parse ``sys.argv`` and apply the result when True.
        *args, **kwargs
            Forwarded to the superclass initializer.
        """
        super().__init__(*args, **kwargs)
        # running setting
        self.DISABLE_TRAIN = False
        self.DISABLE_TEST = False
        self.DISABLE_EVAL = False

        # dataset setting
        self.DATASET_TRAIN_ROOT_DIR = None
        self.DATASET_TRAIN_LIST_PATH = None
        self.DATASET_TEST_ROOT_DIR = None
        self.DATASET_TEST_LIST_PATH = None
        self.DATASET_NAME = None
        self.DATASET_TRAIN_DIR = None
        self.DATASET_TEST_DIR = None
        self.DATASET_IS_SCALE = False
        self.RANDOM_BRIGHT = False

        # training setting
        self.BATCH_SIZE = 1
        self.CROP_SIZE = None
        self.LEARNING_RATE = 0.007
        self.EPOCH = 20
        self.MOMENTUM = 0.9
        self.WEIGHT_DECAY = 0.0
        self.PRETRAINED_MODEL_PATH = None
        self.STEP_INTERVAL = 10
        self.DROP_RATE = 0
        self.DISABLE_VISUAL = False
        self.OPTIM = "ADAM"
        self.MODEL_NAME = "efficientnet-b3"
        self.RESNET_SCALE = 4

        # gpu (parsed as a string below; None means unset/CPU)
        self.USE_GPU = None

        # model saving
        self.MODEL_SAVE_ROOT_DIR = None
        self.MODEL_SAVE_DIR_NAME = None
        self.LOG_DIR = pathjoin(
            os.path.dirname(__file__),
            'logs',
        )

        # hyper parameters
        self.TRAIN_DOWN_ITER = 2 / 3
        self.S_LOSS_GAMA = 3

        # testing model path
        self.TEST_MODEL_PTH_PATH = None

        # eval setting
        self.EVALUATOR_DIR = None
        self.EVAL_MEASURE_LIST = None
        self.EVALUATOR_SUMMARY_FILE_PATH = None
        self.EVALUATOR_DATASETS = None
        # proc name
        self.PROC_NAME = None

        # One CLI flag per tunable field; each default mirrors the value
        # assigned above so omitted flags keep the in-code defaults.
        parser = ArgumentParser(description="Configuration of the System")
        parser.add_argument('-is_scale', '--DATASET_IS_SCALE', action='store_true', \
                            default=self.DATASET_IS_SCALE)
        parser.add_argument('-random_bright', '--RANDOM_BRIGHT', action='store_true', \
                            default=self.RANDOM_BRIGHT)
        parser.add_argument('-model_name',
                            '--MODEL_NAME',
                            type=str,
                            default=self.MODEL_NAME)
        parser.add_argument('-s_loss_gama',
                            '--S_LOSS_GAMA',
                            type=int,
                            default=self.S_LOSS_GAMA)
        # NOTE(review): default is the int 4 but the flag parses floats —
        # confirm downstream consumers accept non-integer scales.
        parser.add_argument('-resnet_scale',
                            '--RESNET_SCALE',
                            type=float,
                            default=self.RESNET_SCALE)

        parser.add_argument('-train_down_iter',
                            '--TRAIN_DOWN_ITER',
                            type=float,
                            default=self.TRAIN_DOWN_ITER)

        parser.add_argument('-disable_test', '--DISABLE_TEST', action='store_true', \
                            default=self.DISABLE_TEST)
        parser.add_argument('-disable_train', '--DISABLE_TRAIN',  action='store_true', \
                            default=self.DISABLE_TRAIN)
        parser.add_argument('-disable_eval', '--DISABLE_EVAL', action='store_true', \
                            default=self.DISABLE_EVAL)
        parser.add_argument('-disable_visual', '--DISABLE_VISUAL', action='store_true', \
                            default=self.DISABLE_VISUAL)
        parser.add_argument('-d', '--DATASET_NAME', required=True, type=str, \
                            default=self.DATASET_NAME, \
                            choices="DUT-OMRON DUTS ECSSD HKU-IS PASCAL-S SOD".split(" "))
        parser.add_argument('-b',
                            '--BATCH_SIZE',
                            type=int,
                            default=self.BATCH_SIZE)
        parser.add_argument('-crop',
                            '--CROP_SIZE',
                            type=int,
                            default=self.CROP_SIZE)
        parser.add_argument('-lr',
                            '--LEARNING_RATE',
                            type=float,
                            default=self.LEARNING_RATE)
        parser.add_argument('-epoch', '--EPOCH', type=int, default=self.EPOCH)
        parser.add_argument('-weight_decay',
                            '--WEIGHT_DECAY',
                            type=float,
                            default=self.WEIGHT_DECAY)
        parser.add_argument('-drop_rate',
                            '--DROP_RATE',
                            type=float,
                            default=self.DROP_RATE)
        parser.add_argument('-step',
                            '--STEP_INTERVAL',
                            type=int,
                            default=self.STEP_INTERVAL)
        parser.add_argument('-optim', '--OPTIM', type=str, default=self.OPTIM)
        parser.add_argument('-momentum',
                            '--MOMENTUM',
                            type=float,
                            default=self.MOMENTUM)
        parser.add_argument('-pretrain', '--PRETRAINED_MODEL_PATH', type=str, \
                            default=self.PRETRAINED_MODEL_PATH)
        parser.add_argument('-gpu',
                            '--USE_GPU',
                            type=str,
                            default=self.USE_GPU)
        parser.add_argument('-proc',
                            '--PROC_NAME',
                            type=str,
                            default=self.PROC_NAME)
        parser.add_argument('-test_model',
                            '--TEST_MODEL_PTH_PATH',
                            type=str,
                            default=self.TEST_MODEL_PTH_PATH)
        parser.add_argument('-save', '--MODEL_SAVE_DIR_NAME', required=True, type=str, \
                            default=self.MODEL_SAVE_DIR_NAME)
        parser.add_argument('-eval_dir',
                            '--EVALUATOR_DIR',
                            type=str,
                            default=self.EVALUATOR_DIR)
        parser.add_argument('-eval_m_list', '--EVAL_MEASURE_LIST', nargs='+', \
                            type=str, choices="max-F mean-F MAE S precision recall".split(" "),default=self.EVAL_MEASURE_LIST)
        parser.add_argument('-eval_file', '--EVALUATOR_SUMMARY_FILE_PATH', \
                            type=str, default=self.EVALUATOR_SUMMARY_FILE_PATH)
        parser.add_argument('-eval_d', '--EVALUATOR_DATASETS', nargs='+', \
                            type=str, choices="ALL DUT-OMRON DUTS ECSSD HKU-IS PASCAL-S SOD".split(" "), \
                            default=self.EVALUATOR_DATASETS)

        if use_arg_parser:
            args = parser.parse_args()
            self.update(args.__dict__)

        # MODEL_SAVE_PATH: <LOG_DIR>/<run name>/save_models
        self.MODEL_SAVE_ROOT_DIR = pathjoin(
            self.LOG_DIR,
            self.MODEL_SAVE_DIR_NAME,
        )

        self.MODEL_SAVE_PATH = pathjoin(
            self.MODEL_SAVE_ROOT_DIR,
            'save_models',
        )

        if self.BATCH_SIZE != 1:
            assert self.CROP_SIZE is not None, "CROP_SIZE can't be null, if " \
                                               "BATCH_SIZE != 1, please use -crop to specify the paramter"

        if self.EVAL_MEASURE_LIST is None:
            self.EVAL_MEASURE_LIST = ["max-F", "MAE", "S"]

        # Resolve dataset directories and list files from the dataset name.
        tmp_dict = get_dataset_path_by_name(self.DATASET_NAME)
        self.DATASET_TRAIN_ROOT_DIR = tmp_dict['train_dir_path']
        self.DATASET_TRAIN_LIST_PATH = tmp_dict['train_lst_path']
        self.DATASET_TEST_ROOT_DIR = tmp_dict['test_dir_path']
        self.DATASET_TEST_LIST_PATH = tmp_dict['test_lst_path']
        self.DATASET_TRAIN_DIR = tmp_dict['train_dir_name']
        self.DATASET_TEST_DIR = tmp_dict['test_dir_name']
        self.DATASET_TEST_GT_DIR = pathjoin(self.DATASET_TEST_ROOT_DIR, 'GT')

        if self.TEST_MODEL_PTH_PATH is None:
            self.TEST_MODEL_PTH_PATH = pathjoin(self.MODEL_SAVE_PATH,
                                                'model-final.pth')

        self.TEST_IMG_SAVE_PATH = pathjoin(
            self.MODEL_SAVE_ROOT_DIR, 'test', self.DATASET_TEST_DIR
            if len(self.DATASET_TEST_DIR) != 0 else self.DATASET_NAME)

        self.OPTIM = self.OPTIM.upper()

        if self.EVALUATOR_DIR is None:
            self.EVALUATOR_DIR = self.TEST_IMG_SAVE_PATH
        if self.EVALUATOR_SUMMARY_FILE_PATH is None:
            self.EVALUATOR_SUMMARY_FILE_PATH = pathjoin(
                self.LOG_DIR, 'ExperimentalNotes.md')
        else:
            self.EVALUATOR_SUMMARY_FILE_PATH = pathjoin(
                self.LOG_DIR, self.EVALUATOR_SUMMARY_FILE_PATH)

        self.CMD_STR = "python " + " ".join(sys.argv)

        # Expand the evaluator dataset list into parallel path lists; ensure
        # the primary dataset / EVALUATOR_DIR pair is always included.
        if self.EVALUATOR_DATASETS is not None:
            if "ALL" in self.EVALUATOR_DATASETS:
                self.EVALUATOR_DATASETS = "DUT-OMRON DUTS ECSSD HKU-IS PASCAL-S SOD".split(
                )
            self.EVALUATOR_GTS = []
            self.TEST_IMG_SAVE_PATHS = []
            self.DATASET_TEST_ROOT_DIRS = []
            self.DATASET_TEST_LIST_PATHS = []
            for dataset_name in self.EVALUATOR_DATASETS:
                dataset_info = get_dataset_path_by_name(dataset_name)
                self.EVALUATOR_GTS.append(
                    pathjoin(dataset_info['test_dir_path'], 'GT'))
                dataset_test_dir_name = dataset_info['test_dir_name']
                testpath = pathjoin(
                    self.MODEL_SAVE_ROOT_DIR, 'test', dataset_test_dir_name
                    if len(dataset_test_dir_name) != 0 else dataset_name)
                self.TEST_IMG_SAVE_PATHS.append(testpath)
                self.DATASET_TEST_ROOT_DIRS.append(
                    dataset_info['test_dir_path'])
                self.DATASET_TEST_LIST_PATHS.append(
                    dataset_info['test_lst_path'])
            self.EVALUATOR_DIRS = self.TEST_IMG_SAVE_PATHS
            if self.EVALUATOR_DIR not in self.EVALUATOR_DIRS:
                self.EVALUATOR_DIRS.append(self.EVALUATOR_DIR)
                self.EVALUATOR_GTS.append(self.DATASET_TEST_GT_DIR)
                self.DATASET_TEST_ROOT_DIRS.append(self.DATASET_TEST_ROOT_DIR)
                self.DATASET_TEST_LIST_PATHS.append(
                    self.DATASET_TEST_LIST_PATH)
                self.EVALUATOR_DATASETS.append(self.DATASET_NAME)

        else:
            self.EVALUATOR_DIRS = [self.TEST_IMG_SAVE_PATH]
            self.EVALUATOR_GTS = [self.DATASET_TEST_GT_DIR]
            self.TEST_IMG_SAVE_PATHS = [self.TEST_IMG_SAVE_PATH]
            self.DATASET_TEST_ROOT_DIRS = [self.DATASET_TEST_ROOT_DIR]
            self.DATASET_TEST_LIST_PATHS = [self.DATASET_TEST_LIST_PATH]
            self.EVALUATOR_DATASETS = [self.DATASET_NAME]
示例#8
0
文件: config.py 项目: hb-stone/FC-SOD
    def init(self, use_arg_parser=True, *args, **kwargs):
        """Declare configuration defaults, register CLI flags, derive paths.

        Same pattern as the sibling configuration: assign every field its
        default, register one ``argparse`` flag per tunable field, optionally
        parse the command line and apply it via ``self.update``, then derive
        dependent paths (model-save dirs, dataset paths, evaluator lists).

        Parameters
        ----------
        use_arg_parser : bool
            Parse ``sys.argv`` and apply the result when True.
        *args, **kwargs
            Forwarded to the superclass initializer.
        """
        super().__init__(*args, **kwargs)
        # Run configuration
        self.DISABLE_TRAIN = False
        self.DISABLE_TEST = False
        self.DISABLE_EVAL = False

        # dataset configuration
        self.DATASET_TRAIN_ROOT_DIR = None
        self.DATASET_TRAIN_LIST_PATH = None
        self.DATASET_TEST_ROOT_DIR = None
        self.DATASET_TEST_LIST_PATH = None
        self.DATASET_NAME = None
        self.DATASET_TRAIN_DIR = None
        self.DATASET_TEST_DIR = None

        # Training configuration
        self.SEED = 2345
        self.BATCH_SIZE = 10
        self.CROP_SIZE = 321
        self.G_LEARNING_RATE = 2.5e-4
        self.D_LEARNING_RATE = 1e-4
        self.MAX_ITER = 25000
        self.MOMENTUM = 0.9
        self.PRETRAINED_MODEL_PATH = None
        self.STEP_INTERVAL = 1
        self.WEIGHT_DECAY = 0.0005
        self.DISABLE_VISUAL = False
        self.DATA_IDX_PKL_PATH = None
        self.SEMI_START = 5000
        self.MASK_T = 0.2
        self.PARTIAL_DATA = 0.125
        self.LAMBDA_SEMI_ADV = 0.007
        self.LAMBDA_SEMI_SAL = 1
        self.LAMBDA_PRED_SAL = 1
        self.LAMBDA_PRED_ADV = 0.01
        self.USE_GRAB = False
        # Graphics card use configuration (string flag; None means unset/CPU)
        self.USE_GPU = None

        # Model save settings
        self.MODEL_SAVE_ROOT_DIR = None
        self.MODEL_SAVE_DIR_NAME = None
        self.LOG_DIR = pathjoin(
            os.path.dirname(__file__),
            'logs',
        )
        self.DATA_IDX_PKL_SAVE_PATH = pathjoin(
            self.LOG_DIR,
            'idx.pkl',
        )

        # test setting
        self.TEST_MODEL_PTH_PATH = None

        # eval setting
        self.EVALUATOR_DIR = None
        self.EVAL_MEASURE_LIST = None
        self.EVALUATOR_SUMMARY_FILE_PATH = None
        self.EVALUATOR_DATASETS = None

        # proc_name setting
        self.PROC_NAME = None

        # One CLI flag per tunable field; defaults mirror the values above.
        parser = ArgumentParser(description="Configuration of the System")

        parser.add_argument('-disable_test', '--DISABLE_TEST', action='store_true', \
                            default=self.DISABLE_TEST)
        parser.add_argument('-disable_train', '--DISABLE_TRAIN',  action='store_true', \
                            default=self.DISABLE_TRAIN)
        parser.add_argument('-disable_eval', '--DISABLE_EVAL', action='store_true', \
                            default=self.DISABLE_EVAL)
        parser.add_argument('-disable_visual', '--DISABLE_VISUAL', action='store_true', \
                            default=self.DISABLE_VISUAL)
        parser.add_argument('-seed', '--SEED', type=int, default=self.SEED)
        parser.add_argument('-d', '--DATASET_NAME', required=True, type=str, \
                            default=self.DATASET_NAME, \
                            choices="DUT-OMRON DUTS PASCAL-S SOD".split(" "))

        parser.add_argument('-b',
                            '--BATCH_SIZE',
                            type=int,
                            default=self.BATCH_SIZE)
        parser.add_argument('-crop',
                            '--CROP_SIZE',
                            type=int,
                            default=self.CROP_SIZE)
        parser.add_argument('-g_lr',
                            '--G_LEARNING_RATE',
                            type=float,
                            default=self.G_LEARNING_RATE)
        parser.add_argument('-d_lr',
                            '--D_LEARNING_RATE',
                            type=float,
                            default=self.D_LEARNING_RATE)
        parser.add_argument('-max_iter',
                            '--MAX_ITER',
                            type=int,
                            default=self.MAX_ITER)
        parser.add_argument('-step',
                            '--STEP_INTERVAL',
                            type=int,
                            default=self.STEP_INTERVAL)
        parser.add_argument('-momentum',
                            '--MOMENTUM',
                            type=float,
                            default=self.MOMENTUM)
        parser.add_argument('-weight_decay',
                            '--WEIGHT_DECAY',
                            type=float,
                            default=self.WEIGHT_DECAY)
        parser.add_argument('-idx',
                            '--DATA_IDX_PKL_PATH',
                            type=str,
                            default=self.DATA_IDX_PKL_PATH)
        parser.add_argument('-start',
                            '--SEMI_START',
                            type=int,
                            default=self.SEMI_START)
        parser.add_argument('-mask_T',
                            '--MASK_T',
                            type=float,
                            default=self.MASK_T)
        parser.add_argument('-part',
                            '--PARTIAL_DATA',
                            type=float,
                            default=self.PARTIAL_DATA)
        # NOTE(review): argparse ``type=bool`` converts any non-empty string
        # to True ('-grab False' still yields True) — confirm this is the
        # intended behavior or switch to action='store_true'.
        parser.add_argument('-grab',
                            '--USE_GRAB',
                            type=bool,
                            default=self.USE_GRAB)
        parser.add_argument('-l_semi_adv',
                            '--LAMBDA_SEMI_ADV',
                            type=float,
                            default=self.LAMBDA_SEMI_ADV)
        parser.add_argument('-l_semi_sal',
                            '--LAMBDA_SEMI_SAL',
                            type=float,
                            default=self.LAMBDA_SEMI_SAL)
        parser.add_argument('-l_pred_adv',
                            '--LAMBDA_PRED_ADV',
                            type=float,
                            default=self.LAMBDA_PRED_ADV)
        parser.add_argument('-l_pred_sal',
                            '--LAMBDA_PRED_SAL',
                            type=float,
                            default=self.LAMBDA_PRED_SAL)
        parser.add_argument('-pretrain', '--PRETRAINED_MODEL_PATH', type=str, \
                            default=self.PRETRAINED_MODEL_PATH)

        parser.add_argument('-gpu',
                            '--USE_GPU',
                            type=str,
                            default=self.USE_GPU)
        parser.add_argument('-proc',
                            '--PROC_NAME',
                            type=str,
                            default=self.PROC_NAME)
        parser.add_argument('-test_model',
                            '--TEST_MODEL_PTH_PATH',
                            type=str,
                            default=self.TEST_MODEL_PTH_PATH)
        parser.add_argument('-save', '--MODEL_SAVE_DIR_NAME', required=True, type=str, \
                            default=self.MODEL_SAVE_DIR_NAME)
        parser.add_argument('-eval_dir',
                            '--EVALUATOR_DIR',
                            type=str,
                            default=self.EVALUATOR_DIR)
        parser.add_argument('-eval_m_list', '--EVAL_MEASURE_LIST', nargs='+', \
                            type=str, choices="max-F mean-F MAE S precision recall".split(" "),default=self.EVAL_MEASURE_LIST)
        parser.add_argument('-eval_file', '--EVALUATOR_SUMMARY_FILE_PATH', \
                            type=str, default=self.EVALUATOR_SUMMARY_FILE_PATH)
        parser.add_argument('-eval_d', '--EVALUATOR_DATASETS', nargs='+', \
                            type=str, choices="ALL DUT-OMRON DUTS PASCAL-S SOD".split(" "), \
                            default=self.EVALUATOR_DATASETS)

        if use_arg_parser:
            args = parser.parse_args()
            self.update(args.__dict__)
        # handle MODEL_SAVE_PATH
        self.MODEL_SAVE_ROOT_DIR = pathjoin(
            self.LOG_DIR,
            self.MODEL_SAVE_DIR_NAME,
        )
        self.MODEL_SAVE_PATH = pathjoin(
            self.MODEL_SAVE_ROOT_DIR,
            'save_models',
        )

        if self.BATCH_SIZE != 1:
            assert self.CROP_SIZE is not None, "CROP_SIZE can't be null, if " \
                                               "BATCH_SIZE != 1, please use -crop to specify the paramter"
        # handle EVAL_MEASURE_LIST
        if self.EVAL_MEASURE_LIST is None:
            self.EVAL_MEASURE_LIST = ["max-F", "MAE", "S"]
        # handle DATASET_ROOT_DIR DATASET_LIST_PATH
        tmp_dict = get_dataset_path_by_name(self.DATASET_NAME)
        self.DATASET_TRAIN_ROOT_DIR = tmp_dict['train_dir_path']
        self.DATASET_TRAIN_LIST_PATH = tmp_dict['train_lst_path']
        self.DATASET_TEST_ROOT_DIR = tmp_dict['test_dir_path']
        self.DATASET_TEST_LIST_PATH = tmp_dict['test_lst_path']
        self.DATASET_TRAIN_DIR = tmp_dict['train_dir_name']
        self.DATASET_TEST_DIR = tmp_dict['test_dir_name']
        self.DATASET_TEST_GT_DIR = pathjoin(self.DATASET_TEST_ROOT_DIR, 'GT')
        if self.TEST_MODEL_PTH_PATH is None:
            self.TEST_MODEL_PTH_PATH = pathjoin(self.MODEL_SAVE_PATH,
                                                'G-final.pth')
        self.TEST_IMG_SAVE_PATH = pathjoin(
            self.MODEL_SAVE_ROOT_DIR, 'test', self.DATASET_TEST_DIR
            if len(self.DATASET_TEST_DIR) != 0 else self.DATASET_NAME)

        # handle EVALUATOR_DIR
        if self.EVALUATOR_DIR is None:
            self.EVALUATOR_DIR = self.TEST_IMG_SAVE_PATH
        if self.EVALUATOR_SUMMARY_FILE_PATH is None:
            self.EVALUATOR_SUMMARY_FILE_PATH = pathjoin(
                self.LOG_DIR, 'ExperimentalNotes.md')
        else:
            self.EVALUATOR_SUMMARY_FILE_PATH = pathjoin(
                self.LOG_DIR, self.EVALUATOR_SUMMARY_FILE_PATH)
        # Expand the evaluator dataset list into parallel path lists; ensure
        # the primary dataset / EVALUATOR_DIR pair is always included.
        if self.EVALUATOR_DATASETS is not None:
            if "ALL" in self.EVALUATOR_DATASETS:
                self.EVALUATOR_DATASETS = "DUT-OMRON DUTS PASCAL-S SOD".split()
            self.EVALUATOR_GTS = []
            self.TEST_IMG_SAVE_PATHS = []
            self.DATASET_TEST_ROOT_DIRS = []
            self.DATASET_TEST_LIST_PATHS = []
            for dataset_name in self.EVALUATOR_DATASETS:
                dataset_info = get_dataset_path_by_name(dataset_name)
                self.EVALUATOR_GTS.append(
                    pathjoin(dataset_info['test_dir_path'], 'GT'))
                dataset_test_dir_name = dataset_info['test_dir_name']
                testpath = pathjoin(
                    self.MODEL_SAVE_ROOT_DIR, 'test', dataset_test_dir_name
                    if len(dataset_test_dir_name) != 0 else dataset_name)
                self.TEST_IMG_SAVE_PATHS.append(testpath)
                self.DATASET_TEST_ROOT_DIRS.append(
                    dataset_info['test_dir_path'])
                self.DATASET_TEST_LIST_PATHS.append(
                    dataset_info['test_lst_path'])
            self.EVALUATOR_DIRS = self.TEST_IMG_SAVE_PATHS
            if self.EVALUATOR_DIR not in self.EVALUATOR_DIRS:
                self.EVALUATOR_DIRS.append(self.EVALUATOR_DIR)
                self.EVALUATOR_GTS.append(self.DATASET_TEST_GT_DIR)
                self.DATASET_TEST_ROOT_DIRS.append(self.DATASET_TEST_ROOT_DIR)
                self.DATASET_TEST_LIST_PATHS.append(
                    self.DATASET_TEST_LIST_PATH)
                self.EVALUATOR_DATASETS.append(self.DATASET_NAME)

        else:
            self.EVALUATOR_DIRS = [self.TEST_IMG_SAVE_PATH]
            self.EVALUATOR_GTS = [self.DATASET_TEST_GT_DIR]
            self.TEST_IMG_SAVE_PATHS = [self.TEST_IMG_SAVE_PATH]
            self.DATASET_TEST_ROOT_DIRS = [self.DATASET_TEST_ROOT_DIR]
            self.DATASET_TEST_LIST_PATHS = [self.DATASET_TEST_LIST_PATH]
            self.EVALUATOR_DATASETS = [self.DATASET_NAME]

        # NOTE(review): local import — sys may not be imported at module top.
        import sys
        # Command parameter acquisition
        self.CMD_STR = "python " + " ".join(sys.argv)
示例#9
0
 def __init__(self, catch_time_interval: int) -> None:
     """Forward the catch interval to the base helper and open a TensorBoard writer.

     Event files are written under ``<MODEL_SAVE_ROOT_DIR>/running``.
     """
     super(MyVisualHelper, self).__init__(catch_time_interval)
     # Imported lazily so tensorboard is only required when visuals are on.
     from torch.utils.tensorboard import SummaryWriter
     run_dir = pathjoin(config.MODEL_SAVE_ROOT_DIR, 'running')
     self.writer = SummaryWriter(run_dir)
示例#10
0
def evaluate(config: Configuration) -> None:
    """Compute saliency measures and record them to a CSV row and markdown log.

    Runs the configured measures over ``config.EVALUATOR_DIR`` against the
    test ground truth, appends one row of hyper-parameters plus measures to
    ``<summary base name>.csv`` next to the summary file, and appends a
    markdown section to ``config.EVALUATOR_SUMMARY_FILE_PATH`` under a file
    lock.  No-op when evaluation is disabled.
    """
    if config.DISABLE_EVAL:
        return
    # USE_GPU is a string flag (or None); any non-None value selects CUDA.
    if config.USE_GPU is not None:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    sal_measure = get_measure(config.EVAL_MEASURE_LIST, config.EVALUATOR_DIR,
                              config.DATASET_TEST_GT_DIR, device)
    assert 'MAE' in sal_measure

    measure = OrderedDict()
    measure['setting'] = config.MODEL_SAVE_DIR_NAME
    measure['dataset'] = config.DATASET_NAME
    measure.update(sal_measure)
    # pandas wants equal-length column lists: wrap scalars in a list, and
    # stringify sequence-valued measures so each renders as a single cell.
    for key, item in measure.items():
        if not isinstance(item, Sequence):
            measure[key] = [item]
        else:
            measure[key] = [str(item)]
    table_content = pandas2markdown(pd.DataFrame(measure))
    record_file_dir = os.path.dirname(config.EVALUATOR_SUMMARY_FILE_PATH)
    mkdirs(record_file_dir)

    # Export CSV report (same base name as the summary file).
    csv_filename = os.path.splitext(
        os.path.basename(config.EVALUATOR_SUMMARY_FILE_PATH))[0] + '.csv'
    csv_filepath = pathjoin(record_file_dir, csv_filename)

    data_dict = OrderedDict(**{
        'setting': str(config.MODEL_SAVE_DIR_NAME),
        'dataset': str(config.DATASET_NAME),
        'lr': str(config.LEARNING_RATE),
        'epoch': str(config.EPOCH),
        'step_size': str(config.STEP_INTERVAL),
        'optim': str(config.OPTIM),
        'batch_size': str(config.BATCH_SIZE),
        'crop_size': str(config.CROP_SIZE),
        'weight_decay': str(config.WEIGHT_DECAY),
        'drop_rate': str(config.DROP_RATE),
    })
    data_dict.update(sal_measure)
    record_dataframe = pd.DataFrame(data_dict, index=[0])
    if not os.path.exists(csv_filepath):
        record_dataframe.to_csv(csv_filepath, index=False)
    else:
        # BUG FIX: the append branch previously wrote to ``xlsx_filepath``,
        # a name that does not exist in this function, so any run after the
        # first crashed with NameError.  Write back to ``csv_filepath``.
        pd.concat([pd.read_csv(csv_filepath), record_dataframe],
                  sort=False).to_csv(csv_filepath, index=False)

    title = config.MODEL_SAVE_DIR_NAME
    file_content = (f"\n"
                    f"# setting {title}  \n"
                    f"time:{time.strftime('%Y-%m-%d %X')}  \n"
                    f"dataset:{config.DATASET_NAME}  \n"
                    f"test dir:{config.EVALUATOR_DIR}  \n"
                    f"command string:\n"
                    f"```bash\n"
                    f"{config.CMD_STR}\n"
                    f"```\n"
                    f"\n"
                    f"## result\n"
                    f"{table_content}\n")
    # The file lock serializes appends from concurrently running experiments.
    with portalocker.Lock(config.EVALUATOR_SUMMARY_FILE_PATH, 'a+',
                          encoding='utf-8', timeout=600) as f:
        f.write(file_content)