def add_job(
        job_name,
        sub_index,
        backbone_arch,
        init_model_nickname,
        init_model_path,
        extra_params=None,
    ):
        job_name = f"{config_job_name}.{sub_index}.{job_name}_seed{config['random_seed']}"

        d = OrderedDict()

        d["--config-file"] = config_file
        d["model.backbone_arch"] = backbone_arch
        d["init.model"] = init_model_path

        log_folder = job_name + "_" + backbone_arch + "_init_" + init_model_nickname
        log_folder = os.path.join(log_path, log_folder)

        d["output.path"] = log_folder

        if extra_params:
            d.update(extra_params)

        commands = [main_command + " " + launcher.parameters_to_str(d)]

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
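Every add_job variant in this listing is a closure over module-level state that the excerpt does not show. A minimal sketch of the assumed surrounding context, just enough to make the snippets self-contained (all names and values here are inferred from usage and should be treated as assumptions, including the parameters_to_str behavior):

import os
from collections import OrderedDict
from types import SimpleNamespace

def _parameters_to_str(d):
    # plausible rendering of an OrderedDict as CLI tokens;
    # an empty value yields a bare token (used for flags and positionals)
    return " ".join(str(k) if v == "" else f"{k} {v}" for k, v in d.items())

launcher = SimpleNamespace(parameters_to_str=_parameters_to_str)

# accumulators that every add_job variant appends to
exp_job_names, exp_commands, exp_log_paths, exp_log_file_prefix = [], [], [], []

# experiment-wide settings (hypothetical values)
config = {"random_seed": 0}
config_job_name = "exp1"
config_file = "config/exp1.yml"
main_command = "python main.py"
log_path = "output/exp1"
data_path = "data"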
Example 2
    def add_job(sub_index,
                backbone_arch,
                model_path,
                model_checkpoint,
                episodes,
                eval_scale,
                test_augmentation,
                folder_suffix="",
                extra_params=None):
        job_name = f"{config_job_name}.{sub_index}"
        commands = []

        d = OrderedDict()
        d["--config-file"] = config_file

        d["model.use_inverse_geom_model"] = True
        d["model.use_simplified_affine_model"] = False
        d["model.backbone_arch"] = backbone_arch

        d["eval.dataset_scales"] = f"[{eval_scale}]"

        if test_augmentation:
            d["eval.class_image_augmentation"] = test_augmentation

        if extra_params:
            d.update(extra_params)

        # set output folder
        log_folder = f"{config_job_name}"
        if folder_suffix:
            log_folder += "." + folder_suffix
        log_folder = os.path.join(log_path, log_folder)

        d["train.do_training"] = False

        # choose init
        if "init.transform" in d:
            del d["init.transform"]
        if os.path.isfile(model_path):
            d["init.model"] = model_path
        else:
            d["init.model"] = os.path.join(model_path, model_checkpoint)

        for episode in episodes:
            d["eval.dataset_names"] = f"[\\\"imagenet-repmet-test-episode-{episode}\\\"]"

            commands.append(main_command + " " + launcher.parameters_to_str(d))

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
        exp_log_file_prefix.append(
            f"eval_scale{d['eval.dataset_scales'][1:-1]}_epi{min(episodes)}-{max(episodes)}_"
        )
    def add_job(
        job_name,
        sub_index,
        arch,
        init_net,
        dataset_train,
        dataset_val,
        extra_params=None,
    ):
        job_name = f"{config_job_name}.{sub_index}.{job_name}"

        log_folder = job_name
        log_folder = os.path.join(log_path, log_folder)

        commands = []

        # stage 1
        d = OrderedDict()
        d["--cuda"] = ""
        d["--dataset"] = dataset_train
        d["--dataset_val"] = dataset_val
        d["--init_weights"] = init_net
        d["--disp_interval"] = "1"
        d["--val_interval"] = "1"
        d["--nw"] = "4"
        d["--bs"] = "4"
        d["--s"] = 1
        d["--epochs"] = "100"  # set 20x less epochs for instre comapared to grozi as ezch epoch is 20x bigger
        d["--lr_decay_milestones"] = "50 75"
        d["--lr"] = 0.01  # default starting learning rate
        d["--lr_decay_gamma"] = 0.1
        d["--lr_reload_best_after_decay"] = "True"
        d["--save_dir"] = log_folder
        d["--net"] = arch
        d["--class_image_augmentation"] = "rotation90"
        d["--set"] = ""
        d["DATA_DIR"] = data_path
        # need to set something here, but these are not used in CoAE
        d["TRAIN.MAX_SIZE"] = "3000"
        d["TEST.MAX_SIZE"] = "3000"

        if extra_params:
            d.update(extra_params)

        commands += [main_command + " " + launcher.parameters_to_str(d)]

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
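A hedged usage sketch for the CoAE training variant above (argument values are hypothetical): one call appends one job holding a single training command.

add_job(
    job_name="coae-train",
    sub_index=0,
    arch="res50",
    init_net="models/res50_init.pth",  # hypothetical checkpoint path
    dataset_train="instre-s1-train",
    dataset_val="instre-s1-val",
    extra_params={"--lr": 0.001},      # overrides the default 0.01 set above
)
print(exp_commands[-1][0])  # main_command followed by the flattened parameter string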
    def add_job(
        arch,
        eval_dataset,
        model_path,
        model_checkpoint,
        folder_suffix="",
        test_augment=None,
        extra_params=None,
    ):
        job_name = f"{config_job_name}.{eval_dataset}.{arch}"

        # set output folder
        log_folder = f"{config_job_name}"
        if folder_suffix:
            log_folder += "." + folder_suffix
        log_folder = os.path.join(log_path, log_folder)

        commands = []

        # stage 1
        d = OrderedDict()
        d["--cuda"] = ""
        if os.path.isfile(model_path):
            # note: os.path.join returns model_path unchanged when it is absolute
            d["--weights"] = os.path.join(log_folder, model_path)
        else:
            d["--weights"] = os.path.join(model_path, model_checkpoint)
        d["--dataset"] = eval_dataset
        d["--net"] = arch
        if test_augment is not None:
            d["--class_image_augmentation"] = test_augment
        d["--set"] = ""
        d["TRAIN.USE_FLIPPED"] = "False"
        d["DATA_DIR"] = data_path
        # need to set something here, but these are not used in CoAE
        d["TRAIN.MAX_SIZE"] = "5000"
        d["TEST.MAX_SIZE"] = "5000"

        if extra_params:
            d.update(extra_params)

        commands += [main_command + " " + launcher.parameters_to_str(d)]

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
        exp_log_file_prefix.append(f"eval_{eval_dataset}_{arch}_")
    def add_job(
        job_type,  # "v1" or "v2"
        sub_index,
        backbone_arch,
        init_model_nickname,
        init_model_path,
        extra_params=None,
    ):
        job_name = f"{config_job_name}.{sub_index}.{job_type}_seed{config['random_seed']}"

        d = OrderedDict()

        d["--config-file"] = config_file

        if job_type == "v1":
            d.update(config_dict_v1)
        elif job_type == "v2":
            d.update(config_dict_v2)
        else:
            raise RuntimeError("Unknown job_type {0}".format(job_type))

        d["model.backbone_arch"] = backbone_arch
        d["init.model"] = init_model_path

        log_folder = job_name + "_" + backbone_arch + "_init_" + init_model_nickname
        log_folder = os.path.join(log_path, log_folder)

        d["output.path"] = log_folder

        if extra_params:
            d.update(extra_params)

        commands = [main_command + " " + launcher.parameters_to_str(d)]

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
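The variants below select between two module-level dictionaries by job_type. Their contents are not part of this listing; a hypothetical shape, with keys inferred from Example 2 (which sets the two model.* transformation flags explicitly), so treat both dicts as an assumption:

config_dict_v1 = OrderedDict([
    ("model.use_inverse_geom_model", False),
    ("model.use_simplified_affine_model", True),
])
config_dict_v2 = OrderedDict([
    ("model.use_inverse_geom_model", True),
    ("model.use_simplified_affine_model", False),
])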
Example 6
    def add_job(
            job_type,  # "v1" or "v2"
            job_id,
            backbone_arch,
            init_model_nickname,
            init_model_path,
            extra_params=None,
            train_data="",
            train_data_scale=None):
        job_name = "{0}.{1}.{2}_{3}_seed{4}".format(config_job_name, job_id,
                                                    job_type, train_data,
                                                    config["random_seed"])

        d = OrderedDict()
        d["--config-file"] = config_file

        if job_type == "v1":
            d.update(config_dict_v1)
        elif job_type == "v2":
            d.update(config_dict_v2)
        else:
            raise RuntimeError("Unknown job_type {0}".format(job_type))

        d["train.dataset_name"] = "\"" + train_data + "\""
        if train_data == "instre-s1-train":
            d["train.dataset_scale"] = 700.0
            main_val_dataset = "instre-s1-val"
            d["eval.dataset_scales"] = "[700.0]"
        elif train_data == "instre-s2-train":
            d["train.dataset_scale"] = 600.0
            main_val_dataset = "instre-s2-val"
            d["eval.dataset_scales"] = "[600.0]"
        else:
            raise RuntimeError(f"Unknown dataset {train_data}")

        d["output.best_model.dataset"] = main_val_dataset
        d["eval.dataset_names"] = f"[\\\"{main_val_dataset}\\\"]"

        d["eval.class_image_augmentation"] = "rotation90"
        d["eval.iter"] = 5000

        # extra augmentation for this run
        d["train.augment.mine_extra_class_images"] = True

        d["model.backbone_arch"] = backbone_arch
        d["init.model"] = init_model_path

        log_folder = job_name + "_" + backbone_arch + "_init_" + init_model_nickname
        log_folder = os.path.join(log_path, log_folder)

        d["output.path"] = log_folder

        if extra_params:
            d.update(extra_params)

        commands = []
        commands.append(main_command + " " + launcher.parameters_to_str(d))

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
    def add_job(
            sub_index,
            job_type,  # "v1" or "v2"
            backbone_arch,
            eval_dataset,
            model_path,
            model_checkpoint,
            folder_suffix="",
            extra_params=None):
        job_name = f"{config_job_name}.{sub_index}.{eval_dataset}"

        d = OrderedDict()

        d["--config-file"] = config_file

        if job_type == "v1":
            d.update(config_dict_v1)
        elif job_type == "v2":
            d.update(config_dict_v2)
        else:
            raise RuntimeError("Unknown job_type {0}".format(job_type))

        d["model.backbone_arch"] = backbone_arch

        if extra_params:
            d.update(extra_params)

        # set output folder
        log_folder = f"{config_job_name}.{sub_index}"
        if folder_suffix:
            log_folder += "." + folder_suffix
        log_folder = os.path.join(log_path, log_folder)

        # evaluation params
        d["train.do_training"] = False
        d["eval.mAP_iou_thresholds"] = "\"[0.5]\""
        d["eval.train_subset_for_eval_size"] = 0

        # choose init
        if "init.transform" in d:
            del d["init.transform"]
        if os.path.isfile(model_path):
            d["init.model"] = model_path
        else:
            d["init.model"] = os.path.join(model_path, model_checkpoint)

        # choose eval dataset
        if eval_dataset == "grozi-val-new-cl":
            d["eval.dataset_names"] = "\"[\\\"grozi-val-new-cl\\\"]\""
            d["eval.dataset_scales"] = "[1280.0]"
        elif eval_dataset == "grozi-val-old-cl":
            d["eval.dataset_names"] = "\"[\\\"grozi-val-old-cl\\\"]\""
            d["eval.dataset_scales"] = "[1280.0]"
        elif eval_dataset == "dairy":
            d["eval.dataset_names"] = "\"[\\\"dairy\\\"]\""
            d["eval.dataset_scales"] = "[3500.0]"
        elif eval_dataset == "paste-v":
            d["eval.dataset_names"] = "\"[\\\"paste-v\\\"]\""
            d["eval.dataset_scales"] = "[3500.0]"
        elif eval_dataset == "paste-f":
            d["eval.dataset_names"] = "\"[\\\"paste-f\\\"]\""
            d["eval.dataset_scales"] = "[2000.0]"
            # eval with rotations
            d["eval.class_image_augmentation"] = "rotation90"
        else:
            raise f"Unknown eval set {eval_dataset}"

        commands = [main_command + " " + launcher.parameters_to_str(d)]

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
        exp_log_file_prefix.append(
            f"eval_{eval_dataset}_scale{d['eval.dataset_scales'][1:-1]}_")
Example 8
    def add_job(
            training_dataset,
            arch,  # "resnet50" or "resnet101"
            pool="gem",  # "mac", "spoc", "gem"
            test_whiten=False,  # True or False
            local_whitening=False,  # True or False
            regional=False,  # True or False
            whitening=False,  # True or False
            loss_margin=0.85,  # contrastive loss margin, e.g. 0.85
            image_size=240,  # 240, 360, 500, 750, 1024
            learning_rate=1e-6,  # 1e-6, 5e-7, 5e-6
            pretrained=True,
            loss="contrastive",
            optimizer="adam",
            weight_decay=1e-4,
            neg_num=5,
            query_size=2000,
            pool_size=20000,
            batch_size=5,
            eval_dataset="",
            use_best_model=True,
            retrieval_multiscale="ss",
            class_image_augmentation="",
            folder_suffix=""):

        directory = "{}".format(training_dataset)
        directory += "_{}".format(arch)
        directory += "_{}".format(pool)
        if local_whitening:
            directory += "_lwhiten"
        if regional:
            directory += "_r"
        if whitening:
            directory += "_whiten"
        if not pretrained:
            directory += "_notpretrained"
        directory += "_{}_m{:.2f}".format(loss, loss_margin)
        directory += "_{}_lr{:.1e}_wd{:.1e}".format(optimizer, learning_rate,
                                                    weight_decay)
        directory += "_nnum{}_qsize{}_psize{}".format(neg_num, query_size,
                                                      pool_size)
        directory += "_bsize{}_imsize{}".format(batch_size, image_size)

        job_name = f"{config_job_name}.{eval_dataset}"
        # set output folder
        log_path = config_job_name
        if folder_suffix:
            log_path += "." + folder_suffix
        log_path = os.path.join(log_folder_path, log_path)

        commands = []

        # add testing on top of the detector
        d_testing = OrderedDict()
        if retrieval_multiscale == "ms":
            d_testing["--retrieval_multiscale"] = ""

        if arch == "resnet50":
            d_testing["--maskrcnn_config_file"] = os.path.join(
                detector_path, "config",
                "e2e_faster_rcnn_R_50_FPN_1x_multiscale_noClasses.yaml")
        elif arch == "resnet101":
            d_testing["--maskrcnn_config_file"] = os.path.join(
                detector_path, "config",
                "e2e_faster_rcnn_R_101_FPN_1x_multiscale_noClasses.yaml")
        else:
            raise RuntimeError(f"Unknown arch {arch}")

        if training_dataset.startswith("instre-s1"):
            if arch == "resnet50":
                d_testing["--maskrcnn_weight_file"] = os.path.join(
                    detector_path, "output",
                    "exp0001-R-50-noCl-instre-s1/model_best.pth")
            elif arch == "resnet101":
                d_testing["--maskrcnn_weight_file"] = os.path.join(
                    detector_path, "output",
                    "exp0004-R-101-noCl-instre-s1/model_best.pth")
            else:
                raise RuntimeError(f"Unknown arch {arch}")
        elif training_dataset.startswith("instre-s2"):
            if arch == "resnet50":
                d_testing["--maskrcnn_weight_file"] = os.path.join(
                    detector_path, "output",
                    "exp0002-R-50-noCl-instre-s2/model_best.pth")
            elif arch == "resnet101":
                d_testing["--maskrcnn_weight_file"] = os.path.join(
                    detector_path, "output",
                    "exp0005-R-101-noCl-instre-s2/model_best.pth")
            else:
                raise RuntimeError(f"Unknown arch {arch}")
        else:
            raise f"Unknown training set {training_dataset}"

        if use_best_model:
            d_testing["--retrieval_network_path"] = os.path.join(
                retrieval_path, "output", "instre", directory,
                "model_best.pth.tar")
        else:
            d_testing["--retrieval_network_path"] = os.path.join(
                retrieval_path, "output", "instre", directory,
                "model_epoch0.pth.tar")

        d_testing["--retrieval_image_size"] = image_size
        d_testing["is_cuda"] = "True"
        if "instre-s1" in eval_dataset:
            d_testing["eval.dataset_names"] = f"\"[\\\"{eval_dataset}\\\"]\""
            d_testing["eval.dataset_scales"] = "\"[700]\""
        elif "instre-s2" in eval_dataset:
            d_testing["eval.dataset_names"] = f"\"[\\\"{eval_dataset}\\\"]\""
            d_testing["eval.dataset_scales"] = "\"[600]\""
        else:
            raise RuntimeError(f"Unknown eval dataset {eval_dataset}")

        d_testing["eval.mAP_iou_thresholds"] = "\"[0.5]\""
        if class_image_augmentation:
            d_testing["eval.class_image_augmentation"] = class_image_augmentation

        commands.append(command_testing + " " +
                        launcher.parameters_to_str(d_testing))

        exp_job_names.append(job_name)
        exp_log_paths.append(log_path)
        exp_commands.append(commands)
        exp_log_file_prefix.append(
            f"eval_{eval_dataset}_{'bestModel' if use_best_model else 'initModel'}_{retrieval_multiscale}_"
        )
    def add_job(sub_index,
                job_type, # "v1" or "v2"
                backbone_arch,
                eval_dataset,
                model_path,
                model_checkpoint,
                folder_suffix="",
                extra_params=None):
        job_name = f"{config_job_name}.{sub_index}.{eval_dataset}"
        commands = []

        d = OrderedDict()
        d["--config-file"] = config_file

        if job_type == "v1":
            d.update(config_dict_v1)
        elif job_type == "v2":
            d.update(config_dict_v2)
        else:
            raise RuntimeError("Unknown job_type {0}".format(job_type))

        train_data = eval_dataset + "-train"
        d["train.dataset_name"] = "\"" + train_data + "\""
        if train_data == "instre-s1-train":
            d["train.dataset_scale"] = 700.0
            main_val_dataset = "instre-s1-val"
            d["eval.dataset_scales"] = "[700.0]"
        elif train_data == "instre-s2-train":
            d["train.dataset_scale"] = 600.0
            main_val_dataset = "instre-s2-val"
            d["eval.dataset_scales"] = "[600.0]"
        else:
            raise RuntimeError(f"Unknown dataset {train_data}")

        d["output.best_model.dataset"] = main_val_dataset
        d["eval.dataset_names"] = f"[\\\"{main_val_dataset}\\\"]"

        d["eval.class_image_augmentation"] = "rotation90"
        d["eval.iter"] = 5000

        # extra augmentation for this run
        d["train.augment.mine_extra_class_images"] = True

        d["model.backbone_arch"] = backbone_arch

        if extra_params:
            d.update(extra_params)

        # set output folder
        log_folder = f"{config_job_name}"
        if folder_suffix:
            log_folder += "." + folder_suffix
        log_folder = os.path.join(log_path, log_folder)

        d["train.do_training"] = False
        if train_data == "instre-s1-train":
            d["eval.dataset_names"] = "[\\\"instre-s1-test\\\"]"
            d["eval.dataset_scales"] = "[700.0]"
        elif train_data == "instre-s2-train":
            d["eval.dataset_names"] = "[\\\"instre-s2-test\\\"]"
            d["eval.dataset_scales"] = "[600.0]"
        else:
            raise RuntimeError(f"Unknown dataset {train_data}")

        d["eval.class_image_augmentation"] = "rotation90"

        # choose init
        if "init.transform" in d:
            del d["init.transform"]
        if os.path.isfile(model_path):
            d["init.model"] = model_path
        else:
            d["init.model"] = os.path.join(model_path, model_checkpoint)

        commands.append(main_command + " " + launcher.parameters_to_str(d))

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
        exp_log_file_prefix.append(f"eval_{eval_dataset}_scale{d['eval.dataset_scales'][1:-1]}_")
    def add_job(sub_index,
                training_dataset,
                arch,
                pool="gem",
                test_whiten=False,
                local_whitening=False,
                regional=False,
                whitening=False,
                loss_margin=0.85,
                image_size=240,
                learning_rate=1e-6,
                pretrained=True,
                loss="contrastive",
                optimizer="adam",
                weight_decay=1e-4,
                neg_num=5,
                query_size=2000,
                pool_size=20000,
                batch_size=5):

        directory = "{}".format(training_dataset)
        directory += "_{}".format(arch)
        directory += "_{}".format(pool)
        if local_whitening:
            directory += "_lwhiten"
        if regional:
            directory += "_r"
        if whitening:
            directory += "_whiten"
        if not pretrained:
            directory += "_notpretrained"
        directory += "_{}_m{:.2f}".format(loss, loss_margin)
        directory += "_{}_lr{:.1e}_wd{:.1e}".format(optimizer, learning_rate,
                                                    weight_decay)
        directory += "_nnum{}_qsize{}_psize{}".format(neg_num, query_size,
                                                      pool_size)
        directory += "_bsize{}_imsize{}".format(batch_size, image_size)

        log_path = os.path.join(log_folder_path, directory)

        job_name = "ret-grozi-{0}.{1}".format(sub_index, directory)

        d = OrderedDict()
        d["--training-dataset"] = training_dataset
        if training_dataset == "grozi-train-retrieval":
            d["--test-datasets"] = "grozi-val-new-cl-retrieval,grozi-val-old-cl-retrieval"
        elif training_dataset == "grozi-train-retrieval-rndCropPerImage10":
            d["--test-datasets"] = "grozi-val-new-cl-retrieval-rndCropPerImage10,grozi-val-new-cl-retrieval-rndCropPerImage10"
        else:
            raise RuntimeError(f"Unknown training set {training_dataset}")

        if test_whiten:
            d["--test-whiten"] = training_dataset

        d["--arch"] = arch
        d["--pool"] = pool
        if local_whitening:
            d["--local-whitening"] = ""
        if regional:
            d["--regional"] = ""
        if whitening:
            d["--whitening"] = ""
        d["--loss-margin"] = loss_margin
        d["--image-size"] = image_size
        d["--learning-rate"] = learning_rate
        if not pretrained:
            d["--not-pretrained"] = ""
        d["--loss"] = loss
        d["--optimizer"] = optimizer
        d["--weight-decay"] = weight_decay
        d["--neg-num"] = neg_num
        d["--query-size"] = query_size
        d["--pool-size"] = pool_size
        d["--batch-size"] = batch_size

        commands = []
        commands.append(main_command + " " + launcher.parameters_to_str(d))

        exp_job_names.append(job_name)
        exp_log_paths.append(log_path)
        exp_commands.append(commands)
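A hedged usage sketch for this retrieval-training variant (values hypothetical); the defaults cover everything except the index, dataset, and architecture:

add_job(
    sub_index=0,
    training_dataset="grozi-train-retrieval",
    arch="resnet50",
)
# The job name encodes every hyperparameter via the directory string, e.g.
# ret-grozi-0.grozi-train-retrieval_resnet50_gem_contrastive_m0.85_adam_lr1.0e-06_wd1.0e-04_nnum5_qsize2000_psize20000_bsize5_imsize240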
Example 11
    def add_job(
        job_id,
        model,
        dataset,
        init_weights,
    ):
        job_name = f"exp-{model}-{dataset}-{init_weights}"
        commands = []

        d = OrderedDict()
        if model == "R-50":
            if "pytorch" not in init_weights:
                config_file = os.path.join(
                    config_path,
                    "e2e_faster_rcnn_R_50_FPN_1x_multiscale_noClasses.yaml")
            else:
                config_file = os.path.join(
                    config_path,
                    "e2e_faster_rcnn_R_50_FPN_1x_multiscale_noClasses_fromPytorch.yaml"
                )
        elif model == "R-101":
            if "pytorch" not in init_weights:
                config_file = os.path.join(
                    config_path,
                    "e2e_faster_rcnn_R_101_FPN_1x_multiscale_noClasses.yaml")
            else:
                config_file = os.path.join(
                    config_path,
                    "e2e_faster_rcnn_R_101_FPN_1x_multiscale_noClasses_fromPytorch.yaml"
                )
        else:
            raise RuntimeError(f"Do not know config for model {model}")

        d["--validation_period"] = 5000
        d["--config-file"] = config_file

        if model == "R-50":
            if init_weights == "imagenet-repmet-pytorch":
                d["MODEL.WEIGHT"] = "../../../data/ImageNet-RepMet/pretrain/output/resnet50/model_best_maskrcnnbenchmark.pth.tar"
            elif init_weights == "imagenet-pytorch":
                d["MODEL.WEIGHT"] = "../../../models/resnet50-19c8e357.pth"
            elif init_weights == "imagenet-caffe":
                pass
            else:
                raise RuntimeError(
                    f"Do not recognize weight initialization {init_weights}")
        elif model == "R-101":
            if init_weights == "imagenet-repmet-pytorch":
                d["MODEL.WEIGHT"] = "../../../data/ImageNet-RepMet/pretrain/output/resnet101/model_best_maskrcnnbenchmark.pth.tar"
            elif init_weights == "imagenet-pytorch":
                d["MODEL.WEIGHT"] = "../../../models/resnet101-5d3b4d8f.pth"
            elif init_weights == "imagenet-caffe":
                pass
            else:
                raise RuntimeError(
                    f"Do not recognize weight initialization {init_weights}")
        else:
            raise RuntimeError(f"Do not know config for model {model}")

        if dataset == "imagenet-repmet":
            d["DATASETS.TRAIN"] = "[\\\"imagenet-repmet-train\\\"]"
            d["DATASETS.TEST"] = "[\\\"imagenet-repmet-val-5000\\\"]"  # crop val set from 50k images to 5k GT boxes
            d["INPUT.MIN_SIZE_TRAIN"] = "[225,280,360,450,540,630,720]"
            d["INPUT.MAX_SIZE_TRAIN"] = 2048
            d["INPUT.MIN_SIZE_TEST"] = 450
            d["INPUT.MAX_SIZE_TEST"] = 1280
        else:
            raise RuntimeError(f"Unknown dataset {dataset}")

        log_folder = os.path.join(log_path, job_name)

        d["OUTPUT_DIR"] = log_folder

        commands.append(main_command + " " + launcher.parameters_to_str(d))

        # testing
        d_testing = OrderedDict()
        d_testing["--test_weights"] = os.path.join(log_folder,
                                                   "model_best.pth")
        d_testing.update(d)

        datasets_test = ["[\\\"imagenet-repmet-val-5000\\\"]"]
        scales_test = ["[180,225,288,360,432,504,576]"]

        for dataset, scales in zip(datasets_test, scales_test):
            d_testing_local = copy.deepcopy(d_testing)
            d_testing_local["DATASETS.TEST"] = dataset
            d_testing_local["TEST.BBOX_AUG.ENABLED"] = True
            d_testing_local["TEST.BBOX_AUG.SCALES"] = scales

            commands.append(main_command + " " +
                            launcher.parameters_to_str(d_testing_local))

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
Example 12
    def add_job(
        sub_index,
        training_dataset,
        arch,
        init_weights="",
        pool="gem",
        test_whiten=False,
        local_whitening=False,
        regional=False,
        whitening=False,
        loss_margin=0.85,
        image_size=240,
        learning_rate=1e-6,
        pretrained=True,
        loss="contrastive",
        optimizer="adam",
        weight_decay=1e-4,
        neg_num=5,
        query_size=2000,
        pool_size=20000,
        batch_size=5,
    ):

        directory = "{}".format(training_dataset)
        directory += "_{}".format(arch)
        directory += "_{}".format(pool)
        if local_whitening:
            directory += "_lwhiten"
        if regional:
            directory += "_r"
        if whitening:
            directory += "_whiten"
        if not pretrained:
            directory += "_notpretrained"
        directory += "_{}_m{:.2f}".format(loss, loss_margin)
        directory += "_{}_lr{:.1e}_wd{:.1e}".format(optimizer, learning_rate,
                                                    weight_decay)
        directory += "_nnum{}_qsize{}_psize{}".format(neg_num, query_size,
                                                      pool_size)
        directory += "_bsize{}_imsize{}".format(batch_size, image_size)

        log_path = os.path.join(log_folder_path, init_weights, directory)

        job_name = "ret-imagenet-{0}.{1}.{2}".format(sub_index, init_weights,
                                                     directory)

        d = OrderedDict()
        d[os.path.join(log_folder_path, init_weights)] = ""
        d["--training-dataset"] = training_dataset
        if training_dataset == "instre-s1-train-retrieval":
            d["--test-datasets"] = "instre-s1-val-retrieval"
        elif training_dataset == "instre-s2-train-retrieval":
            d["--test-datasets"] = "instre-s2-val-retrieval"
        elif training_dataset == "instre-s1-train-retrieval-rndCropPerImage10":
            d["--test-datasets"] = "instre-s1-val-retrieval-rndCropPerImage10"
        elif training_dataset == "instre-s2-train-retrieval-rndCropPerImage10":
            d["--test-datasets"] = "instre-s2-val-retrieval-rndCropPerImage10"
        elif training_dataset == "imagenet-repmet-train-retrieval":
            d["--test-datasets"] = "imagenet-repmet-val-5000-retrieval"
        elif training_dataset == "imagenet-repmet-train-retrieval-rndCropPerImage10":
            d["--test-datasets"] = "imagenet-repmet-val-5000-retrieval-rndCropPerImage10"
        else:
            raise RuntimeError(f"Unknown training set {training_dataset}")

        if test_whiten:
            d["--test-whiten"] = training_dataset

        if arch == "resnet50":
            if init_weights == "imagenet-repmet-pytorch":
                d["--network-path"] = "../../../data/ImageNet-RepMet/pretrain/output/resnet50/model_best_cirtorch.pth.tar"
            elif init_weights == "imagenet-pytorch":
                d["--network-path"] = "../../../models/resnet50-19c8e357_cirtorch.pth"
            elif init_weights == "imagenet-caffe":
                # use the built-in caffe weights
                pass
            else:
                raise RuntimeError(
                    f"Do not recognize weight initialization {init_weights}")
        elif arch == "resnet101":
            if init_weights == "imagenet-repmet-pytorch":
                d["--network-path"] = "../../../data/ImageNet-RepMet/pretrain/output/resnet101/model_best_cirtorch.pth.tar"
            elif init_weights == "imagenet-pytorch":
                d["--network-path"] = "../../../models/resnet101-5d3b4d8f_cirtorch.pth"
            elif init_weights == "imagenet-caffe":
                # use the built-in caffe weights
                pass
            else:
                raise RuntimeError(
                    f"Do not recognize weight initialization {init_weights}")
        else:
            raise RuntimeError(f"Unknown arch: {arch}")

        d["--arch"] = arch
        d["--pool"] = pool
        if local_whitening:
            d["--local-whitening"] = ""
        if regional:
            d["--regional"] = ""
        if whitening:
            d["--whitening"] = ""
        d["--loss-margin"] = loss_margin
        d["--image-size"] = image_size
        d["--learning-rate"] = learning_rate
        if not pretrained:
            d["--not-pretrained"] = ""
        d["--loss"] = loss
        d["--optimizer"] = optimizer
        d["--weight-decay"] = weight_decay
        d["--neg-num"] = neg_num
        d["--query-size"] = query_size
        d["--pool-size"] = pool_size
        d["--batch-size"] = batch_size

        commands = []
        commands.append(main_command + " " + launcher.parameters_to_str(d))

        exp_job_names.append(job_name)
        exp_log_paths.append(log_path)
        exp_commands.append(commands)
Example 13
    def add_job(
        job_id,
        model,
        use_classes,
        dataset,
    ):
        job_name = f"exp{job_id:04}-{model}-{'withCl' if use_classes else 'noCl'}-{dataset}"
        commands = []

        d = OrderedDict()
        if model == "R-50" and use_classes:
            config_file = os.path.join(
                config_path, "e2e_faster_rcnn_R_50_FPN_1x_multiscale.yaml")
        elif model == "R-101" and use_classes:
            config_file = os.path.join(
                config_path, "e2e_faster_rcnn_R_101_FPN_1x_multiscale.yaml")
        elif model == "R-50" and not use_classes:
            config_file = os.path.join(
                config_path,
                "e2e_faster_rcnn_R_50_FPN_1x_multiscale_noClasses.yaml")
        elif model == "R-101" and not use_classes:
            config_file = os.path.join(
                config_path,
                "e2e_faster_rcnn_R_101_FPN_1x_multiscale_noClasses.yaml")
        else:
            raise RuntimeError(
                f"Do not know config for model {model} and use_classes {use_classes}"
            )

        d["--config-file"] = config_file

        if dataset == "grozi":
            d["DATASETS.TRAIN"] = "[\\\"grozi-train\\\"]"
            if use_classes:
                d["DATASETS.TEST"] = "[\\\"grozi-val-old-cl\\\"]"
            else:
                d["DATASETS.TEST"] = "[\\\"grozi-val-all\\\"]"
            d["INPUT.MIN_SIZE_TRAIN"] = "[480,600,768,960,1152,1344,1536]"
            d["INPUT.MAX_SIZE_TRAIN"] = 2048
            d["INPUT.MIN_SIZE_TEST"] = 960
            d["INPUT.MAX_SIZE_TEST"] = 1280
        elif dataset == "instre-s1":
            d["DATASETS.TRAIN"] = "[\\\"instre-s1-train\\\"]"
            d["DATASETS.TEST"] = "[\\\"instre-s1-val\\\"]"
            d["INPUT.MIN_SIZE_TRAIN"] = "[210,262,336,420,504,588,672]"
            d["INPUT.MAX_SIZE_TRAIN"] = 2048
            d["INPUT.MIN_SIZE_TEST"] = 420
            d["INPUT.MAX_SIZE_TEST"] = 1280
        elif dataset == "instre-s2":
            d["DATASETS.TRAIN"] = "[\\\"instre-s2-train\\\"]"
            d["DATASETS.TEST"] = "[\\\"instre-s2-val\\\"]"
            d["INPUT.MIN_SIZE_TRAIN"] = "[180,225,288,360,432,504,576]"
            d["INPUT.MAX_SIZE_TRAIN"] = 2048
            d["INPUT.MIN_SIZE_TEST"] = 360
            d["INPUT.MAX_SIZE_TEST"] = 1280
        else:
            raise RuntimeError(f"Unknown dataset {dataset}")

        log_folder = os.path.join(log_path, job_name)

        d["OUTPUT_DIR"] = log_folder

        commands.append(main_command + " " + launcher.parameters_to_str(d))

        # testing
        if not use_classes:
            d_testing = OrderedDict()
            d_testing["--test_weights"] = os.path.join(log_folder,
                                                       "model_best.pth")
            d_testing.update(d)

            datasets_test = [
                "[\\\"grozi-val-all\\\"]",
                "[\\\"instre-s1-val\\\",\\\"instre-s1-test\\\"]",
                "[\\\"instre-s2-val\\\",\\\"instre-s2-test\\\"]"
            ]
            scales_test = [
                "[480,600,768,960,1152,1344,1536]",
                "[210,262,336,420,504,588,672]",
                "[180,225,288,360,432,504,576]"
            ]

            for dataset, scales in zip(datasets_test, scales_test):
                d_testing_local = copy.deepcopy(d_testing)
                d_testing_local["DATASETS.TEST"] = dataset
                d_testing_local["TEST.BBOX_AUG.ENABLED"] = True
                d_testing_local["TEST.BBOX_AUG.SCALES"] = scales

                commands.append(main_command + " " +
                                launcher.parameters_to_str(d_testing_local))

        exp_job_names.append(job_name)
        exp_commands.append(commands)
        exp_log_paths.append(log_folder)
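None of the add_job variants execute anything; they only accumulate job names, command lists, and log paths. A minimal local runner over the collected lists (a sketch; the real launcher presumably submits these to a cluster and redirects output into the log folders, using exp_log_file_prefix where set):

import os
import subprocess

for name, cmds, folder in zip(exp_job_names, exp_commands, exp_log_paths):
    os.makedirs(folder, exist_ok=True)
    print(f"launching {name} ({len(cmds)} command(s))")
    for cmd in cmds:
        subprocess.run(cmd, shell=True, check=True)  # commands within a job run sequentially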