def load_adapter_weights_dict_path(path: str):
    weights_path_dict = io.read_json(path)
    base_path = os.path.split(path)[0]
    # Resolve each entry: keep paths that already exist as-is; otherwise,
    # treat them as relative to the directory containing the JSON file.
    new_weights_path_dict = {
        k: weights_path if os.path.exists(weights_path)
        else os.path.join(base_path, weights_path)
        for k, weights_path in weights_path_dict.items()
    }
    return load_adapter_weights_dict(new_weights_path_dict)
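For context, a minimal usage sketch; the file name and adapter keys below are hypothetical, and load_adapter_weights_dict is assumed to be defined alongside this helper:

# Hypothetical /experiments/run1/adapter_paths.json:
#   {"encoder": "encoder_adapter.p", "pooler": "/abs/path/pooler.p"}
# The relative "encoder_adapter.p" entry is resolved against /experiments/run1/.
adapter_weights = load_adapter_weights_dict_path(
    "/experiments/run1/adapter_paths.json")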
Example #2
def create_task_dict(multitask_config_path, task_name_ls):
    multitask_config_dict = io.read_json(multitask_config_path)
    if task_name_ls:
        task_name_ls = task_name_ls.split(",")
    else:
        # No explicit task list given: default to every task in the config
        task_name_ls = list(multitask_config_dict.keys())
    task_dict = {
        task_name: tasks.create_task_from_config_path(
            config_path=multitask_config_dict[task_name],
            verbose=True,
        )
        for task_name in task_name_ls
    }
    return task_dict
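A usage sketch, assuming a multitask config that maps task names to per-task config paths (all names here are hypothetical):

# multitask_config.json: {"mnli": "configs/mnli.json", "rte": "configs/rte.json"}
task_dict = create_task_dict("multitask_config.json", task_name_ls="mnli,rte")
# An empty task list selects every task in the config:
all_tasks = create_task_dict("multitask_config.json", task_name_ls="")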
Example #3
    def getter(self, image_ids):
        if self.dataset == "ILSVRC" and self.dataset_split == "test":
            dataset_config = io.read_json(self.args.imagenet_val_path)
            # Map each sample's full path to its "val/<filename>" image id
            path_to_image_id_dict = {
                x[0]: "val/" + x[0].split("/")[-1]
                for x in dataset_config["samples"]
            }
        else:
            dataset_root = os.path.join(self.args.wsoleval_dataset_path,
                                        self.dataset)
            image_paths_and_labels = []
            for image_id in image_ids:
                dummy_label = 0
                image_paths_and_labels.append((
                    os.path.join(dataset_root, image_id),
                    dummy_label,
                ))
            dataset_config = {
                "root": dataset_root,
                "samples": image_paths_and_labels,
                "classes": None,
                "class_to_idx": None,
            }
            path_to_image_id_dict = None

        dataset = ImagePathDataset(
            config=dataset_config,
            transform=transforms.Compose([
                transforms.Resize([224, 224] if self.break_ratio else 224),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                imagenet_utils.NORMALIZATION,
            ]),
            return_paths=True,
        )
        data_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.workers,
            pin_memory=False,
        )
        for (input_, target), paths in data_loader:
            # Compute saliency masks for the batch, then yield one
            # (mask, image_id) pair per image.
            mask = self.get_mask(input_=input_, target=target)
            mask = mask.detach().cpu().squeeze(1)
            for j, single_mask in enumerate(mask):
                if self.dataset == "ILSVRC" and self.dataset_split == "test":
                    image_id = path_to_image_id_dict[paths[j]]
                else:
                    # Strip the "<root>/" prefix to recover the relative id
                    image_id = paths[j][len(data_loader.dataset.root) + 1:]
                yield single_mask, image_id
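Because getter is a generator, masks can be consumed one image at a time rather than accumulated in memory; a sketch of the calling pattern, where evaluator stands in for an instance of the surrounding (unnamed) class:

for mask, image_id in evaluator.getter(image_ids):
    ...  # e.g. save or score each mask as it is produced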
Example #4
def subsample_train(base_config_path,
                    out_config_path,
                    out_data_path,
                    out_metadata_path,
                    num_samples_per_class=None,
                    num_samples=None):
    config = io.read_json(base_config_path)
    raw_train_examples = io.read_jsonl(config["paths"]["train"])

    # Use copy.deepcopy (stdlib) so mutating the nested "paths" dict below
    # does not also alter the original `config`; a shallow .copy() shares it
    new_config = copy.deepcopy(config)
    new_config["paths"]["train"] = out_data_path

    if num_samples_per_class is None and num_samples is not None:
        # Sample indices without replacement so no example is drawn twice
        selected_indices = random.sample(
            range(len(raw_train_examples)),
            k=num_samples,
        )
        sub_examples = [raw_train_examples[i] for i in selected_indices]
        # Record which indices were sampled, mirroring the per-class branch
        metadata = selected_indices
    elif num_samples_per_class is not None and num_samples is None:
        index_label_list = [{
            "idx": i,
            "label": example["label"]
        } for i, example in enumerate(raw_train_examples)]
        # Group example indices by label so each class is sampled separately
        grouped = datastructures.group_by(index_label_list,
                                          lambda entry: entry["label"])
        sorted_keys = sorted(grouped.keys())

        sub_examples = []
        metadata = {}
        for key in sorted_keys:
            indices = [entry["idx"] for entry in grouped[key]]
            # A fixed number of examples per class, without replacement
            selected_key_indices = random.sample(indices,
                                                 k=num_samples_per_class)
            sub_examples += [
                raw_train_examples[i] for i in selected_key_indices
            ]
            metadata[key] = selected_key_indices
    else:
        raise RuntimeError(
            "Set exactly one of num_samples_per_class or num_samples")

    io.create_containing_folder(out_config_path)
    io.create_containing_folder(out_data_path)
    io.create_containing_folder(out_metadata_path)

    io.write_json(new_config, out_config_path)
    io.write_jsonl(sub_examples, out_data_path)
    io.write_json(metadata, out_metadata_path)
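A sketch of both sampling modes (paths hypothetical); exactly one of the two keyword arguments must be set:

# 100 examples per class, stratified by label:
subsample_train("configs/base.json", "configs/sub.json",
                "data/train_sub.jsonl", "metadata/sub.json",
                num_samples_per_class=100)
# ...or 500 examples drawn across the whole training set:
subsample_train("configs/base.json", "configs/sub2.json",
                "data/train_sub2.jsonl", "metadata/sub2.json",
                num_samples=500)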
Example #5
def main(args: RunConfiguration):
    # data loading code
    dataset = ImagePathDataset.from_path(
        config_path=args.val_json,
        transform=transforms.Compose([
            transforms.Resize([224, 224] if args.break_ratio else 224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            imagenet_utils.NORMALIZATION,
        ]),
        return_paths=True,
    )
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=False,
    )
    original_classifier = archs.resnet50shared(
        pretrained=True).eval().to(device)

    # Build the model spec for the requested scoring mode
    if args.mode in ("max", "min", "center", "ground_truth"):
        model = {'special': args.mode, 'classifier': original_classifier}
    elif args.mode == "casme":
        model = casme_load_model(
            args.casm_path, classifier_load_mode=args.classifier_load_mode)
    elif args.mode == "external":
        model = {
            'special': 'external',
            'classifier': original_classifier,
            'bboxes': io.read_json(args.casm_path),
        }
    elif args.mode == "torchray_grad_cam":
        model = {'special': 'grad_cam', 'classifier': original_classifier}
    elif args.mode == "torchray_guided_backprop":
        model = {
            'special': 'guided_backprop',
            'classifier': original_classifier,
        }
    else:
        raise KeyError(args.mode)

    gt_bboxes = io.read_json(args.bboxes_path)

    results, candidate_bbox_ls = score(
        args=args,
        model=model,
        data_loader=data_loader,
        bboxes=gt_bboxes,
        original_classifier=original_classifier,
        record_bboxes=args.record_bboxes,
    )

    io.write_json(results, args.output_path)
    if args.record_bboxes:
        assert candidate_bbox_ls
        io.write_json([bbox.to_dict() for bbox in candidate_bbox_ls],
                      args.record_bboxes)
Example #6
def load_unsup_examples_from_config_path(unsup_config_path, prefix="unsup-"):
    return load_unsup_examples_from_config(
        unsup_config=io.read_json(unsup_config_path),
        prefix=prefix,
    )
Example #7
@classmethod
def from_path(cls, config_path, *args, **kwargs):
    return cls(config=io.read_json(config_path), *args, **kwargs)
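This pattern lets a config-backed class be constructed directly from a JSON file; Example #5 above builds its dataset through exactly this classmethod (ImagePathDataset.from_path). Extra arguments are forwarded to the constructor; a sketch with a hypothetical config path:

dataset = ImagePathDataset.from_path("configs/val.json", return_paths=True)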
Example #8
def load_task_data_from_path(uda_config_path, verbose=True):
    return load_task_data(read_json(uda_config_path), verbose=verbose)
Example #9
@classmethod
def from_json(cls, path):
    return cls(**io.read_json(path))
Example #10
def create_task_from_config_path(config_path: str, verbose=False):
    return create_task_from_config(read_json(config_path),
                                   base_path=os.path.split(config_path)[0],
                                   verbose=verbose)
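As in the first example above, the directory containing the config file is passed along as base_path, presumably so that relative paths inside the task config resolve against the config file's location. A sketch with a hypothetical path:

task = create_task_from_config_path("configs/tasks/mnli.json", verbose=True)
# relative data paths inside mnli.json would resolve against configs/tasks/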