def __init__(self, cfg_filepath: str):
    set_deterministic_seed()
    # Load the JSON experiment config and create the output directories.
    with open(from_root(cfg_filepath), "r") as file:
        self.cfg = DictObject(json.load(file))
    for sub_dirname in ("logs", "checkpoints", "debug"):
        os.makedirs(self.from_out(sub_dirname), exist_ok=True)
    # Build the model and the data loaders; normalization statistics come
    # from the training split.
    self.model = create_resnet(self.cfg)
    self.model = self.model.to(self.cfg.model.device)
    self.train_loader, self.infer_train_loader, self.infer_val_loader, \
        train_mean, train_std = create_dataset(self.cfg)
    self.model.normalize.set_parameters(train_mean, train_std)
    self.logger = SummaryWriter(log_dir=self.from_out("logs"))
    self.optimizer = create_optimizer(self.model, self.logger, self.cfg)
    self.scheduler = optim.lr_scheduler.MultiStepLR(
        self.optimizer, self.cfg.scheduler.milestones,
        gamma=self.cfg.scheduler.gamma)
    # Softmax models train with NLL loss; Gaussian models train with the MMC
    # loss on squared distances.
    if isinstance(self.model, ResNet_Softmax):
        self.criterion = lambda logits, targets: F.nll_loss(
            logits, targets, reduction="mean")
    else:
        assert isinstance(self.model, ResNet_Gaussian)
        self.criterion = lambda sqr_distances, targets: mmc_loss(
            sqr_distances, targets, reduction="mean")
    Tracker.reset(self.cfg.optimizer.n_epochs)
    if self.cfg.load_checkpoint is not None:
        self.load_checkpoint(self.cfg.load_checkpoint)
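# Hedged illustration of the experiment config that the trainer __init__ above
# reads with json.load. Only the fields accessed directly in that code
# (model.device, scheduler.milestones, scheduler.gamma, optimizer.n_epochs,
# load_checkpoint) are grounded; every other key and value here is an
# assumption about what create_resnet / create_dataset / create_optimizer
# might expect, not the project's actual schema.
EXAMPLE_TRAINER_CFG = {
    "load_checkpoint": None,                 # or a checkpoint name to resume from
    "model": {"device": "cuda:0"},           # further model keys omitted (assumed)
    "optimizer": {"n_epochs": 200},
    "scheduler": {"milestones": [100, 150], "gamma": 0.1},
}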
def main() -> None:
    set_deterministic_seed()
    dataset = torchvision.datasets.MNIST(DATA_DIRPATH, train=True, download=True)
    size = len(dataset)
    indices = list(range(size))
    random.shuffle(indices)
    train_indices = indices[:TRAIN_SIZE]
    write_lines(os.path.join(SPLIT_DIRPATH, "train.txt"), train_indices)
    val_indices = indices[TRAIN_SIZE:]
    write_lines(os.path.join(SPLIT_DIRPATH, "val.txt"), val_indices)
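# The split scripts in this section all rely on a write_lines helper that is
# not shown here. This is a minimal sketch of what it presumably does (one
# value per line); the project's actual helper may differ.
from typing import Iterable


def write_lines(filepath: str, values: Iterable) -> None:
    with open(filepath, "w") as file:
        for value in values:
            file.write(f"{value}\n")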
def main() -> None:
    set_deterministic_seed()
    dataset = torchvision.datasets.CIFAR10(DATA_DIRPATH, train=True, download=True)
    size = len(dataset)
    indices = list(range(size))
    random.shuffle(indices)
    train_size = round(TRAIN_PCT * size)
    train_indices = indices[:train_size]
    write_lines(os.path.join(SPLIT_DIRPATH, "train.txt"), train_indices)
    val_indices = indices[train_size:]
    write_lines(os.path.join(SPLIT_DIRPATH, "val.txt"), val_indices)
def __init__(self, cfg_filepath: str): set_deterministic_seed() with open(from_root(cfg_filepath), "r") as file: self.cfg = DictObject(json.load(file)) for sub_dirname in ("logs", "checkpoints", "debug"): os.makedirs(self.from_out(sub_dirname), exist_ok=True) self.model = RefineNet_4Cascaded() self.model = self.model.cuda() state_dict = torch.load("TODO", map_location=model_device(self.model)) self.model.backbone.load_state_dict(state_dict) self.train_loader = load_pascal_voc_train(16) self.infer_train_loader = load_pascal_voc_infer("train", 16) self.infer_val_loader = load_pascal_voc_infer("val", 16)
def main() -> None:
    set_deterministic_seed()
    dataset = ConcatDataset([
        torchvision.datasets.SVHN(DATA_DIRPATH, split="train", download=True),
        torchvision.datasets.SVHN(DATA_DIRPATH, split="extra", download=True)
    ])
    size = len(dataset)
    indices = list(range(size))
    random.shuffle(indices)
    train_size = round(TRAIN_PCT * size)
    train_indices = indices[:train_size]
    write_lines(os.path.join(SPLIT_DIRPATH, "train.txt"), train_indices)
    val_indices = indices[train_size:]
    write_lines(os.path.join(SPLIT_DIRPATH, "val.txt"), val_indices)
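# Hedged counterpart sketch: how one of the split files written above could be
# read back into a torch.utils.data.Subset. The name load_split and its use
# inside create_dataset are assumptions, not code taken from the project.
from torch.utils.data import Dataset, Subset


def load_split(dataset: Dataset, split_filepath: str) -> Subset:
    with open(split_filepath, "r") as file:
        indices = [int(line) for line in file.read().splitlines() if line.strip()]
    return Subset(dataset, indices)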
def __init__(self, cfg_filepath: str, data_loader: DataLoader,
             adversary: Optional[Adversary], out_name: str,
             visualize_adversary: int):
    set_deterministic_seed()
    # Load the JSON experiment config and create the inference output directories.
    with open(from_root(cfg_filepath), "r") as file:
        self.cfg = DictObject(json.load(file))
    os.makedirs(self.from_out("inference"), exist_ok=True)
    os.makedirs(self.from_out(f"inference_debug/{out_name}"), exist_ok=True)
    # Rebuild the model and restore the weights from the best training epoch.
    self.model = create_resnet(self.cfg)
    self.model = self.model.to(self.cfg.model.device)
    self.load_best_epoch()
    self.loader = data_loader
    self.adversary = adversary
    self.out_name = out_name
    self.visualize_adversary = visualize_adversary
    Tracker.reset()
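# Hedged usage sketch for the inference __init__ above. The class name
# (InferenceRunner), the config path, the data loader variable, and the
# reading of visualize_adversary as "number of adversarial examples to
# visualize" are assumptions; the keyword arguments mirror the constructor
# signature exactly.
runner = InferenceRunner(
    cfg_filepath="configs/example.json",   # hypothetical config path
    data_loader=infer_val_loader,          # any DataLoader over the eval split
    adversary=None,                        # or an Adversary for robust evaluation
    out_name="val_clean",
    visualize_adversary=0,
)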
def setUp(self) -> None:
    set_deterministic_seed()