def setUp(self) -> None:
    super().setUp()
    # student model and its exponential-moving-average (EMA) teacher
    self._model = Model(arch=deeplabv3_resnet101(pretrained=True))
    self._model_ema = EMA_Model(
        Model(deeplabv3_resnet101(pretrained=False)), alpha=0.9, weight_decay=1e-4
    )
    # self._model_ema._model.load_state_dict(self._model.state_dict())
    self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self._img = (
        ToTensor()(Image.open("img1.jpg").convert("RGB")).unsqueeze(0).to(self._device)
    )
    self._model.to(self._device)
    self._model_ema.to(self._device)
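# For reference, a minimal sketch of the exponential-moving-average update an
# EMA_Model wrapper of this kind typically applies after each student step
# (an assumption about EMA_Model's internals, not the repo's actual code;
# `alpha` mirrors the alpha=0.9 passed above):
import torch


@torch.no_grad()
def ema_update(ema_model: torch.nn.Module, model: torch.nn.Module, alpha: float = 0.9) -> None:
    # p_ema <- alpha * p_ema + (1 - alpha) * p_student
    for p_ema, p in zip(ema_model.parameters(), model.parameters()):
        p_ema.mul_(alpha).add_(p, alpha=1.0 - alpha)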
def setUp(self) -> None:
    super().setUp()
    arch_dict = {
        "name": "clusternet6cTwoHead",
        "input_size": 24,
        "num_channel": 1,
        "output_k_A": 50,
        "output_k_B": 10,
        "num_sub_heads": 5,
    }
    optim_dict = {"name": "Adam"}
    scheduler_dict = {
        "name": "MultiStepLR",
        "milestones": [10, 20, 30, 40, 50, 60, 70, 80, 90],
        "gamma": 1,
    }
    self.model = Model(arch_dict, optim_dict, scheduler_dict)
    self.scheduler = RampScheduler(100, 500, 10, 1, -5)
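# For reference, a minimal sketch of the exponential ramp-up a RampScheduler
# of this kind typically computes (an assumption: the argument order of
# RampScheduler(100, 500, 10, 1, -5) is taken on faith, and -5 is read as the
# classic ramp multiplier from Laine & Aila's temporal ensembling):
import math


def ramp_value(epoch: int, begin: int, end: int, min_value: float,
               max_value: float, mult: float = -5.0) -> float:
    if epoch <= begin:
        return min_value
    if epoch >= end:
        return max_value
    t = (epoch - begin) / (end - begin)  # progress in (0, 1)
    return min_value + (max_value - min_value) * math.exp(mult * (1.0 - t) ** 2)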
    assert Trainer, config.get("Trainer").get("name")
    return Trainer


DEFAULT_CONFIG = "config_CIFAR.yaml"
merged_config = ConfigManger(
    DEFAULT_CONFIG_PATH=DEFAULT_CONFIG, verbose=True, integrality_check=True
).config
train_loader_A, train_loader_B, val_loader = get_dataloader(
    **merged_config["DataLoader"]
)

# create model:
model = Model(
    arch_dict=merged_config["Arch"],
    optim_dict=merged_config["Optim"],
    scheduler_dict=merged_config["Scheduler"],
)
model = to_Apex(model, opt_level=None, verbosity=0)
Trainer = get_trainer(merged_config)
clusteringTrainer = Trainer(
    model=model,
    train_loader_A=train_loader_A,
    train_loader_B=train_loader_B,
    val_loader=val_loader,
    config=merged_config,
    **merged_config["Trainer"],
)
clusteringTrainer.start_training()
clusteringTrainer.clean_up()
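# For reference, to_Apex presumably wraps the model for NVIDIA apex mixed
# precision, and with opt_level=None it is effectively a pass-through. A
# minimal sketch of the underlying apex call it would delegate to (an
# assumption about to_Apex's internals; the .torchnet / .optimizer attributes
# follow the Model usage elsewhere in this repo):
from apex import amp

model.torchnet, model.optimizer = amp.initialize(
    model.torchnet, model.optimizer, opt_level="O1", verbosity=0
)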
        gdl_loss = self.abs_loss(orig_gradient_x, pred_gradient_x) + self.abs_loss(
            orig_gradient_y, pred_gradient_y
        )
        return self.mse(pred, gt) + self.gdl_weight * gdl_loss


img_transform = transforms.Compose(
    [
        transforms.ToTensor(),
        # transforms.Normalize((0.5,), (0.5,))
    ]
)
dataset = MNIST(DATA_PATH, transform=img_transform)
dataloader = DataLoader(dataset, batch_size=128, shuffle=True)
model = Model()
model.torchnet = autoencoder()
model.optimizer = torch.optim.Adam(
    model.torchnet.parameters(), lr=1e-3, weight_decay=1e-5
)
config = ConfigManger().parsed_args
if config["loss"] == "mse":
    criterion = nn.MSELoss()
elif config["loss"] == "gdl":
    criterion = gradient_difference_loss(config["weight"])
trainer = MNISTTrainer(
    model=model,
    train_loader=dataloader,
    val_loader=dataloader,
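# For reference, a minimal sketch of the finite-difference image gradients the
# orig_gradient_* / pred_gradient_* terms in gradient_difference_loss above
# are presumably built from (an assumption; the repo may use a different
# stencil):
import torch


def image_gradients(img: torch.Tensor):
    # img: (B, C, H, W); first differences along width and height
    grad_x = img[..., :, 1:] - img[..., :, :-1]  # (B, C, H, W-1)
    grad_y = img[..., 1:, :] - img[..., :-1, :]  # (B, C, H-1, W)
    return grad_x, grad_y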
**config["DataLoader"]) datainterface.drop_last = True train_loader = datainterface.ParallelDataLoader( default_mnist_img_transform["tf1"], default_mnist_img_transform["tf2"], default_mnist_img_transform["tf2"], default_mnist_img_transform["tf2"], default_mnist_img_transform["tf2"], default_mnist_img_transform["tf2"], ) datainterface.split_partitions = ["val"] datainterface.drop_last = False val_loader = datainterface.ParallelDataLoader( default_mnist_img_transform["tf3"]) model = Model(config["Arch"], config["Optim"], config["Scheduler"]) assert config["Trainer"]["name"] in ( "IIC", "IIC_enhance", "IIC_adv_enhance", "IMSAT", "IMSAT_enhance", ) if config["Trainer"]["name"] == "IMSAT": # MI(x,p) + CE(p,adv(p)) or MI(x,p) + CE(p,geom(p)) Trainer = IMSAT_Trainer elif config["Trainer"]["name"] == "IMSAT_enhance": # MI(x,p) + CE(p,adv(p)) + CE(p,geom(p)) Trainer = IMSAT_Enhanced_Trainer elif config["Trainer"]["name"] == "IIC":
img_transforms["tf1"], img_transforms["tf2"], img_transforms["tf2"], img_transforms["tf2"], img_transforms["tf2"], ) val_loader = DatasetInterface( split_partitions=val_split_partition, **merged_config["DataLoader"]).ParallelDataLoader( img_transforms["tf3"]) return train_loader_A, train_loader_B, val_loader train_loader_A, train_loader_B, val_loader = get_dataloader(merged_config) # create model: model = Model( arch_dict=merged_config.get("Arch"), optim_dict=merged_config.get("Optim"), scheduler_dict=merged_config.get("Scheduler"), ) trainer = MixUpTrainer(model=model, train_loader_A=train_loader_A, train_loader_B=train_loader_B, val_loader=val_loader, config=merged_config, **merged_config.get("Trainer")) trainer.start_training() trainer.clean_up()
from deepclustering.manager import ConfigManger
from deepclustering.model import Model, to_Apex

from arch import _register_arch
from data import get_dataloader
from scheduler import CustomScheduler
from trainer import AdaNetTrainer, VAT_Trainer

_ = _register_arch  # to enable the network registration

DEFAULT_CONFIG_PATH = "config.yaml"
config = ConfigManger(DEFAULT_CONFIG_PATH, verbose=True, integrality_check=False).config
model = Model(config.get("Arch"), config.get("Optim"), config.get("Scheduler"))
model = to_Apex(model, opt_level=None)
label_loader, unlabel_loader, val_loader = get_dataloader(
    config["DataLoader"].get("name"),
    config["DataLoader"].get("aug", False),
    config.get("DataLoader"),
)
scheduler = CustomScheduler(max_epoch=config["Trainer"]["max_epoch"])
assert config["Trainer"].get("name") in ("vat", "ada")
Trainer = VAT_Trainer if config["Trainer"]["name"].lower() == "vat" else AdaNetTrainer
trainer = Trainer(
    model=model,
    labeled_loader=label_loader,
    unlabeled_loader=unlabel_loader,
    val_loader=val_loader,
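# For reference, a minimal sketch of the virtual adversarial training loss
# behind VAT_Trainer (Miyato et al.'s formulation with one power iteration;
# an assumption, not necessarily this trainer's exact implementation):
import torch
import torch.nn.functional as F


def vat_loss(model, x: torch.Tensor, xi: float = 1e-6, eps: float = 8.0) -> torch.Tensor:
    with torch.no_grad():
        p = F.softmax(model(x), dim=1)  # clean prediction, treated as the target
    d = torch.randn_like(x)
    d = (xi * F.normalize(d.flatten(1), dim=1).view_as(x)).requires_grad_(True)
    adv_dist = F.kl_div(F.log_softmax(model(x + d), dim=1), p, reduction="batchmean")
    (grad,) = torch.autograd.grad(adv_dist, d)  # one power-iteration step
    r_adv = eps * F.normalize(grad.flatten(1), dim=1).view_as(x)
    return F.kl_div(F.log_softmax(model(x + r_adv.detach()), dim=1), p, reduction="batchmean")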