def start_fashionClassTrainer(gpus=(), nepochs=10, run_type="train"):
    """An example of Fashion-MNIST classification."""
    num_class = 10
    depth = 32
    batch_size = 4
    opt_hpm = {"optimizer": "Adam",
               "lr_decay": 0.94,
               "decay_position": 10,
               "position_type": "epoch",
               "lr_reset": {2: 5e-4, 3: 1e-3},
               "lr": 1e-4,
               "weight_decay": 2e-5,
               "betas": (0.9, 0.99)}

    print('===> Build dataset')
    mnist = FashionMNIST(batch_size=batch_size)
    # mnist.dataset_train = mnist.dataset_test
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    net = Model(SimpleModel(depth=depth), gpu_ids_abs=gpus, init_method="kaiming", check_point_pos=1)
    print('===> Building optimizer')
    opt = Optimizer(net.parameters(), **opt_hpm)
    print('===> Training')
    print("Use `tensorboard --logdir=log` to see the learning curves and the net structure. "
          "Training and validation data, configuration info and checkpoints are saved in the `log` directory.")
    Trainer = FashionClassTrainer("log/fashion_classify", nepochs, gpus, net, opt, mnist, num_class)
    if run_type == "train":
        Trainer.train()
    elif run_type == "debug":
        Trainer.debug()
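# Usage sketch (not part of the original example): how this trainer might be
# launched directly; the CPU-only GPU tuple and epoch count below are illustrative.
if __name__ == '__main__':
    start_fashionClassTrainer(gpus=(), nepochs=10, run_type="train")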
def prepare_model(weights_path, gpu_ids_abs, net, verbose):
    model = Model(net, gpu_ids_abs=gpu_ids_abs, verbose=verbose)
    model.load_weights(weights_path, strict=True)
    if verbose:
        print("Loaded model weights successfully!")
    model.eval()
    return model.model
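# Hypothetical usage of `prepare_model`: the checkpoint path below is a placeholder,
# and NestedUNet() merely stands in for whichever network the weights were trained with.
if __name__ == '__main__':
    net = prepare_model("log/checkpoint/Weights_netG_60.pth",
                        gpu_ids_abs=[], net=NestedUNet(), verbose=True)
    with torch.no_grad():
        out = net(torch.randn(1, 1, 256, 256))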
        x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], 1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], 1))
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], 1))

        x4_0 = self.conv4_0(self.pool(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], 1))

        if self.deepsupervision:
            output1 = nn.Tanh()(self.final1(x0_1))
            output2 = nn.Tanh()(self.final2(x0_2))
            output3 = nn.Tanh()(self.final3(x0_3))
            output4 = nn.Tanh()(self.final4(x0_4))
            return [output1, output2, output3, output4]
        else:
            output = self.final(x0_4)
            return nn.Tanh()(self.up(output))
            # return nn.Tanh()(output)


if __name__ == '__main__':
    from jdit import Model

    unet = Model(NestedUNet(1))
    print(unet.num_params)
def deartifact(img, gpus, save_name):
    recon_np = np.asarray(img) / 255
    recon_np = recon_np[np.newaxis, np.newaxis, :]
    tensor = torch.Tensor(recon_np).float()
    if len(gpus) > 0:
        tensor = tensor.cuda(device)
    with torch.no_grad():
        out = model(tensor).detach()
    # Scale back to the 0-255 range once, when converting to an image.
    recon_np = out.cpu().numpy()[0][0]
    Image.fromarray(recon_np * 255).convert("L").save(save_name)


device = torch.device(1)
weights_path = "log/spd40_80/checkpoint/Weights_netG_60.pth"
model = Model(NestedUNet(), gpu_ids_abs=[1])
model.load_weights(weights_path, strict=True)
model.eval()
model = model.model
model.share_memory()

if __name__ == '__main__':
    titls, angles = init_test_img()
    length = titls.shape[1]  # 321
    pbar = tqdm(total=length)
    pool = Pool(mp.cpu_count())
    for i in range(length):
        titl = titls[:, i, :]
        pool.apply_async(iradon_and_save, (
            titl, angles,
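# Hypothetical call into `deartifact`; the file names below are placeholders and
# the input is converted to a grayscale PIL image before inference:
# img = Image.open("noisy_slice.png").convert("L")
# deartifact(img, gpus=[1], save_name="denoised_slice.png")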
    def forward(self, x, g):
        out = torch.cat((x, g), 1)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = self.layer6(out)
        return out


if __name__ == '__main__':
    from jdit import Model

    unet = Model(NLD_UNION(), [])
    print(unet.num_params)
    input = torch.randn((2, 1, 256, 256), requires_grad=True)
    # d2-d5 are example intermediate feature maps; they are not consumed by
    # NLD_UNION.forward above, which only takes the two image-sized inputs.
    d2 = torch.randn((2, 64, 128, 128), requires_grad=True)
    d3 = torch.randn((2, 128, 64, 64), requires_grad=True)
    d4 = torch.randn((2, 256, 32, 32), requires_grad=True)
    d5 = torch.randn((2, 512, 16, 16), requires_grad=True)
    target = torch.Tensor([1]).squeeze()
    output = unet(input, input)
    print(sum(i.mean() for i in output), target)
    res = torch.autograd.gradcheck(torch.nn.MSELoss(),
                                   (sum(i.mean() for i in output), target),
                                   eps=1e-6, raise_exception=True)
    print(output.size())
torch.backends.cudnn.benchmark = True
print('===> Build dataset')
datasets = RadonNoisedDatasets("/home/dgl/dataset", "stpd40_80",
                               batch_size=batch_size, valid_size=valid_size, num_workers=2)
print('===> Building model')
# G_net = UNet(1, (64, 128, 256, 512, 1024))  # (32, 64, 128, 256, 512), (64, 128, 256, 512, 1024)
# G_net = resnet10(no_cuda=False)
G_net = NestedUNet()  # 7
D_net = NLD(32)  # 64
# D_net = NLD_LG_inpaint(64)
net_G = Model(G_net, gpus, verbose=True, check_point_pos=10)
net_D = Model(D_net, gpus, verbose=True, check_point_pos=10)
print('===> Building optimizer')
optG = Optimizer(net_G.parameters(), **G_hprams)
optD = Optimizer(net_D.parameters(), **D_hprams)
print('===> Training')
Trainer = RadonPix2pixGanTrainer(logdir, nepochs, gpus, net_G, net_D, optG, optD, datasets)

import sys

_DEBUG_ = len(sys.argv) > 1 and sys.argv[1].strip().lower() == "-d"
if _DEBUG_:
    Trainer.debug()
else:
    Trainer.train(show_network=False, subbar_disable=False)
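# Launch sketch (assuming this script were saved as, e.g., train_radon.py):
#   python train_radon.py       # full GAN training via Trainer.train()
#   python train_radon.py -d    # quick single-pass debug run via Trainer.debug()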
    return model


def resnet152(num_seg_classes=1, shortcut_type='B', no_cuda=False):
    """Constructs a ResNet-152 model."""
    model = ResNet(Bottleneck, [3, 8, 36, 3], num_seg_classes, shortcut_type, no_cuda)
    return model


def resnet200(num_seg_classes=1, shortcut_type='B', no_cuda=False):
    """Constructs a ResNet-200 model."""
    model = ResNet(Bottleneck, [3, 24, 36, 3], num_seg_classes, shortcut_type, no_cuda)
    return model


if __name__ == '__main__':
    model = ResNet(Bottleneck, [1, 1, 1, 1], num_seg_classes=1, shortcut_type='B', no_cuda=False)
    from jdit import Model

    input = torch.zeros((1, 1, 256, 256)).cuda()
    model = Model(model, [0])
    # from torch.utils.tensorboard import SummaryWriter
    # w = SummaryWriter(log_dir="log")
    # w.add_graph(model.model, input)
    # w.close()
    print(model(input).size())
    print(model.num_params)
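# Note (sketch): the constructors above differ only in their Bottleneck block
# counts, e.g. resnet152 -> [3, 8, 36, 3] and resnet200 -> [3, 24, 36, 3];
# a CPU-only instance could be built as:
# backbone = resnet200(num_seg_classes=1, shortcut_type='B', no_cuda=True)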