# Load experiment setting config = get_config(opts.config) input_dim = config['input_dim_a'] if opts.a2b else config['input_dim_b'] # Load the inception networks if we need to compute IS or CIIS if opts.compute_IS or opts.compute_IS: inception = load_inception(opts.inception_b) if opts.a2b else load_inception(opts.inception_a) # freeze the inception models and set eval mode inception.eval() for param in inception.parameters(): param.requires_grad = False inception_up = nn.Upsample(size=(299, 299), mode='bilinear') # Setup model and data loader image_names = ImageFolder(opts.input_folder, transform=None, return_paths=True) data_loader = get_data_loader_folder(opts.input_folder, 1, False, new_size=config['new_size_a'], crop=False) config['vgg_model_path'] = opts.output_path if opts.trainer == 'MUNIT': style_dim = config['gen']['style_dim'] trainer = MUNIT_Trainer(config) elif opts.trainer == 'UNIT': trainer = UNIT_Trainer(config) else: sys.exit("Only support MUNIT|UNIT") try: state_dict = torch.load(opts.checkpoint) trainer.gen_a.load_state_dict(state_dict['a']) trainer.gen_b.load_state_dict(state_dict['b'])
# NOTE(review): this chunk begins inside a transforms.Compose([...]) call
# whose opening line is outside this view.
# Remaining entries of the training-time augmentation pipeline.
# NOTE(review): Normalize runs before ToTensor here — presumably `tr` holds
# custom transforms operating on PIL/ndarray samples; confirm, since
# torchvision's convention would be the reverse order.
tr.RandomRotate(15),
tr.RandomHorizontalFlip(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()
])

# Validation/test pipeline: deterministic resize + normalization only
# (no random augmentation).
composed_transforms_ts = transforms.Compose([
    tr.FixedResize(size=(512, 512)),
    tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    tr.ToTensor()
])

# voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr)
# voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts)

# OCT dataset folders; NOTE(review): mode='test' presumably switches off
# augmentation inside ImageFolder — confirm against its implementation.
# The composed transforms above are not passed here, so they only apply to
# the (commented-out) PASCAL/SBD path.
voc_train = ImageFolder(root_path='dataset/train', datasets='OCT-origin')
voc_val = ImageFolder(root_path='dataset/validation', datasets='OCT-origin', mode='test')

if use_sbd:
    # Optionally merge SBD into the training set, excluding any images that
    # appear in the validation split.
    print("Using SBD dataset")
    sbd_train = sbd.SBDSegmentation(split=['train', 'val'], transform=composed_transforms_tr)
    db_train = combine_dbs.CombineDBs([voc_train, sbd_train], excluded=[voc_val])
else:
    db_train = voc_train

# NOTE(review): this chunk ends mid-call — the remaining DataLoader
# arguments lie beyond this view.
trainloader = DataLoader(db_train, batch_size=p['trainBatch'],
# Remaining CLI options (the ArgumentParser construction precedes this chunk).
parser.add_argument('--output_path', type=str, default='.', help="path for logs, checkpoints, and VGG model weight")
parser.add_argument('--trainer', type=str, default='MUNIT', help="MUNIT|UNIT")
opts = parser.parse_args()

# Seed both CPU and GPU RNGs for reproducibility.
torch.manual_seed(opts.seed)
torch.cuda.manual_seed(opts.seed)

# Ensure the output directory exists before anything is written to it.
if not os.path.exists(opts.output_folder):
    os.makedirs(opts.output_folder)

# Load experiment setting
config = get_config(opts.config)

# Unshuffled, batch-size-1 loaders over both domains; the ImageFolder
# instances with return_paths=True are kept alongside, presumably to
# recover the corresponding file names — confirm against later usage.
imagea_names = ImageFolder(opts.A, transform=None, return_paths=True)
imageb_names = ImageFolder(opts.B, transform=None, return_paths=True)
data_loader_a = get_data_loader_folder(opts.A, 1, False, new_size=config['new_size'], height=224, width=224, crop=False)
data_loader_b = get_data_loader_folder(opts.B, 1, False, new_size=config['new_size'], height=224, width=224, crop=False)
def CE_Net_Train():
    """Train CE-Net on the dataset rooted at Constants.ROOT.

    Runs up to Constants.TOTAL_EPOCH epochs with loss-driven early stopping
    and learning-rate decay, logging to both stdout and logs/<NAME>.log,
    and checkpointing the best model to ./weights/<NAME>.th.
    """
    NAME = 'CE-Net' + Constants.ROOT.split('/')[-1]

    # run the Visdom visualization environment
    viz = Visualizer(env=NAME)

    solver = MyFrame(CE_Net_, dice_bce_loss, 2e-4)
    # Scale the batch size with the number of visible GPUs.
    batchsize = torch.cuda.device_count() * Constants.BATCHSIZE_PER_CARD

    # For different 2D medical image segmentation tasks, please specify the
    # dataset which you use; for example, "datasets='DRIVE'" for retinal
    # vessel detection.
    dataset = ImageFolder(root_path=Constants.ROOT, datasets='DRIVE')
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batchsize,
        shuffle=True,
        num_workers=4)

    # start the logging file
    mylog = open('logs/' + NAME + '.log', 'w')
    tic = time()

    no_optim = 0
    total_epoch = Constants.TOTAL_EPOCH
    train_epoch_best_loss = Constants.INITAL_EPOCH_LOSS

    for epoch in range(1, total_epoch + 1):
        data_loader_iter = iter(data_loader)
        train_epoch_loss = 0
        index = 0

        for img, mask in data_loader_iter:
            solver.set_input(img, mask)
            train_loss, pred = solver.optimize()
            train_epoch_loss += train_loss
            index = index + 1

        # show the original images, prediction and ground truth (from the
        # last batch of the epoch) on visdom.
        # NOTE(review): (x + 1.6) / 3.2 * 255 presumably undoes the dataset
        # normalization for display — confirm against ImageFolder.
        show_image = (img + 1.6) / 3.2 * 255.
        viz.img(name='images', img_=show_image[0, :, :, :])
        viz.img(name='labels', img_=mask[0, :, :, :])
        viz.img(name='prediction', img_=pred[0, :, :, :])

        train_epoch_loss = train_epoch_loss / len(data_loader_iter)

        # BUG FIX: the original called print(mylog, ...), which printed the
        # file object's repr to stdout and never wrote anything to the log
        # file. Use file=mylog so the messages actually land in the log.
        print('********', file=mylog)
        print('epoch:', epoch, ' time:', int(time() - tic), file=mylog)
        print('train_loss:', train_epoch_loss, file=mylog)
        print('SHAPE:', Constants.Image_size, file=mylog)
        print('********')
        print('epoch:', epoch, ' time:', int(time() - tic))
        print('train_loss:', train_epoch_loss)
        print('SHAPE:', Constants.Image_size)

        # Early-stopping bookkeeping: count epochs without improvement and
        # checkpoint whenever the epoch loss improves.
        if train_epoch_loss >= train_epoch_best_loss:
            no_optim += 1
        else:
            no_optim = 0
            train_epoch_best_loss = train_epoch_loss
            solver.save('./weights/' + NAME + '.th')
        if no_optim > Constants.NUM_EARLY_STOP:
            print('early stop at %d epoch' % epoch, file=mylog)
            print('early stop at %d epoch' % epoch)
            break
        if no_optim > Constants.NUM_UPDATE_LR:
            # Give up once the learning rate has bottomed out; otherwise
            # reload the best checkpoint and decay the learning rate by 2x.
            if solver.old_lr < 5e-7:
                break
            solver.load('./weights/' + NAME + '.th')
            solver.update_lr(2.0, factor=True, mylog=mylog)
        mylog.flush()

    print('Finish!', file=mylog)
    print('Finish!')
    mylog.close()
from networks.dinknet import DUNet
from framework import MyFrame
from data import ImageFolder

SHAPE = (256, 256)
ROOT = 'E:/shao_xing/tiny_dataset/new0228/tiny_sat_lab/'

# Collect training ids: satellite tiles contain 'sat' in the file name;
# stripping the last 8 characters yields the sample id shared with labels.
# BUG FIX: under Python 3, filter()/map() return one-shot lazy iterators,
# so the resulting trainlist could neither be len()-ed nor indexed by the
# Dataset/DataLoader and would be exhausted after a single pass.
# Materialize both lists eagerly instead.
imagelist = [x for x in os.listdir(ROOT) if x.find('sat') != -1]
trainlist = [x[:-8] for x in imagelist]

NAME = 'ratio_16'
BATCHSIZE_PER_CARD = 2

solver = MyFrame(DUNet, lr=0.00005)
# solver = MyFrame(Unet, dice_bce_loss, 2e-4)
# Scale the batch size with the number of visible GPUs.
batchsize = torch.cuda.device_count() * BATCHSIZE_PER_CARD
dataset = ImageFolder(trainlist, ROOT)
data_loader = torch.utils.data.DataLoader(
    dataset,
    batch_size=batchsize,
    shuffle=True,
    num_workers=0)

# Append to the existing log so fine-tuning continues the previous record.
mylog = open('log/' + NAME + '_finetune.log', 'a')
tic = time()

no_optim = 0
total_epoch = 100
train_epoch_best_loss = 100.

# Fine-tune from the existing checkpoint.
solver.load('weights/ratio_16.th')
print('* load existing model *')
epoch_iter = 0