def main():
    """Run saliency inference.

    Loads the trained feature extractor and deconv head from
    ``opt.para_dir``, predicts a mask for every image served by
    ``MyTestData(opt.input_dir)`` and saves each mask as a PNG in
    ``opt.output_dir``.
    """
    if not os.path.exists(opt.output_dir):
        os.mkdir(opt.output_dir)
    bsize = opt.b

    # Backbone selected by name: 'vgg', a 'resnet*' or a 'densenet*' variant.
    if opt.q == 'vgg':
        feature = vgg.vgg(pretrained=False)
    elif 'resnet' in opt.q:
        feature = getattr(resnet, opt.q)(pretrained=False)
    elif 'densenet' in opt.q:
        feature = getattr(densenet, opt.q)(pretrained=False)
    else:
        # NOTE(review): feature.cuda() below would crash on None —
        # presumably opt.q is validated upstream; confirm.
        feature = None
    feature.cuda()
    feature.eval()
    sb = torch.load('%s/feature.pth' % opt.para_dir)
    feature.load_state_dict(sb)

    deconv = Deconv(opt.q)
    deconv.cuda()
    deconv.eval()
    sb = torch.load('%s/deconv.pth' % opt.para_dir)
    deconv.load_state_dict(sb)

    loader = torch.utils.data.DataLoader(
        MyTestData(opt.input_dir),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True)

    for ib, (data, img_name, img_size) in enumerate(loader):
        # fix: py2-only `print ib` statement -> call form, valid on py2 and py3
        print(ib)
        inputs = Variable(data).cuda()
        feats = feature(inputs)
        outputs = deconv(feats)
        outputs = F.sigmoid(outputs)
        # (N, 1, H, W) logits -> (N, H, W) probabilities on the CPU
        outputs = outputs.data.cpu().squeeze(1).numpy()
        for ii, msk in enumerate(outputs):
            msk = (msk * 255).astype(np.uint8)
            msk = Image.fromarray(msk)
            # assumes img_size is ([widths...], [heights...]) per batch —
            # TODO confirm against MyTestData
            msk = msk.resize((img_size[0][ii], img_size[1][ii]))
            msk.save('%s/%s.png' % (opt.output_dir, img_name[ii]), 'PNG')
def main():
    """Train the feature + deconv saliency network.

    ResNet backbones use SGD with an FCN-style parameter grouping
    (biases get 2x learning rate and no weight decay) and a poly
    learning-rate schedule; other backbones use Adam. After every epoch
    the model is checkpointed into ``opt.check_dir`` whenever the
    validation loss improves.
    """
    train_dir = opt.train_dir
    val_dir = opt.val_dir
    check_dir = opt.check_dir
    bsize = opt.b
    iter_num = opt.e  # training epochs

    if not os.path.exists(check_dir):
        os.mkdir(check_dir)

    # Backbone selected by name — same convention as the test script.
    if opt.q == 'vgg':
        feature = vgg.vgg(pretrained=True)
    elif 'resnet' in opt.q:
        feature = getattr(resnet, opt.q)(pretrained=True)
    elif 'densenet' in opt.q:
        feature = getattr(densenet, opt.q)(pretrained=True)
    else:
        # NOTE(review): feature.cuda() below would crash on None — confirm
        # opt.q is validated upstream.
        feature = None
    feature.cuda()

    deconv = Deconv(opt.q)
    deconv.cuda()

    train_loader = torch.utils.data.DataLoader(
        MyData(train_dir, transform=True, crop=False, hflip=False, vflip=False),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        MyData(val_dir, transform=True, crop=False, hflip=False, vflip=False),
        # fix: floor division keeps batch_size an int under Python 3 too
        # (bsize / 2 is a float on py3, which DataLoader rejects)
        batch_size=bsize // 2, shuffle=True, num_workers=4, pin_memory=True)

    if 'resnet' in opt.q:
        lr = 5e-3
        lr_decay = 0.9
        # FCN-style grouping: biases get 2x lr and no weight decay.
        # Group order (deconv bias, deconv weight, feature bias,
        # feature weight) is relied on by the schedule below.
        optimizer = torch.optim.SGD([
            {'params': [param for name, param in deconv.named_parameters()
                        if name[-4:] == 'bias'],
             'lr': 2 * lr},
            {'params': [param for name, param in deconv.named_parameters()
                        if name[-4:] != 'bias'],
             'lr': lr, 'weight_decay': 1e-4},
            {'params': [param for name, param in feature.named_parameters()
                        if name[-4:] == 'bias'],
             'lr': 2 * lr},
            {'params': [param for name, param in feature.named_parameters()
                        if name[-4:] != 'bias'],
             'lr': lr, 'weight_decay': 1e-4},
        ], momentum=0.9, nesterov=True)
    else:
        optimizer = torch.optim.Adam([
            {'params': feature.parameters(), 'lr': 1e-4},
            {'params': deconv.parameters(), 'lr': 1e-3},
        ])

    min_loss = 10000.0
    for it in range(iter_num):
        if 'resnet' in opt.q:
            # Poly learning-rate schedule; the loop-invariant decay factor
            # is computed once per epoch instead of four times.
            decayed = (1 - float(it) / iter_num) ** lr_decay
            optimizer.param_groups[0]['lr'] = 2 * lr * decayed  # deconv bias
            optimizer.param_groups[1]['lr'] = lr * decayed      # deconv weight
            optimizer.param_groups[2]['lr'] = 2 * lr * decayed  # feature bias
            optimizer.param_groups[3]['lr'] = lr * decayed      # feature weight

        for ib, (data, lbl) in enumerate(train_loader):
            inputs = Variable(data).cuda()
            # labels -> float (N, 1, H, W) to match the logits' shape
            lbl = Variable(lbl.float().unsqueeze(1)).cuda()
            feats = feature(inputs)
            msk = deconv(feats)
            loss = F.binary_cross_entropy_with_logits(msk, lbl)

            deconv.zero_grad()
            feature.zero_grad()
            loss.backward()
            optimizer.step()

            # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom;
            # switch to loss.item() if the project moves to torch >= 0.4.
            print('loss: %.4f (epoch: %d, step: %d)' % (loss.data[0], it, ib))
            del inputs, msk, lbl, loss, feats
            gc.collect()

        # Checkpoint whenever the validation loss improves.
        sb = validation(feature, deconv, val_loader)
        if sb < min_loss:
            filename = ('%s/deconv.pth' % (check_dir))
            torch.save(deconv.state_dict(), filename)
            filename = ('%s/feature.pth' % (check_dir))
            torch.save(feature.state_dict(), filename)
            print('save: (epoch: %d)' % it)
            min_loss = sb
# Fragment: experiment setup — tensorboard writer, output directories,
# model construction and checkpoint restore.
# NOTE(review): this chunk is truncated mid DataLoader(...) call; the
# continuation is not visible here.
writer = SummaryWriter('./runs/' + datetime.now().strftime('%B%d %H:%M:%S'))
# ./runs is created only after the writer already targeted it —
# presumably SummaryWriter creates the directory itself; verify.
if not os.path.exists('./runs'):
    os.mkdir('./runs')
if not os.path.exists(check_root):
    os.mkdir(check_root)
if not os.path.exists(val_output_root):
    os.mkdir(val_output_root)
# models (CPU-only here: the .cuda() calls are commented out)
feature = Feature_FCN()
# feature.cuda()
deconv = Deconv()
# deconv.cuda()
# NOTE(review): absolute, machine-specific checkpoint paths — these
# should come from configuration rather than being hard-coded.
feature.load_state_dict(
    torch.load(
        '/home/wbm/桌面/未命名文件夹/RFCN-master/fcn_parameters/feature-epoch-0-step-360.pth'
    ))
deconv.load_state_dict(
    torch.load(
        '/home/wbm/桌面/未命名文件夹/RFCN-master/fcn_parameters/deconv-epoch-0-step-360.pth'
    ))
# Training loader (call truncated at the end of this chunk).
train_loader = torch.utils.data.DataLoader(MyData(train_root,
                                                  transform=True,
                                                  ptag=ptag),
                                           batch_size=bsize,
                                           shuffle=True,
                                           num_workers=4,
if not os.path.exists(check_dir): os.mkdir(check_dir) # models if 'vgg' == opt.i: feature = Vgg16(pretrained=True) elif 'resnet' == opt.i: feature = resnet50(pretrained=True) elif 'densenet' == opt.i: feature = densenet121(pretrained=True) feature.cuda() if pretrained_feature_file: feature.load_state_dict(torch.load(pretrained_feature_file)) deconv = Deconv(opt.i) deconv.cuda() if resume_ep >= 0: feature_param_file = glob.glob('%s/feature-epoch-%d*.pth'%(check_dir, resume_ep)) deconv_param_file = glob.glob('%s/deconv-epoch-%d*.pth'%(check_dir, resume_ep)) feature.load_state_dict(torch.load(feature_param_file[0])) deconv.load_state_dict(torch.load(deconv_param_file[0])) train_loader = torch.utils.data.DataLoader( MyBoxPixData(train_dir, transform=True, crop=True, hflip=True, vflip=False, source=opt.q), batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True) criterion = CrossEntropyLoss2d(weight=torch.FloatTensor(label_weight)) criterion.cuda()
# Fragment: experiment setup — tensorboard writer, output directories,
# models and data loaders. Truncated mid DataLoader(...) call for the
# validation loader.
writer = SummaryWriter('./runs/' + datetime.now().strftime('%B%d %H:%M:%S'))
# ./runs is created only after the writer already targeted it —
# presumably SummaryWriter creates the directory itself; verify.
if not os.path.exists('./runs'):
    os.mkdir('./runs')
if not os.path.exists(check_root):
    os.mkdir(check_root)
if not os.path.exists(val_output_root):
    os.mkdir(val_output_root)
# models
feature = Feature_FCN()
feature.cuda()
deconv = Deconv()
deconv.cuda()
train_loader = torch.utils.data.DataLoader(MyData(train_root,
                                                  transform=True,
                                                  ptag=ptag),
                                           batch_size=bsize,
                                           shuffle=True,
                                           num_workers=4,
                                           pin_memory=True)
# Validation loader (call truncated at the end of this chunk).
val_loader = torch.utils.data.DataLoader(MyTestData(val_root,
                                                    transform=True,
                                                    ptag=ptag),
                                         batch_size=1,
                                         shuffle=True,
# Fragment: inference setup and the start of the prediction loop.
# Starts mid-function and is truncated inside the loop body.
deconv_param_file = opt.deconv
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
# models — opt.i names the backbone exactly ('vgg' / 'resnet' / 'densenet')
if 'vgg' == opt.i:
    feature = Vgg16()
elif 'resnet' == opt.i:
    feature = resnet50()
elif 'densenet' == opt.i:
    feature = densenet121()
# NOTE(review): 'feature' is unbound if opt.i matches none of the above.
feature.cuda()
feature.load_state_dict(torch.load(feature_param_file))
deconv = Deconv(opt.i)
deconv.cuda()
deconv.load_state_dict(torch.load(deconv_param_file))
loader = torch.utils.data.DataLoader(MyTestData(test_dir, transform=True),
                                     batch_size=1,
                                     shuffle=True,
                                     num_workers=4,
                                     pin_memory=True)
it = 1
for ib, (data, img_name, img_size) in enumerate(loader):
    # NOTE(review): Python-2 print statement — this file cannot run on
    # Python 3 as written.
    print img_name
    it += 1
    inputs = Variable(data).cuda()