Пример #1
0
def main():
    """Instantiate ENet, initialise its weights, and run the training loop."""
    # Build the network and initialise weights (normal init for conv layers,
    # plus the SRM-style fixed filter bank).
    net = ENet()
    networks.print_network(net)
    networks.init_weights(net, init_type='normal')
    net.init_convFilter(trainable=srm_trainable)

    if args.cuda:
        net.cuda()

    # Save an epoch-0 checkpoint before any training has happened.
    torch.save({'epoch': 0, 'state_dict': net.state_dict()},
               '{}/checkpoint_{}.pth'.format(LOG_DIR, 0))

    # Loss functions: classification loss plus a summed-L1 criterion.
    ce_loss = nn.CrossEntropyLoss().cuda()
    l1_loss = nn.L1Loss(reduction='sum').cuda()

    if not srm_trainable:
        # Keep the fixed filter bank ('convFilter1') out of the optimiser.
        trainable_params = [p for n, p in net.named_parameters()
                            if 'convFilter1' not in n]
        optimiser = create_optimizer(trainable_params, args.lr)
    else:
        optimiser = create_optimizer(net.parameters(), args.lr)

    for epoch in range(1, args.epochs + 1):
        # Adjust the learning rate, then train for one epoch.
        adjust_learning_rate(optimiser, epoch)
        train(train_loader, net, optimiser, ce_loss, l1_loss, epoch)
Пример #2
0
def main():
    """Iteratively fine-tune AutoNet while mining new negative samples.

    Loads a pretrained epoch-60 checkpoint, then alternates between
    constructing negative samples with the current model and retraining
    on the rebalanced dataset, finishing with model selection.
    """
    # instantiate model and initialize weights
    model = AutoNet(input_nc=args.input_nc, ndf=6, nonlinear='relu')
    networks.print_network(model)

    if args.cuda:
        model.cuda()

    # Resume from the epoch-60 checkpoint and shrink the lr for fine-tuning.
    print('using pretrained model')
    checkpoint = torch.load(project_root + args.log_dir + '/checkpoint_60.pth')
    model.load_state_dict(checkpoint['state_dict'])
    args.lr = args.lr * 0.001
    itr_start = 1

    # Baseline score on natural data (via test_nature); twice this value
    # (at least 2) becomes the model-selection threshold used at the end.
    nature = test_nature(val_loader, model)
    print(nature)
    threshold = max(2, nature * 2)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = create_optimizer(model, args.lr)

    new_samples = []
    pn_num = []  # per-iteration result of construct_negative_samples
    nature_error_itr_global = []
    for itr in np.arange(itr_start, 5):
        args.dataroot = dst_dir

        # Mine new negative samples with the current model.
        tmp = construct_negative_samples(model, new_samples, itr)
        pn_num.append(tmp)

        # Rebuild the loader over the augmented dataset with a
        # class-balanced sampler.
        train_loader = myDataset.DataLoaderHalf(
            myDataset.MyDataset(
                args,
                transforms.Compose([
                    transforms.Resize((256, 256), Image.BICUBIC),
                    transforms.ToTensor(), normalize
                ])),
            batch_size=args.batch_size,
            shuffle=True,
            half_constraint=True,
            sampler_type='RandomBalancedSampler',
            **kwargs)
        print('The number of train data:{}'.format(len(train_loader.dataset)))
        args.epochs = 15  # after the new negative samples construct, the learning rate is constant, and epoch = 15

        train_multi(train_loader, optimizer, model, criterion, val_loader, itr,
                    nature_error_itr_global)

    print(pn_num)
    # Select the final model against the natural-error threshold above.
    model_selection(nature_error_itr_global, threshold)
Пример #3
0
    def __init__(self, p):
        """Inference-only model: build the generator and load its weights.

        Args:
            p: options object; must have isTrain == False, plus the
               generator-definition fields (input_nc, ngf, norm, ...).
        """
        super(TestModel, self).__init__(p)
        # This model never trains.
        assert (not p.isTrain)
        # BUG FIX: the original referenced `opt.*` here although the
        # options object is the `p` parameter (used for isTrain and
        # which_epoch); use `p` consistently so the method does not depend
        # on an unrelated global named `opt`.
        self.netG = networks.define_G(p.input_nc, p.output_nc, p.ngf,
                                      p.which_model_netG, p.norm,
                                      not p.no_dropout, p.init_type,
                                      self.gpu_ids)
        which_epoch = p.which_epoch
        self.load_model(self.netG, 'G', which_epoch)
        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        print('-----------------------------------------------')
Пример #4
0
    def initialize(self, opt, train_mode=True):
        """Set up input tensors, networks, losses and optimizers.

        Model transforms from A --> B and uses Adv as the adversarial
        example. In train mode it additionally builds the discriminator,
        the GAN/content losses, and one Adam optimizer per network.
        """
        BaseModel.initialize(self, opt)
        self.train_mode = train_mode

        # Pre-allocated input tensors for the two image domains.
        self.input_B = self.Tensor(opt['batchSize'], opt['input_nc'],
                                   opt['B_height'], opt['B_width'])
        self.input_A = self.Tensor(opt['batchSize'], opt['output_nc'],
                                   opt['A_height'], opt['A_width'])

        # The generator is always needed (train and test).
        self.netG = networks.define_G(opt['input_nc'], opt['output_nc'],
                                      opt['ngf'], opt['norm'], self.gpu_ids)

        # Everything below exists only for training.
        if self.train_mode:
            use_sigmoid = opt['no_lsgan']
            self.netD = networks.define_D(opt['input_nc'] + opt['output_nc'],
                                          opt['ndf'], opt['which_model_netD'],
                                          opt['n_layers_D'], use_sigmoid,
                                          self.gpu_ids)

            # self.fake_AB_pool = ImagePool(opt['pool_size'])
            self.old_lr = opt['lr']

            # Loss functions: adversarial loss + pixel-wise MSE content loss.
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt['no_lsgan'],
                                                 tensor=self.Tensor)
            self.content_loss = torch.nn.MSELoss()

            # One Adam optimizer per network, identical hyper-parameters.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')
Пример #5
0
    def __init__(self, opt, ignore_noise=False, testing=False):
        """Build the cycle-GAN style networks, optimizers and losses.

        Args:
            opt: options namespace (lr, nlatent, channel counts, ...).
            ignore_noise: stored on the instance; its effect is decided by
                code outside this excerpt.
            testing: if True, skip writing network summaries to nets.txt.
        """
        self.ignore_noise = ignore_noise

        ##### model options
        self.old_lr = opt.lr
        # Sigmoid discriminator output is enabled only when NOT using the
        # least-squares GAN loss.
        opt.use_sigmoid = opt.no_lsgan

        self.opt = opt

        ##### define all networks we need here
        # A->B generator is stochastic (conditioned on an nlatent code)...
        self.netG_A_B = networks.define_stochastic_G(nlatent=opt.nlatent, input_nc=opt.input_nc,
                                                     output_nc=opt.output_nc, ngf=opt.ngf,
                                                     which_model_netG=opt.which_model_netG,
                                                     norm=opt.norm, use_dropout=opt.use_dropout,
                                                     gpu_ids=opt.gpu_ids)

        # ...while the B->A generator is deterministic.
        self.netG_B_A = networks.define_G(input_nc=opt.output_nc,
                                          output_nc=opt.input_nc, ngf=opt.ngf,
                                          which_model_netG=opt.which_model_netG,
                                          norm=opt.norm, use_dropout=opt.use_dropout,
                                          gpu_ids=opt.gpu_ids)

        # Note the discriminator for domain A uses a hard-coded ndf of 32,
        # unlike netD_B which uses opt.ndf.
        self.netD_A = networks.define_D_A(input_nc=opt.input_nc,
                                          ndf=32, which_model_netD=opt.which_model_netD,
                                          norm=opt.norm, use_sigmoid=opt.use_sigmoid, gpu_ids=opt.gpu_ids)

        self.netD_B = networks.define_D_B(input_nc=opt.output_nc,
                                          ndf=opt.ndf, which_model_netD=opt.which_model_netD,
                                          norm=opt.norm, use_sigmoid=opt.use_sigmoid, gpu_ids=opt.gpu_ids)

        ##### define all optimizers here
        # One optimizer over both generators...
        self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A_B.parameters(),
                                                            self.netG_B_A.parameters()),
                                            lr=opt.lr, betas=(opt.beta1, 0.999))
        # ...and one over both discriminators, at one fifth the generator lr.
        self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(),
                                                            self.netD_B.parameters()),
                                            lr=opt.lr/5., betas=(opt.beta1, 0.999))

        self.criterionGAN = functools.partial(criterion_GAN, use_sigmoid=opt.use_sigmoid)
        self.criterionCycle = F.l1_loss

        if not testing:
            # Dump human-readable summaries of all four networks.
            with open("%s/nets.txt" % opt.expr_dir, 'w') as nets_f:
                networks.print_network(self.netG_A_B, nets_f)
                networks.print_network(self.netG_B_A, nets_f)
                networks.print_network(self.netD_A, nets_f)
                networks.print_network(self.netD_B, nets_f)
    def initialize(self, args):
        """Build the GAN + Deeplab networks, losses and optimizers.

        Args:
            args: dict of options (batch size, crop sizes, learning rates,
                device ids, resume flag, pretrained-weight paths, ...).
        """
        BaseModel.initialize(self, args)
        self.nb = args['batch_size']
        sizeH, sizeW = args['fineSizeH'], args['fineSizeW']

        # Pre-allocated input tensors for the A/B images and A's label map.
        self.input_A = self.Tensor(self.nb, args['input_nc'], sizeH, sizeW)
        self.input_B = self.Tensor(self.nb, args['input_nc'], sizeH, sizeW)
        self.input_A_label = torch.cuda.LongTensor(self.nb, args['input_nc'],
                                                   sizeH, sizeW)

        self.netG = networks.netG().cuda(device_id=args['device_ids'][0])
        self.netD = define_D(
            args['net_D']).cuda(device_id=args['device_ids'][0])

        # Deeplab backbone split into three sequential stages.
        self.deeplabPart1 = networks.DeeplabPool1().cuda(
            device_id=args['device_ids'][0])
        self.deeplabPart2 = networks.DeeplabPool12Pool5().cuda(
            device_id=args['device_ids'][0])
        self.deeplabPart3 = networks.DeeplabPool52Fc8_interp().cuda(
            device_id=args['device_ids'][0])

        # Loss functions: summed cross-entropy + adversarial loss.
        self.criterionCE = torch.nn.CrossEntropyLoss(size_average=False)
        self.criterionAdv = networks.Advloss(use_lsgan=args['use_lsgan'],
                                             tensor=self.Tensor)

        if not args['resume']:
            # Fresh run: random-init the GAN nets, load pretrained
            # weights into the three deeplab stages.
            self.netG.apply(weights_init)
            self.netD.apply(weights_init)
            pretrained_dict = torch.load(args['weigths_pool'] + '/' +
                                         args['pretrain_model'])
            self.deeplabPart1.weights_init(pretrained_dict=pretrained_dict)
            self.deeplabPart2.weights_init(pretrained_dict=pretrained_dict)
            self.deeplabPart3.weights_init(pretrained_dict=pretrained_dict)

        # Adam optimizers for the GAN pair.
        self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=args['lr_gan'],
                                            betas=(args['beta1'], 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=args['lr_gan'],
                                            betas=(args['beta1'], 0.999))

        # The four fc8 heads train at boosted learning rates; every other
        # deeplab parameter goes into `base_params` at the base rate.
        fc8_heads = (self.deeplabPart3.fc8_1, self.deeplabPart3.fc8_2,
                     self.deeplabPart3.fc8_3, self.deeplabPart3.fc8_4)
        ignored_params = set()
        for head in fc8_heads:
            ignored_params.update(map(id, head.parameters()))
        # BUG FIX: the original concatenated `filter` objects with `+`,
        # which raises TypeError on Python 3 (filter returns an iterator
        # there, not a list); build concrete lists instead.
        base_params = [p for p in self.deeplabPart3.parameters()
                       if id(p) not in ignored_params]
        base_params += list(self.deeplabPart1.parameters())
        base_params += list(self.deeplabPart2.parameters())

        def _deeplab_param_groups():
            """SGD param groups: base params at 1x lr, fc8 weights at 10x,
            fc8 biases at 20x. Fresh dicts each call so the two SGD
            optimizers below do not share mutable group state."""
            groups = [{'params': base_params}]
            for key, scale in (('weight', 10), ('bias', 20)):
                for head in fc8_heads:
                    groups.append({
                        'params': get_parameters(head, key),
                        'lr': args['l_rate'] * scale
                    })
            return groups

        self.optimizer_P = torch.optim.SGD(_deeplab_param_groups(),
                                           lr=args['l_rate'],
                                           momentum=0.9,
                                           weight_decay=5e-4)

        #netG_params = filter(lambda p: True, self.netG.parameters())
        self.optimizer_R = torch.optim.SGD(_deeplab_param_groups(),
                                           lr=args['l_rate'],
                                           momentum=0.9,
                                           weight_decay=5e-4)

        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        networks.print_network(self.netD)
        networks.print_network(self.deeplabPart1)
        networks.print_network(self.deeplabPart2)
        networks.print_network(self.deeplabPart3)
        print('-----------------------------------------------')
Пример #7
0
# Build the generator and the conditional discriminator (the discriminator
# sees input and output images concatenated along the channel axis), both
# with 'batch' normalisation on GPU 0.
netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])

netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])

print('loading done')

# Losses: adversarial, L1 reconstruction, and MSE.
criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer: one Adam per network, identical hyper-parameters.
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD)
print('-----------------------------------------------')

# Reusable 256x256 input/target batch buffers.
real_a = torch.FloatTensor(opt.batchSize, opt.input_nc, 256, 256)
real_b = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)

# Move networks, losses and buffers to the GPU when requested.
if opt.cuda:
    netD = netD.cuda()
    netG = netG.cuda()
    criterionGAN = criterionGAN.cuda()
    criterionL1 = criterionL1.cuda()
    criterionMSE = criterionMSE.cuda()
    real_a = real_a.cuda()
    real_b = real_b.cuda()
Пример #8
0
    def initialize(self, args):
        """Build the two-GAN + Deeplab networks, losses and optimizers.

        Args:
            args: dict of options (batch size, crop sizes, learning rates,
                label count, device ids, resume flag, adversarial-training
                intervals, ...).
        """
        BaseModel.initialize(self, args)
        self.if_adv_train = args['if_adv_train']
        self.Iter = 0
        self.interval_g2 = args['interval_g2']
        self.interval_d2 = args['interval_d2']
        self.nb = args['batch_size']
        sizeH, sizeW = args['fineSizeH'], args['fineSizeW']

        # Pre-allocated input tensors: images, label map, one-hot labels.
        self.tImageA = self.Tensor(self.nb, args['input_nc'], sizeH, sizeW)
        self.tImageB = self.Tensor(self.nb, args['input_nc'], sizeH, sizeW)
        self.tLabelA = torch.cuda.LongTensor(self.nb, 1, sizeH, sizeW)
        self.tOnehotLabelA = self.Tensor(self.nb, args['label_nums'], sizeH,
                                         sizeW)
        self.loss_G = Variable()
        self.loss_D = Variable()

        self.netG1 = networks.netG().cuda(device_id=args['device_ids'][0])
        self.netD1 = define_D(args['net_d1'],
                              512).cuda(device_id=args['device_ids'][0])
        self.netD2 = define_D(
            args['net_d2'],
            args['label_nums']).cuda(device_id=args['device_ids'][0])

        # Deeplab backbone split into three sequential stages; the last
        # stage produces `label_nums` output channels.
        self.deeplabPart1 = networks.DeeplabPool1().cuda(
            device_id=args['device_ids'][0])
        self.deeplabPart2 = networks.DeeplabPool12Pool5().cuda(
            device_id=args['device_ids'][0])
        self.deeplabPart3 = networks.DeeplabPool52Fc8_interp(
            output_nc=args['label_nums']).cuda(device_id=args['device_ids'][0])

        # Loss functions: summed cross-entropy + adversarial loss.
        self.criterionCE = torch.nn.CrossEntropyLoss(size_average=False)
        self.criterionAdv = networks.Advloss(use_lsgan=args['use_lsgan'],
                                             tensor=self.Tensor)

        if not args['resume']:
            # Fresh run: random-init the GAN nets, load pretrained
            # weights into the three deeplab stages.
            self.netG1.apply(weights_init)
            self.netD1.apply(weights_init)
            self.netD2.apply(weights_init)
            pretrained_dict = torch.load(args['weigths_pool'] + '/' +
                                         args['pretrain_model'])
            self.deeplabPart1.weights_init(pretrained_dict=pretrained_dict)
            self.deeplabPart2.weights_init(pretrained_dict=pretrained_dict)
            self.deeplabPart3.weights_init(pretrained_dict=pretrained_dict)

        # Adam optimizers for the first GAN pair.
        self.optimizer_G1 = torch.optim.Adam(self.netG1.parameters(),
                                             lr=args['lr_g1'],
                                             betas=(args['beta1'], 0.999))
        self.optimizer_D1 = torch.optim.Adam(self.netD1.parameters(),
                                             lr=args['lr_g1'],
                                             betas=(args['beta1'], 0.999))

        # Second "generator" is the whole deeplab stack; D2 is its critic.
        self.optimizer_G2 = torch.optim.Adam(
            [{
                'params': self.deeplabPart1.parameters()
            }, {
                'params': self.deeplabPart2.parameters()
            }, {
                'params': self.deeplabPart3.parameters()
            }],
            lr=args['lr_g2'],
            betas=(args['beta1'], 0.999))
        self.optimizer_D2 = torch.optim.Adam(self.netD2.parameters(),
                                             lr=args['lr_g2'],
                                             betas=(args['beta1'], 0.999))

        # The four fc8 heads train at boosted learning rates; every other
        # deeplab parameter goes into `base_params` at the base rate.
        fc8_heads = (self.deeplabPart3.fc8_1, self.deeplabPart3.fc8_2,
                     self.deeplabPart3.fc8_3, self.deeplabPart3.fc8_4)
        ignored_params = set()
        for head in fc8_heads:
            ignored_params.update(map(id, head.parameters()))
        # BUG FIX: the original concatenated `filter` objects with `+`,
        # which raises TypeError on Python 3 (filter returns an iterator
        # there, not a list); build concrete lists instead.
        base_params = [p for p in self.deeplabPart3.parameters()
                       if id(p) not in ignored_params]
        base_params += list(self.deeplabPart1.parameters())
        base_params += list(self.deeplabPart2.parameters())

        # fc8 weights at 10x lr, fc8 biases at 20x lr.
        # NOTE(review): this group list is shared between optimizer_P and
        # optimizer_R (as in the original), so the two optimizers share
        # group dicts — confirm that sharing is intended.
        deeplab_params = [{'params': base_params}]
        for key, scale in (('weight', 10), ('bias', 20)):
            for head in fc8_heads:
                deeplab_params.append({
                    'params': get_parameters(head, key),
                    'lr': args['l_rate'] * scale
                })

        self.optimizer_P = torch.optim.SGD(deeplab_params,
                                           lr=args['l_rate'],
                                           momentum=0.9,
                                           weight_decay=5e-4)

        self.optimizer_R = torch.optim.SGD(deeplab_params,
                                           lr=args['l_rate'],
                                           momentum=0.9,
                                           weight_decay=5e-4)

        print('---------- Networks initialized -------------')
        networks.print_network(self.netG1)
        networks.print_network(self.netD1)
        networks.print_network(self.netD2)
        networks.print_network(self.deeplabPart1)
        networks.print_network(self.deeplabPart2)
        networks.print_network(self.deeplabPart3)
        print('-----------------------------------------------')
Пример #9
0
#netD_Edge   = define_D_Edge(hogehoge)


# Losses: adversarial, L1 reconstruction, and MSE.
criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()


# setup optimizer: one Adadelta per network (generator plus the global,
# local, and edge discriminators), all sharing the same learning rate.
optimizerG = optim.Adadelta(netG.parameters(), lr=opt.lr)
optimizerD_Global = optim.Adadelta(netD_Global.parameters(), lr=opt.lr)
optimizerD_Local = optim.Adadelta(netD_Local.parameters(), lr=opt.lr)
optimizerD_Edge = optim.Adadelta(netD_Edge.parameters(), lr=opt.lr)

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD_Global)
print_network(netD_Local)
print_network(netD_Edge)
print('-----------------------------------------------')

# Reusable 256x256 image batch buffers.
real_a0_image = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)
real_a_image = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)

# Shape used for the basic tensors.
#tensor_shape = torch.tensor(opt.batchSize,3,image_size,image_size)
tensor_shape = torch.empty([opt.batchSize,3,image_size,image_size])
# NOTE(review): this cuda block appears truncated in this excerpt — netG
# and netD_Edge are never moved to the GPU here; confirm the full source.
if opt.cuda:
  #netD = netD.cuda()
  netD_Global = netD_Global.cuda()
  netD_Local  = netD_Local.cuda()
Пример #10
0
def main(name_exp, segloss=False, cuda=True, finetune=False):
    # Training settings
    parser = argparse.ArgumentParser(description='pix2pix-PyTorch-implementation')
    parser.add_argument('--batchSize', type=int, default=8, help='training batch size')
    parser.add_argument('--testBatchSize', type=int, default=8, help='testing batch size')
    parser.add_argument('--nEpochs', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--input_nc', type=int, default=3, help='input image channels')
    parser.add_argument('--output_nc', type=int, default=3, help='output image channels')
    parser.add_argument('--ngf', type=int, default=64, help='generator filt+ers in first conv layer')
    parser.add_argument('--ndf', type=int, default=64, help='discriminator filters in first conv layer')
    parser.add_argument('--lr', type=float, default=0.0002, help='Learning Rate. Default=0.0002')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
    parser.add_argument('--threads', type=int, default=8, help='number of threads for data loader to use')
    parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
    parser.add_argument('--lamb', type=int, default=10, help='weight on L1 term in objective')
    opt = parser.parse_args()

    cudnn.benchmark = True



    def val():
        """Validate the rolling 'current' generator snapshot.

        Loads netG_model_current.pth, runs the validation set through the
        generator and SEG_NET, prints PSNR/MSE/SSIM summaries, pushes
        qualitative results to the visualizer, and returns the mean MSE
        between SEG_NET's prediction and the ground truth.
        """
        # Reload the checkpoint written by checkpoint_current().
        net_current = "path_exp/checkpoint/DFS/{}/netG_model_current.pth".format(name_exp)
        netVal = torch.load(net_current)
        netVal.eval()
        SEG_NET.eval()
        features.eval()
        with torch.no_grad():
            total_mse = 0
            total_mse2 = 0
            avg_psnr_depth = 0
            avg_psnr_dehaze = 0
            avg_ssim_depth = 0
            avg_ssim_dehaze = 0
            for batch in validation_data_loader:
                # batch = (hazy input, clean target, segmentation ground
                # truth) per the visualizer keys below.
                input, target, depth = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
                if cuda == True:
                    input = input.cuda()
                    target = target.cuda()
                    depth = depth.cuda()

                # Dehaze, then segment the dehazed image.
                dehaze = netVal(input)
                prediction = SEG_NET(dehaze)

                avg_ssim_dehaze += pytorch_ssim.ssim(dehaze, target).item()

                # Segmentation metrics (MSE, PSNR from MSE).
                mse = criterionMSE(prediction, depth)
                total_mse += mse.item()
                avg_psnr_depth += 10 * log10(1 / mse.item())

                # Dehazing metrics.
                mse2 = criterionMSE(dehaze, target)
                total_mse2 += mse2.item()
                avg_psnr_dehaze += 10 * log10(1 / mse2.item())

                avg_ssim_depth += pytorch_ssim.ssim(prediction, depth).item()

                # Push the current qualitative results to the visualizer.
                visual_ret_val = OrderedDict()

                visual_ret_val['Haze'] = input
                visual_ret_val['Seg estimate'] = prediction
                visual_ret_val['Dehaze '] = dehaze
                visual_ret_val['GT dehaze'] = target
                visual_ret_val['GT Seg '] = depth

                visualizer.display_current_results(visual_ret_val, epoch, True)

            print("===> Validation")
            #f.write("===> Testing: \r\n")

            print("===> PSNR seg: {:.4f} ".format(avg_psnr_depth / len(validation_data_loader)))
            #f.write("===> PSNR depth: {:.4f} \r\n".format(avg_psnr_depth / len(validation_data_loader)))

            print("===> Mse seg: {:.4f} ".format(total_mse / len(validation_data_loader)))
            #f.write("===> Mse depth: {:.4f} \r\n".format(total_mse / len(validation_data_loader)))

            print("===> SSIM seg: {:.4f} ".format(avg_ssim_depth / len(validation_data_loader)))
            #f.write("===> SSIM depth: {:.4f} \r\n".format(avg_ssim_depth / len(validation_data_loader)))

            return total_mse / len(validation_data_loader)






    def testing():
        """Evaluate the 'best' checkpoint on the test set and print metrics.

        Loads netG_model_best.pth, runs the test set through generator and
        SEG_NET, and prints PSNR/MSE/SSIM for both the segmentation and
        the dehazing outputs. Returns nothing.
        """
        path = "path_exp/checkpoint/DFS/{}/netG_model_best.pth".format(name_exp)
        net = torch.load(path)
        net.eval()
        SEG_NET.eval()
        features.eval()
        with torch.no_grad():
            total_mse = 0
            total_mse2 = 0
            avg_psnr_depth = 0
            avg_psnr_dehaze = 0
            avg_ssim_depth = 0
            avg_ssim_dehaze = 0
            for batch in testing_data_loader:
                # batch = (hazy input, clean target, segmentation ground truth)
                input, target, depth = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
                if cuda == True:
                    input = input.cuda()
                    target = target.cuda()
                    depth = depth.cuda()

                # Dehaze, then segment the dehazed image.
                dehaze = net(input)
                prediction = SEG_NET(dehaze)

                avg_ssim_dehaze += pytorch_ssim.ssim(dehaze, target).item()

                # Segmentation metrics (MSE, PSNR from MSE).
                mse = criterionMSE(prediction, depth)
                total_mse += mse.item()
                avg_psnr_depth += 10 * log10(1 / mse.item())

                # Dehazing metrics.
                mse2 = criterionMSE(dehaze, target)
                total_mse2 += mse2.item()
                avg_psnr_dehaze += 10 * log10(1 / mse2.item())

                avg_ssim_depth += pytorch_ssim.ssim(prediction, depth).item()

            # Averages over the whole test loader.
            print("===> Testing")
            print("===> PSNR seg: {:.4f} ".format(avg_psnr_depth / len(testing_data_loader)))
            print("===> Mse seg: {:.4f} ".format(total_mse / len(testing_data_loader)))
            print("===> SSIM seg: {:.4f} ".format(avg_ssim_depth / len(testing_data_loader)))
            print("===> PSNR dehaze: {:.4f} ".format(avg_psnr_dehaze / len(testing_data_loader)))
            print("===> SSIM dehaze: {:.4f} ".format(avg_ssim_dehaze / len(testing_data_loader)))





    def checkpoint():
        """Save the current netG/netD as the experiment's 'best' models."""
        exp_dir = os.path.join("path_exp/checkpoint/DFS", name_exp)
        if not os.path.exists("checkpoint"):
            os.mkdir("checkpoint")
        if not os.path.exists(exp_dir):
            os.mkdir(exp_dir)
        torch.save(netG, "path_exp/checkpoint/DFS/{}/netG_model_best.pth".format(name_exp))
        torch.save(netD, "path_exp/checkpoint/DFS/{}/netD_model_best.pth".format(name_exp))


    def checkpoint_current():
        """Overwrite the rolling 'current' generator snapshot."""
        exp_dir = os.path.join("path_exp/checkpoint/DFS", name_exp)
        if not os.path.exists(exp_dir):
            os.mkdir(exp_dir)
        torch.save(netG, "path_exp/checkpoint/DFS/{}/netG_model_current.pth".format(name_exp))

    def checkpoint_seg():
        """Persist the (possibly fine-tuned) segmentation network."""
        exp_dir = os.path.join("path_exp/checkpoint/DFS", name_exp)
        if not os.path.exists(exp_dir):
            os.mkdir(exp_dir)
        torch.save(SEG_NET, "path_exp/checkpoint/DFS/{}/seg_net.pth".format(name_exp))



    torch.manual_seed(opt.seed)
    if cuda==True:
        torch.cuda.manual_seed(opt.seed)

    # ----- experiment banner -----
    print(" ")
    print(name_exp)
    print(" ")

    print('===> Loading datasets')
    train_set = get_training_set('path_exp/cityscape/HAZE')
    val_set = get_val_set('path_exp/cityscape/HAZE')
    test_set = get_test_set('path_exp/cityscape/HAZE')

    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    validation_data_loader = DataLoader(dataset=val_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

    print('===> Building model')
    # Conditional GAN: G maps the degraded input to a restored image,
    # D judges concatenated (input, output) pairs.
    netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])
    netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])

    criterionGAN = GANLoss()
    criterionL1 = nn.L1Loss()
    criterionMSE = nn.MSELoss()

    # setup optimizer
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

    print('---------- Networks initialized -------------')
    print_network(netG)
    print_network(netD)
    print('-----------------------------------------------')

    # Persistent input buffers, reused every iteration:
    # a = degraded input, b = target image, c = target segmentation map.
    real_a = torch.FloatTensor(opt.batchSize, opt.input_nc, 256, 256)
    real_b = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)
    real_c = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)

    if cuda:
        netD = netD.cuda()
        netG = netG.cuda()
        criterionGAN = criterionGAN.cuda()
        criterionL1 = criterionL1.cuda()
        criterionMSE = criterionMSE.cuda()
        real_a = real_a.cuda()
        real_b = real_b.cuda()
        real_c = real_c.cuda()

    real_a = Variable(real_a)
    real_b = Variable(real_b)
    real_c = Variable(real_c)

    # Pre-trained segmentation network used as an auxiliary supervision signal.
    # NOTE(review): torch.load of a whole module object requires the original
    # class definitions to be importable at load time.
    SEG_NET = torch.load("path_exp/SEG_NET.pth")

    # SEG_NET is fine-tuned (late in training) with a 10x smaller learning rate.
    optimizerSeg = optim.Adam(SEG_NET.parameters(), lr=opt.lr/10, betas=(opt.beta1, 0.999))

    # VGG16 feature extractor for the perceptual (content) loss.
    features = Vgg16()

    if cuda:
        SEG_NET.cuda()
        features.cuda()

    bon = 100000000  # best (lowest) validation MSE seen so far
    for epoch in range(opt.nEpochs):
        features.eval()

        # Only fine-tune the segmentation net in the late phase of training.
        if finetune and epoch > 50:
            SEG_NET.train()
        else:
            SEG_NET.eval()

        loss_epoch_gen = 0
        loss_epoch_dis = 0
        total_segloss = 0
        loss_seg = 0
        i = 0
        for iteration, batch in enumerate(training_data_loader, 1):

            netG.train()
            i = i + 1

            # forward: copy the CPU batch into the persistent (possibly GPU) buffers
            real_a_cpu, real_b_cpu, real_c_cpu = batch[0], batch[1], batch[2]

            with torch.no_grad():
                real_a = real_a.resize_(real_a_cpu.size()).copy_(real_a_cpu)

            with torch.no_grad():
                real_b = real_b.resize_(real_b_cpu.size()).copy_(real_b_cpu)

            with torch.no_grad():
                real_c = real_c.resize_(real_c_cpu.size()).copy_(real_c_cpu)

            fake_b = netG(real_a)

            ############################
            # (1) Update D network: maximize log(D(x,y)) + log(1 - D(x,G(x)))
            ###########################

            optimizerD.zero_grad()

            # train with fake (detach so the D update sends no gradient into G)
            fake_ab = torch.cat((real_a, fake_b), 1)
            pred_fake = netD.forward(fake_ab.detach())
            loss_d_fake = criterionGAN(pred_fake, False)

            # train with real
            real_ab = torch.cat((real_a, real_b), 1)
            pred_real = netD.forward(real_ab)
            loss_d_real = criterionGAN(pred_real, True)

            # Combined loss
            loss_d = (loss_d_fake + loss_d_real) * 0.5

            loss_d.backward()

            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(x,G(x))) + L1(y,G(x))
            ##########################
            optimizerG.zero_grad()
            # First, G(A) should fake the discriminator
            fake_ab = torch.cat((real_a, fake_b), 1)
            pred_fake = netD.forward(fake_ab)
            loss_g_gan = criterionGAN(pred_fake, True)

            # Second, G(A) = B
            loss_g_l1 = criterionL1(fake_b, real_b) * opt.lamb

            # Perceptual (content) loss on an intermediate VGG feature map.
            features_y = features(fake_b)
            features_x = features(real_b)
            loss_content = criterionMSE(features_y[1], features_x[1]) * 10

            if segloss:
                # Auxiliary supervision: the restored image, when segmented,
                # should match the ground-truth segmentation.
                fake_seg = SEG_NET(fake_b)
                loss_seg = criterionMSE(fake_seg, real_c) * 10

                total_segloss += loss_seg.item()

                # (removed dead code: a VGG-feature MSE between fake_seg and
                # real_c was computed here into `ssim_seg` but never used)

                loss_g = loss_g_gan + loss_g_l1 + loss_content + loss_seg
            else:
                loss_g = loss_g_gan + loss_g_l1 + loss_content

            loss_epoch_gen += loss_g.item()
            loss_epoch_dis += loss_d.item()

            if finetune and epoch > 50 and segloss:
                # Fine-tune phase: update G on the full objective, then give
                # SEG_NET its own step driven purely by the segmentation loss.
                # retain_graph=True because loss_seg shares the graph with loss_g.
                loss_g.backward(retain_graph=True)
                optimizerG.step()

                # BUGFIX: zero_grad() must run BEFORE loss_seg.backward().
                # The original zeroed AFTER backward, erasing the fresh
                # gradients so optimizerSeg.step() never trained SEG_NET.
                optimizerSeg.zero_grad()
                loss_seg.backward()
                optimizerSeg.step()
            else:
                # (also fixes the original crash when finetune was on but
                # segloss off: loss_seg was the int 0, which has no .backward())
                loss_g.backward()
                optimizerG.step()

            # Per-iteration loss snapshot for the visualizer.
            errors_ret = OrderedDict()
            errors_ret['Total_G'] = float(loss_g)
            errors_ret['Content'] = float(loss_content)
            errors_ret['GAN'] = float(loss_g_gan)
            errors_ret['L1'] = float(loss_g_l1)
            errors_ret['D'] = float(loss_d)

            # print training losses and save logging information to the disk
            # (i starts at 1, so i % 10 == 0 already implies i > 0)
            if i % 10 == 0:
                visualizer.plot_current_losses(epoch, i/(len(training_data_loader)*opt.batchSize), errors_ret)

        print("===> Epoch[{}]: Loss_D: {:.4f} Loss_G: {:.4f} Loss Seg: {:.4f} ".format(epoch, loss_epoch_dis,loss_epoch_gen, total_segloss))
        checkpoint_current()
        MSE = val()
        if MSE < bon:
            # New best validation score: snapshot both the GAN and SEG_NET.
            bon = MSE
            checkpoint()
            checkpoint_seg()
            print("BEST EPOCH SAVED")

    testing()
def main():
    """Iterative adversarial-retraining driver.

    Restores a pretrained ENet checkpoint, then repeatedly augments the
    training set with newly constructed negative samples and retrains,
    collecting natural-error statistics that drive model selection.
    """
    # Build the network, report its layers, and initialise the weights.
    model = ENet()
    networks.print_network(model)
    networks.init_weights(model, init_type='normal')
    model.init_convFilter(trainable=srm_trainable)

    if args.cuda:
        model.cuda()

    print('using pretrained model')
    checkpoint = torch.load(project_root + args.log_dir + '/checkpoint_300.pth')
    model.load_state_dict(checkpoint['state_dict'])
    args.lr = args.lr * 0.001  # fine-tuning: shrink the learning rate
    threshold = THRESHOLD_MAX

    # Losses: classification criterion plus a summed-L1 auxiliary term.
    criterion = nn.CrossEntropyLoss().cuda()
    L1_criterion = nn.L1Loss(reduction='sum').cuda()

    if srm_trainable:
        optimizer = create_optimizer(model.parameters(), args.lr)
    else:
        # Keep the fixed SRM filter bank frozen by leaving it out of the optimizer.
        updatable = [p for n, p in model.named_parameters() if 'convFilter1' not in n]
        optimizer = create_optimizer(updatable, args.lr)

    nature_error_itr_global = []
    for itr in np.arange(1, 11):
        args.dataroot = dst_dir
        nature_error_itr_local = []

        # adding negative samples into the original training dataset
        construct_negative_samples(itr)

        augmentation = transforms.Compose([
            transforms.RandomCrop(233),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ])
        train_loader = myDataset.DataLoaderHalf(
            myDataset.MyDataset(args, augmentation),
            batch_size=args.batch_size,
            shuffle=True,
            half_constraint=True,
            sampler_type='RandomBalancedSampler',
            **kwargs)
        print('The number of train data:{}'.format(len(train_loader.dataset)))
        args.epochs = 15

        train_multi(train_loader, optimizer, model, criterion, L1_criterion,
                    val_loader, itr, nature_error_itr_local,
                    nature_error_itr_global)

        # start from itr = 1
        if nature_error_itr_local:
            adv_model_num, _ = adv_model_selection(
                nature_error_itr_local, threshold, itr)
            if adv_model_num < 1:
                break

    print(nature_error_itr_global)
    print(len(nature_error_itr_global) / (args.epochs - args.epochs // 2))
    final_model_selection(nature_error_itr_global, threshold)
Пример #12
0
 def print_net(self):
     """Print the wrapped network `self.net` via the project's `networks.print_network` helper."""
     networks.print_network(self.net)
Пример #13
0
# Set up loss
# GAN adversarial loss, pixel-wise L1, MSE, and a project-specific SLLoss
# (semantics of SLLoss not visible here — see its definition).
criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()
criterionSLL = SLLoss()

# Set up optimizer
# Separate Adam optimizers for generator G and discriminator D,
# sharing the same learning rate and beta1 from the options.
optimizerG = optim.Adam(list(G.parameters()),
                        lr=opt.lr,
                        betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(list(D.parameters()),
                        lr=opt.lr,
                        betas=(opt.beta1, 0.999))

print_network(G)
print_network(D)

# Train
# Persistent input buffers reused across iterations:
# real_a = source image, real_b = target image, coords = 4 values per sample
# (presumably a bounding box — confirm against the training loop).
real_a = torch.FloatTensor(opt.batchSize, opt.input_channels, 256, 256)
real_b = torch.FloatTensor(opt.batchSize, opt.output_channels, 256, 256)
coords = torch.FloatTensor(opt.batchSize, 4)

# Move networks, criteria, and the input buffer onto the GPU when requested.
if opt.cuda:
    D = D.cuda()
    G = G.cuda()
    criterionGAN = criterionGAN.cuda()
    criterionSLL = criterionSLL.cuda()
    criterionL1 = criterionL1.cuda()
    criterionMSE = criterionMSE.cuda()
    real_a = real_a.cuda()
Пример #14
0
    def __init__(
        self, name="experiment", phase="train", which_epoch="latest",
        batch_size=1, image_size=128, map_nc=1, input_nc=3, output_nc=3,
        num_downs=7, ngf=64, ndf=64, norm_layer="batch", pool_size=50,
        lr=0.0002, beta1=0.5, lambda_D=0.5, lambda_MSE=10,
        lambda_P=5.0, use_dropout=True, gpu_ids=[], n_layers=3,
        use_sigmoid=False, use_lsgan=True, upsampling="nearest",
        continue_train=False, checkpoints_dir="checkpoints/"
    ):
        """Set up a conditional-GAN model: a U-Net generator producing a
        `map_nc`-channel map from an `input_nc`-channel image, and an N-layer
        discriminator over the concatenated (image, map) pair.

        In "train" phase, also creates the GAN/L1/MSE criteria, an image pool,
        and Adam optimizers for both networks. In "test" phase (or when
        `continue_train` is set), restores weights from `checkpoints_dir`.

        NOTE(review): `gpu_ids=[]` is a mutable default argument — harmless
        as long as it is never mutated, but worth changing to `None` + guard.
        """
        # Define input data that will be consumed by networks
        self.input_A = torch.FloatTensor(
            batch_size, 3, image_size, image_size
        )
        self.input_map = torch.FloatTensor(
            batch_size, map_nc, image_size, image_size
        )
        # Map the string option to the actual normalisation layer class.
        norm_layer = nn.BatchNorm2d \
            if norm_layer == "batch" else nn.InstanceNorm2d

        # Define netD and netG
        # Generator outputs `map_nc` channels; `output_nc` is unused here.
        self.netG = networks.UnetGenerator(
            input_nc=input_nc, output_nc=map_nc,
            num_downs=num_downs, ngf=ngf,
            use_dropout=use_dropout, gpu_ids=gpu_ids, norm_layer=norm_layer,
            upsampling_layer=upsampling
        )
        # Discriminator sees the input image concatenated with the map.
        self.netD = networks.NLayerDiscriminator(
            input_nc=input_nc + map_nc, ndf=ndf,
            n_layers=n_layers, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids
        )

        # Transfer data to GPU
        if len(gpu_ids) > 0:
            self.input_A = self.input_A.cuda()
            self.input_map = self.input_map.cuda()
            self.netD.cuda()
            self.netG.cuda()

        # Initialize parameters of netD and netG
        self.netG.apply(networks.weights_init)
        self.netD.apply(networks.weights_init)

        # Load trained netD and netG
        # netG is restored for testing OR resumed training; netD only when resuming.
        if phase == "test" or continue_train:
            netG_checkpoint_file = os.path.join(
                checkpoints_dir, name, "netG_{}.pth".format(which_epoch)
            )
            self.netG.load_state_dict(
                torch.load(netG_checkpoint_file)
            )
            print("Restoring netG from {}".format(netG_checkpoint_file))

        if continue_train:
            netD_checkpoint_file = os.path.join(
                checkpoints_dir, name, "netD_{}.pth".format(which_epoch)
            )
            self.netD.load_state_dict(
                torch.load(netD_checkpoint_file)
            )
            print("Restoring netD from {}".format(netD_checkpoint_file))

        self.name = name
        self.gpu_ids = gpu_ids
        self.checkpoints_dir = checkpoints_dir

        # Criterions
        # Training-only state: loss weights, replay pool, criteria, optimizers.
        if phase == "train":
            self.count = 0
            self.lr = lr
            self.lambda_D = lambda_D
            self.lambda_MSE = lambda_MSE

            self.image_pool = ImagePool(pool_size)
            self.criterionGAN = networks.GANLoss(use_lsgan=use_lsgan)
            self.criterionL1 = torch.nn.L1Loss()
            self.criterionMSE = torch.nn.MSELoss()  # Landmark loss

            self.optimizer_G = torch.optim.Adam(
                self.netG.parameters(), lr=self.lr, betas=(beta1, 0.999)
            )
            self.optimizer_D = torch.optim.Adam(
                self.netD.parameters(), lr=self.lr, betas=(beta1, 0.999)
            )

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')