Example #1
    def __init__(self, opt, dataloader):
        super(MNIST_UNET, self).__init__()

        self.opt = opt
        #self.visualizer = Visualizer(opt)
        self.dataloader = dataloader
        self.total_steps = len(dataloader)
        self.device = torch.device(
            'cuda:0' if self.opt.device != 'cpu' else 'cpu')

        self.netg = NetG(self.opt).to(self.device)
        self.netd = NetD(self.opt).to(self.device)
        weights_init(self.netg)
        weights_init(self.netd)

        self.l_adv = self.l2_loss
        self.l_con = nn.L1Loss()
        self.l_enc = self.l2_loss
        self.l_bce = nn.BCELoss()

        # Initialize input tensors.
        self.input_imgs = torch.empty(size=(self.opt.batchsize, self.opt.nc,
                                            self.opt.isize, self.opt.isize),
                                      dtype=torch.float32,
                                      device=self.device)
        #self.label = torch.empty(size=(self.opt.batchsize, ), dtype=torch.float32, device=self.device)
        #self.gt = torch.empty(size=(self.opt.batchsize, ), dtype=torch.long, device=self.device)

        self.real_label = torch.ones(size=(self.opt.batchsize, ),
                                     dtype=torch.float32,
                                     device=self.device)
        self.fake_label = torch.zeros(size=(self.opt.batchsize, ),
                                      dtype=torch.float32,
                                      device=self.device)
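
The snippets in this section all call a weights_init helper and an l2_loss function (Example #1 references it as self.l2_loss) that are defined elsewhere in the respective projects. As a point of reference, here is a minimal sketch of what these helpers usually look like in GANomaly-style code (DCGAN-style initialization and a mean squared error); the exact signatures are assumptions, not taken from any of the projects above.

import torch

def weights_init(mod):
    # DCGAN-style init: zero-mean normal for Conv weights, unit-mean normal for BatchNorm.
    classname = mod.__class__.__name__
    if classname.find('Conv') != -1:
        mod.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        mod.weight.data.normal_(1.0, 0.02)
        mod.bias.data.fill_(0)

def l2_loss(input, target, size_average=True):
    # Mean (or per-element) squared error between two tensors.
    if size_average:
        return torch.mean(torch.pow(input - target, 2))
    return torch.pow(input - target, 2)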
Example #2
File: model.py  Project: xqlmyh/ganomaly
    def __init__(self, opt, dataloader):
        super(Ganomaly, self).__init__(opt, dataloader)

        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0

        ##
        # Create and initialize networks.
        self.netg = NetG(self.opt).to(self.device)
        self.netd = NetD(self.opt).to(self.device)
        self.netg.apply(weights_init)
        self.netd.apply(weights_init)

        ##
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            self.opt.iter = torch.load(os.path.join(self.opt.resume, 'netG.pth'))['epoch']
            self.netg.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netG.pth'))['state_dict'])
            self.netd.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netD.pth'))['state_dict'])
            print("\tDone.\n")

        self.l_adv = l2_loss
        self.l_con = nn.L1Loss()
        self.l_enc = l2_loss
        self.l_bce = nn.BCELoss()

        ##
        # Initialize input tensors.
        self.input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32, device=self.device)
        self.label = torch.empty(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.gt    = torch.empty(size=(opt.batchsize,), dtype=torch.long, device=self.device)
        self.fixed_input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32, device=self.device)
        self.real_label = torch.ones (size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.fake_label = torch.zeros(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        ##
        # Setup optimizer
        if self.opt.isTrain:
            self.netg.train()
            self.netd.train()
            self.optimizer_d = optim.Adam(self.netd.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
            self.optimizer_g = optim.Adam(self.netg.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
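
Every resume branch in these examples loads netG.pth / netD.pth checkpoints containing an 'epoch' entry and a 'state_dict' entry. The matching save routine is not shown in the snippets; a minimal sketch of one (the function name and output directory are hypothetical) could be:

import os
import torch

def save_weights(epoch, netg, netd, out_dir):
    # Store checkpoints in the {'epoch', 'state_dict'} format the resume code expects.
    os.makedirs(out_dir, exist_ok=True)
    torch.save({'epoch': epoch, 'state_dict': netg.state_dict()},
               os.path.join(out_dir, 'netG.pth'))
    torch.save({'epoch': epoch, 'state_dict': netd.state_dict()},
               os.path.join(out_dir, 'netD.pth'))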
Example #3
    def __init__(self, opt, dataloader=None):
        super(Ganomaly, self).__init__()
        ##
        # Initialize variables.
        self.opt = opt
        self.visualizer = Visualizer(opt)
        self.dataloader = dataloader
        self.trn_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
        self.tst_dir = os.path.join(self.opt.outf, self.opt.name, 'test')
        self.device = torch.device(
            "cuda:0" if self.opt.device != 'cpu' else "cpu")

        # -- Discriminator attributes.
        self.out_d_real = None
        self.feat_real = None
        self.err_d_real = None
        self.fake = None
        self.latent_i = None
        self.latent_o = None
        self.out_d_fake = None
        self.feat_fake = None
        self.err_d_fake = None
        self.err_d = None

        # -- Generator attributes.
        self.out_g = None
        self.err_g_bce = None
        self.err_g_l1l = None
        self.err_g_enc = None
        self.err_g = None

        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0

        ##
        # Create and initialize networks.
        self.netg = NetG(self.opt).to(self.device)
        self.netd = NetD(self.opt).to(self.device)
        self.netg.apply(weights_init)
        self.netd.apply(weights_init)

        ##
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            self.opt.iter = torch.load(
                os.path.join(self.opt.resume, 'netG.pth'))['epoch']
            self.netg.load_state_dict(
                torch.load(os.path.join(self.opt.resume,
                                        'netG.pth'))['state_dict'])
            self.netd.load_state_dict(
                torch.load(os.path.join(self.opt.resume,
                                        'netD.pth'))['state_dict'])
            print("\tDone.\n")

        # print(self.netg)
        # print(self.netd)

        ##
        # Loss Functions
        self.bce_criterion = nn.BCELoss()
        self.l1l_criterion = nn.L1Loss()
        self.l2l_criterion = l2_loss

        ##
        # Initialize input tensors.
        self.input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize,
                                       self.opt.isize),
                                 dtype=torch.float32,
                                 device=self.device)
        self.label = torch.empty(size=(self.opt.batchsize, ),
                                 dtype=torch.float32,
                                 device=self.device)
        self.gt = torch.empty(size=(opt.batchsize, ),
                              dtype=torch.long,
                              device=self.device)
        self.fixed_input = torch.empty(size=(self.opt.batchsize, 3,
                                             self.opt.isize, self.opt.isize),
                                       dtype=torch.float32,
                                       device=self.device)
        self.real_label = 1
        self.fake_label = 0

        ##
        # Setup optimizer
        if self.opt.isTrain:
            self.netg.train()
            self.netd.train()
            self.optimizer_d = optim.Adam(self.netd.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
            self.optimizer_g = optim.Adam(self.netg.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
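
These constructors only read a handful of fields from opt (tensor shapes, optimizer settings, output paths). For illustration only, a minimal options object built from the attributes the snippets access might look like the following; the field values are hypothetical defaults, not the projects' actual argument-parser settings.

from argparse import Namespace

# Field names come from the attributes accessed above; values are illustrative.
opt = Namespace(
    batchsize=64, isize=32, nc=3, nz=100,      # tensor shapes
    lr=2e-4, beta1=0.5, isTrain=True,          # optimizer setup
    resume='', iter=0,                         # checkpoint resume
    outf='./output', name='ganomaly/mnist',    # output directories
    device='gpu',                              # anything other than 'cpu' selects cuda:0
)

# model = Ganomaly(opt, dataloader)            # hypothetical usage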
Example #4
    def __init__(self, opt):
        super(SSnovelty, self).__init__()
        ##
        # Initialize variables.
        self.opt = opt
        self.visualizer = Visualizer(opt)
        # self.warmup = hyperparameters['model_specifics']['warmup']
        self.trn_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
        self.tst_dir = os.path.join(self.opt.outf, self.opt.name, 'test')
        self.device = torch.device(
            "cuda:0" if self.opt.device != 'cpu' else "cpu")

        # -- Discriminator attributes.
        self.out_d_real = None
        self.feat_real = None
        self.err_d_real = None
        self.fake = None
        self.latent_i = None
        # self.latent_o = None
        self.out_d_fake = None
        self.feat_fake = None
        self.err_d_fake = None
        self.err_d = None
        self.idx = 0
        self.opt.display = True

        # -- Generator attributes.
        self.out_g = None
        self.err_g_bce = None
        self.err_g_l1l = None
        self.err_g_enc = None
        self.err_g = None

        # -- Misc attributes
        self.epoch = 0
        self.epoch1 = 0
        self.times = []
        self.total_steps = 0

        ##
        # Create and initialize networks.
        self.netg = NetG(self.opt).to(self.device)
        self.netd = NetD(self.opt).to(self.device)
        self.netg.apply(weights_init)
        self.netd.apply(weights_init)

        self.netc = Class(self.opt).to(self.device)

        ##
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            # self.opt.iter = torch.load(os.path.join(self.opt.resume, 'netG.pth'))['epoch']
            # self.netg.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netG.pth'))['state_dict'])
            # self.netd.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netD.pth'))['state_dict'])
            self.netc.load_state_dict(
                torch.load(os.path.join(self.opt.resume,
                                        'class.pth'))['state_dict'])
            print("\tDone.\n")

        # print(self.netg)
        # print(self.netd)

        ##
        # Loss Functions
        self.bce_criterion = nn.BCELoss()
        self.l1l_criterion = nn.L1Loss()
        self.mse_criterion = nn.MSELoss()
        self.l2l_criterion = l2_loss
        self.loss_func = torch.nn.CrossEntropyLoss()

        ##
        # Initialize input tensors.
        self.input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize,
                                       self.opt.isize),
                                 dtype=torch.float32,
                                 device=self.device)
        self.input_1 = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize,
                                         self.opt.isize),
                                   dtype=torch.float32,
                                   device=self.device)
        self.label = torch.empty(size=(self.opt.batchsize, ),
                                 dtype=torch.float32,
                                 device=self.device)
        self.label_r = torch.empty(size=(self.opt.batchsize, ),
                                   dtype=torch.float32,
                                   device=self.device)
        self.gt = torch.empty(size=(self.opt.batchsize, ),
                              dtype=torch.long,
                              device=self.device)
        self.fixed_input = torch.empty(size=(self.opt.batchsize, 3,
                                             self.opt.isize, self.opt.isize),
                                       dtype=torch.float32,
                                       device=self.device)
        self.real_label = 1
        self.fake_label = 0

        base = 1.0
        sigma_list = [1, 2, 4, 8, 16]
        self.sigma_list = [sigma / base for sigma in sigma_list]

        ##
        # Setup optimizer
        if self.opt.isTrain:
            self.netg.train()
            self.netd.train()
            self.netc.train()

            self.optimizer_d = optim.Adam(self.netd.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
            self.optimizer_g = optim.Adam(self.netg.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
            self.optimizer_c = optim.Adam(self.netc.parameters(),
                                          lr=self.opt.lr_c,
                                          betas=(self.opt.beta1, 0.999))
Example #5
File: model.py  Project: djtimy/ganomaly
    def __init__(self, opt, dataloader=None):
        super(Ganomaly, self).__init__()
        ##
        # Initialize variables.
        self.opt = opt
        self.visualizer = Visualizer(opt)
        self.dataloader = dataloader
        # Set the output folders; self.opt.name is always equal to ${ganomaly/$dataset} (see option.py, line 106).
        self.trn_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
        self.tst_dir = os.path.join(self.opt.outf, self.opt.name, 'test')
        self.device = torch.device(
            "cuda:0" if self.opt.device != 'cpu' else "cpu")

        # -- Discriminator attributes.
        self.out_d_real = None
        self.feat_real = None
        self.err_d_real = None
        self.fake = None
        self.latent_i = None
        self.latent_o = None
        self.out_d_fake = None
        self.feat_fake = None
        self.err_d_fake = None
        self.err_d = None

        # -- Generator attributes.
        self.out_g = None
        self.err_g_bce = None
        self.err_g_l1l = None
        self.err_g_enc = None
        self.err_g = None

        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0

        ##
        # Create and initialize networks.
        self.netg = NetG(self.opt).to(self.device)
        self.netd = NetD(self.opt).to(self.device)
        # Initialize the parameters of each network's Conv and BatchNorm layers.
        self.netg.apply(weights_init)
        self.netd.apply(weights_init)

        ##
        # If opt.resume is not empty, continue training from the checkpoint path it specifies
        # (similar to the fine-tuning used in the cnn_disk project).
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            self.opt.iter = torch.load(
                os.path.join(self.opt.resume, 'netG.pth'))['epoch']
            self.netg.load_state_dict(
                torch.load(os.path.join(self.opt.resume,
                                        'netG.pth'))['state_dict'])
            self.netd.load_state_dict(
                torch.load(os.path.join(self.opt.resume,
                                        'netD.pth'))['state_dict'])
            print("\tDone.\n")

        # print(self.netg)
        # print(self.netd)

        ##
        # Loss Functions, Binary Cross Entropy
        self.bce_criterion = nn.BCELoss()
        self.l1l_criterion = nn.L1Loss()
        self.l2l_criterion = l2_loss

        ##
        # Initialize input tensors.
        self.input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize,
                                       self.opt.isize),
                                 dtype=torch.float32,
                                 device=self.device)
        # self.opt.logger.info('empty input size: {}'.format(self.input.size()))
        self.label = torch.empty(size=(self.opt.batchsize, ),
                                 dtype=torch.float32,
                                 device=self.device)
        self.gt = torch.empty(size=(opt.batchsize, ),
                              dtype=torch.long,
                              device=self.device)
        self.fixed_input = torch.empty(size=(self.opt.batchsize, 3,
                                             self.opt.isize, self.opt.isize),
                                       dtype=torch.float32,
                                       device=self.device)
        self.real_label = 1
        self.fake_label = 0

        # Setup optimizer
        if self.opt.isTrain:
            self.netg.train()
            self.netd.train()
            self.optimizer_d = optim.Adam(self.netd.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
            self.optimizer_g = optim.Adam(self.netg.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
Example #6
    def __init__(self, opt, dataloader=None):
        super(Ganomaly, self).__init__()
        ##
        # Initialize variables.
        self.opt = opt
        self.visualizer = Visualizer(opt)
        self.dataloader = dataloader
        self.trn_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
        self.tst_dir = os.path.join(self.opt.outf, self.opt.name, 'test')

        # -- Discriminator attributes.
        self.out_d_real = None
        self.feat_real = None
        self.err_d_real = None
        self.fake = None
        self.latent_i = None
        self.latent_o = None
        self.out_d_fake = None
        self.feat_fake = None
        self.err_d_fake = None
        self.err_d = None

        # -- Generator attributes.
        self.out_g = None
        self.err_g_bce = None
        self.err_g_l1l = None
        self.err_g_enc = None
        self.err_g = None

        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0

        ##
        # Create and initialize networks.
        self.netg = NetG(self.opt)
        self.netd = NetD(self.opt)
        self.netg.apply(weights_init)
        self.netd.apply(weights_init)

        ##
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            self.opt.iter = torch.load(
                os.path.join(self.opt.resume, 'netG.pth'))['epoch']
            self.netg.load_state_dict(
                torch.load(os.path.join(self.opt.resume,
                                        'netG.pth'))['state_dict'])
            self.netd.load_state_dict(
                torch.load(os.path.join(self.opt.resume,
                                        'netD.pth'))['state_dict'])
            print("\tDone.\n")

        print(self.netg)
        print(self.netd)

        ##
        # Loss Functions
        self.bce_criterion = nn.BCELoss()
        self.l1l_criterion = nn.L1Loss()
        self.l2l_criterion = l2_loss

        ##
        # Initialize input tensors.
        self.input = torch.FloatTensor(self.opt.batchsize, 3, self.opt.isize,
                                       self.opt.isize)
        self.label = torch.FloatTensor(self.opt.batchsize)
        self.gt = torch.LongTensor(self.opt.batchsize)
        self.pixel_gt = torch.FloatTensor(self.opt.batchsize, 3,
                                          self.opt.isize, self.opt.isize)
        self.noise = torch.FloatTensor(self.opt.batchsize, self.opt.nz, 1, 1)
        self.fixed_noise = torch.FloatTensor(self.opt.batchsize, self.opt.nz,
                                             1, 1).normal_(0, 1)
        self.fixed_input = torch.FloatTensor(self.opt.batchsize, 3,
                                             self.opt.isize, self.opt.isize)
        self.real_label = 1
        self.fake_label = 0

        self.an_scores = torch.FloatTensor([])  # Anomaly scores.
        self.gt_labels = torch.LongTensor([])  # Frame Level GT Labels.
        self.pixel_gts = torch.FloatTensor([])  # Pixel Level GT Labels.

        ##
        # Convert to CUDA if available.
        if self.opt.gpu_ids:
            self.netd.cuda()
            self.netg.cuda()
            self.bce_criterion.cuda()
            self.l1l_criterion.cuda()
            self.input, self.label = self.input.cuda(), self.label.cuda()
            self.gt, self.pixel_gt = self.gt.cuda(), self.pixel_gt.cuda()
            self.noise = self.noise.cuda()
            self.fixed_noise = self.fixed_noise.cuda()
            self.fixed_input = self.fixed_input.cuda()

        ##
        # Convert to Autograd Variable
        self.input = Variable(self.input, requires_grad=False)
        self.label = Variable(self.label, requires_grad=False)
        self.gt = Variable(self.gt, requires_grad=False)
        self.pixel_gt = Variable(self.pixel_gt, requires_grad=False)
        self.noise = Variable(self.noise, requires_grad=False)
        self.fixed_noise = Variable(self.fixed_noise, requires_grad=False)
        self.fixed_input = Variable(self.fixed_input, requires_grad=False)

        ##
        # Setup optimizer
        if self.opt.isTrain:
            self.netg.train()
            self.netd.train()
            self.optimizer_d = optim.Adam(self.netd.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
            self.optimizer_g = optim.Adam(self.netg.parameters(),
                                          lr=self.opt.lr,
                                          betas=(self.opt.beta1, 0.999))
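
Example #6 follows the legacy PyTorch 0.3 idiom: FloatTensor/LongTensor constructors, explicit .cuda() calls, and autograd.Variable wrappers (Variable has been a no-op since PyTorch 0.4). A hedged sketch of the equivalent tensor setup in current PyTorch, along the lines of what Examples #2 through #5 already do, is:

import torch

# Assumes an `opt` namespace like the one sketched after Example #3.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Allocate inputs directly on the target device; no Variable wrapper is needed.
input = torch.empty(opt.batchsize, 3, opt.isize, opt.isize,
                    dtype=torch.float32, device=device)
label = torch.empty(opt.batchsize, dtype=torch.float32, device=device)
gt = torch.empty(opt.batchsize, dtype=torch.long, device=device)
noise = torch.empty(opt.batchsize, opt.nz, 1, 1, device=device)
fixed_noise = torch.randn(opt.batchsize, opt.nz, 1, 1, device=device)  # replaces .normal_(0, 1)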