Example #1
import os

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn

import util    # project-local modules
import model
import losses

# opt holds the parsed command-line options built earlier in the script.
# Fix random seeds for reproducibility.
torch.manual_seed(opt.manualSeed)
if opt.cuda:
    torch.cuda.manual_seed_all(opt.manualSeed)

cudnn.benchmark = True  # let cuDNN auto-tune kernels for fixed input sizes

if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# load data
data = util.DATA_LOADER(opt)
print("# of training samples: ", data.ntrain)

# build the networks: generator (MLP_G), embedding net (Embedding_Net),
# critic (MLP_CRITIC), and attribute-embedding discriminator (Dis_Embed_Att)
netG = model.MLP_G(opt)
netMap = model.Embedding_Net(opt)
netD = model.MLP_CRITIC(opt)
F_ha = model.Dis_Embed_Att(opt)

model_path = './models/' + opt.dataset
if not os.path.exists(model_path):
    os.makedirs(model_path)

if len(opt.gpus.split(',')) > 1:
    netG = nn.DataParallel(netG)
    netD = nn.DataParallel(netD)
    netMap = nn.DataParallel(netMap)
    F_ha = nn.DataParallel(F_ha)


# instance-level supervised contrastive loss with temperature opt.ins_temp
contras_criterion = losses.SupConLoss_clear(opt.ins_temp)
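
For reference, here is a minimal sketch of what a supervised contrastive criterion like SupConLoss_clear plausibly computes (in the spirit of Khosla et al., 2020). This is an illustration under assumptions, not the project's actual implementation; sup_con_loss is a hypothetical name, and the real class may differ in edge-case handling:

import torch

def sup_con_loss(features, labels, temperature=0.1):
    """Sketch of a supervised contrastive loss.
    features: (N, D) L2-normalized embeddings; labels: (N,) class ids."""
    n = features.size(0)
    sim = features @ features.t() / temperature          # scaled cosine similarities
    self_mask = torch.eye(n, dtype=torch.bool, device=features.device)
    sim = sim.masked_fill(self_mask, float('-inf'))      # drop self-pairs
    # positives: same label, excluding self
    pos_mask = labels.unsqueeze(0).eq(labels.unsqueeze(1)) & ~self_mask
    log_prob = sim - torch.logsumexp(sim, dim=1, keepdim=True)
    pos_counts = pos_mask.sum(1)
    valid = pos_counts > 0                               # skip anchors with no positive
    mean_log_prob_pos = (log_prob.masked_fill(~pos_mask, 0)
                         .sum(1)[valid] / pos_counts[valid])
    return -mean_log_prob_pos.mean()

With L2-normalized embeddings z (e.g. F.normalize(h, dim=1)) and integer labels y, this would be called as sup_con_loss(z, y, opt.ins_temp), presumably mirroring how contras_criterion is invoked in the training loop.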
Example #2
    # Excerpt: __init__ of the CLSWGAN trainer class; model, ConLossReal,
    # ClsUnseen, ClsModel, SupConLoss, AttFeatsCon, and loadFasterRcnnCLSHead
    # come from the surrounding project.
    def __init__(self,
                 opt,
                 attributes,
                 unseenAtt,
                 unseenLabels,
                 seen_feats_mean,
                 gen_type='FG'):
        '''
        CLSWGAN trainer.
        Inputs:
            opt -- parsed command-line arguments
            attributes -- embedding vectors of all classes
            unseenAtt -- embedding vectors of unseen classes
            unseenLabels -- labels of unseen classes
            seen_feats_mean -- mean features of the seen classes
            gen_type -- generator variant tag (default: 'FG')
        '''
        self.opt = opt

        self.con_real = ConLossReal(seen_feats_mean)
        self.gen_type = gen_type
        self.Wu_Labels = unseenLabels
        print(f"Wu_Labels {self.Wu_Labels}")
        self.Wu = unseenAtt

        self.unseen_classifier = ClsUnseen(unseenAtt)
        self.unseen_classifier.cuda()
        self.con_loss = SupConLoss()
        self.con_att_feats = AttFeatsCon()

        # self.unseen_classifier = loadUnseenWeights(opt.pretrain_classifier_unseen, self.unseen_classifier)
        self.classifier = ClsModel(num_classes=opt.nclass_all)
        self.classifier.cuda()
        self.classifier = loadFasterRcnnCLSHead(opt.pretrain_classifier,
                                                self.classifier)

        for p in self.classifier.parameters():
            p.requires_grad = False

        for p in self.unseen_classifier.parameters():
            p.requires_grad = False

        self.ntrain = opt.gan_epoch_budget
        self.attributes = attributes.data.numpy()

        print(f"# of training samples: {self.ntrain}")
        # initialize generator and discriminator
        self.netG = model.MLP_G(self.opt)
        self.netD = model.MLP_CRITIC(self.opt)

        if self.opt.cuda and torch.cuda.is_available():
            self.netG = self.netG.cuda()
            self.netD = self.netD.cuda()

        print('\n\n#############################################################\n')
        print(self.netG, '\n')
        print(self.netD)
        print('\n#############################################################\n\n')

        # classification loss, Equation (4) of the paper
        self.cls_criterion = nn.NLLLoss()

        # gradient-sign constants for the WGAN backward calls (newer PyTorch
        # may require 0-dim tensors here, e.g. torch.tensor(1.0))
        self.one = torch.FloatTensor([1])
        self.mone = self.one * -1

        if self.opt.cuda:
            self.one = self.one.cuda()
            self.mone = self.mone.cuda()
            self.cls_criterion.cuda()

        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=self.opt.lr,
                                     betas=(self.opt.beta1, 0.999))
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=self.opt.lr,
                                     betas=(self.opt.beta1, 0.999))
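
To show how the pieces above fit together, here is a hedged sketch of a single WGAN critic/generator step using one/mone and the two Adam optimizers. The real trainer also applies a gradient penalty and the Equation (4) classification loss; train_step, real_feats, att, and noise are illustrative names, and opt.nz (noise dimension) is assumed from the usual option set:

    def train_step(self, real_feats, att):
        # Sketch only: names and signature are illustrative, not the
        # project's actual API.
        # ---- critic update: raise scores on real, lower on fake ----
        self.netD.zero_grad()
        errD_real = self.netD(real_feats, att).mean()
        errD_real.backward(self.mone)  # ascend on real scores

        noise = torch.randn(real_feats.size(0), self.opt.nz,
                            device=real_feats.device)
        fake = self.netG(noise, att)
        errD_fake = self.netD(fake.detach(), att).mean()
        errD_fake.backward(self.one)   # descend on fake scores
        self.optimizerD.step()

        # ---- generator update: raise critic scores on generated feats ----
        self.netG.zero_grad()
        errG = self.netD(self.netG(noise, att), att).mean()
        errG.backward(self.mone)
        self.optimizerG.step()
        return (errD_real - errD_fake).item(), errG.item()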