Example #1
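
This snippet starts after the imports, the argument parsing, and the backbone network have already been created. A minimal setup sketch, assuming the usual PyTorch imports and the same ResBase50 backbone that Example #2 constructs (the module the model classes come from depends on the repository layout, so it is only noted in a comment):

import torch
import torch.nn.functional as F
import torch.optim as optim
import tqdm
from torch.autograd import Variable

# ResBase50, ResClassifier and weights_init are the project's model definitions;
# args and source_loader are assumed to be the parsed command-line options and
# the source-domain DataLoader.
netG = ResBase50().cuda()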
netF = ResClassifier(class_num=args.class_num, extract=args.extract).cuda()
netF.apply(weights_init)


def get_L2norm_loss_self_driven(x):
    # Squared deviation of the batch-mean feature L2 norm from the target radius.
    l = (x.norm(p=2, dim=1).mean() - args.radius) ** 2
    return args.weight_ring * l


def get_cls_loss(pred, gt):
    # Cross-entropy on the classifier logits; dim=1 makes the softmax axis explicit.
    cls_loss = F.nll_loss(F.log_softmax(pred, dim=1), gt)
    return cls_loss
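
# Illustrative sketch: get_L2norm_loss_self_driven pulls the mean L2 norm of the
# fc2 embeddings toward args.radius. If every embedding in a batch has norm 5.0
# and args.radius is 10.0, the unweighted penalty is (5.0 - 10.0) ** 2 == 25.0,
# scaled by args.weight_ring:
#
#   feats = torch.full((4, 3), 5.0 / 3 ** 0.5).cuda()  # each row has L2 norm 5.0
#   get_L2norm_loss_self_driven(feats)                 # ~= args.weight_ring * 25.0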


opt_g = optim.SGD(netG.parameters(), lr=args.lr, weight_decay=0.0005)
opt_f = optim.SGD(netF.parameters(),
                  lr=args.lr,
                  momentum=0.9,
                  weight_decay=0.0005)

for epoch in range(1, args.pre_epoches + 1):
    for i, (s_imgs, s_labels) in tqdm.tqdm(enumerate(source_loader)):
        s_imgs = Variable(s_imgs.cuda())
        s_labels = Variable(s_labels.cuda())

        opt_g.zero_grad()
        opt_f.zero_grad()

        s_bottleneck = netG(s_imgs)
        s_fc2_emb, s_logit = netF(s_bottleneck)
Example #2
File: train.py  Project: yiyang-wang/AFN
netG = ResBase50().cuda()
netF = ResClassifier(class_num=args.class_num, extract=args.extract, dropout_p=args.dropout_p).cuda()
netF.apply(weights_init)


def get_cls_loss(pred, gt):
    # Cross-entropy on the classifier logits; dim=1 makes the softmax axis explicit.
    cls_loss = F.nll_loss(F.log_softmax(pred, dim=1), gt)
    return cls_loss

def get_L2norm_loss_self_driven(x):
    # Squared deviation of the batch-mean feature L2 norm from the target radius.
    l = (x.norm(p=2, dim=1).mean() - args.radius) ** 2
    return args.weight_L2norm * l

opt_g = optim.SGD(netG.parameters(), lr=args.lr, weight_decay=0.0005)
opt_f = optim.SGD(netF.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.0005)
                    
for epoch in range(1, args.pre_epoches + 1):
    for i, (s_imgs, s_labels) in tqdm.tqdm(enumerate(source_loader)):
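        # Skip incomplete final batches so every step sees exactly args.batch_size
        # samples (drop_last=True on the DataLoader would have the same effect).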
        if s_imgs.size(0) != args.batch_size:
            continue
            
        s_imgs = Variable(s_imgs.cuda())
        s_labels = Variable(s_labels.cuda())

        opt_g.zero_grad()
        opt_f.zero_grad()

        s_bottleneck = netG(s_imgs)
        s_fc2_emb, s_logit = netF(s_bottleneck)
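
Both examples are cut off by the listing right after the forward pass. A sketch of how a pretraining step of this kind typically continues, combining the classification loss with the feature-norm penalty and updating both networks; the variable names below are assumptions for illustration, not taken from the repository (modern PyTorch would also drop the Variable wrapper and use plain CUDA tensors):

        # Hypothetical continuation of the inner loop:
        s_cls_loss = get_cls_loss(s_logit, s_labels)
        s_fc2_L2norm_loss = get_L2norm_loss_self_driven(s_fc2_emb)
        loss = s_cls_loss + s_fc2_L2norm_loss
        loss.backward()
        opt_g.step()
        opt_f.step()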