Example #1
        correct += predicted.eq(targets.data).cpu().sum()
    test_acc = 100. * correct / total
    print('* Test results : Acc@1 = %.2f%%' % (test_acc))
    record.write('\nTest Acc: %f\n' % test_acc)
    record.flush()


record = open('./checkpoint/' + args.id + '.txt', 'w')
record.write('learning rate: %f\n' % args.lr)
record.write('batch size: %d\n' % args.batch_size)
record.write('start iter: %d\n' % args.start_iter)
record.write('mid iter: %d\n' % args.mid_iter)
record.flush()

# Data loaders
loader = cifar_dataloader(args.dataset, r=args.noise_rate, noise_mode='instance',
                          batch_size=args.batch_size, num_workers=5, root_dir='./')

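# The run() mode presumably selects which split/pipeline the loader returns:
# 'eval_train' the training set without strong augmentation (for per-sample evaluation),
# 'test' the clean test set, and 'warmup' the full noisy training set for warm-up training.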
val_loader = loader.run('eval_train')
test_loader = loader.run('test')
train_loader = loader.run('warmup')

#loader = dataloader.clothing_dataloader(batch_size=args.batch_size,num_workers=5,shuffle=True)
#train_loader,val_loader,test_loader = loader.run()

best = 0
init = True
# Model
print('\nModel setup')
print('| Building net')
Example #2
def create_model():
    model = ResNet32(num_classes=args.num_class)
    # from resnet_imselar import resnet18
    # model = resnet18(num_classes=args.num_class)

    model = model.cuda()
    return model

stats_log=open('./checkpoint/%s_%.1f_%s_%s'%(args.dataset,args.r,args.noise_mode,args.sess)+'_stats.txt','w')
test_log=open('./checkpoint/%s_%.1f_%s_%s'%(args.dataset,args.r,args.noise_mode,args.sess)+'_acc.txt','w')

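# Warm-up length in epochs: plain cross-entropy training on all (noisy) labels,
# presumably before loss-based sample selection starts (DivideMix uses 10/30 for CIFAR-10/100).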
if args.dataset=='cifar10':
    warm_up = 10
elif args.dataset=='cifar100':
    warm_up = 30

loader = dataloader.cifar_dataloader(args.dataset, r=args.r, noise_mode=args.noise_mode,
                                     batch_size=args.batch_size, num_workers=5,
                                     root_dir=args.data_path, log=stats_log,
                                     noise_file='%s/%.1f_%s.json' % (args.data_path, args.r, args.noise_mode))

print('| Building net')
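# Two networks are built and trained side by side; in DivideMix-style training each
# network's per-sample losses are presumably used to partition the data the other trains on.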
net1 = create_model()
net2 = create_model()
cudnn.benchmark = True

criterion = SemiLoss()
# optimizer1 = optim.SGD(net1.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
# optimizer2 = optim.SGD(net2.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
optimizer1 = optim.SGD(net1.params(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
optimizer2 = optim.SGD(net2.params(), lr=args.lr, momentum=0.9, weight_decay=5e-4)

CE = nn.CrossEntropyLoss(reduction='none')
CEloss = nn.CrossEntropyLoss()
if args.noise_mode=='asym':
Example #3
            return torch.mean(torch.sum(probs.log() * probs, dim=1))

    def create_model(devices=[0]):
        model = ResNet18(num_classes=args.num_class)
        model = model.cuda()
        model = torch.nn.DataParallel(model, device_ids=devices).cuda()
        return model

    loader = dataloader.cifar_dataloader(
        dataset=args.dataset,
        r=args.r,
        noise_mode=args.noise_mode,
        batch_size=args.batch_size,
        warmup_batch_size=args.warmup_batch_size,
        num_workers=args.num_workers,
        root_dir=args.data_path,
        noise_file=f"{args.checkpoint_path}/saved/labels.json",
        preaug_file=(
            f"{args.checkpoint_path}/saved/{args.preset}_preaugdata.pth.tar"
            if args.preaugment
            else ""
        ),
        augmentation_strategy=args,
    )

    print("| Building net")
    devices = range(torch.cuda.device_count())
    net1 = create_model(devices)
    net2 = create_model(devices)
    cudnn.benchmark = True

    criterion = SemiLoss()
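
SemiLoss is instantiated above but not defined in these excerpts. The following is a minimal sketch assuming a MixMatch/DivideMix-style formulation (cross-entropy against soft labels on the labeled set, mean-squared error against guessed targets on the unlabeled set, with a linearly ramped weight for the unlabeled term); the linear_rampup helper and the argument names are assumptions, not taken from this code.

import numpy as np
import torch
import torch.nn.functional as F

def linear_rampup(current, warm_up, rampup_length=16):
    # Assumed schedule: ramp the unlabeled-loss weight linearly from 0 to 1 after warm-up.
    return float(np.clip((current - warm_up) / rampup_length, 0.0, 1.0))

class SemiLoss(object):
    def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch, warm_up):
        probs_u = torch.softmax(outputs_u, dim=1)
        # Labeled term: cross-entropy between predictions and (possibly soft) targets.
        Lx = -torch.mean(torch.sum(F.log_softmax(outputs_x, dim=1) * targets_x, dim=1))
        # Unlabeled term: mean-squared error between predictions and guessed targets.
        Lu = torch.mean((probs_u - targets_u) ** 2)
        return Lx, Lu, linear_rampup(epoch, warm_up)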