Example #1
0
    # NOTE(review): fragment — the enclosing function header and the imports
    # (os, torch, net, config, args, load_model, EvalToolBox, BFMN_batch,
    # BFMG_batch) are outside this excerpt.
    # index = g_model['faces']
    e = EvalToolBox()
    print(args.stage)
    if args.stage == 1:
        # Restrict the process to the requested GPUs; after this remap the
        # visible devices are renumbered 0..num_gpus-1, so range() is valid.
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        num_gpus = len(args.gpu.split(","))
        gpu_ids = range(num_gpus)
        print('num of GPU is ' + str(num_gpus))
        print('GPU is ' + str(gpu_ids))
        use_cuda = torch.cuda.is_available() and True
        device = torch.device("cuda" if use_cuda else "cpu")
        torch.cuda.set_device(gpu_ids[0])
        # Publish device settings through the shared config module for
        # code elsewhere in the project.
        config.use_cuda = use_cuda
        config.device = device
        config.device_ids = gpu_ids
        # Build the stage-3 sphere64a network, wrap for multi-GPU, then load
        # the checkpoint named in config.dict_file.
        model = net.sphere64a(pretrained=False, stage=3).to(device)
        model = torch.nn.DataParallel(model, device_ids=gpu_ids)
        model = model.to(device)
        load_model(model, config.dict_file)
        # Checkpoint filename selects the morphable-model variant — presumably
        # "nonlinear" checkpoints pair with BFMN_batch; TODO confirm.
        if 'nonlinear' in config.dict_file:
            bfmn = BFMN_batch()
        else:
            bfmn = BFMG_batch()
        bfmn = torch.nn.DataParallel(bfmn, device_ids=gpu_ids).to(device)
        bfmn = bfmn.to(device)
        # Dispatch on evaluation mode.
        if args.mode == 'micc':
            e.get_param_from_model(model, bfmn)
        elif args.mode == 'single':
            e.get_mesh_from_model_single(model, bfmn)

    # NOTE(review): the stage-2 branch body is cut off at the end of this excerpt.
    elif args.stage == 2 or args.stage == '2':
Example #2
0
# NOTE(review): script fragment — `args`, `config`, `net`, `mobilenet_2`,
# `transform_train`, `VGG2MixDataset`, and the imports live outside this excerpt.
# Pin the process to the requested GPUs; visible devices are then renumbered
# 0..num_gpus-1, which is why range() below is a valid id list.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
num_gpus = len(args.gpu.split(","))
gpu_ids = range(num_gpus)
print("num of GPU is " + str(num_gpus))
print("GPU is " + str(gpu_ids))
use_cuda = torch.cuda.is_available() and True  # `and True` is a no-op, kept as-is
device = torch.device("cuda" if use_cuda else "cpu")
torch.cuda.set_device(gpu_ids[0])
# Publish the device choice through the shared config module.
config.use_cuda = use_cuda
config.device = device
#######################################################################################################
dict_file = args.loadfile  # checkpoint path, passed as model_root below
lr = args.lr
print("--->loading model of " + args.net + "...")
# Backbone selection: sphere64a or MobileNet-v2 with a 512-d embedding.
if args.net == "sphere":
    model = net.sphere64a(pretrained=False, model_root=dict_file).to(device)
else:
    model = mobilenet_2(num_classes=512).to(device)

#######################################################################################################
# Training data is only loaded when not in verification-only mode.
if not args.verify:
    trainset = VGG2MixDataset(
        max_number_class=args.number_of_class,
        indexfile="../file_path_list_vgg2.txt",
        transform=transform_train,
        ddfa_root=config.ddfa_root,
        ddfa_filelists=args.filelists_train,
        ddfa_param_fp=args.param_fp_train,
        mix=False,
    )
Example #3
0
# NOTE(review): script fragment — `args`, `config`, `net`, `dict_file`,
# `load_model`, `mobilenet_2`, `transform_train`, `MSCelebShapeDataSet`, and
# the imports live outside this excerpt.
# Unlike the CUDA_VISIBLE_DEVICES variants above, this one parses the actual
# device ids out of the comma-separated --gpu argument.
num_gpus = len(args.gpu.split(","))
gpu_ids = []
for i in range(num_gpus):
    gpu_ids.append(int(args.gpu.split(",")[i]))
print("num of GPU is " + str(num_gpus))
print("GPU is " + str(gpu_ids))
use_cuda = torch.cuda.is_available() and True  # `and True` is a no-op, kept as-is
device = torch.device("cuda" if use_cuda else "cpu")
torch.cuda.set_device(gpu_ids[0])

# Publish the device choice through the shared config module.
config.use_cuda = use_cuda
config.device = device

print("loading model...")
# Backbone selection; the sphere variant restores weights from dict_file.
if args.net == "sphere":
    model = net.sphere64a(pretrained=False, model_root=dict_file, stage=2)
    load_model(model, dict_file)
else:
    model = mobilenet_2(num_classes=512)

model = model.to(device)
# Training data is only loaded when not in verification-only mode.
if not args.verify:
    trainset = MSCelebShapeDataSet(
        indexfile="/data/jdq/imgc2/file_path_list_imgc2_lqy.txt",
        max_number_class=args.number_of_class,
        transform=transform_train,
    )
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=16)
Example #4
0
                         # NOTE(review): fragment — these lines are the tail of
                         # a transforms.Compose([...]) call whose opening is
                         # outside this excerpt.
                         transforms.Resize((112, 96)),
                         transforms.ToTensor(),
                         transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                              std=(0.5, 0.5, 0.5))
                     ]))

# Evaluation loader: batch size 1, no shuffling so results are reproducible.
eval_loader = torch.utils.data.DataLoader(evalset,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=16)

# Model
print('==> Building model..')
#model=None
# Pick the classifier head matching the configured loss; `mainloss`,
# `number_of_class`, `sphere64a`, `args`, `device`, and `gpu_ids` are defined
# outside this excerpt.
if mainloss == 'A-softmax':
    model = sphere64a(linear='AngleLinear', num_classes=number_of_class)
elif mainloss == 'cosloss' or mainloss == 'mcosloss' or mainloss == 'arcloss':
    model = sphere64a(linear='CosLinear', num_classes=number_of_class)
elif mainloss == 'cocoloss':
    model = sphere64a(linear='CocoLinear',
                      scale=args.s,
                      num_classes=number_of_class)
elif mainloss == 'mcocoloss':
    model = sphere64a(linear='mCocoLinear',
                      scale=args.s,
                      num_classes=number_of_class)
else:
    # Default head (plain softmax-style linear — TODO confirm in sphere64a).
    model = sphere64a(num_classes=number_of_class)

model = model.to(device)
model = torch.nn.DataParallel(model, device_ids=gpu_ids)
Example #5
0
])
# NOTE(review): script fragment — the `])` above closes a transforms.Compose
# opened outside this excerpt; `args`, `net`, `dict_file`, `normalize`,
# `transform_eval`, `transform_train`, and the dataset classes also live
# outside this view.
transform_eval_fs = transforms.Compose(
    [transforms.CenterCrop((112, 96)),
     transforms.ToTensor(), normalize])

lr = args.lr

# Pin the process to the requested GPUs; visible devices are renumbered
# 0..num_gpus-1, so range() is a valid id list here.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
num_gpus = len(args.gpu.split(","))
gpu_ids = range(num_gpus)
torch.cuda.set_device(gpu_ids[0])
use_cuda = torch.cuda.is_available() and True  # `and True` is a no-op, kept as-is
device = torch.device("cuda" if use_cuda else "cpu")

print("loading model...")
# Pretrained sphere64a restored from dict_file.
model = net.sphere64a(pretrained=True, model_root=dict_file)
model = model.to(device)
evalset = AFLW2000DataSet(root="/data2/lmd_jdq/AFLW2000-3D/AFLW2000_align/",
                          transform=transform_eval)
# Training data is only loaded when not in verification-only mode.
if not args.verify:
    trainset = MyDataSet(
        root="/data1/jdq/imgc2/",
        max_number_class=args.number_of_class,
        transform=transform_train,
    )
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=32,
                                               shuffle=True,
                                               num_workers=16)
evalsetfs = FSDataSet(root="", filelist="./1.txt", transform=transform_eval_fs)
def main():
    """Train the sphere64a face model with CosFace loss and evaluate on LFW.

    Runs 38 epochs; after each epoch the checkpoint is saved and scored with
    lfw_eval, and the accuracy is written to TensorBoard.  Relies on
    module-level names (root_path, train_list, BatchSize, workers, num_class,
    use_gpu, lr_ori, save_path, writer, net, layer, lfw_eval, train,
    ImageList) defined elsewhere in the file — TODO confirm in full module.
    """
    # ----------------------------------------load images----------------------------------------

    train_loader = torch.utils.data.DataLoader(
        ImageList(
            root=root_path,
            fileList=train_list,
            transform=transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
                transforms.Normalize(
                    mean=(0.5, 0.5, 0.5),
                    std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
            ])),
        batch_size=BatchSize,
        shuffle=True,
        num_workers=workers,
        pin_memory=True,
        drop_last=True)

    print('length of train Dataset: ' + str(len(train_loader.dataset)))
    print('Number of Classses: ' + str(num_class))

    # ------------------------------------model--------------------------------------------
    model_ft = net.sphere64a()

    # # --------------load model---------------
    # model_path = './checkpoints/mnface_30_checkpoints.pth'
    # state_dict = torch.load(model_path)
    # model_ft.load_state_dict(state_dict)

    #------------------------------use gpu--------------------
    if use_gpu:
        model_ft = nn.DataParallel(model_ft).cuda()

    # ------------------------------cosface loss and optimizer-------------------------
    # MarginCosineProduct: 512-d embeddings -> num_class CosFace logits.
    MCP = layer.MarginCosineProduct(512, num_class).cuda()
    # MCP = layer.AngleLinear(512, args.num_class).cuda()
    # MCP = torch.nn.Linear(512, args.num_class, bias=False).cuda()
    criterion = torch.nn.CrossEntropyLoss().cuda()
    # Both the backbone and the margin layer are optimized jointly.
    optimizer = torch.optim.SGD([{
        'params': model_ft.parameters()
    }, {
        'params': MCP.parameters()
    }],
                                lr=lr_ori,
                                momentum=0.9,
                                weight_decay=0.0005)

    for epoch in range(1, 38 + 1):
        # # -------------------my loss----------------------------
        # train(train_loader, model_ft, mining_loss, ce_loss, optimizer, epoch)
        # model_ft.module.save(save_path + 'mnface_' + str(epoch) + '_checkpoints.pth')
        # acc, pred = lfw_eval.eval(save_path + 'mnface_' + str(epoch) + '_checkpoints.pth')

        #-------------------cos face--------------------------
        train(train_loader, model_ft, MCP, criterion, optimizer, epoch)
        # Checkpoint then evaluate the saved weights on LFW each epoch.
        model_ft.module.save(save_path + 'cosface_' + str(epoch) +
                             '_checkpoints.pth')
        acc, pred = lfw_eval.eval(save_path + 'cosface_' + str(epoch) +
                                  '_checkpoints.pth')

        writer.add_scalar('Test/LFWAcc', acc, epoch)
    print('finished training')