Example No. 1
def main():
    # Display the anchor-positive and anchor-negative distances for the training images
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    model.to(device)
    triplet_loss = TripletMarginLoss(args.margin)
    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        print(80 * '=')
        print('Epoch [{}/{}]'.format(epoch, end - 1))
        time0 = time.time()
        own_train(train_loader, model, triplet_loss, optimizer, epoch,
                  data_size)
        print(f' Execution time    = {time.time() - time0}')
        print(80 * '=')

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader,
                                     LOG_DIR + "/train_{}".format(epoch))
    print(80 * '=')
    time0 = time.time()
    own_test(test_loader, model, epoch)
    print(f' Execution time    = {time.time() - time0}')
    print(80 * '=')
    if test_display_triplet_distance:
        display_triplet_distance_test(model, test_loader,
                                      LOG_DIR + "/test_{}".format(epoch))
Example No. 2
def main():
    # Display the anchor-positive and anchor-negative distances for the training images
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(inceptionresnet_v1,
                      embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    para_model = torch.nn.DataParallel(model)
    for epoch in range(start, end):
        train(train_loader, para_model, optimizer, epoch)
        # test(test_loader, model, epoch)
        # do checkpointing
        torch.save({
            'epoch': epoch + 1,
            'state_dict': model.state_dict()
        }, '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader,
                                     LOG_DIR + "/train_{}".format(epoch))
Example No. 3
# pathC = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/396e4702", 'best.pth.tar')
pathC = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/1daed2c2",
                     'best.pth.tar')
# 396e4702: trained with data augmentation; standalone F1 score 0.87; with the AB results fed in, F1 score 0.834

# 1daed2c2: trained on the fixed crop dataset without data augmentation; standalone F1 score 0.8551966; with the AB results fed in, F1 score 0.650

# state = torch.load(path, map_location=device)
stateAB = torch.load(pathAB)
stateC = torch.load(pathC)
stateABC = torch.load(pathABC)

# model1.load_state_dict(stateAB['model1'])
# select_model.load_state_dict(stateAB['select_net'])
# model2.load_state_dict(stateC['model2'])
model1.load_state_dict(stateAB['model1'])
select_model.load_state_dict(stateAB['select_net'])
model2.load_state_dict(stateC['model2'])
# stateABC: 0.8088
# stateC: 0.69

# Dataset and Dataloader
# Dataset Read_in Part
root_dir = "/data1/yinzi/datas"
parts_root_dir = "/home/yinzi/data3/recroped_parts"

txt_file_names = {
    'train': "exemplars.txt",
    'val': "tuning.txt",
    'test': "testing.txt"
}
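
txt_file_names maps each split to an annotation file, but the dataset class that consumes it is not part of this snippet. The following is a minimal sketch of how such a mapping is typically turned into per-split loaders, assuming a hypothetical HelenDataset(txt_file, root_dir, parts_root_dir) class; the class name, its signature and the batch size are assumptions.

from torch.utils.data import DataLoader

# Sketch only: one dataset and one loader per split, driven by txt_file_names.
datasets = {
    split: HelenDataset(txt_file=os.path.join(root_dir, fname),
                        root_dir=root_dir,
                        parts_root_dir=parts_root_dir)
    for split, fname in txt_file_names.items()
}
dataloaders = {
    split: DataLoader(datasets[split], batch_size=16,
                      shuffle=(split == 'train'), num_workers=4)
    for split in txt_file_names
}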
Example No. 4
def main():
    args = parser.parse_args()

    cuda = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed(args.seed)

    # 1. dataset
    root = args.root
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    test_transforms = transforms.Compose([
        transforms.Resize(96),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    test_dataset = ImageFolder(root, transform=test_transforms)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)

    val_iterator = validation_iterator(test_loader)

    # 2. model
    #train_dir = FaceDataset(dir='/media/lior/LinuxHDD/datasets/MSCeleb-cleaned',n_triplets=10)

    print('construct model')
    model = FaceModel(embedding_size=128, num_classes=3367, pretrained=False)

    if cuda:
        model = model.cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)

            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # extract features (no gradients needed at inference time)
    print('extracting feature')
    model.eval()
    embeds = []
    labels = []
    with torch.no_grad():
        for data, target in val_iterator:
            if cuda:
                data, target = data.cuda(), target.cuda(non_blocking=True)
            # compute output
            output = model(data)

            embeds.append(output.cpu().numpy())
            labels.append(target.cpu().numpy())

    embeds = np.vstack(embeds)
    labels = np.hstack(labels)

    print('embeds shape is ', embeds.shape)
    print('labels shape is ', labels.shape)

    # prepare dict for display
    namedict = dict()
    for i in range(len(test_dataset.classes)):
        namedict[i] = str(i)

    visual_feature_space(embeds, labels, len(test_dataset.classes), namedict)
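
visual_feature_space itself is not defined in this snippet. A comparable visualization can be sketched with scikit-learn's TSNE and matplotlib; the function below is an assumed stand-in for illustration, not the original implementation.

from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np

def visualize_embeddings(embeds, labels, namedict, max_classes=10):
    # Project the embeddings to 2D with t-SNE and scatter-plot one colour per class.
    # Only the first max_classes classes are drawn to keep the legend readable.
    coords = TSNE(n_components=2, init='pca', random_state=0).fit_transform(embeds)
    plt.figure(figsize=(8, 8))
    for lab in np.unique(labels)[:max_classes]:
        mask = labels == lab
        plt.scatter(coords[mask, 0], coords[mask, 1], s=5,
                    label=namedict.get(int(lab), str(lab)))
    plt.legend(loc='best', markerscale=3)
    plt.title('t-SNE projection of the extracted embeddings')
    plt.show()

# Usage with the arrays computed above:
# visualize_embeddings(embeds, labels, namedict)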