# Example #1
def gen_data_set(root=None):
    """Build train/val ``ImageFolder`` datasets and an index-to-class map.

    Expects ``root`` to contain ``train`` and ``val`` sub-folders laid out
    for ``ImageFolder``. Note that ``root`` is concatenated directly, so it
    must end with a path separator — TODO confirm against callers.

    Args:
        root: Base directory holding the ``train`` and ``val`` folders.

    Returns:
        (trainset, valset, idx_to_class) where ``idx_to_class`` maps each
        integer label index to its class-folder name.

    Raises:
        ValueError: If ``root`` is None, or the train and val folders do
            not contain the same classes.
    """
    if root is None:
        raise ValueError("must set root!")
    trainset = ImageFolder(root + "train", transform=transform)
    valset = ImageFolder(root + "val", transform=transform)
    # Invert class_to_idx so predicted indices can be mapped back to names.
    trainset.idx_to_class = {v: k for k, v in trainset.class_to_idx.items()}
    valset.idx_to_class = {v: k for k, v in valset.class_to_idx.items()}

    # Both splits must expose the same class set, otherwise label indices
    # would disagree between training and validation.
    if trainset.idx_to_class != valset.idx_to_class:
        raise ValueError("train and val folder not equal.")
    return trainset, valset, trainset.idx_to_class
# Example #2
def init_base(root, net, mtcnn=None, K=100):
    """Detect one face per image under ``root``, embed all faces, save a base.

    Returns ``[features, names]`` where ``features`` is an (n, 512) tensor of
    embeddings and ``names`` the matching class-folder names. The base is also
    written to ``{root}/base.pt`` and a face montage to ``{root}/{root}.png``.
    """
    # Single-face mode: each sample yields at most one (C, H, W) crop.
    mtcnn = set_mtcnn(mtcnn, all=False, largest=True)

    dataset = ImageFolder(root)
    dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
    n_workers = 0 if os.name == 'nt' else 4

    def unwrap(batch):
        # batch_size defaults to 1: return the single (img, label) sample.
        return batch[0]

    loader = DataLoader(dataset, collate_fn=unwrap, num_workers=n_workers)

    crops, labels = [], []
    for image, class_idx in loader:
        crop, box, prob = mtcnn(image, landmarks=False)
        if crop is None or box is None:
            continue
        crops.append(crop)
        labels.append(dataset.idx_to_class[class_idx])

    stacked = torch.stack(crops)  # adds the batch dim -> (N, C, H, W)
    montage = cat_face(stacked, labels, sh='face')
    cv2.waitKey()
    cv2.imwrite(f'{root}/{root}.png', montage)  # save the face montage

    net = load_net(net)
    stacked = Norm(stacked)
    # Embed in chunks of K to bound memory; each chunk yields (k, 512).
    feats = [net(stacked[i:i + K]).detach().cpu()
             for i in range(0, len(stacked), K)]
    base = [torch.cat(feats), labels]  # concat merges chunks back to (n, 512)
    torch.save(base, f'{root}/base.pt')
    cv2.destroyAllWindows()
    return base
# This is for our re-trained model
# classify=True makes the network return class logits rather than 512-d
# embeddings — presumably for the vggface2-80 evaluation below; TODO confirm.
model = InceptionResnetV1(pretrained="vggface2", classify=True)


def collate_fn(x):
    """Unwrap a batch of size 1 into its single (img, label) sample."""
    first = x[0]
    return first


# Root of the evaluation dataset (an 80-identity subset of VGGFace2).
path = "../../data/vggface2-80"

# NOTE(review): preprocess is defined but not passed to ImageFolder below,
# so images are loaded un-resized — confirm whether this is intentional.
preprocess = transforms.Compose(
    [transforms.Resize((224, 224)),
     transforms.ToTensor()])

dataset = ImageFolder(path)
# Invert class_to_idx so integer labels can be mapped back to class names.
dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
batch_size = 1
loader = torch.utils.data.DataLoader(dataset,
                                     batch_size=batch_size,
                                     shuffle=False,
                                     collate_fn=collate_fn)

max_iterations = len(loader)

# NOTE(review): num_samples duplicates max_iterations (both len(loader)).
num_samples = len(loader)
num_images = max_iterations * batch_size
print(f"there are {num_images} images")
test_correct = 0
model.eval()  # inference mode: freeze dropout / batch-norm statistics
mtcnn = MTCNN(image_size=160,
              margin=0,
# Example #4
    # Prefer the first GPU when CUDA is available, otherwise run on CPU.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Face detector; thresholds are the per-stage P/R/O-net confidences and
    # factor is the image-pyramid scale step.
    mtcnn = MTCNN(
        image_size=160, margin=0, min_face_size=20,
        thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
        device=device
    )
    # Embedding network pretrained on CASIA-WebFace, set to inference mode.
    resnet = InceptionResnetV1(pretrained='casia-webface').eval().to(device)

    ds = ImageFolder(
        root=args['in_path'],
    )

    def collate_fn(x):
        # Transpose a list of (img, label) pairs into two parallel tuples
        # (imgs, labels) so the detector can take the whole batch at once.
        return list(zip(*x))

    # Invert class_to_idx so integer labels map back to class names.
    ds.idx_to_class = {i: c for c, i in ds.class_to_idx.items()}
    dl = DataLoader(
        ds,
        batch_size=args['batch_size'],
        shuffle=False,
        num_workers=args['num_workers'],
        collate_fn=collate_fn,
    )
    for imgs, classes in tqdm(dl):
        faces, boxes = align_image(imgs, mtcnn)
        for i, (img, cls, face, box) in enumerate(zip(imgs, classes, faces, boxes)):
            if face is None:
                continue
            boxed_img = draw_boxes(img, [box])