Example #1
0
def create_mtcnn_net(image,
                     mini_face,
                     device,
                     p_model_path=None,
                     r_model_path=None,
                     o_model_path=None):
    """Run the MTCNN detection cascade (PNet -> RNet -> ONet) on an image.

    Each stage runs only when its weight-file path is given, so the cascade
    can be truncated (e.g. PNet-only proposals). Stages refine the boxes
    produced by the previous stage; ONet additionally produces landmarks.

    Args:
        image: input image passed through to the detect_* helpers.
        mini_face: minimum face size used by the PNet image pyramid.
        device: torch device ('cpu', 'cuda', or torch.device) to run on.
        p_model_path: path to PNet weights, or None to skip the stage.
        r_model_path: path to RNet weights, or None to skip the stage.
        o_model_path: path to ONet weights, or None to skip the stage.

    Returns:
        (bboxes, landmarks) as numpy arrays; both empty when the
        corresponding stage did not run.
    """
    # BUG FIX: the original initialized `boxes`, leaving `bboxes` undefined
    # (NameError on return, or in the RNet/ONet stages) whenever
    # p_model_path was None. Initialize the name actually used below.
    bboxes = np.array([])
    landmarks = np.array([])

    if p_model_path is not None:
        pnet = PNet().to(device)
        # map_location keeps CPU-saved tensors loadable on any device;
        # .to(device) above moves the module afterwards.
        pnet.load_state_dict(
            torch.load(p_model_path,
                       map_location=lambda storage, loc: storage))
        pnet.eval()  # inference mode: disables dropout/batch-norm updates

        bboxes = detect_pnet(pnet, image, mini_face, device)

    if r_model_path is not None:
        rnet = RNet().to(device)
        rnet.load_state_dict(
            torch.load(r_model_path,
                       map_location=lambda storage, loc: storage))
        rnet.eval()

        # Refines the PNet proposals (assumes the PNet stage ran first —
        # with p_model_path=None this receives an empty array).
        bboxes = detect_rnet(rnet, image, bboxes, device)

    if o_model_path is not None:
        onet = ONet().to(device)
        onet.load_state_dict(
            torch.load(o_model_path,
                       map_location=lambda storage, loc: storage))
        onet.eval()

        bboxes, landmarks = detect_onet(onet, image, bboxes, device)

    return bboxes, landmarks
Example #2
0
# Select the first CUDA device when available, otherwise fall back to CPU.
# (The original assigned `device` twice — "cuda" then "cuda:0"; the first
# assignment was dead code and has been removed.)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Weight files: PNet/ONet detection stages and the landmark regressor.
p_model_path = 'models/pnet_Weights'
o_model_path = 'models/onet_Weights'
l_model_path = 'models/landmark.pkl'

# PNet: proposal network. map_location keeps CPU-saved tensors loadable
# anywhere; .to(device) moves the module to the chosen device.
pnet = PNet().to(device)
pnet.load_state_dict(
    torch.load(p_model_path, map_location=lambda storage, loc: storage))
pnet.eval()  # inference mode

# ONet: output network (final boxes + landmarks).
onet = ONet().to(device)
onet.load_state_dict(
    torch.load(o_model_path, map_location=lambda storage, loc: storage))
onet.eval()

# NOTE(review): this loads a whole pickled module object (not a state
# dict) — torch.load unpickles arbitrary code, so only load trusted files.
landmark = torch.load(l_model_path)
landmark = landmark.to(device)
landmark.eval()

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='MTCNN Demo')
    parser.add_argument("--test_image",
                        dest='test_image',
                        help="test image path",
                        default="./lmark",
                        type=str)
    parser.add_argument("--scale",
                        dest='scale',
Example #3
0
loss_cls = nn.CrossEntropyLoss()
loss_offset = nn.MSELoss()
loss_landmark = nn.MSELoss()

num_epochs = 16
for epoch in range(num_epochs):
    print('Epoch {}/{}'.format(epoch, num_epochs - 1))
    print('-' * 10)

    # Each epoch has a training and validation phase
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()  # set model to training mode
        else:
            model.eval()  # set model to evaluate mode

        running_loss, running_loss_cls, running_loss_offset, running_loss_landmark = 0.0, 0.0, 0.0, 0.0
        running_correct = 0.0
        running_gt = 0.0

        # iterate over data
        for i_batch, sample_batched in enumerate(tqdm(dataloaders[phase])):

            input_images, gt_label, gt_offset, landmark_offset = sample_batched[
                'input_img'], sample_batched['label'], sample_batched[
                    'bbox_target'], sample_batched['landmark']
            input_images = input_images.to(device)
            gt_label = gt_label.to(device)
            # print('gt_label is ', gt_label)
            gt_offset = gt_offset.type(torch.FloatTensor).to(device)