Exemplo n.º 1
0
    def classify_one_image(self,
                           imgf,
                           classes=('afraid', 'angry', 'disgusted', 'happy',
                                    'neutral', 'sad', 'surprised')):
        """Detect the face in *imgf*, classify its emotion, and display the result.

        Args:
            imgf: Path to the input image file.
            classes: Ordered class labels matching the model's output units
                (tuple default — a mutable list default would be shared
                across calls).

        Side effects:
            Writes a cropped face image into a temporary directory (removed
            on exit) and opens a matplotlib window showing the original
            image, the per-class softmax scores, and the predicted label.
        """
        # ImageNet normalization constants, standard for pretrained backbones.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        # transforms.Scale was deprecated and later removed from torchvision;
        # transforms.Resize is the drop-in replacement with identical semantics.
        transf = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        # Face-detection options (semantics defined by extract_faces — TODO
        # confirm meanings against that module).
        args = {
            'threshold': 0.0,
            'window': False,
            'ignore_multi': True,
            'grow': 10,
            'resize': True,
            'row_resize': 512,
            'col_resize': 512,
            'min_proportion': 0.1,
        }
        with tempfile.TemporaryDirectory() as tempdir:
            args['o'] = tempdir
            face_detector.transform(extract_faces.AttributeDict(args), [imgf])
            # The detector writes the cropped face under the input's basename.
            cropped = Image.open(os.path.join(tempdir, os.path.basename(imgf)))
            # PIL loads lazily: the transform must run while the temp file
            # still exists, i.e. inside this `with` block. (The original code
            # transformed after the directory was deleted — a latent bug.)
            cropped = transf(cropped)

        # Add the batch dimension. Variable is a no-op wrapper in modern
        # PyTorch, so a plain tensor is used instead.
        input_batch = cropped.unsqueeze(0)

        if torch.cuda.is_available():
            input_batch = input_batch.cuda()

        # Call the model directly (not .forward) so registered hooks run;
        # no_grad avoids building an unused autograd graph during inference.
        with torch.no_grad():
            output = self.model(input_batch).cpu().numpy()
        # Numerically stable softmax: subtract the max before exponentiating.
        shifted = np.exp(output - np.max(output))
        softmax = shifted / np.sum(shifted)
        clss = np.argmax(softmax)

        # Show the original image with the scores and predicted class label.
        fig = plt.figure()
        plt.imshow(Image.open(imgf))
        fig.subplots_adjust(bottom=0.2)
        plt.figtext(0.1, 0.05, ', '.join(classes))
        plt.figtext(
            0.1, 0.10,
            ', '.join(['{:.3}'.format(a) for a in softmax.reshape(-1)]))
        plt.title(classes[clss])
        plt.show()
Exemplo n.º 2
0
def _inner(arg):
    """Worker helper: run face detection for one group of files.

    *arg* is a single packed tuple (pool-friendly) of:
    (key into *emotion*, list of input files, emotion mapping,
    output root directory, detector options dict).
    """
    key, files, emotion, outdir, opts = arg
    # Route detector output into the per-emotion subdirectory.
    opts['o'] = outdir + '/' + emotion[key]
    face_detector.transform(AttributeDict(opts), files)