# The excerpt below assumes the surrounding project defines the local modules
# and globals it references (VGG16, cfg, arg, camera_face_detect,
# image_face_detect, transform_train, transform_test, use_cuda, device).
import os

import cv2
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from PIL import Image
import matplotlib.pyplot as plt


def camera_predict():
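    """Show the default webcam feed and classify the facial expression on demand.

    Press the spacebar to run a prediction on the current frame; press 'q' to quit.
    """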

    video_captor = cv2.VideoCapture(0)
    predicted_class = None
    while True:
        ret, frame = video_captor.read()

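        # Detect a face in the current frame and draw a bounding box as a live preview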
        face_img, face_coor = camera_face_detect.face_d(frame)

        if face_coor is not None:
            [x_screen, y_screen, w_screen, h_screen] = face_coor
            cv2.rectangle(frame, (x_screen, y_screen),
                          (x_screen + w_screen, y_screen + h_screen),
                          (255, 0, 0), 2)

        # Read keyboard input once per frame: space runs a prediction, q quits
        key = cv2.waitKey(1) & 0xFF
        if key == ord(' '):
            # Reuse the face detected above; its bounding box is already drawn on the frame

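            # Save the cropped face and preprocess it to match the network input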
            if face_img is not None:
                if not os.path.exists(cfg.output_folder):
                    os.makedirs(cfg.output_folder)
                cv2.imwrite(os.path.join(cfg.output_folder, 'face_image.jpg'),
                            face_img)
                # Frames from cv2.VideoCapture are BGR, not RGB
                gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
                gray = cv2.resize(gray, (240, 240))
                img = gray[:, :, np.newaxis]
                img = np.concatenate((img, img, img), axis=2)
                img = Image.fromarray(np.uint8(img))
                inputs = transform_test(img)
                class_names = [
                    'Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad',
                    'Surprised', 'Neutral'
                ]

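                # Load the trained VGG16 weights (the checkpoint is reloaded on every capture)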
                net = VGG16.Net()
                checkpoint = torch.load(cfg.ckpt_path)
                net.load_state_dict(checkpoint['net'])

                if use_cuda:
                    net.to(device)

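                # Switch to evaluation mode and flatten the test-time crops into one batch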
                net.eval()
                ncrops, c, h, w = np.shape(inputs)
                inputs = inputs.view(-1, c, h, w)
                # Run the forward pass without gradient tracking
                # (Variable(..., volatile=True) is deprecated in modern PyTorch)
                with torch.no_grad():
                    if use_cuda:
                        inputs = inputs.to(device)
                    outputs = net(inputs)
                    # Average the predictions over the test-time crops
                    outputs_avg = outputs.view(ncrops, -1).mean(0)
                    score = F.softmax(outputs_avg, dim=0)
                print(score)
                _, predicted = torch.max(outputs_avg.data, 0)
                predicted_class = class_names[int(predicted.cpu().numpy())]
                print(predicted_class)

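            # Overlay the predicted label and save an annotated snapshot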
            if predicted_class is not None:
                cv2.putText(frame, predicted_class, (30, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 1)
                cv2.imwrite(os.path.join(cfg.output_folder, 'predict.jpg'),
                            frame)

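        # Keep the most recent prediction overlaid on the live preview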
        if predicted_class is not None:
            cv2.putText(frame, predicted_class, (30, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 1)
        cv2.imshow('camera', frame)

        if key == ord('q'):
            break
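    # Release the camera and close the preview window when the loop exits
    video_captor.release()
    cv2.destroyAllWindows()


# --- Training-script fragment: dataset loading and checkpoint resume ---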
train_data = torchvision.datasets.ImageFolder(os.path.join(
    cfg.data_path, 'train'),
                                              transform=transform_train)

test_data = torchvision.datasets.ImageFolder(os.path.join(
    cfg.data_path, 'test'),
                                             transform=transform_test)

train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=cfg.bs,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(test_data,
                                          batch_size=cfg.bs,
                                          shuffle=True)

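# Build the model and optionally resume from the best saved checkpoint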
net = VGG16.Net()

if cfg.resume:
    print('------------------------------')
    print('==> Loading the checkpoint ')
    if not os.path.exists(cfg.ckpt_path):
        raise AssertionError('Cannot find the checkpoint path')
    checkpoint = torch.load(cfg.ckpt_path)
    net.load_state_dict(checkpoint['net'])
    best_test_acc = checkpoint['best_test_acc']
    print('best_test_acc is %.4f%%' % best_test_acc)
    best_test_acc_epoch = checkpoint['best_test_acc_epoch']
    print('best_test_acc_epoch is %d' % best_test_acc_epoch)
    start_epoch = checkpoint['best_test_acc_epoch'] + 1
else:
    print('------------------------------')
    # Not resuming: start training from the first epoch
    start_epoch = 0


def image_predict(image):
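    """Classify the facial expression in a single image file and plot the class scores."""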

    image_path = os.path.join(arg.input_folder, image)
    src_img = np.array(Image.open(image_path))
    face_img, face_coor = image_face_detect.face_d(src_img)
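    # Convert the face crop to grayscale, resize it, and replicate it to 3 channels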
    gray = cv2.cvtColor(face_img, cv2.COLOR_RGB2GRAY)
    gray = cv2.resize(gray, (240, 240))
    img = gray[:, :, np.newaxis]
    img = np.concatenate((img, img, img), axis=2)
    img = Image.fromarray(np.uint8(img))
    inputs = transform_test(img)

    class_names = [
        'Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral'
    ]

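    # Load the trained VGG16 and classify the test-time crops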
    net = VGG16.Net()
    checkpoint = torch.load(arg.ckpt_path)
    net.load_state_dict(checkpoint['net'])
    if use_cuda:
        net.cuda()
    net.eval()
    ncrops, c, h, w = np.shape(inputs)
    inputs = inputs.view(-1, c, h, w)

    # Run the forward pass without gradient tracking
    # (Variable(..., volatile=True) is deprecated in modern PyTorch)
    with torch.no_grad():
        if use_cuda:
            inputs = inputs.to(device)
        outputs = net(inputs)
        # Average the predictions over the test-time crops
        outputs_avg = outputs.view(ncrops, -1).mean(0)
        score = F.softmax(outputs_avg, dim=0)
    _, predicted = torch.max(outputs_avg.data, 0)
    expression = class_names[int(predicted.cpu().numpy())]
    if face_coor is not None:
        [x, y, w, h] = face_coor
        cv2.rectangle(src_img, (x, y), (x + w, y + h), (255, 0, 0), 2)

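    # Plot the annotated input image next to the per-class softmax scores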
    plt.rcParams['figure.figsize'] = (11, 6)
    axes = plt.subplot(1, 2, 1)
    plt.imshow(src_img)
    plt.title('Input Image', fontsize=20)
    axes.set_xticks([])
    axes.set_yticks([])
    plt.tight_layout()
    plt.subplots_adjust(left=0.05,
                        bottom=0.2,
                        right=0.95,
                        top=0.9,
                        hspace=0.02,
                        wspace=0.3)
    plt.subplot(1, 2, 2)
    ind = 0.1 + 0.6 * np.arange(len(class_names))
    width = 0.4
    for i in range(len(class_names)):
        plt.bar(ind[i], score.data.cpu().numpy()[i], width, color='orangered')

    plt.title("Result Analysis", fontsize=20)
    plt.xticks(ind, class_names, rotation=30, fontsize=12)
    if arg.SAVE_FLAG:
        if not os.path.exists(arg.output_folder):
            os.makedirs(arg.output_folder)
        save_path = os.path.join(arg.output_folder, image)
        plt.savefig(save_path + '-result' + '.jpg')
    else:
        if arg.show_resultimg:
            plt.show()

    print("The Expression is %s" % expression)