Example No. 1
def image(input_image_path, display, gradcam, output_csv_file, screen_size, device, branch, face_detection):
    """
    Receives the full path to a image file and recognizes
    facial expressions of the closets face in a frame-based approach.

    TODO: Write docstring

    :param input_image_path:
    :param display:
    :param gradcam:
    :param output_csv_file:
    :param screen_size:
    :param device:
    :param branch:
    :return:
    """

    image = uimage.read(input_image_path)

    # Call FER method
    fer = cvision.recognize_facial_expression(image, device, face_detection, gradcam)

    # Write the recognition result to a CSV file (same ufile pattern as in
    # Example No. 2)
    if output_csv_file:
        ufile.create_file(output_csv_file, input_image_path)
        ufile.write_to_file(fer, 0.0)
        ufile.close_file()

    if display:
        fer_demo = FERDemo(screen_size=screen_size, display_individual_classification=branch, display_graph_ensemble=False)
        fer_demo.update(fer)
        while fer_demo.is_running():
            fer_demo.show()
        fer_demo.quit()
    return fer
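
A minimal call sketch for the function above, assuming it and its helper modules (uimage, cvision, FERDemo, ufile) are importable from the surrounding project; the argument values below are illustrative assumptions, not values taken from the example.

# Hypothetical usage: recognize the expression in one image and display it,
# without Grad-CAM or CSV output. All argument values are assumptions.
result = image(input_image_path='face.jpg',   # illustrative path
               display=True,                  # open a FERDemo window
               gradcam=False,                 # skip Grad-CAM visualization
               output_csv_file=None,          # do not write a CSV file
               screen_size=1,                 # display size preset (assumed)
               device='cpu',                  # computing device (assumed value)
               branch=False,                  # aggregate classification only
               face_detection=1)              # face detector selector (assumed)
print(result)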
Example No. 2
def image(input_image_path, display, gradcam, output_csv_file, screen_size,
          device, branch, face_detection):
    """
    Receives the full path to an image file and recognizes the facial
    expression of the closest face in a frame-based approach.
    """

    write_to_file = output_csv_file is not None
    img = uimage.read(input_image_path)

    # Call FER method
    fer = cvision.recognize_facial_expression(img, device, face_detection,
                                              gradcam)

    if write_to_file:
        ufile.create_file(output_csv_file, input_image_path)
        ufile.write_to_file(fer, 0.0)
        ufile.close_file()

    if display:
        fer_demo = FERDemo(screen_size=screen_size,
                           display_individual_classification=branch,
                           display_graph_ensemble=False)
        fer_demo.update(fer)
        while fer_demo.is_running():
            fer_demo.show()
        fer_demo.quit()
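
The ufile helpers used above (create_file, write_to_file, close_file) belong to the surrounding project and are not shown in this example. A minimal stand-in sketch with the same call shape, assuming a module-level CSV writer and falling back to the string representation of the fer object, could look like this.

# Hypothetical stand-in for the project's ufile module; the real
# implementation and the structure of the fer object are assumptions here.
import csv

_csv_file = None
_csv_writer = None

def create_file(output_csv_file, input_image_path):
    # Open the CSV file and record which image the results refer to.
    global _csv_file, _csv_writer
    _csv_file = open(output_csv_file, 'w', newline='')
    _csv_writer = csv.writer(_csv_file)
    _csv_writer.writerow(['input', input_image_path])

def write_to_file(fer, timestamp):
    # One row per call: timestamp plus the serialized recognition result.
    _csv_writer.writerow([timestamp, str(fer)])

def close_file():
    global _csv_file, _csv_writer
    _csv_file.close()
    _csv_file, _csv_writer = None, None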
Example No. 3
    def _load(self):
        """
        Loads AffectNet images and their (valence, arousal) labels for the set
        selected by self.idx_set (0: supervised, 1: unsupervised,
        2: validation), capping the number of images loaded per quadrant label.
        """
        data_affect_net, labels_affect_net = [], []
        counter_loaded_images_per_label = [0 for _ in range(self.num_labels)]

        if self.idx_set == 0:
            path_folders_affect_net = path.join(
                self.base_path_to_affectnet, self.affectnet_sets['supervised'])
        elif self.idx_set == 1:
            path_folders_affect_net = path.join(
                self.base_path_to_affectnet,
                self.affectnet_sets['unsupervised'])
        else:
            path_folders_affect_net = path.join(
                self.base_path_to_affectnet, self.affectnet_sets['validation'])

        folders_affect_net = sort_numeric_directories(
            listdir(path_folders_affect_net))
        # Randomize folders
        if self.idx_set < 2:
            np.random.shuffle(folders_affect_net)

        has_loading_finished = False  # ensure the flag exists even if a folder has no images
        for f_af in folders_affect_net:
            path_images_affect_net = path.join(path_folders_affect_net, f_af)

            images_affect_net = np.sort(
                np.array(listdir(path_images_affect_net)))
            # Randomize images
            if self.idx_set < 2:
                np.random.shuffle(images_affect_net)

            for file_name_image_affect_net in images_affect_net:
                valence, arousal, quadrant = self._parse_to_label(
                    file_name_image_affect_net)

                if (quadrant >=
                        0) and (counter_loaded_images_per_label[int(quadrant)]
                                < self.max_loaded_images_per_label):
                    img = np.array(
                        uimage.read(
                            path.join(path_images_affect_net,
                                      file_name_image_affect_net)), np.uint8)
                    data_affect_net.append(img)

                    labels_affect_net.append(
                        [np.float32(valence),
                         np.float32(arousal)])

                    counter_loaded_images_per_label[int(quadrant)] += 1

                has_loading_finished = (
                    np.sum(counter_loaded_images_per_label) >=
                    (self.max_loaded_images_per_label * self.num_labels))

                if has_loading_finished:
                    break

            if has_loading_finished:
                break

        return [np.array(data_affect_net), np.array(labels_affect_net)]
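
The sort_numeric_directories helper called above is part of the surrounding project and is not shown here. A plausible minimal stand-in, assuming the AffectNet folder names are plain integers, is sketched below.

# Hypothetical stand-in: sort folder names by integer value so that '10'
# comes after '2' (plain lexicographic sorting would put '10' first).
def sort_numeric_directories(directory_names):
    return sorted(directory_names, key=int)

# Example: sort_numeric_directories(['10', '2', '1']) -> ['1', '2', '10']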
Example No. 4
def pre_process_affect_net(base_path_to_images, base_path_to_annotations,
                           set_index):
    """
    Pre-process the AffectNet dataset. Faces are cropped and resized to 96 x 96 pixels.
    The images are organized in folders with 500 images each. The test set had not been released
    when this experiment was carried out.

    :param base_path_to_images: (string) Path to images.
    :param base_path_to_annotations: (string) Path to annotations.
    :param set_index: (int = {0, 1, 2}) set_index = 0 processes the automatically annotated images.
                                        set_index = 1 processes the manually annotated images: training set.
                                        set_index = 2 processes the manually annotated images: validation set.
    :return: (void)
    """

    assert ((set_index < 3)
            and (set_index >= 0)), "set_index must be 0, 1 or 2."

    annotation_folders = [
        'Automatically_Annotated_Images/', 'Manually_Annotated_Images/',
        'Manually_Annotated_Images/'
    ]
    destination_set_folders = [
        'AffectNet/Training_Unlabeled/', 'AffectNet/Training_Labeled/',
        'AffectNet/Validation/'
    ]
    annotation_file_names = [
        'automatically_annotated.csv', 'Manually_training.csv',
        'Manually_validation.csv'
    ]

    image_id = 0
    error_image_id = []
    img_size = (96, 96)
    num_images_per_folder = 500

    annotation_file = pandas.read_csv(
        path.join(base_path_to_annotations, annotation_file_names[set_index]))

    for line in range(image_id, annotation_file.shape[0]):
        try:
            # Read image
            img_file_name = annotation_file.get('subDirectory_filePath')[line]
            img_full_path = path.join(base_path_to_images,
                                      annotation_folders[set_index],
                                      img_file_name)
            img = uimage.read(img_full_path)

            # Crop face (numpy images are indexed as [row, column], i.e. [y, x])
            x = int(annotation_file.get('face_x')[line])
            y = int(annotation_file.get('face_y')[line])
            w = int(annotation_file.get('face_width')[line])
            h = int(annotation_file.get('face_height')[line])
            img = img[y:y + h, x:x + w, :]

            # Resize image
            img = uimage.resize(img, img_size)

            # Save image
            folder = str(image_id // num_images_per_folder)
            exp = annotation_file.get('expression')[line]
            val = annotation_file.get('valence')[line]
            aro = annotation_file.get('arousal')[line]
            file_name = _generate_single_file_name(image_id, exp, val, aro)
            uimage.write(
                img,
                path.join(base_path_to_images,
                          destination_set_folders[set_index], folder),
                file_name)
            image_id += 1
        except Exception:
            print('ERROR: The image ID %d is corrupted.' % image_id)
            error_image_id.append(image_id)

    print('Dataset has been processed.')
    # image_id is only incremented after a successful write, so it already
    # equals the number of successfully processed images.
    print('Images successfully processed: %d' % image_id)
    print('Images processed with error: %d' % len(error_image_id))
    print('Image IDs processed with error: %s' % error_image_id)
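
A minimal usage sketch, following the set_index convention documented in the docstring above; both paths are illustrative placeholders, not paths used by the example.

# Hypothetical usage: pre-process the manually annotated training set
# (set_index = 1). Point the paths at a local AffectNet copy.
pre_process_affect_net(base_path_to_images='/data/AffectNet/',
                       base_path_to_annotations='/data/AffectNet/annotations/',
                       set_index=1)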
Example No. 5
    def _load(self):
        """
        Loads FER+ images and their emotion labels for the set selected by
        self.idx_set (the training set, idx_set == 0, is shuffled), capping
        the number of images loaded per label.
        """
        csv_label = []
        data, labels = [], []
        counter_loaded_images_per_label = [0 for _ in range(self.num_labels)]

        path_folders_images = path.join(self.base_path_to_FER_plus, 'Images',
                                        self.fer_sets[self.idx_set])
        path_folders_labels = path.join(self.base_path_to_FER_plus, 'Labels',
                                        self.fer_sets[self.idx_set])

        with open(path_folders_labels + '/label.csv') as csvfile:
            lines = csv.reader(csvfile)
            for row in lines:
                csv_label.append(row)

        # Shuffle training set
        if self.idx_set == 0:
            np.random.shuffle(csv_label)

        for l in csv_label:
            emotion_raw = list(map(float, l[2:len(l)]))
            emotion = self._process_data(emotion_raw)
            emotion = emotion[:-2]

            try:
                emotion = [float(i) / sum(emotion) for i in emotion]
                emotion = self._parse_to_label(emotion)
            except ZeroDivisionError:
                emotion = 9

            if (emotion < self.num_labels) and (
                    counter_loaded_images_per_label[int(emotion)] <
                    self.max_loaded_images_per_label):
                counter_loaded_images_per_label[int(emotion)] += 1

                img = np.array(
                    uimage.read(path.join(path_folders_images, l[0])),
                    np.uint8)

                box = list(map(int, l[1][1:-1].split(',')))

                if box[-1] != 48:
                    print("[INFO] Face is not centralized.")
                    print(path.join(path_folders_images, l[0]))
                    print(box)
                    exit(-1)

                img = img[box[0]:box[2], box[1]:box[3], :]
                img = uimage.resize(img, (96, 96))

                data.append(img)
                labels.append(emotion)

            has_loading_finished = (
                np.sum(counter_loaded_images_per_label) >=
                (self.max_loaded_images_per_label * self.num_labels))

            if has_loading_finished:
                break

        return [np.array(data), np.array(labels)]
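
To make the label handling above concrete: the raw FER+ vote counts are normalized into a probability distribution, and a ZeroDivisionError (all remaining votes zero) falls through to the sentinel label 9. The sketch below illustrates only that step, with the project's _parse_to_label replaced by a hypothetical argmax stand-in.

# Hypothetical illustration of the normalization and fallback above; the
# argmax stand-in is an assumption, not the project's _parse_to_label.
def normalize_and_label(votes, unknown_label=9):
    try:
        distribution = [float(v) / sum(votes) for v in votes]
        return distribution.index(max(distribution))
    except ZeroDivisionError:
        # All votes are zero: fall back to the sentinel label.
        return unknown_label

print(normalize_and_label([0, 3, 1, 0, 0, 0, 0]))  # -> 1
print(normalize_and_label([0, 0, 0, 0, 0, 0, 0]))  # -> 9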