def _draw_input_container(self, is_blank):
    """Render the input (left-hand) container with the current input image.

    :param is_blank: if True, draw a placeholder message instead of the image.
    """
    self._input_container = self._get_container(0, 0, self._container_height, self._container_width)

    if is_blank:
        uimage.draw_text(self._input_container,
                         FERDemo._TEXT_BLANK_INPUT,
                         self._container_center_position - 60,
                         FERDemo._COLOUR_BGR_WHITE,
                         FERDemo._TEXT_PARAM_SCALE[self._screen_size],
                         FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
    else:
        # Compute resize factor 'f' so the image fills the container while
        # staying within [_INPUT_IMAGE_SCALE_MIN, _INPUT_IMAGE_SCALE_MAX]
        # of the container's limiting dimension.
        h, w, c = self._fer.input_image.shape
        h_c, w_c, c_c = self._input_container.shape
        h_ratio = h / h_c
        w_ratio = w / w_c

        if h_ratio > w_ratio:
            # Height is the limiting dimension
            if h < (self._container_height * FERDemo._INPUT_IMAGE_SCALE_MIN):
                f = (self._container_height * FERDemo._INPUT_IMAGE_SCALE_MIN) / float(h)
            else:
                f = (self._container_height * FERDemo._INPUT_IMAGE_SCALE_MAX) / float(h)
        else:
            # Width is the limiting dimension.
            # BUG FIX: the threshold previously compared 'w' against
            # (_container_height * _INPUT_IMAGE_SCALE_MIN); the width branch
            # must use _container_width, mirroring the height branch above
            # and the factor computation below.
            if w < (self._container_width * FERDemo._INPUT_IMAGE_SCALE_MIN):
                f = (self._container_width * FERDemo._INPUT_IMAGE_SCALE_MIN) / float(w)
            else:
                f = (self._container_width * FERDemo._INPUT_IMAGE_SCALE_MAX) / float(w)

        # Resize input image
        self._input_image = uimage.resize(self._fer.input_image, f=f)

        # Centre the resized image inside the container
        h, w, c = self._input_image.shape
        x = int((self._container_height // 2) - (h // 2))
        y = int((self._container_width // 2) - (w // 2))
        self._input_container[x:(x + h), y:(y + w), :] = self._input_image
def _generate_block_ensemble(self, network_name, emotion, valence, arousal, face_image=None, x=0, y=0):
    """Compose the detailed ensemble display block.

    Draws the face thumbnail (if given), the network/emotion labels, the
    activation and pleasant/unpleasant read-outs with their bars, and
    optionally the rolling arousal/valence graph. Returns the block image.
    """
    size = self._screen_size
    scale = FERDemo._TEXT_PARAM_SCALE[size]
    thickness = FERDemo._TEXT_PARAM_THICKNESS[size]
    black = FERDemo._COLOUR_BGR_BLACK

    block = self._get_container(x, y, self._output_block_height_ensemble, self._output_block_width)

    # Optional face thumbnail
    if face_image is not None:
        uimage.draw_image(block, face_image, FERDemo._BLOCK_INIT_POS_IMAGE_ENSEMBLE[size])

    # Network name and discrete emotion labels
    uimage.draw_text(block, network_name,
                     FERDemo._BLOCK_INIT_POS_TEXT_NETWORK_ENSEMBLE[size],
                     black, scale, thickness)
    uimage.draw_text(block, emotion,
                     FERDemo._BLOCK_INIT_POS_TEXT_EMOTION_ENSEMBLE[size],
                     black, scale, thickness)

    # Dimensional read-outs. Positive valence is shown under "pleasant",
    # negative under "unpleasant", the other side reads " 0.00".
    # NOTE(review): the unpleasant label prints the signed (negative) value
    # while its bar uses the absolute value — presumably intentional; confirm.
    pleasant_text = " 0.00" if valence < 0 else f" {valence:.2f}"
    unpleasant_text = f" {valence:.2f}" if valence < 0 else " 0.00"

    uimage.draw_text(block, FERDemo._TEXT_ACTIVATION + f" {arousal:.2f}",
                     FERDemo._BLOCK_INIT_POS_TEXT_ACTIVATION[size],
                     black, scale, thickness)
    uimage.draw_text(block, FERDemo._TEXT_PLEASANT + pleasant_text,
                     FERDemo._BLOCK_INIT_POS_TEXT_PLEASANT[size],
                     black, scale, thickness)
    uimage.draw_text(block, FERDemo._TEXT_UNPLEASANT + unpleasant_text,
                     FERDemo._BLOCK_INIT_POS_TEXT_UNPLEASANT[size],
                     black, scale, thickness)

    # Activation bar
    uimage.draw_horizontal_bar(block, arousal, FERDemo._MAX_AROUSAL,
                               FERDemo._BLOCK_INIT_POS_BAR_ACTIVATION[size],
                               FERDemo._BLOCK_FINAL_POS_BAR_ACTIVATION[size],
                               thickness, FERDemo._COLOUR_BGR_DARK_BLUE)

    # Pleasant bar: only the positive part of valence
    uimage.draw_horizontal_bar(block,
                               valence if valence >= 0.0 else 0.0,
                               FERDemo._MAX_VALENCE,
                               FERDemo._BLOCK_INIT_POS_BAR_PLEASANT[size],
                               FERDemo._BLOCK_FINAL_POS_BAR_PLEASANT[size],
                               thickness, FERDemo._COLOUR_BGR_DARK_GREEN)

    # Unpleasant bar: magnitude of the negative part of valence
    uimage.draw_horizontal_bar(block,
                               np.abs(valence) if valence < 0.0 else 0.0,
                               FERDemo._MAX_VALENCE,
                               FERDemo._BLOCK_INIT_POS_BAR_UNPLEASANT[size],
                               FERDemo._BLOCK_FINAL_POS_BAR_UNPLEASANT[size],
                               thickness, FERDemo._COLOUR_BGR_DARK_RED)

    # Rolling arousal/valence graph (mutates the plot history lists)
    if self._display_graph_ensemble:
        self._plot_arousal.append(arousal)
        self._plot_valence.append(valence)
        uimage.draw_graph(block, self._plot_arousal, self._plot_valence,
                          FERDemo._BLOCK_INIT_POS_GRAPH[size],
                          FERDemo._BLOCK_SAMPLE_GRAPH,
                          FERDemo._TEXT_ACTIVATION_WITHOUT_TWO_DOTS,
                          FERDemo._TEXT_PLEASANT_UNPLEASANT,
                          FERDemo._COLOUR_BGR_BLUE,
                          FERDemo._COLOUR_BGR_ORANGE,
                          FERDemo._BLOCK_THICKNESS_GRAPH[size],
                          FERDemo._BLOCK_OFFSET_GRAPH[size],
                          FERDemo._BLOCK_FONT_SIZE_GRAPH[size],
                          FERDemo._COLOUR_BGR_DARK_GREY,
                          FERDemo._BLOCK_SIZE_GRAPH[size])

    return block
def _generate_block(self, network_name, emotion, valence, arousal, face_image=None, x=0, y=0):
    """Compose a compact display block for one network prediction.

    Draws the optional face thumbnail, the network/emotion labels, the
    arousal and valence captions, and the two bars. The valence bar shows
    the magnitude, coloured red for negative and green for non-negative
    values. Returns the block image.
    """
    size = self._screen_size
    scale = FERDemo._TEXT_PARAM_SCALE[size]
    thickness = FERDemo._TEXT_PARAM_THICKNESS[size]
    black = FERDemo._COLOUR_BGR_BLACK

    block = self._get_container(x, y, self._output_block_height, self._output_block_width)

    # Optional face thumbnail
    if face_image is not None:
        uimage.draw_image(block, face_image, FERDemo._BLOCK_INIT_POS_IMAGE[size])

    # Labels: network name, emotion, and the two dimension captions
    labels = ((network_name, FERDemo._BLOCK_INIT_POS_TEXT_NETWORK[size]),
              (emotion, FERDemo._BLOCK_INIT_POS_TEXT_EMOTION[size]),
              (FERDemo._TEXT_AROUSAL, FERDemo._BLOCK_INIT_POS_TEXT_AROUSAL[size]),
              (FERDemo._TEXT_VALENCE, FERDemo._BLOCK_INIT_POS_TEXT_VALENCE[size]))
    for text, position in labels:
        uimage.draw_text(block, text, position, black, scale, thickness)

    # Arousal bar
    uimage.draw_horizontal_bar(block, arousal, FERDemo._MAX_AROUSAL,
                               FERDemo._BLOCK_INIT_POS_BAR_AROUSAL[size],
                               FERDemo._BLOCK_FINAL_POS_BAR_AROUSAL[size],
                               thickness, FERDemo._COLOUR_BGR_DARK_BLUE)

    # Valence bar: magnitude, red when negative, green otherwise
    valence_colour = (FERDemo._COLOUR_BGR_DARK_RED if valence < 0.0
                      else FERDemo._COLOUR_BGR_DARK_GREEN)
    uimage.draw_horizontal_bar(block, np.abs(valence), FERDemo._MAX_VALENCE,
                               FERDemo._BLOCK_INIT_POS_BAR_VALENCE[size],
                               FERDemo._BLOCK_FINAL_POS_BAR_VALENCE[size],
                               thickness, valence_colour)

    return block
def _draw_output_container(self, is_blank):
    """Render the output (right-hand) container with the recognition results.

    Depending on state this shows: a blank-input message, a no-face-detected
    message, the ensemble block plus one block per branch (individual
    classification view), or a single detailed ensemble block.

    :param is_blank: if True, draw the blank-input placeholder and return.
    """
    size = self._screen_size
    y_init = self._output_container_initial_position[1]
    self._output_container = self._get_container(0, y_init,
                                                 self._container_height,
                                                 self._container_width)

    # Guard: no input at all
    if is_blank:
        uimage.draw_text(self._output_container, FERDemo._TEXT_BLANK_INPUT,
                         self._container_center_position - 60,
                         FERDemo._COLOUR_BGR_WHITE,
                         FERDemo._TEXT_PARAM_SCALE[size],
                         FERDemo._TEXT_PARAM_THICKNESS[size])
        return

    # Guard: input present but no face was detected
    if self._fer.face_image is None:
        uimage.draw_text(self._output_container, FERDemo._TEXT_NO_FACE,
                         self._container_center_position - 210,
                         FERDemo._COLOUR_BGR_BLACK,
                         FERDemo._TEXT_PARAM_SCALE[size],
                         FERDemo._TEXT_PARAM_THICKNESS[size])
        return

    if self._display_individual_classification:
        # Ensemble summary on top, then one block per branch below it
        face_image = uimage.resize(self._fer.face_image,
                                   FERDemo._BLOCK_IMAGE_SIZE[size])

        ensemble_block = self._generate_block(
            FERDemo._TEXT_ENSEMBLE,
            self._fer.list_emotion[-1],
            self._fer.list_affect[-1][0],
            self._fer.list_affect[-1][1],
            face_image=face_image,
            x=0,
            y=y_init)
        uimage.draw_image(self._output_container, ensemble_block, (0, 0))

        for branch in range(len(self._fer.list_emotion) - 1):
            # Superimpose the branch's saliency map on the face image
            grad_cam = self._fer.get_grad_cam(branch)
            if grad_cam is not None:
                grad_cam = uimage.superimpose(grad_cam, face_image)

            x_offset = self._output_block_height * (branch + 1)
            branch_block = self._generate_block(
                FERDemo._TEXT_BRANCH.format(branch + 1),
                self._fer.list_emotion[branch],
                self._fer.list_affect[branch][0],
                self._fer.list_affect[branch][1],
                grad_cam,
                x=x_offset,
                y=y_init)
            uimage.draw_image(self._output_container, branch_block, (x_offset, 0))
    else:
        # Detailed view of the ensemble prediction only
        face_image = uimage.resize(self._fer.face_image,
                                   FERDemo._BLOCK_IMAGE_SIZE_ENSEMBLE[size])
        block = self._generate_block_ensemble(
            FERDemo._TEXT_ENSEMBLE,
            self._fer.list_emotion[-1],
            self._fer.list_affect[-1][0],
            self._fer.list_affect[-1][1],
            face_image=face_image,
            x=0,
            y=y_init)
        uimage.draw_image(self._output_container, block, (0, 0))