def custom_loss_hm(self, ten_hm_t, ten_hm_p):
    """Heatmap loss: per-sample MSE over the raw heatmaps plus per-sample
    MSE over the landmark points decoded from those heatmaps.

    Args:
        ten_hm_t: ground-truth heatmap tensor — assumed shape
            [batch, H, W, n_points] (e.g. [None, 56, 56, 68]) — TODO confirm.
        ten_hm_p: predicted heatmap tensor, same shape as `ten_hm_t`.

    Returns:
        1-D tensor of per-sample losses (heatmap MSE + decoded-point MSE).
    """
    tf_utility = TFRecordUtility()

    # Per-sample MSE over all heatmap pixels and channels. A single mean
    # over axes 1..3 is equivalent to the chained per-axis means, because
    # every reduced group has a fixed, equal size.
    sqr = K.square(ten_hm_t - ten_hm_p)  # [batch, H, W, n_points]
    tensor_mean_square_error = K.mean(sqr, axis=(1, 2, 3))  # [batch]

    # Decode landmark points from each sample's generated heatmaps.
    # NOTE(review): this unrolled stack requires the runtime batch size to
    # equal LearningConfig.batch_size — confirm the input pipeline drops
    # partial final batches.
    p_points_batch = tf.stack([tf_utility.from_heatmap_to_point_tensor(ten_hm_p[i], 5, 1)
                               for i in range(LearningConfig.batch_size)])
    t_points_batch = tf.stack([tf_utility.from_heatmap_to_point_tensor(ten_hm_t[i], 5, 1)
                               for i in range(LearningConfig.batch_size)])

    # Point batches are [batch, 2, n_points]; reduce both trailing axes.
    sqr_2 = K.square(t_points_batch - p_points_batch)
    tensor_indices_mean_square_error = K.mean(sqr_2, axis=(1, 2))  # [batch]

    # Element-wise add keeps a per-sample loss vector (tf.reduce_mean over
    # the pair, as in an earlier revision, would collapse it to a scalar).
    tensor_total_loss = tf.add(tensor_mean_square_error, tensor_indices_mean_square_error)
    return tensor_total_loss
def convert_hm_to_pts(self, hm):
    """Decode a batch of heatmaps into normalized landmark coordinates.

    Each sample's heatmaps are turned into points via TFRecordUtility, then
    mapped from pixel space [0, image_input_size] into [-0.5, +0.5].

    Args:
        hm: batch of heatmap tensors, indexable per sample.

    Returns:
        Tensor of shape (LearningConfig.batch_size, self.num_landmark).
    """
    half_size = InputDataSize.image_input_size // 2
    full_size = InputDataSize.image_input_size
    util = TFRecordUtility(self.num_landmark // 2)
    # Constant per-coordinate offset; hoisted since it never changes.
    center_offsets = np.repeat(half_size, self.num_landmark)

    normalized = []
    for idx in range(LearningConfig.batch_size):
        pts = util.from_heatmap_to_point_tensor(heatmaps=hm[idx], number_of_points=5)
        pts = tf.reshape(tensor=pts, shape=self.num_landmark)
        # Pixel coords in [0, full_size] -> normalized [-0.5, +0.5].
        shifted = tf.math.subtract(pts, center_offsets)
        normalized.append(tf.math.scalar_mul(scalar=1 / full_size, x=shifted))

    # Stack the per-sample vectors into a (batch_size, num_landmark) tensor.
    return tf.stack(normalized, 0)