Example #1
0
def print_partial(counter, img, landmarks_arr):
    """Draw each landmark group as a white polyline on a quarter-size
    grayscale canvas and save it as '0_name_<counter>_<index>.png'.

    :param counter: integer embedded in each output file name.
    :param img: source image (currently unused; kept for interface
        compatibility with callers).
    :param landmarks_arr: iterable of normalized landmark vectors, one
        saved image per vector.
    """
    image_utility = ImageUtility()
    # Hoist the loop-invariant quarter-resolution canvas size.
    quarter_size = InputDataSize.image_input_size // 4

    for index, lndm in enumerate(landmarks_arr):
        image = Image.new("L", (quarter_size, quarter_size))
        draw = ImageDraw.Draw(image)

        # Denormalize to 224x224 pixel coordinates (center at 112,112).
        landmark_arr_xy, landmark_arr_x, landmark_arr_y = \
            image_utility.create_landmarks_from_normalized(lndm, 224, 224, 112, 112)

        # Scale the denormalized points down to the quarter-size canvas.
        landmark_arr_xy = (np.array(landmark_arr_xy) // 4).tolist()
        draw.line(landmark_arr_xy, fill='#ffffff', width=2)

        img_np = np.asarray(image)
        plt.imshow(img_np)
        image.save('0_name_' + str(counter) + '_' + str(index) + '.png')
Example #2
0
    def _points_to_2d_face_graph(self, _points):
        """Convert a batch of flattened normalized landmarks to one binary
        2-D heatmap per facial-graph element.

        :param _points: [bs, num_landmark] batch of normalized landmarks.
        :return: [bs, hm_size, hm_size, num_face_graph_elements] array with
            a 1 at each landmark pixel of each part (out-of-range coordinate
            components are clamped to 0).
        """
        '''rescale points'''
        points = np.zeros([LearningConfig.batch_size, self.num_landmark])
        image_utility = ImageUtility()
        for indx, item in enumerate(_points):
            point_scaled, px_1, py_1 = image_utility.create_landmarks_from_normalized(
                item, InputDataSize.hm_size, InputDataSize.hm_size,
                InputDataSize.hm_center, InputDataSize.hm_center)
            points[indx, :] = point_scaled
        '''create partial parts: '''
        partial_points = self._slice_face_graph_np(points)
        '''convert from flatten to 2d'''
        points_2d = []
        for pnt in partial_points:
            # pnt is [bs, k]; reshape each flat (x, y, x, y, ...) row into
            # [k // 2, 2]. Use the column count pnt.shape[1] rather than
            # len(pnt[1]), which indexes row 1 and raises IndexError when
            # batch_size == 1.
            points_2d.append(
                pnt.reshape([LearningConfig.batch_size,
                             pnt.shape[1] // 2, 2]))
        '''create the spare img for each facial part:'''
        hm_img = np.zeros([
            LearningConfig.batch_size, InputDataSize.hm_size,
            InputDataSize.hm_size, self.num_face_graph_elements
        ])
        for i in range(LearningConfig.batch_size):
            for j in range(self.num_face_graph_elements):
                t_hm = np.zeros([InputDataSize.hm_size, InputDataSize.hm_size])
                for x_y in points_2d[j][i]:
                    # Clamp each coordinate independently to 0 if it falls
                    # outside the heatmap.
                    if not (0 <= x_y[0] <= InputDataSize.hm_size - 1):
                        x_y[0] = 0
                    if not (0 <= x_y[1] <= InputDataSize.hm_size - 1):
                        x_y[1] = 0
                    t_hm[int(x_y[1]), int(x_y[0])] = 1

                hm_img[i, :, :, j] = t_hm

        return hm_img
Example #3
0
    def _points_to_2d(self, _points):
        """Render each batch item's normalized landmarks as a single summed
        heatmap.

        :param _points: batch of normalized landmark vectors, indexable by
            sample position.
        :return: np.ndarray of one collapsed heatmap per sample.
        """
        tf_rec = TFRecordUtility(self.num_landmark)
        image_utility = ImageUtility()
        hm_arr = []
        for sample_idx in range(LearningConfig.batch_size):
            xy_pairs, xs, ys = image_utility.create_landmarks_from_normalized(
                _points[sample_idx], InputDataSize.image_input_size,
                InputDataSize.image_input_size, InputDataSize.img_center,
                InputDataSize.img_center)
            # generate_hm produces one layer per landmark; collapse the
            # layer axis into a single map per sample.
            layered = tf_rec.generate_hm(InputDataSize.hm_size,
                                         InputDataSize.hm_size,
                                         np.array(xy_pairs),
                                         self.hm_stride / 2, False)
            hm_arr.append(np.sum(layered, axis=2))
        return np.array(hm_arr)
Example #4
0
 def print_hm_cord(self, epoch, step, images, heatmaps_gr, heatmaps_pr,
                   points_gr, points_pr):
     """Dump per-sample predicted heatmaps and predicted-landmark plots.

     Writes one 'hpr.*' heatmap image and one 'cpr.*' point overlay for
     every sample in the batch.
     """
     # NOTE(review): the incoming epoch is immediately overridden, so all
     # file names use epoch 0 — looks like a deliberate debugging aid;
     # confirm before honoring the real parameter.
     epoch = 0
     image_utility = ImageUtility()
     half_size = InputDataSize.image_input_size // 2
     for idx in range(LearningConfig.batch_size):
         imgpr.print_image_arr_heat(
             'hpr' + '.' + str(epoch) + "." + str(step) + '.' + str(idx),
             heatmaps_pr[idx])
         flat_n, xs_n, ys_n = image_utility.create_landmarks_from_normalized(
             points_pr[idx], InputDataSize.image_input_size,
             InputDataSize.image_input_size, half_size, half_size)
         imgpr.print_image_arr(
             'cpr' + '.' + str(epoch) + "." + str(step) + '.' + str(idx),
             np.zeros([InputDataSize.image_input_size,
                       InputDataSize.image_input_size, 3]),
             xs_n, ys_n)
Example #5
0
    def _test_result_per_image(self, counter, model, img, labels_true):
        """Predict landmarks for one image and compute its normalized mean
        error against the ground truth.

        :param counter: sample index (kept for interface compatibility;
            unused in the computation).
        :param model: model exposing `predict`; called with a single-image
            batch.
        :param img: input image, presumably scaled to [0, 1] (multiplied by
            255 before predict — TODO confirm against the data pipeline).
        :param labels_true: normalized ground-truth landmark vector.
        :return: tuple (normalized_mean_error, lt, lp, 0, 0, 0) where lt/lp
            are the true/predicted points as [num_landmark // 2, 2] arrays
            and the trailing zeros are pose placeholders.
        """
        image_utility = ImageUtility()
        # The model expects a batch axis and 0-255 pixel values.
        image = np.expand_dims(img * 255, axis=0)
        pre_points = model.predict(image)[0]

        # Denormalize both true and predicted landmarks to 224x224 pixels.
        labels_true_transformed, landmark_arr_x_t, landmark_arr_y_t = image_utility. \
            create_landmarks_from_normalized(labels_true, 224, 224, 112, 112)
        labels_predict_transformed, landmark_arr_x_p, landmark_arr_y_p = \
            image_utility.create_landmarks_from_normalized(pre_points, 224, 224, 112, 112)

        # Inter-ocular distance of the ground truth normalizes the error.
        normalizing_distance = self.__calculate_interoccular_distance(labels_true_transformed)

        # Sum Euclidean distances over consecutive (x, y) pairs.
        sum_errors = 0
        for i in range(0, len(labels_true_transformed), 2):
            dx = labels_predict_transformed[i] - labels_true_transformed[i]
            dy = labels_predict_transformed[i + 1] - labels_true_transformed[i + 1]
            sum_errors += math.sqrt(dx ** 2 + dy ** 2)

        normalized_mean_error = sum_errors / (normalizing_distance * (self.num_landmark / 2))

        lp = np.array(labels_predict_transformed).reshape([self.num_landmark // 2, 2])
        lt = np.array(labels_true_transformed).reshape([self.num_landmark // 2, 2])

        '''When there is no pose:'''
        return normalized_mean_error, lt, lp, 0, 0, 0
Example #6
0
        # roll_normalized = 2 * ((roll_predicted - min_degree) / (max_degree - min_degree)) - 1
        # pose_array = np.array([yaw_normalized, pitch_normalized, roll_normalized])
        pose_array = np.array([1, 1, 1])

        '''normalize landmarks based on hyperface method'''
        width = len(resized_img_new[0])
        height = len(resized_img_new[1])
        x_center = width / 2
        y_center = height / 2
        landmark_arr_flat_normalized = []
        for p in range(0, len(landmark_arr_flat), 2):
            landmark_arr_flat_normalized.append((x_center - landmark_arr_flat[p]) / width)
            landmark_arr_flat_normalized.append((y_center - landmark_arr_flat[p + 1]) / height)

        '''test print after augmentation'''
        landmark_arr_flat_n, landmark_arr_x_n, landmark_arr_y_n = image_utility.create_landmarks_from_normalized(
        landmark_arr_flat_normalized, 224, 224, 112, 112)
        # print_image_arr((i*100)+(k+1), resized_img_new, landmark_arr_x_n, landmark_arr_y_n)


        heatmap_landmark = generate_hm(56, 56, landmark_arr_flat_normalized, s=3.0)
        heatmap_landmark_all = np.sum(heatmap_landmark, axis=2)
        #print_image_arr(2*k, heatmap_landmark_all, [], [])
        # save heatmap

        file_name_save = png_file_name[i][0:-4] + "_" + str(k) + ".npy"
        hm_f = npy_dir + file_name_save
        # imgpr.print_image_arr_heat(1, hm, print_single=False)

        np.save(hm_f, heatmap_landmark)

        landmark_arr_flat_normalized = np.array(landmark_arr_flat_normalized)