Example #1
    def write_test_results_to_file(self, output_folder, results_folder):
        """
        Function to write the test results of the model to a csv file.

        Args
            output_folder (str): the name of the output folder defined in the configuration yaml file
            results_folder (str): the name of the folder where the results of the current execution will be stored
        """
        results_folder_path = join(utils.app_dir, output_folder,
                                   results_folder)
        fold_path = join(results_folder_path, "test_results")
        if not exists(fold_path):
            mkdir(fold_path)
        df = pd.DataFrame(
            columns=["classifier", "metric", "result_kind", "result"])
        row = 0
        for metric_name, metric_value in self.test_metrics.items():
            df.loc[row] = [self.model_name, metric_name, "test", metric_value]
            row += 1
        file_path = join(fold_path,
                         "Results_test_{}.csv".format(self.model_name))
        df.to_csv(file_path, sep=",")
        utils.visualize(df,
                        output_folder,
                        results_folder,
                        "test_results",
                        "Plot_test_{}.png".format(self.model_name),
                        captions=self.captions)
Example #2
def visualize_classifier(properties, models, metric_name, metric_caption):
    path = join(utils.app_dir, properties["output_folder"], "results")
    captions = ["Experiment1", "Experiment2", "Experiment3"]
    for model in models:
        model_path = join(path, model)
        df = pd.read_csv(join(model_path, "Results.csv"),
                         index_col=0,
                         header=0)
        utils.visualize(df=df,
                        output_folder=properties["output_folder"],
                        results_folder="results",
                        folder_name=model,
                        filename="{}_{}.png".format(metric_name, model),
                        captions=captions)
    best_df = pd.read_csv(join(path,
                               "Best_Results_{}.csv".format(metric_name)),
                          index_col=0,
                          header=0)
    captions = [metric_caption]
    visualize(df=best_df,
              output_folder=properties["output_folder"],
              results_folder="results",
              folder_name=None,
              filename="{}_best.png".format(metric_name),
              captions=captions)
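Both snippets above route their plots through the same utils.visualize helper, whose implementation is not shown here. As a rough sketch only, a matplotlib-based helper compatible with the keyword signature used in Example #2 might look like the following; the bar-plot choice, the app_dir root, and all defaults are assumptions, not the project's actual code.

# Hypothetical sketch of a utils.visualize compatible with the calls above.
# The grouped bar plot and the app_dir root are assumptions.
from os.path import join
import matplotlib
matplotlib.use("Agg")  # render to file; no display needed
import matplotlib.pyplot as plt

app_dir = "."  # assumed project root

def visualize(df, output_folder, results_folder, folder_name, filename,
              captions=None):
    target_dir = join(app_dir, output_folder, results_folder)
    if folder_name:
        target_dir = join(target_dir, folder_name)
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.bar(df["metric"], df["result"])
    ax.set_ylabel("result")
    ax.set_title(", ".join(captions) if captions else df["classifier"].iloc[0])
    plt.xticks(rotation=45, ha="right")
    fig.tight_layout()
    fig.savefig(join(target_dir, filename))
    plt.close(fig)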
Example #3
    def get_fold_avg_result(self, output_folder, results_folder):
        """
        Calculates and writes in a csv file the average value of each metric from all the folds for the specific model.

        Args
            output_folder (str): the name of the output folder defined in the configuration yaml file
            results_folder (str): the name of the folder where the results of the current execution will be stored
        """
        metric_names = [
            MetricNames.macro_precision.value,
            MetricNames.micro_precision.value, MetricNames.macro_recall.value,
            MetricNames.micro_recall.value, MetricNames.macro_f.value,
            MetricNames.micro_f.value
        ]
        for metric_name in metric_names:
            metric_list = []
            for fold_metric in self.fold_metrics:
                metric_list.append(fold_metric[metric_name])
            self.avg_metrics[metric_name] = sum(metric_list) / len(metric_list)
        results_folder_path = join(utils.app_dir, output_folder,
                                   results_folder)
        avg_folder = join(results_folder_path, "fold_avg")
        if not exists(avg_folder):
            mkdir(avg_folder)
        csv_path = join(avg_folder, "Results_avg.csv")
        df = pd.DataFrame(
            columns=["classifier", "metric", "result_kind", "result"])
        row = 0
        for metric_name, metric_value in self.avg_metrics.items():
            df.loc[row] = [self.model_name, metric_name, "avg", metric_value]
            row += 1
        df.to_csv(csv_path, sep=",")
        utils.visualize(df,
                        output_folder,
                        results_folder,
                        "fold_avg",
                        "Plot_avg_{}.png".format(self.model_name),
                        captions=self.captions)
Example #4
    def write_fold_results_to_file(self, output_folder, results_folder,
                                   fold_num):
        """
        Function to write a fold's results (metrics) to a csv file.

        Args
            output_folder (str): the name of the output folder defined in the configuration yaml file
            results_folder (str): the name of the folder where the results of the current execution will be stored
            fold_num (int): the number of the current fold
        """
        results_folder_path = join(utils.app_dir, output_folder,
                                   results_folder)
        if not exists(results_folder_path):
            mkdir(results_folder_path)
        fold_path = join(results_folder_path, "fold_{}".format(fold_num))
        if not exists(fold_path):
            mkdir(fold_path)
        metrics = self.fold_metrics[fold_num]
        df = pd.DataFrame(
            columns=["classifier", "metric", "result_kind", "result"])
        row = 0
        for metric_name, metric_value in metrics.items():
            df.loc[row] = [
                self.model_name, metric_name, "validation", metric_value
            ]
            row += 1
        filename = "Results_{}.csv".format(self.model_name)
        file_path = join(fold_path, filename)
        df.to_csv(file_path, sep=',')

        utils.visualize(df,
                        output_folder,
                        results_folder,
                        "fold_{}".format(fold_num),
                        "Plot_fold_{}_{}.png".format(fold_num,
                                                     self.model_name),
                        captions=self.captions)
Example #5
def convolution(image, kernel, average=False, verbose=False):
    if verbose:
        utils.visualize(image, 'gray', title="Image")

    image_row, image_col = image.shape
    kernel_row, kernel_col = kernel.shape

    output = np.zeros(image.shape)

    pad_height = (kernel_row - 1) // 2
    pad_width = (kernel_col - 1) // 2

    padded_image = np.zeros((image_row + (2 * pad_height), image_col + (2 * pad_width)))

    # place the original image at the center of the padded array
    padded_image[pad_height:padded_image.shape[0] - pad_height, pad_width:padded_image.shape[1] - pad_width] = image

    if verbose:
        utils.visualize(padded_image, 'gray', title="Padded Image")

    for row in range(image_row):
        for col in range(image_col):
            output[row, col] = np.sum(kernel * padded_image[row:row + kernel_row, col:col + kernel_col])
            if average:
                output[row, col] /= kernel.shape[0] * kernel.shape[1]

    print("SHAPE CALCULATED : {}".format(output.shape))

    if verbose:
        utils.visualize(output, 'gray', title="Output image ({}x{} kernel)".format(kernel_row, kernel_col))

    return output
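A quick usage sketch for convolution (assumed, not from the source): build a small Gaussian kernel with NumPy and run it through the function with average=True to get a blurred image of the same shape.

# Assumed usage sketch; the Gaussian-kernel builder is illustrative and
# not part of the original code.
import numpy as np

def gaussian_kernel(size, sigma=1.0):
    # symmetric 2D Gaussian evaluated on an integer grid centered at 0
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    return np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))

image = np.random.rand(64, 64)          # stand-in for a grayscale image
kernel = gaussian_kernel(5, sigma=1.4)
blurred = convolution(image, kernel, average=True, verbose=False)
assert blurred.shape == image.shape     # zero padding keeps the output the same size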
Example #6
def main(_):
    print("FLAG1")
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    # extract zipfile
    print(FLAGS.dataset)
    print(os.path.join(FLAGS.data_path, "*.zip"))
    source_path = glob.glob(os.path.join(FLAGS.data_path, "*.zip"))
    print(source_path)
    for i, zipped_file in enumerate(source_path):
        print("Extracting image zip %s of %s" % (i + 1, len(source_path)))
        if os.path.exists(os.path.join(FLAGS.data_path, "celebA")):
            print("...File already exists")
        else:
            print(zipped_file)
            unzip_and_save(zipped_file, FLAGS.data_path)
            print("...Extracted!")

    print("Reading from %s" % os.path.join(FLAGS.data_path, "*/*.jpg"))
    unzipped_data_path = os.path.join(
        FLAGS.data_path, "*/*.jpg")  # right now we support only one dataset
    print(unzipped_data_path)
    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                data_path=FLAGS.data_path,  # glob signature
                dataset_type=unzipped_data_path,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          data_path=unzipped_data_path,
                          dataset_type=FLAGS.dataset,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #                 [dcgan.h4_w, dcgan.h4_b, None])

        # visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
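The FLAGS object consumed by main is defined elsewhere in the repository. As a plausible reconstruction only, the TF1 tf.app.flags API would define them roughly as below; the flag names come from the calls above, but every default value is an illustrative guess.

# Illustrative reconstruction of the flag definitions main() relies on;
# names taken from the calls above, default values are guesses.
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_integer("input_height", 108, "height of the input images")
flags.DEFINE_integer("input_width", None, "width of the input images (defaults to input_height)")
flags.DEFINE_integer("output_height", 64, "height of the generated images")
flags.DEFINE_integer("output_width", None, "width of the generated images (defaults to output_height)")
flags.DEFINE_integer("batch_size", 64, "batch size")
flags.DEFINE_string("dataset", "celebA", "name of the dataset")
flags.DEFINE_string("data_path", "./data", "directory holding the zipped dataset")
flags.DEFINE_string("checkpoint_dir", "./checkpoint", "directory for checkpoints")
flags.DEFINE_string("sample_dir", "./samples", "directory for generated samples")
flags.DEFINE_boolean("train", False, "True to train, False to test")
flags.DEFINE_boolean("crop", True, "True to center-crop the inputs")
FLAGS = flags.FLAGS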
Example #7
def eval():
    input_data = tf.placeholder(
        dtype=tf.float32,
        shape=[None, FLAGS.input_size, FLAGS.input_size, 3],
        name='input_image')
    center_map = tf.placeholder(
        dtype=tf.float32,
        shape=[None, FLAGS.input_size, FLAGS.input_size, 1],
        name='center_map')
    model = sppe.Model(FLAGS.stages, FLAGS.joints)
    model.generate_model(input_data, center_map, FLAGS.batch_size)

    center_map = utils.generate_gaussian_map(FLAGS.input_size,
                                             FLAGS.input_size,
                                             FLAGS.input_size / 2,
                                             FLAGS.input_size / 2,
                                             FLAGS.cmap_variance)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(CONFIG.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # build the input data
        data = h5py.File(FLAGS.data_file, 'r')
        # centers = data['center'], imgnames = data['imgname'], scales = data['scale'], gt_joints = data['part']
        total_size = len(data['index'])
        mesures_rsts = []
        n = 0
        while n * FLAGS.batch_size < total_size:
            i = n * FLAGS.batch_size
            j = min((n + 1) * FLAGS.batch_size, total_size)
            n += 1
            centers = data['center'][i:j]
            img_names = data['imgname'][i:j]
            scales = data['scale'][i:j]
            batch_gt_joints = data['part'][i:j]

            # center the person, crop to 368x368, and adjust the joint coordinates
            imgs, joints_list = data_process.crop_to_center(
                img_names, FLAGS.input_size, scales, centers, batch_gt_joints,
                False)
            imgs_input = np.array(imgs) / 255.0 - 0.5

            center_maps = np.array(list(center_map) * len(imgs_input))
            center_maps = np.reshape(
                center_maps,
                [len(imgs_input), FLAGS.input_size, FLAGS.input_size, 1])

            # inference
            pred_heatmaps = sess.run([model.stage_heatmaps[FLAGS.stages - 1]],
                                     feed_dict={
                                         model.input_image: imgs_input,
                                         model.center_map: center_maps
                                     })
            pred_heatmaps = pred_heatmaps[0]
            batch_pred_joints = []
            for i in range(len(pred_heatmaps)):
                pred_heatmap = pred_heatmaps[i, :, :, 0:FLAGS.joints].reshape(
                    (FLAGS.hmap_size, FLAGS.hmap_size, FLAGS.joints))
                pred_heatmap = cv2.resize(pred_heatmap,
                                          (FLAGS.input_size, FLAGS.input_size))
                preds_joint = np.zeros((FLAGS.joints, 2))
                for joint_idx in range(FLAGS.joints):
                    joint_coord = np.unravel_index(
                        np.argmax(pred_heatmap[:, :, joint_idx]),
                        (FLAGS.input_size, FLAGS.input_size))
                    preds_joint[joint_idx, :] = joint_coord
                # draw joints and limbs
                utils.visualize(imgs[i], preds_joint)
                cv2.imwrite(
                    os.path.join(VALID_OUTPUT_DIR,
                                 img_names[i].decode('utf-8')),
                    imgs[i].astype(np.uint8))
                batch_pred_joints.append(preds_joint)

            # PCK
            mesures_rsts.extend(
                pck.compute_pck(12, 3, joints_list, batch_pred_joints,
                                0.2))  # 2: right shoulder, 11: left hip; the corresponding MPII indices are 12 and 3
        acc_joints, acc_ave = pck.compute_pck_accuracy(mesures_rsts)

        print(" head top acc:    %.2f" % acc_joints[0] + "\n neck acc:    %.2f" % acc_joints[1] + "\n right shoulder acc:   %.2f" % acc_joints[2]+ \
              "\n right elbow acc:    %.2f" % acc_joints[3] + "\n right wrist acc:   %.2f" % acc_joints[4] + "\n left shoulder acc:   %.2f" % acc_joints[5] + \
              "\n left elbow acc:   %.2f" % acc_joints[6] + "\n left wrist acc:   %.2f" % acc_joints[7] + "\n right hip acc:   %.2f" % acc_joints[8]+ \
              "\n right knee acc:   %.2f" % acc_joints[9] + "\n right ankle acc:   %.2f" % acc_joints[10] + "\n left hip acc:   %.2f" % acc_joints[11] + \
              "\n left knee acc:   %.2f" % acc_joints[12] + "\n left ankle acc:   %.2f" % acc_joints[13] + "\n average acc:   %.2f" % acc_ave)
    return acc_ave
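pck.compute_pck itself is not shown. For reference, PCK (Percentage of Correct Keypoints) counts a predicted joint as correct when its distance to the ground truth falls below a fraction (0.2 above) of a reference length, here the distance between two reference joints (right shoulder and left hip). A minimal NumPy sketch of that idea, not the project's implementation:

# Minimal NumPy sketch of the PCK idea used above; not the project's
# pck.compute_pck. ref_a/ref_b are the indices of the reference joints.
import numpy as np

def pck_correct(gt_joints, pred_joints, ref_a, ref_b, threshold=0.2):
    """Return a boolean array: True where a predicted joint lies within
    threshold * (distance between the two reference joints) of the truth."""
    gt = np.asarray(gt_joints, dtype=float)      # shape (num_joints, 2)
    pred = np.asarray(pred_joints, dtype=float)  # shape (num_joints, 2)
    ref_len = np.linalg.norm(gt[ref_a] - gt[ref_b])
    dists = np.linalg.norm(gt - pred, axis=1)
    return dists <= threshold * ref_len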
Example #8
        img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)
        img0 = torch.squeeze(img0).numpy()
        with torch.no_grad():
            output = model(img.to(device))
            output = output.data.cpu().numpy()
            # loop over the batch items
            for out in output:
                coords = extract_coords(out)
                print(coords)
                # draw the predicted poses on the original image
                q_img = visualize(img0, coords, camera_matrix)
                print(q_img.shape)
                q_img = cv2.resize(
                    q_img,
                    (int(q_img.shape[1] * 0.25), int(q_img.shape[0] * 0.25)))
                # show the predictions on the image
                cv2.imshow("Prediction", q_img)
                cv2.waitKey()

Example #9
from utils import utils
import canny_edge as ced

imgs = utils.load_data()
utils.visualize(imgs, 'gray')

# run the full Canny pipeline on every loaded image
detector = ced.cannyEdgeDetector(imgs,
                                 sigma=1.4,
                                 kernel_size=5,
                                 lowthreshold=0.09,
                                 highthreshold=0.17,
                                 weak_pixel=100)
imgs_final = detector.detect()

utils.visualize(imgs_final, 'gray')
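The lowthreshold, highthreshold and weak_pixel arguments map onto Canny's double-threshold step: magnitudes above the high threshold become strong edges, values between the two thresholds become weak candidates (marked with the weak_pixel value) that hysteresis later keeps only if they connect to a strong edge. A rough sketch of that step, assuming the thresholds are fractions of the maximum gradient magnitude (which the 0.09/0.17 values suggest), not ced.cannyEdgeDetector's actual code:

# Rough sketch of Canny's double-threshold step under the assumptions above.
import numpy as np

def double_threshold(grad, low_ratio=0.09, high_ratio=0.17,
                     weak_pixel=100, strong_pixel=255):
    high = grad.max() * high_ratio
    low = grad.max() * low_ratio
    out = np.zeros_like(grad, dtype=np.uint8)
    out[grad >= high] = strong_pixel                  # definite edges
    out[(grad >= low) & (grad < high)] = weak_pixel   # candidates for hysteresis
    return out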
Example #10
def eval():
    input_data = tf.placeholder(
        dtype=tf.float32,
        shape=[None, FLAGS.input_size, FLAGS.input_size, 3],
        name='input_image')
    center_map = tf.placeholder(
        dtype=tf.float32,
        shape=[None, FLAGS.input_size, FLAGS.input_size, 1],
        name='center_map')  # the center map is also input_size x input_size (368x368)

    # stages and joints are passed in as flags; joints should equal the number
    # of annotated joints in the training set
    model = cpm.Model(FLAGS.stages, FLAGS.joints)
    model.generate_model(input_data, center_map, FLAGS.batch_size)

    # at inference time the person must be centered in the image: the center
    # map is built around the image center, so predictions degrade if the
    # person is off-center
    center_map = utils.generate_gaussian_map(
        FLAGS.input_size, FLAGS.input_size, FLAGS.input_size / 2,
        FLAGS.input_size / 2, FLAGS.cmap_variance)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(CONFIG.model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # build the input data
        data = h5py.File(FLAGS.data_file, 'r')  # open the h5 file
        # centers = data['center'], imgnames = data['imgname'], scales = data['scale'], gt_joints = data['part']
        total_size = len(data['index'])  # size of the validation set
        mesures_rsts = []  # stores each person's PCK results
        n = 0
        while n * FLAGS.batch_size < total_size:  # read one batch at a time
            i = n * FLAGS.batch_size
            j = min((n + 1) * FLAGS.batch_size, total_size)
            n += 1
            centers = data['center'][i:j]
            img_names = data['imgname'][i:j]
            scales = data['scale'][i:j]
            batch_gt_joints = data['part'][i:j]

            # center the person, crop to 368x368, and adjust the joint coordinates
            imgs, joints_list = data_process.crop_to_center(
                img_names, FLAGS.input_size, scales, centers, batch_gt_joints,
                False)  # images cropped to 368x368 and centered; joints_list is the adjusted ground truth
            imgs_input = np.array(imgs) / 255.0 - 0.5  # normalize to [-0.5, 0.5]

            center_maps = np.array(list(center_map) * len(imgs_input))
            center_maps = np.reshape(
                center_maps,
                [len(imgs_input), FLAGS.input_size, FLAGS.input_size, 1])

            # inference
            pred_heatmaps = sess.run(
                [model.stage_heatmaps[FLAGS.stages - 1]],  # heatmaps of the final stage only
                feed_dict={
                    'input_image:0': imgs_input,
                    'center_map:0': center_maps
                })  # pred_heatmaps[0] has shape [batch_size, 46, 46, 15]
            # sess.run returns a one-element list; after [0] the first axis is the batch (here 32)
            pred_heatmaps = pred_heatmaps[0]
            batch_pred_joints = []
            for i in range(len(pred_heatmaps)):
                pred_heatmap = pred_heatmaps[i, :, :, 0:FLAGS.joints].reshape(
                    (FLAGS.hmap_size, FLAGS.hmap_size, FLAGS.joints)
                )  # slice out the joint heatmaps and drop the background channel: 46x46x14, one person per image
                pred_heatmap = cv2.resize(
                    pred_heatmap,
                    (FLAGS.input_size, FLAGS.input_size))  # heatmaps become 368x368x14
                preds_joint = np.zeros((FLAGS.joints, 2))  # predicted joint coordinates
                for joint_idx in range(FLAGS.joints):
                    joint_coord = np.unravel_index(
                        np.argmax(pred_heatmap[:, :, joint_idx]),
                        (FLAGS.input_size,
                         FLAGS.input_size))  # joint coordinate relative to the 368x368 image
                    preds_joint[joint_idx, :] = joint_coord
                # draw joints and limbs
                cv2.imwrite(
                    os.path.join(VALID_CROP_DIR, img_names[i].decode('utf-8')),
                    imgs[i].astype(np.uint8))  # saved before drawing joints and limbs
                utils.visualize(imgs[i], preds_joint,
                                joints_list[i])  # image, predicted coords, ground-truth coords
                cv2.imwrite(
                    os.path.join(VALID_OUTPUT_DIR,
                                 img_names[i].decode('utf-8')),
                    imgs[i].astype(np.uint8))
                batch_pred_joints.append(preds_joint)

            # PCK
            mesures_rsts.extend(
                pck.compute_pck(12, 3, joints_list, batch_pred_joints,
                                0.2))  # 2: right shoulder, 11: left hip; the corresponding MPII indices are 12 and 3
        acc_joints, acc_ave = pck.compute_pck_accuracy(mesures_rsts)

        print(" head top acc:    %.2f" % acc_joints[0] + "\n neck acc:    %.2f" % acc_joints[1] + "\n right shoulder acc:   %.2f" % acc_joints[2]+ \
              "\n right elbow acc:    %.2f" % acc_joints[3] + "\n right wrist acc:   %.2f" % acc_joints[4] + "\n left shoulder acc:   %.2f" % acc_joints[5] + \
              "\n left elbow acc:   %.2f" % acc_joints[6] + "\n left wrist acc:   %.2f" % acc_joints[7] + "\n right hip acc:   %.2f" % acc_joints[8]+ \
              "\n right knee acc:   %.2f" % acc_joints[9] + "\n right ankle acc:   %.2f" % acc_joints[10] + "\n left hip acc:   %.2f" % acc_joints[11] + \
              "\n left knee acc:   %.2f" % acc_joints[12] + "\n left ankle acc:   %.2f" % acc_joints[13] + "\n average acc:   %.2f" % acc_ave)
    return acc_ave