Example no. 1
def vis_one_im(self):
    if cfgs.anno:
        # im_ = pred_visualize(self.vis_image.copy(), self.vis_pred).astype(np.uint8)
        im_ = pred_visualize(self.vis_image.copy(),
                             self.vis_anno).astype(np.uint8)
        utils.save_image(im_,
                         self.re_save_dir_im,
                         name='inp_' + self.filename + '.jpg')
    if cfgs.fit_ellip:
        im_ellip = fit_ellipse_findContours(
            self.vis_image.copy(),
            np.expand_dims(self.vis_pred, axis=2).astype(np.uint8))
        utils.save_image(im_ellip,
                         self.re_save_dir_ellip,
                         name='ellip_' + self.filename + '.jpg')
    if cfgs.heatmap:
        heat_map = density_heatmap(self.vis_pred_prob[:, :, 1])
        utils.save_image(heat_map,
                         self.re_save_dir_heat,
                         name='heat_' + self.filename + '.jpg')
    if cfgs.trans_heat and cfgs.heatmap:
        trans_heat_map = translucent_heatmap(
            self.vis_image.copy(),
            heat_map.astype(np.uint8).copy())
        utils.save_image(trans_heat_map,
                         self.re_save_dir_transheat,
                         name='trans_heat_' + self.filename + '.jpg')
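
The snippet assumes a module-level cfgs config whose boolean attributes gate each visualization. A minimal stand-in for experimenting with vis_one_im (the attribute names are taken from the code above; the real config module ships with the repo):

class cfgs:
    anno = True        # overlay the ground-truth annotation on the input image
    fit_ellip = True   # fit an ellipse to the predicted mask
    heatmap = True     # render the class-1 probability map as a heatmap
    trans_heat = True  # blend the heatmap over the original image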
Example no. 2
def main(argv=None):
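    # TF1 graph-mode script: placeholders, Session, and Saver (under TF2 this
    # would run via tf.compat.v1).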
    keep_probability = tf.placeholder(tf.float32, name="keep_probability")
    image = tf.placeholder(tf.float32,
                           shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                           name="input_image")
    annotation = tf.placeholder(tf.int32,
                                shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1],
                                name="annotation")
    soft_annotation = tf.placeholder(tf.float32,
                                     shape=[None, IMAGE_SIZE, IMAGE_SIZE, 2],
                                     name="soft_annotation")
    pred_annotation_value, pred_annotation, logits, pred_prob = inference(
        image, keep_probability)
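    # inference() is defined elsewhere in this file; judging by the names, it
    # returns the per-pixel max activation, the argmax label map, the raw
    # logits, and the softmax probabilities.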
    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("ground_truth",
                     tf.cast(annotation, tf.uint8),
                     max_outputs=2)
    tf.summary.image("pred_annotation",
                     tf.cast(pred_annotation, tf.uint8),
                     max_outputs=2)
    # logits: the output of the last conv layer; labels: the ground truth with
    # the channel dimension squeezed away.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits,
        labels=tf.squeeze(annotation, axis=[3]),
        name="entropy"))
    tf.summary.scalar("entropy", loss)

    trainable_var = tf.trainable_variables()
    if FLAGS.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)
    train_op = train(loss, trainable_var)

    print("Setting up summary op...")
    summary_op = tf.summary.merge_all()

    # Create a file to write logs.
    # filename = 'logs' + FLAGS.mode + str(datetime.datetime.now()) + '.txt'
    filename = "logs_%s%s.txt" % (FLAGS.mode, datetime.datetime.now())
    path_ = os.path.join(FLAGS.logs_dir, filename)
    logs_file = open(path_, 'w')
    logs_file.write("The logs file is created at %s\n" %
                    datetime.datetime.now())
    logs_file.write("The mode is %s\n" % (FLAGS.mode))
    logs_file.write(
        "The train data batch size is %d and the validation batch size is %d.\n"
        % (FLAGS.batch_size, FLAGS.v_batch_size))
    logs_file.write("The train data is %s.\n" % (FLAGS.data_dir))
    logs_file.write("The data size is %d and the MAX_ITERATION is %d.\n" %
                    (IMAGE_SIZE, MAX_ITERATION))
    logs_file.write("The model is ---%s---.\n" % FLAGS.logs_dir)

    print("Setting up image reader...")
    logs_file.write("Setting up image reader...\n")
    train_records, valid_records = scene_parsing.my_read_dataset(
        FLAGS.data_dir)
    print('number of train_records', len(train_records))
    print('number of valid_records', len(valid_records))
    logs_file.write('number of train_records %d\n' % len(train_records))
    logs_file.write('number of valid_records %d\n' % len(valid_records))

    print("Setting up dataset reader")
    image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
    if FLAGS.mode == 'check_training':
        train_dataset_reader = dataset_soft.BatchDatset(
            train_records, image_options)
    validation_dataset_reader = dataset.BatchDatset(valid_records,
                                                    image_options)
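    # Note: train_dataset_reader exists only in 'check_training' mode; the
    # soft-label reader (dataset_soft) pairs with the soft_annotation
    # placeholder defined above.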

    sess = tf.Session()

    print("Setting up Saver...")
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    # If not training, restore the previously trained model.
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")

    if FLAGS.mode == "accurary":
        count = 0
        if_con = True
        accu_iou_t = 0
        accu_pixel_t = 0

        while if_con:
            count = count + 1
            valid_images, valid_annotations, valid_filenames, if_con, start, end = validation_dataset_reader.next_batch_valid(
                FLAGS.v_batch_size)
            valid_loss, pred_anno = sess.run(
                [loss, pred_annotation],
                feed_dict={
                    image: valid_images,
                    annotation: valid_annotations,
                    keep_probability: 1.0
                })
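            # The feed above sets keep_probability to 1.0, presumably
            # disabling dropout during evaluation.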
            accu_iou, accu_pixel = accu.caculate_accurary(
                pred_anno, valid_annotations)
            print("Ture %d ---> the data from %d to %d" % (count, start, end))
            print("%s ---> Validation_pixel_accuary: %g" %
                  (datetime.datetime.now(), accu_pixel))
            print("%s ---> Validation_iou_accuary: %g" %
                  (datetime.datetime.now(), accu_iou))
            #Output logs.
            logs_file.write("Ture %d ---> the data from %d to %d\n" %
                            (count, start, end))
            logs_file.write("%s ---> Validation_pixel_accuary: %g\n" %
                            (datetime.datetime.now(), accu_pixel))
            logs_file.write("%s ---> Validation_iou_accuary: %g\n" %
                            (datetime.datetime.now(), accu_iou))

            accu_iou_t = accu_iou_t + accu_iou
            accu_pixel_t = accu_pixel_t + accu_pixel
        print("%s ---> Total validation_pixel_accuary: %g" %
              (datetime.datetime.now(), accu_pixel_t / count))
        print("%s ---> Total validation_iou_accuary: %g" %
              (datetime.datetime.now(), accu_iou_t / count))
        #Output logs
        logs_file.write("%s ---> Total validation_pixel_accurary: %g\n" %
                        (datetime.datetime.now(), accu_pixel_t / count))
        logs_file.write("%s ---> Total validation_iou_accurary: %g\n" %
                        (datetime.datetime.now(), accu_iou_t / count))

    elif FLAGS.mode == "all_visualize":

        re_save_dir = "%s%s" % (FLAGS.result_dir, datetime.datetime.now())
        logs_file.write("The result is save at file'%s'.\n" % re_save_dir)
        logs_file.write("The number of part visualization is %d.\n" %
                        FLAGS.v_batch_size)

        # Create the result directory if it does not exist.
        if not os.path.exists(re_save_dir):
            print("The path '%s' is not found." % re_save_dir)
            print("Create now ...")
            os.makedirs(re_save_dir)
            print("Create '%s' successfully." % re_save_dir)
            logs_file.write("Create '%s' successfully.\n" % re_save_dir)
        re_save_dir_anno = os.path.join(re_save_dir, 'anno')
        re_save_dir_pred = os.path.join(re_save_dir, 'pred')
        re_save_dir_train_heat = os.path.join(re_save_dir, 'train_heat')
        re_save_dir_heat = os.path.join(re_save_dir, 'heatmap')
        re_save_dir_ellip = os.path.join(re_save_dir, 'ellip')
        re_save_dir_transheat = os.path.join(re_save_dir, 'transheat')
        if not os.path.exists(re_save_dir_anno):
            os.makedirs(re_save_dir_anno)
        if not os.path.exists(re_save_dir_pred):
            os.makedirs(re_save_dir_pred)
        if not os.path.exists(re_save_dir_train_heat):
            os.makedirs(re_save_dir_train_heat)
        if not os.path.exists(re_save_dir_heat):
            os.makedirs(re_save_dir_heat)
        if not os.path.exists(re_save_dir_ellip):
            os.makedirs(re_save_dir_ellip)
        if not os.path.exists(re_save_dir_transheat):
            os.makedirs(re_save_dir_transheat)
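        # (On Python 3, os.makedirs(path, exist_ok=True) would collapse each
        # existence check / create pair above.)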
        count = 0
        if_con = True
        accu_iou_t = 0
        accu_pixel_t = 0

        while if_con:
            count = count + 1
            valid_images, valid_annotations, valid_filename, if_con, start, end = validation_dataset_reader.next_batch_valid(
                FLAGS.v_batch_size)
            pred_value, pred, logits_, pred_prob_ = sess.run(
                [pred_annotation_value, pred_annotation, logits, pred_prob],
                feed_dict={
                    image: valid_images,
                    annotation: valid_annotations,
                    keep_probability: 1.0
                })
            #valid_annotations = np.squeeze(valid_annotations, axis=3)
            pred = np.squeeze(pred, axis=3)
            pred_value = np.squeeze(pred_value, axis=3)
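            # Bias class 0 by +0.85 before the argmax below, so a pixel is
            # labelled foreground only when its class-1 probability exceeds
            # the class-0 probability by that margin.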
            pred_ellip = np.argmax(pred_prob_ + [0.85, 0], axis=3)
            # Per-image visualizations.
            for itr in range(len(pred)):
                filename = valid_filename[itr]['filename']
                if FLAGS.anno == 'T':
                    valid_images_anno = anno_visualize(
                        valid_images[itr].copy(),
                        valid_annotations[itr, :, :, 1])
                    utils.save_image(valid_images_anno.astype(np.uint8),
                                     re_save_dir_anno,
                                     name="anno_" + filename)
                if FLAGS.pred == 'T':
                    valid_images_pred = pred_visualize(
                        valid_images[itr].copy(),
                        np.expand_dims(pred_ellip[itr], axis=2))
                    utils.save_image(valid_images_pred.astype(np.uint8),
                                     re_save_dir_pred,
                                     name="pred_" + filename)
                if FLAGS.train_heat == 'T':
                    heat_map = density_heatmap(
                        valid_annotations[itr, :, :, 1] / FLAGS.normal)
                    utils.save_image(heat_map.astype(np.uint8),
                                     re_save_dir_train_heat,
                                     name="trainheat_" + filename)

                if FLAGS.fit_ellip == 'T':
                    valid_images_ellip = fit_ellipse_findContours(
                        valid_images[itr].copy(),
                        np.expand_dims(pred_ellip[itr],
                                       axis=2).astype(np.uint8))
                    utils.save_image(valid_images_ellip.astype(np.uint8),
                                     re_save_dir_ellip,
                                     name="ellip_" + filename)
                if FLAGS.heatmap == 'T':
                    heat_map = density_heatmap(pred_prob_[itr, :, :, 1])
                    utils.save_image(heat_map.astype(np.uint8),
                                     re_save_dir_heat,
                                     name="heat_" + filename)
                if FLAGS.trans_heat == 'T':
                    trans_heat_map = translucent_heatmap(
                        valid_images[itr],
                        heat_map.astype(np.uint8).copy())
                    utils.save_image(trans_heat_map,
                                     re_save_dir_transheat,
                                     name="trans_heat_" + filename)

    elif FLAGS.mode == 'check_training':
        re_save_dir = "%s%s" % (FLAGS.result_dir, datetime.datetime.now())
        logs_file.write("The result is save at file'%s'.\n" % re_save_dir)
        logs_file.write("The number of part visualization is %d.\n" %
                        FLAGS.v_batch_size)

        # Create the result directory if it does not exist.
        if not os.path.exists(re_save_dir):
            print("The path '%s' is not found." % re_save_dir)
            print("Create now ...")
            os.makedirs(re_save_dir)
            print("Create '%s' successfully." % re_save_dir)
            logs_file.write("Create '%s' successfully.\n" % re_save_dir)
        re_save_dir_anno = os.path.join(re_save_dir, 'anno')
        re_save_dir_pred = os.path.join(re_save_dir, 'pred')
        re_save_dir_train_heat = os.path.join(re_save_dir, 'train_heat')
        re_save_dir_heat = os.path.join(re_save_dir, 'heatmap')
        re_save_dir_ellip = os.path.join(re_save_dir, 'ellip')
        re_save_dir_transheat = os.path.join(re_save_dir, 'transheat')
        if not os.path.exists(re_save_dir_anno):
            os.makedirs(re_save_dir_anno)
        if not os.path.exists(re_save_dir_pred):
            os.makedirs(re_save_dir_pred)
        if not os.path.exists(re_save_dir_train_heat):
            os.makedirs(re_save_dir_train_heat)
        if not os.path.exists(re_save_dir_heat):
            os.makedirs(re_save_dir_heat)
        if not os.path.exists(re_save_dir_ellip):
            os.makedirs(re_save_dir_ellip)
        if not os.path.exists(re_save_dir_transheat):
            os.makedirs(re_save_dir_transheat)
        count = 0
        if_con = True
        accu_iou_t = 0
        accu_pixel_t = 0

        while if_con:
            count = count + 1
            valid_images, valid_annotations, valid_filename, if_con, start, end = train_dataset_reader.next_batch_valid(
                FLAGS.v_batch_size)
            pred_value, pred, logits_, pred_prob_ = sess.run(
                [pred_annotation_value, pred_annotation, logits, pred_prob],
                feed_dict={
                    image: valid_images,
                    soft_annotation: valid_annotations,
                    keep_probability: 1.0
                })
            #valid_annotations = np.squeeze(valid_annotations, axis=3)
            pred = np.squeeze(pred, axis=3)
            pred_value = np.squeeze(pred_value, axis=3)

            # Per-image visualizations.
            for itr in range(len(pred)):
                filename = valid_filename[itr]['filename']
                if FLAGS.anno == 'T':
                    valid_images_anno = anno_visualize(
                        valid_images[itr].copy(),
                        valid_annotations[itr, :, :, 1])
                    utils.save_image(valid_images_anno.astype(np.uint8),
                                     re_save_dir_anno,
                                     name="anno_" + filename)
                if FLAGS.pred == 'T':
                    valid_images_pred = soft_pred_visualize(
                        valid_images[itr].copy(), pred[itr])
                    utils.save_image(valid_images_pred.astype(np.uint8),
                                     re_save_dir_pred,
                                     name="pred_" + filename)
                if FLAGS.train_heat == 'T':
                    heat_map = density_heatmap(
                        valid_annotations[itr, :, :, 1] / FLAGS.normal)
                    utils.save_image(heat_map.astype(np.uint8),
                                     re_save_dir_train_heat,
                                     name="trainheat_" + filename)

                if FLAGS.fit_ellip == 'T':
                    valid_images_ellip = fit_ellipse_findContours(
                        valid_images[itr].copy(),
                        np.expand_dims(pred[itr], axis=2).astype(np.uint8))
                    utils.save_image(valid_images_ellip.astype(np.uint8),
                                     re_save_dir_ellip,
                                     name="ellip_" + filename)
                if FLAGS.heatmap == 'T':
                    heat_map = density_heatmap(pred_prob_[itr, :, :, 1])
                    utils.save_image(heat_map.astype(np.uint8),
                                     re_save_dir_heat,
                                     name="heat_" + filename)
                if FLAGS.trans_heat == 'T':
                    trans_heat_map = translucent_heatmap(
                        valid_images[itr],
                        heat_map.astype(np.uint8).copy())
                    utils.save_image(trans_heat_map,
                                     re_save_dir_transheat,
                                     name="trans_heat_" + filename)

    logs_file.close()
    if FLAGS.mode == "check_training" or FLAGS.mode == "all_visualize":
        result_logs_file = os.path.join(re_save_dir, filename)
        shutil.copyfile(path_, result_logs_file)
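
The accuracy pass above delegates to accu.caculate_accurary, whose implementation is not shown. A minimal NumPy sketch of a pixel-accuracy / IoU computation for tensors of these shapes (a hypothetical stand-in; the repo's own helper may differ):

import numpy as np

def iou_and_pixel_accuracy(pred, anno):
    # pred: (N, H, W) predicted labels; anno: (N, H, W, 1) ground truth.
    anno = np.squeeze(anno, axis=3)
    pixel_acc = float(np.mean(pred == anno))
    # Binary foreground IoU (class 1).
    inter = np.logical_and(pred == 1, anno == 1).sum()
    union = np.logical_or(pred == 1, anno == 1).sum()
    iou = float(inter) / union if union > 0 else 1.0
    return iou, pixel_acc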
Example no. 3
def vis_one_im(self):
    if cfgs.anno:
        im_ = pred_visualize(self.vis_image.copy(), self.vis_anno).astype(np.uint8)
        # im_ = pred_visualize(self.vis_image.copy(), self.vis_pred).astype(np.uint8)
        utils.save_image(im_, self.re_save_dir_im, name='inp_' + self.filename + '.jpg')
Example no. 4
def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name="keep_probability")
    image = tf.placeholder(tf.float32,
                           shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                           name="input_image")
    annotation = tf.placeholder(tf.int32,
                                shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1],
                                name="annotation")

    pred_annotation_value, pred_annotation, logits, pred_prob = inference(
        image, keep_probability)
    # Recompute pred_prob as the softmax of the logits (this overrides the
    # pred_prob returned by inference).
    pred_prob = tf.nn.softmax(logits)
    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("pred_annotation",
                     tf.cast(pred_annotation, tf.uint8),
                     max_outputs=2)

    trainable_var = tf.trainable_variables()
    if FLAGS.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)

    print("Setting up summary op...")
    summary_op = tf.summary.merge_all()

    # Create a file to write logs.
    # filename = 'logs' + FLAGS.mode + str(datetime.datetime.now()) + '.txt'
    filename = "logs_%s%s.txt" % (FLAGS.mode, datetime.datetime.now())
    path_ = os.path.join(FLAGS.logs_dir, filename)
    logs_file = open(path_, 'w')
    logs_file.write("The logs file is created at %s\n" %
                    datetime.datetime.now())
    logs_file.write("The mode is %s\n" % (FLAGS.mode))
    logs_file.write(
        "The train data batch size is %d and the validation batch size is %d.\n"
        % (FLAGS.batch_size, FLAGS.v_batch_size))
    logs_file.write("The data is %s.\n" % (FLAGS.data_dir))
    logs_file.write("The data size is %d.\n" % (IMAGE_SIZE))
    logs_file.write("The model is ---%s---.\n" % FLAGS.logs_dir)

    print("Setting up image reader...")
    logs_file.write("Setting up image reader...\n")
    valid_video_records = scene_parsing.read_validation_video_data(
        FLAGS.data_dir)
    print('number of valid_records', len(valid_video_records))
    logs_file.write('number of valid_records %d\n' % len(valid_video_records))

    print("Setting up dataset reader")
    image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
    validation_dataset_reader = dataset.BatchDatset(valid_video_records,
                                                    image_options)

    sess = tf.Session()

    print("Setting up Saver...")
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    # If not training, restore the previously trained model.
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")

    re_save_dir = "%s%s" % (FLAGS.result_dir, datetime.datetime.now())
    logs_file.write("The result is save at file'%s'.\n" % re_save_dir)
    logs_file.write("The number of part visualization is %d.\n" %
                    FLAGS.v_batch_size)

    # Create the result directory if it does not exist.
    if not os.path.exists(re_save_dir):
        print("The path '%s' is not found." % re_save_dir)
        print("Create now ...")
        os.makedirs(re_save_dir)
        print("Create '%s' successfully." % re_save_dir)
        logs_file.write("Create '%s' successfully.\n" % re_save_dir)
    logs_file.close()

    # Copy the logs into the result directory.
    result_logs_file = os.path.join(re_save_dir, filename)
    shutil.copyfile(path_, result_logs_file)
    re_save_dir_im = os.path.join(re_save_dir, 'images')
    re_save_dir_pred = os.path.join(re_save_dir, 'pred')
    re_save_dir_heat = os.path.join(re_save_dir, 'heatmap')
    re_save_dir_ellip = os.path.join(re_save_dir, 'ellip')
    re_save_dir_transheat = os.path.join(re_save_dir, 'transheat')
    if not os.path.exists(re_save_dir_im):
        os.makedirs(re_save_dir_im)
    if not os.path.exists(re_save_dir_pred):
        os.makedirs(re_save_dir_pred)
    if not os.path.exists(re_save_dir_heat):
        os.makedirs(re_save_dir_heat)
    if not os.path.exists(re_save_dir_ellip):
        os.makedirs(re_save_dir_ellip)
    if not os.path.exists(re_save_dir_transheat):
        os.makedirs(re_save_dir_transheat)

    count = 0
    if_con = True
    accu_iou_t = 0
    accu_pixel_t = 0

    while if_con:
        count = count + 1
        valid_images, valid_filename, if_con, start, end = validation_dataset_reader.next_batch_video_valid(
            FLAGS.v_batch_size)
        pred_value, pred, logits_, pred_prob_ = sess.run(
            [pred_annotation_value, pred_annotation, logits, pred_prob],
            feed_dict={
                image: valid_images,
                keep_probability: 1.0
            })

        print("Turn %d :----start from %d ------- to %d" % (count, start, end))
        '''
        if FLAGS.softmax == 'T':
            pred_prob_ = sess.run([pred_prob], feed_dict={logits: logits_})
            # print('pred_prob', pred_prob_)
            print('The shape of pred_prob is ', pred_value.shape)
        '''
        pred = np.squeeze(pred, axis=3)
        pred_value = np.squeeze(pred_value, axis=3)

        for itr in range(len(pred)):
            filename = valid_filename[itr]['filename']
            valid_images_ = pred_visualize(valid_images[itr].copy(), pred[itr])
            #print(valid_images[itr].shape)
            #print((np.expand_dims(pred[itr],axis=2).astype(np.uint8)).shape)
            utils.save_image(valid_images_.astype(np.uint8),
                             re_save_dir_im,
                             name="inp_" + filename)

            if FLAGS.pred:
                utils.save_gray_image(np.expand_dims(pred[itr],
                                                     axis=2).astype(np.uint8),
                                      re_save_dir_pred,
                                      name="pred_" + filename)
            if FLAGS.fit_ellip:
                valid_images_ellip = fit_ellipse_findContours_ori(
                    valid_images[itr].copy(),
                    np.expand_dims(pred[itr], axis=2).astype(np.uint8))
                utils.save_image(valid_images_ellip.astype(np.uint8),
                                 re_save_dir_ellip,
                                 name="ellip_" + filename)
            if FLAGS.heatmap:
                heat_map = density_heatmap(pred_prob_[itr, :, :, 1])
                utils.save_image(heat_map.astype(np.uint8),
                                 re_save_dir_heat,
                                 name="heat_" + filename)
            if FLAGS.trans_heat == 'T':
                trans_heat_map = translucent_heatmap(
                    valid_images[itr],
                    heat_map.astype(np.uint8).copy())
                utils.save_image(trans_heat_map,
                                 re_save_dir_transheat,
                                 name="trans_heat_" + filename)