Example #1
# Imports assumed by these excerpts; `config`, `networks_with_attrs`, and the
# other un-imported helpers are project-local names from the surrounding repo.
import os

import cv2
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import control_flow_ops


def generate_roi_feature_dataset(dataset,
                                 netname,
                                 model_path,
                                 feature_save_path,
                                 using_attribute_flag=True,
                                 using_clstm_flag=True):
    '''
    Generate an ROI-level feature vector for every slice under `dataset`
    and save the stacked result to `feature_save_path`.
    :param dataset: path to the dataset directory
    :param netname: name of the backbone network
    :param model_path: checkpoint to restore before inference
    :param feature_save_path: output path for the saved .npy features
    :return: None; the features are written to disk
    '''
    nc_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='nc_roi_placeholder')
    art_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='art_roi_placeholder')
    pv_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='pv_roi_placeholder')
    nc_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='nc_patch_placeholder')
    art_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='art_patch_placeholder')
    pv_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='pv_patch_placeholder')
    batch_label_placeholder = tf.placeholder(tf.int32, [None, 1],
                                             name='batch_label_input')
    batch_attrs_placeholder = tf.placeholder(tf.float32, [None, 15],
                                             name='batch_attrs_input')
    batch_size_placeholder = tf.placeholder(tf.int32, [], name='batch_size')
    net = networks_with_attrs(nc_roi_placeholder,
                              art_roi_placeholder,
                              pv_roi_placeholder,
                              nc_patch_placeholder,
                              art_patch_placeholder,
                              pv_patch_placeholder,
                              batch_attrs_placeholder,
                              netname,
                              is_training=False,
                              num_classes=config.num_classes,
                              batch_size=batch_size_placeholder,
                              use_attribute_flag=using_attribute_flag,
                              clstm_flag=using_clstm_flag)
    logits = net.logits
    # The loss ops are constructed to complete the graph but are never
    # evaluated in this inference-only pass.
    ce_loss, center_loss, gb_ce, lb_ce = net.build_loss(
        batch_label_placeholder, add_to_collection=False)
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    roi_features = []
    with tf.Session(config=gpu_config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        saver = tf.train.Saver()
        print('restore from ', model_path)
        saver.restore(sess, model_path)
        print(dataset)
        slice_names = os.listdir(dataset)
        print(slice_names)
        for idx, slice_name in enumerate(slice_names):
            if slice_name.startswith('.DS'):
                continue
            print(slice_name, idx, ' / ', len(slice_names))
            logits_values = generate_roi_feature_with_attributions(
                dataset, slice_name, config.patch_size, sess, logits,
                nc_roi_placeholder, art_roi_placeholder, pv_roi_placeholder,
                nc_patch_placeholder, art_patch_placeholder,
                pv_patch_placeholder, batch_attrs_placeholder,
                batch_size_placeholder)
            # Build a 5-bin histogram of the patch-level predictions,
            # normalised by the number of patches in this slice.
            roi_feature = np.zeros(5, np.float32)
            patch_num = len(logits_values) * 1.0
            for value in np.unique(logits_values):
                roi_feature[value] = np.sum(logits_values == value) * 1.0
            roi_feature /= patch_num
            roi_features.append(roi_feature)
            print(slice_name[-1], ' logits_values is ', logits_values)
            print('roi_feature is ', roi_feature)
    roi_features = np.asarray(roi_features, np.float32)
    np.save(feature_save_path, roi_features)
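
A minimal usage sketch; the dataset directory, network name, checkpoint, and output path below are hypothetical placeholders:

if __name__ == '__main__':
    # Hypothetical paths and backbone name; substitute your own.
    generate_roi_feature_dataset(dataset='/path/to/val_slices',
                                 netname='resnet_v2_50',
                                 model_path='/path/to/model.ckpt',
                                 feature_save_path='./roi_features.npy')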
Example #2
def generate_roi_feature_dataset(cur_slice_dir,
                                 netname,
                                 model_path,
                                 using_attribute_flag=True,
                                 using_clstm_flag=True,
                                 global_branch_flag=True,
                                 local_branch_flag=True,
                                 patch_size=5):
    '''
    Generate a patch-level label map for the single slice at
    `cur_slice_dir` and save it as a PNG in the working directory.
    :param cur_slice_dir: path to one slice directory inside the dataset
    :param netname: name of the backbone network
    :param model_path: checkpoint to restore before inference
    :return: None; the label map is written to disk
    '''
    nc_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='nc_roi_placeholder')
    art_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='art_roi_placeholder')
    pv_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='pv_roi_placeholder')
    nc_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='nc_patch_placeholder')
    art_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='art_patch_placeholder')
    pv_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='pv_patch_placeholder')
    batch_label_placeholder = tf.placeholder(tf.int32, [None, 1],
                                             name='batch_label_input')
    batch_attrs_placeholder = tf.placeholder(tf.float32, [None, 15],
                                             name='batch_attrs_input')
    batch_size_placeholder = tf.placeholder(tf.int32, [], name='batch_size')
    net = networks_with_attrs(nc_roi_placeholder,
                              art_roi_placeholder,
                              pv_roi_placeholder,
                              nc_patch_placeholder,
                              art_patch_placeholder,
                              pv_patch_placeholder,
                              batch_attrs_placeholder,
                              netname,
                              is_training=False,
                              num_classes=config.num_classes,
                              batch_size=batch_size_placeholder,
                              use_attribute_flag=using_attribute_flag,
                              clstm_flag=using_clstm_flag,
                              global_branch_flag=global_branch_flag,
                              local_branch_flag=local_branch_flag)
    logits = net.logits
    # As in Example #1, the loss ops are constructed but never evaluated.
    ce_loss, center_loss, gb_ce, lb_ce = net.build_loss(
        batch_label_placeholder, add_to_collection=False)
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    with tf.Session(config=gpu_config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        saver = tf.train.Saver()
        print('restore from ', model_path)
        saver.restore(sess, model_path)
        label_map = generate_roi_feature_with_attributions(
            os.path.dirname(cur_slice_dir), os.path.basename(cur_slice_dir),
            patch_size, sess, logits, nc_roi_placeholder, art_roi_placeholder,
            pv_roi_placeholder, nc_patch_placeholder, art_patch_placeholder,
            pv_patch_placeholder, batch_attrs_placeholder,
            batch_size_placeholder)
        print(np.shape(label_map))
        cv2.imwrite('./' + os.path.basename(cur_slice_dir) + '.PNG',
                    np.asarray(label_map, np.uint8))
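
A hedged usage sketch for this per-slice variant; the slice directory, network name, and checkpoint are hypothetical:

if __name__ == '__main__':
    # Hypothetical inputs; the label map is saved as ./<slice_name>.PNG.
    generate_roi_feature_dataset(cur_slice_dir='/path/to/dataset/slice_0',
                                 netname='resnet_v2_50',
                                 model_path='/path/to/model.ckpt',
                                 patch_size=5)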
Example #3
def evulate_imgs_batch_with_attributions(nc_rois_paths, art_rois_paths,
                                         pv_rois_paths, nc_patches_paths,
                                         art_patches_paths, pv_patches_paths,
                                         attrs, labels, netname, model_path):
    batch_dataset = Generate_Batch_Data_with_attributions(
        nc_rois_paths, art_rois_paths, pv_rois_paths, nc_patches_paths,
        art_patches_paths, pv_patches_paths, attrs, config.val_batch_size,
        labels)
    generator = batch_dataset.generate_next_batch()
    nc_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='nc_roi_placeholder')
    art_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='art_roi_placeholder')
    pv_roi_placeholder = tf.placeholder(
        tf.float32, [None, config.ROI_IMAGE_HEIGHT, config.ROI_IMAGE_WIDTH, 3],
        name='pv_roi_placeholder')
    # The patch placeholders use the PATCH image dimensions, matching the
    # equivalent placeholders in Examples #1 and #2.
    nc_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='nc_patch_placeholder')
    art_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='art_patch_placeholder')
    pv_patch_placeholder = tf.placeholder(
        tf.float32,
        [None, config.PATCH_IMAGE_HEIGHT, config.PATCH_IMAGE_WIDTH, 3],
        name='pv_patch_placeholder')
    batch_label_placeholder = tf.placeholder(tf.int32, [None, 1],
                                             name='batch_label_input')
    batch_size_placeholder = tf.placeholder(tf.int32, [], name='batch_size')
    attrs_placeholder = tf.placeholder(tf.float32, [None, 15],
                                       name='attributions_placeholder')

    net = networks_with_attrs(nc_roi_placeholder,
                              art_roi_placeholder,
                              pv_roi_placeholder,
                              nc_patch_placeholder,
                              art_patch_placeholder,
                              pv_patch_placeholder,
                              attrs_placeholder,
                              base_name=netname,
                              is_training=False,  # inference mode for evaluation
                              num_classes=config.num_classes,
                              batch_size=batch_size_placeholder)
    logits = net.logits
    ce_loss, center_loss, gb_ce, lb_ce = net.build_loss(
        batch_label_placeholder, add_to_collection=False)
    predictions = []
    gts = labels
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    with tf.Session(config=gpu_config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        saver = tf.train.Saver()
        print('restore from ', model_path)
        saver.restore(sess, model_path)
        batch_count = 0
        ce_loss_values = []

        while True:
            nc_batch_rois, art_batch_rois, pv_batch_rois, \
            nc_batch_patches, art_batch_patches, pv_batch_patches, \
            batch_attrs, batch_label, epoch_end = next(generator)
            # print('batch_label is ', batch_label, np.shape(batch_label))
            logits_v, ce_loss_v = sess.run(
                [logits, ce_loss],
                feed_dict={
                    nc_roi_placeholder: nc_batch_rois,
                    art_roi_placeholder: art_batch_rois,
                    pv_roi_placeholder: pv_batch_rois,
                    nc_patch_placeholder: nc_batch_patches,
                    art_patch_placeholder: art_batch_patches,
                    pv_patch_placeholder: pv_batch_patches,
                    batch_size_placeholder: len(pv_batch_patches),
                    attrs_placeholder: batch_attrs,
                    batch_label_placeholder: np.expand_dims(batch_label,
                                                            axis=1)
                })

            ce_loss_values.append(ce_loss_v)
            print('running mean ce loss: ', np.mean(ce_loss_values))
            predictions.extend(np.argmax(logits_v, axis=1))
            # print(np.argmax(logits_v, axis=1), batch_label)
            if batch_count % 100 == 0 and batch_count != 0:
                print(
                    '%d/%d\n' %
                    (batch_count * config.val_batch_size, len(nc_rois_paths)))
            batch_count += 1
            if epoch_end:
                break

    # calculate the accuracy
    gts = np.asarray(gts, np.uint8)
    predictions = np.asarray(predictions, np.uint8)
    total_acc = np.sum(gts == predictions) / (1.0 * len(gts))
    print('total accuracy is ', total_acc)
    # per-class accuracy
    for class_id in range(5):
        idx = np.where(gts == class_id)
        cur_predictions = predictions[idx]
        cur_gts = gts[idx]
        cur_acc = np.sum(cur_predictions == cur_gts) / (1.0 * len(cur_gts))
        print('class %d accuracy is %.4f' % (class_id, cur_acc))
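
A hedged invocation sketch; `load_val_split` is a hypothetical loader standing in for however the repository assembles its path lists, attribute matrix, and labels:

if __name__ == '__main__':
    # `load_val_split` (hypothetical) returns parallel lists of ROI and
    # patch image paths, an [N, 15] attribute array, and integer labels.
    (nc_rois, art_rois, pv_rois,
     nc_patches, art_patches, pv_patches,
     attrs, labels) = load_val_split('/path/to/val')
    evulate_imgs_batch_with_attributions(nc_rois, art_rois, pv_rois,
                                         nc_patches, art_patches, pv_patches,
                                         attrs, labels,
                                         netname='resnet_v2_50',
                                         model_path='/path/to/model.ckpt')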
Example #4
def create_clones_with_attrs(train_batch_queue, val_batch_queue):
    with tf.device('/cpu:0'):
        global_step = slim.create_global_step()
        learning_rate = tf.constant(FLAGS.learning_rate, name='learning_rate')
        optimizer = tf.train.MomentumOptimizer(learning_rate,
                                               momentum=FLAGS.momentum, name='Momentum')

        tf.summary.scalar('learning_rate', learning_rate)
    # place clones (single-clone version: the original multi-GPU loop over
    # config.gpus is collapsed into one variable scope)
    pixel_link_loss = 0  # for summary only
    gradients = []
    with tf.variable_scope(tf.get_variable_scope()) as sc:
        b_nc_roi, b_art_roi, b_pv_roi, b_nc_patch, b_art_patch, b_pv_patch, b_label, b_attrs = train_batch_queue.dequeue()
        val_b_nc_roi, val_b_art_roi, val_b_pv_roi, val_b_nc_patch, val_b_art_patch, val_b_pv_patch, \
        val_b_label, val_b_attrs = val_batch_queue.dequeue()
        # build model and loss
        train_net = networks_with_attrs(b_nc_roi, b_art_roi, b_pv_roi, b_nc_patch, b_art_patch, b_pv_patch, b_attrs,
                                        FLAGS.netname, True, num_classes=config.num_classes, batch_size=FLAGS.batch_size,
                                        use_attribute_flag=FLAGS.attribute_flag, clstm_flag=FLAGS.clstm_flag)
        # ce_loss, center_loss, global_loss, local_loss = net.build_loss(b_label)
        centerloss_lambda = 1.0
        if not FLAGS.centerloss_flag:
            print('do not use the center loss')
            centerloss_lambda = 0.0
        ce_loss, center_loss, global_loss, local_loss, center_update_op = train_net.build_loss(b_label,
                                                                                               lambda_center_loss=centerloss_lambda)
        sc.reuse_variables()
        val_net = networks_with_attrs(val_b_nc_roi, val_b_art_roi, val_b_pv_roi, val_b_nc_patch, val_b_art_patch,
                                      val_b_pv_patch, val_b_attrs, FLAGS.netname, False, config.num_classes,
                                      batch_size=FLAGS.batch_size, use_attribute_flag=FLAGS.attribute_flag,
                                      clstm_flag=FLAGS.clstm_flag)
        val_ce_loss, val_center_loss, val_global_loss, val_local_loss = val_net.build_loss(val_b_label,
                                                                                           lambda_center_loss=centerloss_lambda,
                                                                                           add_to_collection=False)

        # Only the training build_loss call added its four losses (ce,
        # center, global, local) to the collection; the validation call
        # used add_to_collection=False.
        losses = tf.get_collection(tf.GraphKeys.LOSSES)
        assert len(losses) == 4
        total_clone_loss = tf.add_n(losses)
        pixel_link_loss += total_clone_loss

        regularization_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        total_clone_loss = total_clone_loss + regularization_loss

        clone_gradients = optimizer.compute_gradients(total_clone_loss)
        gradients.append(clone_gradients)

    tf.summary.scalar('final cross entropy', ce_loss)
    tf.summary.scalar('center loss', center_loss)
    tf.summary.scalar('regularization_loss', regularization_loss)
    tf.summary.scalar('global cross entropy', global_loss)
    tf.summary.scalar('local cross entropy', local_loss)
    tf.summary.scalar('val/final cross_entropy', val_ce_loss)
    tf.summary.scalar('val/center loss', val_center_loss)
    tf.summary.scalar('val/global cross entropy', val_global_loss)
    tf.summary.scalar('val/local cross entropy', val_local_loss)

    # add all gradients together
    # note that the gradients do not need to be averaged, because the average operation has been done on loss.
    averaged_gradients = sum_gradients(gradients)

    apply_grad_op = optimizer.apply_gradients(averaged_gradients, global_step=global_step)

    train_ops = [apply_grad_op, center_update_op]

    bn_update_op = util.tf.get_update_op()
    if bn_update_op is not None:
        train_ops.append(bn_update_op)

    # moving average
    if FLAGS.using_moving_average:
        tf.logging.info('using moving average in training, with decay = %f' %
                        FLAGS.moving_average_decay)
        ema = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay)
        ema_op = ema.apply(tf.trainable_variables())
        with tf.control_dependencies([apply_grad_op]):  # ema after updating
            train_ops.append(tf.group(ema_op))

    train_op = control_flow_ops.with_dependencies(train_ops, pixel_link_loss, name='train_op')
    # Expose the validation losses to the training step via train_step_kwargs.
    train_step_kwargs = {}
    train_step_kwargs['val_loss'] = val_ce_loss
    train_step_kwargs['val_gb_loss'] = val_global_loss
    train_step_kwargs['val_lb_loss'] = val_local_loss
    return train_op, train_step_kwargs
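
A hedged driver sketch; `build_batch_queue`, `FLAGS.train_dir`, and `FLAGS.max_steps` are assumptions about the surrounding repository:

# `build_batch_queue` (hypothetical) stands in for the repo's actual
# prefetch-queue input pipeline.
train_queue = build_batch_queue(split='train', batch_size=FLAGS.batch_size)
val_queue = build_batch_queue(split='val', batch_size=FLAGS.batch_size)
train_op, _ = create_clones_with_attrs(train_queue, val_queue)
slim.learning.train(train_op,
                    logdir=FLAGS.train_dir,
                    number_of_steps=FLAGS.max_steps,
                    save_summaries_secs=60,
                    save_interval_secs=600)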