Example #1
def tower_loss(scope):
    images, labels = read_and_decode()
    if net == 'vgg_16':
        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_16(images, num_classes=FLAGS.num_classes)
    elif net == 'vgg_19':
        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_19(images, num_classes=FLAGS.num_classes)
    elif net == 'resnet_v1_101':
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            logits, end_points = resnet_v1.resnet_v1_101(images, num_classes=FLAGS.num_classes)
        logits = tf.reshape(logits, [FLAGS.batch_size, FLAGS.num_classes])
    elif net == 'resnet_v1_50':
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            logits, end_points = resnet_v1.resnet_v1_50(images, num_classes=FLAGS.num_classes)
        logits = tf.reshape(logits, [FLAGS.batch_size, FLAGS.num_classes])
    elif net == 'resnet_v2_50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            logits, end_points = resnet_v2.resnet_v2_50(images, num_classes=FLAGS.num_classes)
        logits = tf.reshape(logits, [FLAGS.batch_size, FLAGS.num_classes])
    else:
        raise Exception('No network matched with net %s.' % net)
    assert logits.shape == (FLAGS.batch_size, FLAGS.num_classes)
    _ = cal_loss(logits, labels)
    losses = tf.get_collection('losses', scope)
    total_loss = tf.add_n(losses, name='total_loss')
    for l in losses + [total_loss]:
        loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
        tf.summary.scalar(loss_name, l)
    return total_loss
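Note: tower_loss follows the multi-GPU "tower" pattern: it is called once per GPU inside a device and name scope, and the scope string is what lets tf.get_collection('losses', scope) pick out only that tower's losses. A minimal driver sketch (not from the original source; TOWER_NAME and FLAGS.num_gpus are assumed to exist alongside the snippet):

# Sketch of the per-GPU loop that would call tower_loss above.
tower_losses = []
for i in range(FLAGS.num_gpus):
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
            tower_losses.append(tower_loss(scope))
            # share variables across towers
            tf.get_variable_scope().reuse_variables()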
Example #2
def top_feature_net(input, anchors, inds_inside, num_bases):
  stride = 8
  # arg_scope = resnet_v1.resnet_arg_scope(weight_decay=0.0)
  # with slim.arg_scope(arg_scope) :
  with slim.arg_scope(vgg.vgg_arg_scope()):
    # net, end_points = resnet_v1.resnet_v1_50(input, None, global_pool=False, output_stride=8)
    block5, end_points = vgg.vgg_16(input)
    block3 = end_points['conv3/conv3_3']
    block  = block5  # use the vgg_16 top block as the RPN feature (block3 is kept for a possible skip connection)
    # block   = conv2d_bn_relu(block, num_kernels=512, kernel_size=(1,1), stride=[1,1,1,1], padding='SAME', name='2')
    tf.summary.histogram('rpn_top_block', block)
    # tf.summary.histogram('rpn_top_block_weights', tf.get_collection('2/conv_weight')[0])
  with tf.variable_scope('top') as scope:
    #up     = upsample2d(block, factor = 2, has_bias=True, trainable=True, name='1')
    #up     = block
    up      = conv2d_bn_relu(block, num_kernels=128, kernel_size=(3,3), stride=[1,1,1,1], padding='SAME', name='2')
    scores  = conv2d(up, num_kernels=2*num_bases, kernel_size=(1,1), stride=[1,1,1,1], padding='SAME', name='score')
    probs   = tf.nn.softmax( tf.reshape(scores,[-1,2]), name='prob')
    deltas  = conv2d(up, num_kernels=4*num_bases, kernel_size=(1,1), stride=[1,1,1,1], padding='SAME', name='delta')

  #<todo> flip to train and test mode nms (e.g. different nms_pre_topn values): use tf.cond
  with tf.variable_scope('top-nms') as scope:    #non-max
    batch_size, img_height, img_width, img_channel = input.get_shape().as_list()
    img_scale = 1
    # pdb.set_trace()
    rois, roi_scores = tf_rpn_nms( probs, deltas, anchors, inds_inside,
                                     stride, img_width, img_height, img_scale,
                                     nms_thresh=0.7, min_size=stride, nms_pre_topn=nms_pre_topn_, nms_post_topn=nms_post_topn_,
                                     name ='nms')

  #<todo> feature = upsample2d(block, factor = 4,  ...)
  feature = block

  # Assumed return; the source snippet ends without one.
  return feature, scores, probs, deltas, rois, roi_scores
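Note: every example on this page wraps network construction in vgg.vgg_arg_scope(). For reference, the scope as defined in TF-Slim's nets/vgg.py only sets layer defaults (ReLU activations, L2 weight regularization, zero bias initialization, SAME conv padding) and creates no variables itself:

# vgg_arg_scope as defined in TF-Slim (tensorflow/models, nets/vgg.py).
def vgg_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc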
Example #3
    def __call__(self, image_batch):
        if self.model == vgg16:
            with slim.arg_scope(vgg.vgg_arg_scope()):
                features, _ = self.model(inputs=image_batch)
        elif self.model == resnet101:
            with slim.arg_scope(resnet.resnet_arg_scope()):
                features, _ = self.model(inputs=image_batch, num_classes=None)
        else:
            raise ValueError('unsupported model: %r' % self.model)
        return features
Example #4
def VGG_16(input_image):
    arg_scope = vgg.vgg_arg_scope()
    with slim.arg_scope(arg_scope):
        features, _ = vgg.vgg_16(input_image)
        # flatten the feature map into a single vector
        features = tf.reshape(features, shape=[1, -1])
        features = tf.squeeze(features)
    return features
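A minimal usage sketch for VGG_16 above; the checkpoint path and the zero-filled input are stand-ins, not part of the original:

import numpy as np

image = tf.placeholder(tf.float32, shape=[1, 224, 224, 3])
features = VGG_16(image)
saver = tf.train.Saver(slim.get_model_variables('vgg_16'))
with tf.Session() as sess:
    saver.restore(sess, 'vgg_16.ckpt')  # assumed checkpoint path
    vec = sess.run(features, feed_dict={image: np.zeros((1, 224, 224, 3), np.float32)})
    # vec is a flat vector of length 1000 (vgg_16's default num_classes)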
Example #5
def encoder_vgg(x,
                enc_final_size,
                reuse=False,
                scope_prefix='',
                hparams=None,
                is_training=True):
    """VGG network to use as encoder without the top few layers.

  Can be pretrained.

  Args:
    x: The image to encode. In the range 0 to 1.
    enc_final_size: The desired size of the encoding.
    reuse: To reuse in variable scope or not.
    scope_prefix: The prefix before the scope name.
    hparams: The python hparams.
    is_training: boolean value indicating if training is happening.

  Returns:
    The generated image.
  """
    with tf.variable_scope(scope_prefix + 'encoder', reuse=reuse):

        # Preprocess input
        x *= 256
        x = x - COLOR_NORMALIZATION_VECTOR

        with arg_scope(vgg.vgg_arg_scope()):
            # Padding because vgg_16 accepts images of size at least VGG_IMAGE_SIZE.
            x = tf.pad(x, [[0, 0], [0, VGG_IMAGE_SIZE - IMG_WIDTH],
                           [0, VGG_IMAGE_SIZE - IMG_HEIGHT], [0, 0]])
            _, end_points = vgg.vgg_16(x,
                                       num_classes=enc_final_size,
                                       is_training=is_training)
            pool5_key = [key for key in end_points.keys() if 'pool5' in key]
            assert len(pool5_key) == 1
            enc = end_points[pool5_key[0]]
            # Undoing padding.
            enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1])

        enc_shape = enc.get_shape().as_list()
        enc_shape[0] = -1
        enc_size = enc_shape[1] * enc_shape[2] * enc_shape[3]

        enc_flat = tf.reshape(enc, (-1, enc_size))
        enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob)

        enc_flat = tf.layers.dense(
            enc_flat,
            enc_final_size,
            kernel_initializer=tf.truncated_normal_initializer(stddev=1e-4, ))

        if hparams.enc_pred_use_l2norm:
            enc_flat = tf.nn.l2_normalize(enc_flat, 1)

    return enc_flat
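Note on the tf.slice call in encoder_vgg above: vgg_16's pool5 downsamples by a factor of 32, so, assuming the 64x64 frames and VGG_IMAGE_SIZE = 224 of the original video-prediction setup, the unpadded image content occupies only the top-left cells of the padded pool5 map:

# Arithmetic behind the slice (sizes assumed: 64x64 frames, padded to 224):
#   padded input 224 x 224  ->  pool5 map of 224/32 = 7 x 7 cells
#   real content  64 x 64   ->  the first 64/32 = 2 x 2 cells
enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1])  # keep only the real content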
Example #7
def rgb_feature_net(input):
    # with tf.variable_scope("rgb_base"):
    # arg_scope = resnet_v1.resnet_arg_scope(weight_decay=0.0)
    # with slim.arg_scope(arg_scope):
    with slim.arg_scope(vgg.vgg_arg_scope()):
        # net, end_points = resnet_v1.resnet_v1_50(input, None, global_pool=False, output_stride=8)
        # block=end_points['resnet_v1_50/block4']
        # block   = conv2d_bn_relu(block, num_kernels=512, kernel_size=(1,1), stride=[1,1,1,1], padding='SAME', name='2')
        block, _ = vgg.vgg_16(input)
        #<todo> feature = upsample2d(block, factor = 4,  ...)
        tf.summary.histogram('rgb_top_block', block)
    feature = block
    return feature
Example #8
    def get_logits_prob(self, batch_input):
        """
        Prediction from the model on a single batch.
        :param batch_input: the input batch. Must be from size [?, 224, 224, 3]
        :return: the logits and probabilities for the batch
        """

        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, _ = vgg.vgg_16(batch_input,
                                   num_classes=1000,
                                   is_training=False)
            probs = tf.squeeze(tf.nn.softmax(logits))[1:]
        return logits, probs
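Note: slim's VGG checkpoints expect inputs with the per-channel ImageNet means subtracted, not inputs scaled to [0, 1]. A minimal preprocessing sketch using TF-Slim's standard mean values (the helper name is ours):

# Standard VGG mean pixel from TF-Slim's vgg_preprocessing (RGB order).
_R_MEAN, _G_MEAN, _B_MEAN = 123.68, 116.78, 103.94

def preprocess_batch(images):
    # images: float32, shape [?, 224, 224, 3], values in 0..255
    return images - tf.constant([_R_MEAN, _G_MEAN, _B_MEAN], tf.float32)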
Example #9
    def __init__(self,
                 tensor,
                 keep_prob=1.0,
                 num_classes=1000,
                 retrain_layer=[],
                 weights_path='./weights/vgg_16.ckpt'):
        # Call the parent class
        Model.__init__(self, tensor, keep_prob, num_classes, retrain_layer,
                       weights_path)

        # TODO: is_training stays True during validation whenever retrain_layer is set
        is_training = bool(retrain_layer)
        with slim.arg_scope(vgg_arg_scope()):
            self.final, self.endpoints = vgg_16(self.tensor,
                                                num_classes=self.num_classes,
                                                is_training=is_training,
                                                dropout_keep_prob=keep_prob)
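One common workaround for the TODO above is to make is_training a run-time value rather than a build-time constant; slim's dropout accepts a boolean tensor. A sketch (not from the original class; the names mirror the constructor arguments):

# Feed is_training per session.run instead of baking it into the graph.
is_training = tf.placeholder_with_default(False, shape=[], name='is_training')
with slim.arg_scope(vgg_arg_scope()):
    final, endpoints = vgg_16(tensor,
                              num_classes=num_classes,
                              is_training=is_training,
                              dropout_keep_prob=keep_prob)
# training step: sess.run(train_op, feed_dict={is_training: True})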
Example #10
    def build(self):
        # Input
        self.input = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.img_size[0], self.img_size[1], self.img_size[2]])
        self.input_mean = tfutils.mean_value(self.input, self.img_mean)
        if self.base_net == 'vgg16':
            with slim.arg_scope(vgg.vgg_arg_scope()):
                outputs, end_points = vgg.vgg_16(self.input_mean,
                                                 self.num_classes)
                self.prob = tf.nn.softmax(outputs, -1)
                self.logits = outputs

        elif self.base_net == 'res50':
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                net, end_points = resnet_v1.resnet_v1_50(
                    self.input_mean,
                    self.num_classes,
                    is_training=self.is_train)
                self.prob = tf.nn.softmax(net[:, 0, 0, :], -1)
                self.logits = net[:, 0, 0, :]
        elif self.base_net == 'res101':
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                net, end_points = resnet_v1.resnet_v1_101(
                    self.input_mean,
                    self.num_classes,
                    is_training=self.is_train)
                self.prob = tf.nn.softmax(net[:, 0, 0, :], -1)
                self.logits = net[:, 0, 0, :]
        elif self.base_net == 'res152':
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                net, end_points = resnet_v1.resnet_v1_152(
                    self.input_mean,
                    self.num_classes,
                    is_training=self.is_train)
                self.prob = tf.nn.softmax(net[:, 0, 0, :], -1)
                self.logits = net[:, 0, 0, :]
        else:
            raise ValueError(
                'base network should be vgg16, res50, -101, -152...')
        self.gt = tf.placeholder(dtype=tf.int32, shape=[None])
        # self.var_list = tf.trainable_variables()

        if self.is_train:
            self.loss()
Example #11
    def build_model(self):
        is_train = self.FLAGS.is_train
        dropout_keep_prob = 1.0
        if is_train:
            dropout_keep_prob = 0.5

        images_placeholder = tf.image.resize_images(self.input_placeholder, (224, 224))

        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_16(images_placeholder, is_training=is_train, dropout_keep_prob=dropout_keep_prob)

        image_features = end_points['vgg_16/fc8']

        scene_logits = slim.fully_connected(image_features, 100, activation_fn=None, scope='scene_pred', trainable=True)
        multi_hot_logits = slim.fully_connected(image_features, 175, activation_fn=None, scope='multi_hot_logits', trainable=True)
        word_embedding_logits = slim.fully_connected(image_features, 300, activation_fn=None, scope='word_embedding_pred', trainable=True)

        obj_embedding_size = 40
        object_embedding_logits = slim.fully_connected(image_features, obj_embedding_size, activation_fn=None, scope='object_embedding_pred', trainable=True)

        outputs = [scene_logits, multi_hot_logits, word_embedding_logits, object_embedding_logits]

        return outputs
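A hedged sketch of how the four heads returned by build_model could be trained jointly; the label placeholders and loss choices are assumptions, not part of the original:

# Assumed label inputs for two of the four heads above.
scene_labels = tf.placeholder(tf.int64, [None])              # 100 scene classes
multi_hot_labels = tf.placeholder(tf.float32, [None, 175])   # multi-label targets

scene_loss = tf.losses.sparse_softmax_cross_entropy(scene_labels, scene_logits)
multi_hot_loss = tf.losses.sigmoid_cross_entropy(multi_hot_labels, multi_hot_logits)
# the embedding heads would typically add a regression term, e.g. mean_squared_error
total_loss = scene_loss + multi_hot_loss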
Example #12
def run_training():
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    #     sess = tf.Session() # config=tf.ConfigProto(log_device_placement=True))

    # create input path and labels np.array from csv annotations
    df_annos = pd.read_csv(ANNOS_CSV, index_col=0)
    df_annos = df_annos.sample(frac=1).reset_index(
        drop=True)  # shuffle the whole datasets
    if DATA == 'l8':
        path_col = ['l8_vis_jpg']
    elif DATA == 's1':
        path_col = ['s1_vis_jpg']
    elif DATA == 'l8s1':
        path_col = ['l8_vis_jpg', 's1_vis_jpg']
    else:
        raise ValueError('DATA must be l8, s1, or l8s1, got %s' % DATA)

    input_files_train = JPG_DIR + df_annos.loc[df_annos.partition == 'train',
                                               path_col].values
    input_labels_train = df_annos.loc[df_annos.partition == 'train',
                                      'pop_density_log2'].values
    input_files_val = JPG_DIR + df_annos.loc[df_annos.partition == 'val',
                                             path_col].values
    input_labels_val = df_annos.loc[df_annos.partition == 'val',
                                    'pop_density_log2'].values
    input_id_train = df_annos.loc[df_annos.partition == 'train',
                                  'village_id'].values
    input_id_val = df_annos.loc[df_annos.partition == 'val',
                                'village_id'].values

    print('input_files_train shape:', input_files_train.shape)
    train_set_size = len(input_labels_train)

    # data input
    with tf.device('/cpu:0'):
        train_images_batch, train_labels_batch, _ = \
        dataset.input_batches(FLAGS.batch_size, FLAGS.output_size, input_files_train, input_labels_train, input_id_train,
                              IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL, regression=True, augmentation=True, normalization=True)
        val_images_batch, val_labels_batch, _ = \
        dataset.input_batches(FLAGS.batch_size, FLAGS.output_size, input_files_val, input_labels_val, input_id_val,
                              IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL, regression=True, augmentation=False, normalization=True)

    images_placeholder = tf.placeholder(
        tf.float32, shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL])
    labels_placeholder = tf.placeholder(tf.float32, shape=[
        None,
    ])
    print('finish data input')

    TRAIN_BATCHES_PER_EPOCH = int(
        train_set_size /
        FLAGS.batch_size)  # number of training batches/steps in each epoch
    MAX_STEPS = TRAIN_BATCHES_PER_EPOCH * FLAGS.max_epoch  # total number of training batches/steps

    # CNN forward reference
    if MODEL == 'vgg':
        with slim.arg_scope(
                vgg.vgg_arg_scope(weight_decay=FLAGS.weight_decay)):
            outputs, _ = vgg.vgg_16(images_placeholder,
                                    num_classes=FLAGS.output_size,
                                    dropout_keep_prob=FLAGS.dropout_keep,
                                    is_training=True)
            outputs = tf.squeeze(
                outputs
            )  # change shape from (B,1) to (B,), same as label input
    if MODEL == 'resnet':
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            outputs, _ = resnet_v1.resnet_v1_152(images_placeholder,
                                                 num_classes=FLAGS.output_size,
                                                 is_training=True)
            outputs = tf.squeeze(
                outputs
            )  # change shape from (B,1) to (B,), same as label input

    # loss
    labels_real = tf.pow(2.0, labels_placeholder)
    outputs_real = tf.pow(2.0, outputs)

    # only loss_log2_mse are used for gradient calculate, model minimize this value
    loss_log2_mse = tf.reduce_mean(tf.squared_difference(
        labels_placeholder, outputs),
                                   name='loss_log2_mse')
    loss_real_rmse = tf.sqrt(tf.reduce_mean(
        tf.squared_difference(labels_real, outputs_real)),
                             name='loss_real_rmse')
    loss_real_mae = tf.losses.absolute_difference(labels_real, outputs_real)

    tf.summary.scalar('loss_log2_mse', loss_log2_mse)
    tf.summary.scalar('loss_real_rmse', loss_real_rmse)
    tf.summary.scalar('loss_real_mae', loss_real_mae)

    # accuracy (R2)
    def r_squared(labels, outputs):
        sst = tf.reduce_sum(
            tf.squared_difference(labels, tf.reduce_mean(labels)))
        sse = tf.reduce_sum(tf.squared_difference(labels, outputs))
        return (1.0 - tf.div(sse, sst))

    r2_log2 = r_squared(labels_placeholder, outputs)
    r2_real = r_squared(labels_real, outputs_real)

    tf.summary.scalar('r2_log2', r2_log2)
    tf.summary.scalar('r2_real', r2_real)

    # determine the model variables to restore from the pre-trained checkpoint
    if MODEL == 'vgg':
        if DATA == 'l8s1':
            model_variables = slim.get_variables_to_restore(
                exclude=['vgg_16/fc8', 'vgg_16/conv1'])
        else:
            model_variables = slim.get_variables_to_restore(
                exclude=['vgg_16/fc8'])
    if MODEL == 'resnet':
        model_variables = slim.get_variables_to_restore(
            exclude=['resnet_v1_152/logits', 'resnet_v1_152/conv1'])

    # training step and learning rate
    global_step = tf.Variable(0, name='global_step',
                              trainable=False)  #, dtype=tf.int64)
    learning_rate = tf.train.exponential_decay(
        FLAGS.learning_rate,  # initial learning rate
        global_step=global_step,  # current step
        decay_steps=MAX_STEPS,  # total number of steps over which to decay
        decay_rate=FLAGS.lr_decay_rate
    )  # final learning rate = FLAGS.learning_rate * decay_rate
    tf.summary.scalar('learning_rate', learning_rate)

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    #     optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
    #     optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)

    # to update gradients only in the first and last layers
    #     vars_update = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'vgg_16/(conv1|fc8)')
    #     print('variables to update in training: ', vars_update)

    train_op = optimizer.minimize(
        loss_log2_mse, global_step=global_step)  #, var_list = vars_update)

    # summary output in tensorboard
    summary = tf.summary.merge_all()
    summary_writer_train = tf.summary.FileWriter(
        os.path.join(LOG_DIR, 'log_train'), sess.graph)
    summary_writer_val = tf.summary.FileWriter(
        os.path.join(LOG_DIR, 'log_val'), sess.graph)

    # variable initialize
    init = tf.global_variables_initializer()
    sess.run(init)

    # restore the model from pre-trained checkpoint
    restorer = tf.train.Saver(model_variables)
    restorer.restore(sess, PRETRAIN_WEIGHTS)
    print('loaded pre-trained weights: ', PRETRAIN_WEIGHTS)

    # saver object to save checkpoint during training
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

    print('start training...')
    epoch = 0
    best_r2 = -float('inf')
    for step in range(MAX_STEPS):
        if step % TRAIN_BATCHES_PER_EPOCH == 0:
            epoch += 1

        start_time = time.time()  # record the time used for each batch

        images_out, labels_out = sess.run(
            [train_images_batch,
             train_labels_batch])  # inputs of this batch, numpy array format

        duration_batch = time.time() - start_time

        if step == 0:
            print("finished reading batch data")
            print("images_out shape:", images_out.shape)
        feed_dict = {
            images_placeholder: images_out,
            labels_placeholder: labels_out
        }
        _, train_loss, train_accuracy, train_outputs, lr = \
            sess.run([train_op, loss_log2_mse, r2_log2, outputs, learning_rate], feed_dict=feed_dict)

        duration = time.time() - start_time

        if step % 10 == 0 or (
                step + 1) == MAX_STEPS:  # print training loss every 10 batches
            print('Step %d epoch %d lr %.3e: log2 MSE loss = %.4f log2 R2 = %.4f (%.3f sec, %.3f sec(each batch))' \
                  % (step, epoch, lr, train_loss, train_accuracy, duration*10, duration_batch))
            summary_str = sess.run(summary, feed_dict=feed_dict)
            summary_writer_train.add_summary(summary_str, step)
            summary_writer_train.flush()

        if step % 50 == 0 or (
                step + 1
        ) == MAX_STEPS:  # calculate and print validation loss every 50 batches
            images_out, labels_out = sess.run(
                [val_images_batch, val_labels_batch])
            feed_dict = {
                images_placeholder: images_out,
                labels_placeholder: labels_out
            }

            val_loss, val_accuracy = sess.run([loss_log2_mse, r2_log2],
                                              feed_dict=feed_dict)
            print('Step %d epoch %d: val log2 MSE = %.4f val log2 R2 = %.4f ' %
                  (step, epoch, val_loss, val_accuracy))

            summary_str = sess.run(summary, feed_dict=feed_dict)
            summary_writer_val.add_summary(summary_str, step)
            summary_writer_val.flush()

            # in each epoch, if the validation R2 is higher than best R2, save the checkpoint
            if step % (TRAIN_BATCHES_PER_EPOCH -
                       TRAIN_BATCHES_PER_EPOCH % 50) == 0:
                if val_accuracy > best_r2:
                    best_r2 = val_accuracy
                    checkpoint_file = os.path.join(LOG_DIR, 'model.ckpt')
                    saver.save(sess,
                               checkpoint_file,
                               global_step=step,
                               write_state=True)
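As the inline comments in run_training note, setting decay_steps = MAX_STEPS makes the schedule end at exactly learning_rate * decay_rate. A quick check of tf.train.exponential_decay's formula with assumed numbers:

# lr(step) = initial_lr * decay_rate ** (step / decay_steps)   (staircase=False)
initial_lr, decay_rate, max_steps = 1e-3, 0.1, 10000  # assumed values
lr_at = lambda step: initial_lr * decay_rate ** (step / max_steps)
print(lr_at(0))          # 0.001
print(lr_at(max_steps))  # 0.0001 == initial_lr * decay_rate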
Example #13
def main(argv=None):
    # load the preprocessed data
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_examples = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print('%d training, %d validation, %d testing' %
          (n_training_examples, len(validation_labels), len(testing_labels)))

    # define the inputs to vgg16
    images = tf.placeholder(tf.float32, [None, 224, 224, 3],
                            name='input_image')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # define the vgg16 model
    with slim.arg_scope(vgg.vgg_arg_scope()):
        logits, _ = vgg.vgg_16(images, num_classes=N_CLASSES)

    # loss function
    loss_fun = tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                               logits)
    # training
    # train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(tf.losses.get_total_loss())
    # train only the last layer
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss(), var_list=get_trainable_variables())

    # accuracy
    with tf.variable_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    ckpt = tf.train.get_checkpoint_state(SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        # continue training from previously saved parameters
        variables_to_restore = slim.get_model_variables()
        print('continue training from %s' % ckpt)
        step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        step = int(step)
        ckpt = ckpt.model_checkpoint_path
    else:
        # no saved checkpoint yet, so start from the transferred pretrained weights
        ckpt = TRAINED_CKPT_FILE
        variables_to_restore = get_tuned_variable()
        print('loading tuned variables from %s' % TRAINED_CKPT_FILE)
        step = 0

    load_fn = slim.assign_from_checkpoint_fn(ckpt,
                                             variables_to_restore,
                                             ignore_missing_vars=True)

    # start a session and train
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # initialize all variables
        init = tf.global_variables_initializer()
        sess.run(init)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(step + 1, step + 1 + STEPS):
            start_time = time.time()
            # run the training step (only selected variables are updated)
            _, loss_val = sess.run(
                [train_step, loss_fun],
                feed_dict={
                    images: training_images[start:end],
                    labels: training_labels[start:end]
                })
            duration = time.time() - start_time

            #print('current train step duration %.3f' % duration)

            if i % 10 == 0:
                print('after %d train step, loss value is: %.4f' %
                      (i, loss_val))

            # output logs
            if i % 100 == 0:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print('Step %d Validation accuracy = %.1f%%' %
                      (i, validation_accuracy * 100.0))

            start = end
            if start == n_training_examples:
                start = 0

            end = start + BATCH
            if end > n_training_examples:
                end = n_training_examples

        # evaluate accuracy on the test set
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100.0))
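main relies on helpers defined outside this snippet. A plausible sketch of get_trainable_variables for this fine-tune-only-the-last-layer setup (the scope list is an assumption):

# Hypothetical helper: collect only the variables of the layers to fine-tune.
def get_trainable_variables():
    scopes = ['vgg_16/fc8']  # assumed: train only the final layer
    variables = []
    for scope in scopes:
        variables += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    return variables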
Example #14
    def __call__(self, inputs, training=False):
        # Run vgg_16; [0] keeps the logits and drops the end_points dict.
        with slim.arg_scope(vgg_arg_scope()):
            return slim_vgg_16(inputs, is_training=training)[0]
Example #15
def run_testing():

    with tf.Graph().as_default():

        with slim.arg_scope(vgg.vgg_arg_scope()):

            images, labels, filenames = inputs(FLAGS.batch_size,
                                               FLAGS.num_epochs)

            images = tf.reshape(images, [-1, gd.INPUT_SIZE, gd.INPUT_SIZE, 3])
            logits, end_points = alexnet.alexnet_v2(images,
                                                    num_classes=gd.NUM_CLASSES,
                                                    is_training=False)

            print(labels)

            print(logits)

            eps = tf.constant(value=1e-10)

            flat_logits = logits + eps

            softmax = tf.nn.softmax(flat_logits)

            probability = tf.reduce_max(softmax, axis=1)
            ll = tf.argmax(logits, axis=1)
            print(ll)
            variables_to_restore = slim.get_variables_to_restore()

            saver = tf.train.Saver(variables_to_restore)

            eval_correct = evaluation(logits, labels)

        config = tf.ConfigProto()

        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:

            saver.restore(sess, checkpoint_file)

            coord = tf.train.Coordinator()

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            step = 0

            if not os.path.exists(gd.DIR_DESCRIPTION):
                os.makedirs(gd.DIR_DESCRIPTION)

            csvfile = open(
                gd.DIR_DESCRIPTION +
                "/12cls_2017-11-16_alexnet_sensi_color_change_wrongprediction.csv",
                "a", newline='')
            writer = csv.writer(csvfile)
            writer.writerow(['labels', 'prediction', 'filename'])

            file_name2 = "/detail_result.csv"
            csvfile2 = open(gd.DIR_DESCRIPTION + file_name2, "w", newline='')
            writer2 = csv.writer(csvfile2)
            writer2.writerow(['labels', 'prediction', 'probability'])

            for step in range(gd.TOTAL):
                #while not coord.should_stop():
                #accuracy=do_eval(sess,eval_correct,log_name)

                labels_out, prediction_out, filename, softmax_out, probability_max = sess.run(
                    [labels, ll, filenames, softmax, probability])
                print("%d : %d ,%d ,max_probability: %f" %
                      (step, labels_out[0], prediction_out[0],
                       probability_max[0]))

                writer2.writerow(
                    [labels_out[0], prediction_out[0], probability_max[0]])
                #print(labels_out[0])

                #print(prediction_out[0])

                count_label[labels_out[0]] += 1

                if labels_out[0] == prediction_out[0]:
                    count_prediction[prediction_out[0]] += 1
                else:
                    writer.writerow(
                        [labels_out[0], prediction_out[0], filename[0]])
                confusion_matrix[labels_out[0]][prediction_out[0]] += 1
                #details_accuray(labels_out,prediction_out,gd.NUM_CLASSES)
            csvfile.close()
        print(count_label)
        print(count_prediction)
        print(confusion_matrix)
        print('\n')
        for i in range(num_of_class):
            print(confusion_matrix[i])
        precision_result = [0 for i in range(num_of_class)]
        recall_result = [0 for i in range(num_of_class)]
        #for i in range(num_of_class):
        #	precision_result[i]=confusion_matrix[i][i]/
        precision_sum = [sum(col) for col in zip(*confusion_matrix)]  # column sums

        print("precision_sum:")
        print(precision_sum)
        for i in range(num_of_class):
            precision_result[i] = confusion_matrix[i][i] / precision_sum[i]

        print("average_precision:")
        print(precision_result)

        print("mean_average_precision:")
        print(sum(precision_result) / num_of_class)

        print("recall_sum:")
        recall_sum = [sum(row) for row in confusion_matrix]  # row sums
        print(recall_sum)

        for i in range(num_of_class):
            recall_result[i] = confusion_matrix[i][i] / recall_sum[i]
        print("recall:")
        print(recall_result)

        print("mean_recall:")
        print(sum(recall_result) / num_of_class)

        print("accuracy:%d/%d" % (sum(count_prediction), sum(count_label)))
        #print(sum(count_prediction))
        #print(count_prediction)
        #print(sum(count_label))
        print(sum(count_prediction) / sum(count_label))
Example #16
           "First_Student_IC_school_bus_202076.jpg")

    image_string = urllib2.urlopen(url).read()
    image = tf.image.decode_jpeg(image_string, channels=3)

    # Convert image to float32 before subtracting the
    # mean pixel value
    image_float = tf.to_float(image, name='ToFloat')

    # Subtract the mean pixel value from each pixel
    processed_image = _mean_image_subtraction(image_float,
                                              [_R_MEAN, _G_MEAN, _B_MEAN])

    input_image = tf.expand_dims(processed_image, 0)

    with slim.arg_scope(vgg.vgg_arg_scope()):
        # spatial_squeeze option enables to use network in a fully
        # convolutional manner
        logits, _ = vgg.vgg_16(input_image,
                               num_classes=1000,
                               is_training=False,
                               spatial_squeeze=False)

    # For each pixel we get predictions for each class
    # out of 1000. We need to pick the one with the highest
    # probability. To be more precise, these are not probabilities,
    # because we didn't apply softmax. But if we pick a class
    # with the highest value it will be equivalent to picking
    # the highest value after applying softmax
    pred = tf.argmax(logits, axis=3)
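Because spatial_squeeze=False keeps the spatial dimensions, pred above is a [1, H', W'] map of class ids, one per roughly 32x32-pixel cell. A minimal sketch of actually running it (the checkpoint path is an assumption):

# Restore pretrained weights and fetch the per-cell class map.
init_fn = slim.assign_from_checkpoint_fn(
    'vgg_16.ckpt',  # assumed checkpoint path
    slim.get_model_variables('vgg_16'))
with tf.Session() as sess:
    init_fn(sess)
    segmentation = sess.run(pred)  # shape [1, H', W'], int64 class ids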