Example #1
    def __init__(self, num_classes, train_layers=None, weights_path='DEFAULT'):
        """Create the graph of the densenet_161 model."""

        # Parse input arguments into class variables
        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = "./pre_trained_models/densenet_161.ckpt"
        else:
            self.WEIGHTS_PATH = weights_path
        self.train_layers = train_layers

        with tf.variable_scope("input"):
            self.image_size = densenet.densenet161.default_image_size
            self.x_input = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, 3], name="x_input")
            self.y_input = tf.placeholder(tf.float32, [None, num_classes], name="y_input")
            self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")

        # train
        with arg_scope(densenet.densenet_arg_scope()):
            self.logits, _ = densenet.densenet161(self.x_input,
                                                  num_classes=num_classes,
                                                  is_training=True,
                                                  reuse=tf.AUTO_REUSE
                                                  )

        # validation
        with arg_scope(densenet.densenet_arg_scope()):
            self.logits_val, _ = densenet.densenet161(self.x_input,
                                                      num_classes=num_classes,
                                                      is_training=False,
                                                      reuse=tf.AUTO_REUSE,
                                                      )

        with tf.name_scope("loss"):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.y_input))
            self.loss_val = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits_val, labels=self.y_input))

        with tf.name_scope("train"):

            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            if train_layers:
                var_list = [v for v in tf.trainable_variables()
                            if v.name.split('/')[-2] in train_layers
                            or v.name.split('/')[-3] in train_layers]
            else:
                # No layers specified: fine-tune every trainable variable.
                var_list = tf.trainable_variables()
            gradients = tf.gradients(self.loss, var_list)
            self.grads_and_vars = list(zip(gradients, var_list))
            optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)

            with tf.control_dependencies(update_ops):
                self.train_op = optimizer.apply_gradients(grads_and_vars=self.grads_and_vars, global_step=self.global_step)

        with tf.name_scope("probability"):
            self.probability = tf.nn.softmax(self.logits_val, name="probability")

        with tf.name_scope("prediction"):
            self.prediction = tf.argmax(self.logits_val, 1, name="prediction")

        with tf.name_scope("accuracy"):
            correct_prediction = tf.equal(self.prediction, tf.argmax(self.y_input, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"), name="accuracy")
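A minimal usage sketch for the class above, assuming it is named DenseNet161Model (the class statement is not part of the snippet) and assuming the checkpoint variables line up with the graph:

import numpy as np
import tensorflow as tf

model = DenseNet161Model(num_classes=5, train_layers=['logits'])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Restore the pre-trained weights; the classifier head is excluded here
    # because its shape depends on num_classes.
    restore_vars = [v for v in tf.global_variables() if 'logits' not in v.name]
    tf.train.Saver(restore_vars).restore(sess, model.WEIGHTS_PATH)

    batch_x = np.random.rand(4, model.image_size, model.image_size, 3)
    batch_y = np.eye(5)[np.random.randint(5, size=4)]
    _, loss = sess.run([model.train_op, model.loss],
                       feed_dict={model.x_input: batch_x,
                                  model.y_input: batch_y,
                                  model.learning_rate: 1e-3})
    print('loss:', loss)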
Example #2
def apply_network_img(data_dense, batch_size, is_training_net,
                      is_training_drop, net):

    if net == 'D':  #DenseNet-161
        with slim.arg_scope(densenet.densenet_arg_scope()):
            logits, _ = densenet.densenet161(data_dense,
                                             num_classes=None,
                                             is_training_batch=is_training_net,
                                             is_training_drop=is_training_drop)

    elif net == 'V':  #VGG-16
        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, _ = vgg.vgg_16(data_dense,
                                   num_classes=None,
                                   is_training=is_training_drop,
                                   spatial_squeeze=False,
                                   classification=False,
                                   max_pool=False)

    else:
        # Guard against returning an undefined `logits` for other net codes.
        raise ValueError("Unknown net '%s'; expected 'D' or 'V'." % net)

    return logits
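A short sketch wiring the extractor into a graph (this assumes the project's modified densenet/vgg modules, which accept the custom training flags above, are importable):

import tensorflow as tf

data = tf.placeholder(tf.float32, [None, 224, 224, 3], name='data')
is_training_net = tf.placeholder(tf.bool, name='is_training_net')
is_training_drop = tf.placeholder(tf.bool, name='is_training_drop')

# With num_classes=None the networks return feature maps rather than logits.
features = apply_network_img(data, batch_size=None,
                             is_training_net=is_training_net,
                             is_training_drop=is_training_drop, net='D')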
Example #3
def load_model_tf(parameters):
    # Setup model params
    if (parameters["device_type"] == "gpu") and tf.test.is_gpu_available():
        device_str = "/device:GPU:{}".format(parameters["gpu_number"])
    else:
        device_str = "/cpu:0"

    # Setup Graph
    graph = tf.Graph()
    with graph.as_default():
        with tf.device(device_str):
            x = tf.placeholder(tf.float32, [None, 256, 256, 3])
            with slim.arg_scope(
                    densenet_arg_scope(weight_decay=0.0, data_format='NHWC')):
                densenet121_net, end_points = densenet121(
                    x,
                    num_classes=parameters["number_of_classes"],
                    data_format='NHWC',
                    is_training=False,
                )
            y_logits = densenet121_net[:, 0, 0, :]
            y = tf.nn.softmax(y_logits)

    # Load weights
    sess = tf.Session(graph=graph,
                      config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                          per_process_gpu_memory_fraction=0.333)))
    with open(parameters["tf_torch_weights_map_path"]) as f:
        tf_torch_weights_map = json.loads(f.read())

    with sess.as_default():
        torch_weights = torch.load(parameters["initial_parameters"])
        match_dict = construct_densenet_match_dict(
            tf_variables=tf_utils.get_tf_variables(graph,
                                                   batch_norm_key="BatchNorm"),
            torch_weights=torch_weights,
            tf_torch_weights_map=tf_torch_weights_map)
        sess.run(tf_utils.construct_weight_assign_ops(match_dict))

    return sess, x, y
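A hypothetical call, with placeholder paths and class count standing in for real values; the dictionary keys mirror the ones read inside load_model_tf:

import numpy as np

parameters = {
    "device_type": "gpu",
    "gpu_number": 0,
    "number_of_classes": 4,
    "tf_torch_weights_map_path": "tf_torch_weights_map.json",  # placeholder
    "initial_parameters": "densenet121.pth",                   # placeholder
}

sess, x, y = load_model_tf(parameters)
probs = sess.run(y, feed_dict={x: np.zeros((1, 256, 256, 3), dtype=np.float32)})
print(probs.shape)  # (1, number_of_classes)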
Example #4
    def testAllEndPointsShapes(self):
        batch_size = 5
        height, width = 224, 224
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        with slim.arg_scope(densenet.densenet_arg_scope()):
            logits, end_points = densenet.densenet(inputs, num_classes)
        endpoints_shapes = {'input_layer': [batch_size, 56, 56, 48],
                            'dense_block1': [batch_size, 56, 56, 192],
                            'transition_layer1': [batch_size, 28, 28, 96],
                            'dense_block2': [batch_size, 28, 28, 384],
                            'transition_layer2': [batch_size, 14, 14, 192],
                            'dense_block3': [batch_size, 14, 14, 768],
                            'transition_layer3': [batch_size, 7, 7, 384],
                            'dense_block4': [batch_size, 7, 7, 768],
                            'classify_layer': [batch_size, num_classes]}
        self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
        for endpoint_name in endpoints_shapes:
            expected_shape = endpoints_shapes[endpoint_name]
            self.assertTrue(endpoint_name in end_points)
            self.assertListEqual(
                end_points[endpoint_name].get_shape().as_list(),
                expected_shape)
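The test follows the standard tf.test harness; a minimal entry point to run it:

if __name__ == '__main__':
    tf.test.main()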
Example #5
    def _extract_proposal_features(self, preprocessed_inputs, scope):
        """Extracts first stage RPN features.

    Extracts features using the first half of the Inception Resnet v2 network.
    We construct the network in `align_feature_maps=True` mode, which means
    that all VALID paddings in the network are changed to SAME padding so that
    the feature maps are aligned.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """
        if len(preprocessed_inputs.get_shape().as_list()) != 4:
            raise ValueError(
                '`preprocessed_inputs` must be 4 dimensional, got a '
                'tensor of shape %s' % preprocessed_inputs.get_shape())

        with slim.arg_scope(
                densenet.densenet_arg_scope(weight_decay=self._weight_decay)):
            # Force batch norm to inference mode; the `is_training=True`
            # passed to Dense_net below still controls dropout.
            with slim.arg_scope([slim.batch_norm], is_training=False):
                with tf.variable_scope('DenseNet',
                                       reuse=self._reuse_weights) as scope:
                    rpn_feature_map = densenet.Dense_net(preprocessed_inputs,
                                                         nb_blocks=2,
                                                         filters=24,
                                                         is_training=True,
                                                         dropout_ratio=0.2)
        return rpn_feature_map
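A minimal sketch of calling the extractor (this assumes `extractor` is an instance of the feature-extractor class this method belongs to, which the snippet does not show):

images = tf.placeholder(tf.float32, [1, 224, 224, 3])
rpn_feature_map = extractor._extract_proposal_features(
    images, scope='FirstStageFeatureExtractor')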
Example #6
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():

        ######################
        # Select the dataset #
        ######################
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                              FLAGS.dataset_split_name,
                                              FLAGS.dataset_dir)

        ####################
        # Select the model #
        ####################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            is_training=False)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            shuffle=False,
            common_queue_capacity=2 * FLAGS.batch_size,
            common_queue_min=FLAGS.batch_size)
        [image, file_name] = provider.get(['image', 'file_name'])

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=False)

        eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size

        image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

        images, file_names = tf.train.batch(
            [image, file_name],
            batch_size=FLAGS.batch_size,
            num_threads=FLAGS.num_preprocessing_threads,
            capacity=5 * FLAGS.batch_size)

        ####################
        # Define the model #
        ####################

        # TODO(sguada) use num_epochs=1
        num_epochs = 1
        num_batches_per_epoch = int(math.ceil(dataset.num_samples /
                                              float(FLAGS.batch_size)))
        num_steps_per_epoch = num_batches_per_epoch

        with slim.arg_scope(densenet_arg_scope()):
            logits, end_points = network_fn(images)

        print(FLAGS.checkpoint_path)
        if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        else:
            checkpoint_path = FLAGS.checkpoint_path
        tf.logging.info('Evaluating %s' % checkpoint_path)

        # Get all the variables to restore from the checkpoint file and
        # create the saver used to restore them.
        variables_to_restore = slim.get_variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        def restore_fn(sess):
            return saver.restore(sess, checkpoint_path)

        # Define the metrics to track (no loss needed for this evaluation).
        top_k_pred = tf.nn.top_k(end_points['predictions'], k=5)

        # Create the global step and an increment op for monitoring.
        global_step = get_or_create_global_step()
        global_step_op = tf.assign(
            global_step, global_step + 1
        )  # No apply_gradients call here, so increment the global step manually.

        file_names_all = []
        predictions_all = []
        all_key = {}
        c_key = {}

        # Create an evaluation step function.
        def eval_step(sess, top_k_pred, file_names, global_step):
            '''
            Takes in a session, runs the fetches, and logs timing information.
            '''
            start_time = time.time()
            p_file_name = sess.run(file_names)
            global_step_count = sess.run(global_step_op)
            time_elapsed = time.time() - start_time
            # Log some information.
            logging.info('Global Step %s: (%.2f sec/step)',
                         global_step_count, time_elapsed)

            return p_file_name

        #Get your supervisor
        sv = tf.train.Supervisor(logdir=FLAGS.test_dir,
                                 summary_op=None,
                                 saver=None,
                                 init_fn=restore_fn)

        #Now we are ready to run in one session
        #config = tf.ConfigProto(device_count={'GPU':0}) # mask GPUs visible to the session so it falls back on CPU
        with sv.managed_session() as sess:
            for step in range(num_steps_per_epoch * num_epochs):
                sess.run(sv.global_step)
                #print vital information every start of the epoch as always
                file_names_ = eval_step(sess,
                                        top_k_pred,
                                        file_names,
                                        global_step=sv.global_step)
                my_file_name = str(file_names_[0], 'utf-8')
                logging.info('my_file_name=' + my_file_name)
                if my_file_name not in all_key:
                    all_key[my_file_name] = my_file_name
                else:
                    c_key[my_file_name] = my_file_name

            # At the end of the evaluation, report the file counts.
            logging.info('Total unique files processed: ' + str(len(all_key)))
            logging.info('Total duplicate files processed: ' + str(len(c_key)))
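The snippet builds top_k_pred but never fetches it; a sketch of an eval_step variant that also returns the top-5 class indices:

def eval_step_with_topk(sess, top_k_pred, file_names, global_step_op):
    # tf.nn.top_k returns a (values, indices) pair, preserved by sess.run.
    (top_values, top_indices), p_file_name = sess.run([top_k_pred, file_names])
    sess.run(global_step_op)
    return p_file_name, top_indices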
Example #7
    def _extract_box_classifier_features(self, proposal_feature_maps, scope):
        """Extracts second stage box classifier features.

    This function reconstructs the "second half" of the Densenet
    network after the part defined in `_extract_proposal_features`.

    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name.

    Returns:
      proposal_classifier_features: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, height, width, depth]
        representing box classifier features for each proposal.
    """
        with tf.variable_scope('DenseNet', reuse=self._reuse_weights):
            with slim.arg_scope(
                    densenet.densenet_arg_scope(
                        weight_decay=self._weight_decay)):
                # Forces is_training to False to disable batch norm update.
                with slim.arg_scope([slim.batch_norm], is_training=False):
                    with slim.arg_scope(
                        [slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1,
                            padding='SAME'):
                        with tf.variable_scope('Mixed_7a'):
                            with tf.variable_scope('Branch_0'):
                                tower_conv = slim.conv2d(proposal_feature_maps,
                                                         256,
                                                         1,
                                                         scope='Conv2d_0a_1x1')
                                tower_conv_1 = slim.conv2d(
                                    tower_conv,
                                    384,
                                    3,
                                    stride=2,
                                    padding='VALID',
                                    scope='Conv2d_1a_3x3')
                            with tf.variable_scope('Branch_1'):
                                tower_conv1 = slim.conv2d(
                                    proposal_feature_maps,
                                    256,
                                    1,
                                    scope='Conv2d_0a_1x1')
                                tower_conv1_1 = slim.conv2d(
                                    tower_conv1,
                                    288,
                                    3,
                                    stride=2,
                                    padding='VALID',
                                    scope='Conv2d_1a_3x3')
                            with tf.variable_scope('Branch_2'):
                                tower_conv2 = slim.conv2d(
                                    proposal_feature_maps,
                                    256,
                                    1,
                                    scope='Conv2d_0a_1x1')
                                tower_conv2_1 = slim.conv2d(
                                    tower_conv2, 288, 3, scope='Conv2d_0b_3x3')
                                tower_conv2_2 = slim.conv2d(
                                    tower_conv2_1,
                                    320,
                                    3,
                                    stride=2,
                                    padding='VALID',
                                    scope='Conv2d_1a_3x3')
                            with tf.variable_scope('Branch_3'):
                                tower_pool = slim.max_pool2d(
                                    proposal_feature_maps,
                                    3,
                                    stride=2,
                                    padding='VALID',
                                    scope='MaxPool_1a_3x3')
                            net = tf.concat([
                                tower_conv_1, tower_conv1_1, tower_conv2_2,
                                tower_pool
                            ], 3)
                        net = slim.repeat(net,
                                          9,
                                          inception_resnet_v2.block8,
                                          scale=0.20)
                        net = inception_resnet_v2.block8(net,
                                                         activation_fn=None)
                        proposal_classifier_features = slim.conv2d(
                            net, 1536, 1, scope='Conv2d_7b_1x1')
                return proposal_classifier_features
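A sketch of the second-stage call; the crop size and depth are assumed values (17x17 proposal crops are common in this meta-architecture), and `extractor` is again an assumed instance of the enclosing class:

proposal_crops = tf.placeholder(tf.float32, [64, 17, 17, 1088])
box_features = extractor._extract_box_classifier_features(
    proposal_crops, scope='SecondStageFeatureExtractor')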
Example #8
      if not data:
        break
      image_files.append(data[:-1])
#    image_files = random.sample(image_names, sample_num)
    for i in image_files:
#        GT_dict[i] = i.split('/')[3]
        GT_num[i] = i.split('\\')[3]
        image_input = tf.read_file(i)
        image = tf.image.decode_jpeg(image_input, channels=3)
        user_images.append(image)
        processed_image = densenet_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
        user_processed_images.append(processed_image)

    # Stack the per-image tensors into a single batch.
    processed_images = tf.stack(user_processed_images)

    with slim.arg_scope(densenet.densenet_arg_scope()):
        logits, _ = densenet.densenet121(processed_images, num_classes=5, is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'model.ckpt-21031'),
        slim.get_model_variables('densenet121'))
        
    with tf.Session() as sess:
        init_fn(sess)
        probabilities = sess.run(probabilities)

#    names = os.listdir("tmp/captcha/test_photos2")
    names = ['bus', 'car', 'cat', 'dog', 'ship']
    names.sort()
    #names=['normal', 'adenoma', 'adenocarcinoma']
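To finish the evaluation above, a sketch that pairs each class name with its probability and prints the top matches (squeezing first, since the slim densenets may keep singleton spatial dimensions in the output):

import numpy as np

probs = np.squeeze(probabilities).reshape(-1, len(names))
for image_probs in probs:
    ranked = sorted(zip(names, image_probs), key=lambda p: p[1], reverse=True)
    for name, prob in ranked[:3]:
        print('%s: %.4f' % (name, prob))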
Example #9
    def __init__(self, options):
        num_classes = options.NUM_CLASSES

        with tf.variable_scope("input"):
            self.image_size = options.IMAGE_SIZE
            self.x_input = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, 3],
                name="x_input")
            self.y_input = tf.placeholder(tf.float32, [None, num_classes],
                                          name="y_input")
            self.learning_rate = tf.placeholder(tf.float32,
                                                name="learning_rate")
            self.keep_prob = None

        if options.PHASE == 'train':
            # Assumption: the layer list is read from the options object,
            # falling back to the class default when it is absent.
            train_layers = getattr(options, 'TRAIN_LAYERS', 'default')
            if train_layers == 'default':
                self.train_layers = self.DEFAULT_TRAIN_LAYERS
            else:
                self.train_layers = train_layers

            # train
            with arg_scope(densenet.densenet_arg_scope()):
                self.logits, _ = densenet.densenet121(self.x_input,
                                                      num_classes=num_classes,
                                                      is_training=True,
                                                      reuse=tf.AUTO_REUSE)
            self.logits = tf.squeeze(self.logits, [1, 2])

            # validation
            with arg_scope(densenet.densenet_arg_scope()):
                self.logits_val, _ = densenet.densenet121(
                    self.x_input,
                    num_classes=num_classes,
                    is_training=False,
                    reuse=tf.AUTO_REUSE)
            self.logits_val = tf.squeeze(self.logits_val, [1, 2])

            with tf.name_scope("loss"):
                self.loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=self.logits, labels=self.y_input))
                self.loss_val = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=self.logits_val, labels=self.y_input))

            with tf.name_scope("train"):
                self.global_step = tf.Variable(0,
                                               name="global_step",
                                               trainable=False)
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

                var_list = [
                    v for v in tf.trainable_variables()
                    if v.name.split('/')[-2] in self.train_layers
                    or v.name.split('/')[-3] in self.train_layers
                ]
                gradients = tf.gradients(self.loss, var_list)
                self.grads_and_vars = list(zip(gradients, var_list))
                # optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
                opt_name = options.OPTIMIZER
                if opt_name == 'sgd':
                    optimizer = tf.train.GradientDescentOptimizer(
                        self.learning_rate)
                elif opt_name == 'adam':
                    optimizer = tf.train.AdamOptimizer(self.learning_rate)
                else:
                    raise ValueError('Optimizer not supported')

                with tf.control_dependencies(update_ops):
                    self.train_op = optimizer.apply_gradients(
                        grads_and_vars=self.grads_and_vars,
                        global_step=self.global_step)
        else:
            with arg_scope(densenet.densenet_arg_scope()):
                self.logits_val, _ = densenet.densenet121(
                    self.x_input,
                    num_classes=num_classes,
                    is_training=False,
                    reuse=tf.AUTO_REUSE)
            self.logits_val = tf.squeeze(self.logits_val, [1, 2])

        with tf.name_scope("probability"):
            self.probability = tf.nn.softmax(self.logits_val,
                                             name="probability")

        with tf.name_scope("prediction"):
            self.prediction = tf.argmax(self.logits_val, 1, name="prediction")

        with tf.name_scope("accuracy"):
            correct_prediction = tf.equal(self.prediction,
                                          tf.argmax(self.y_input, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                   "float"),
                                           name="accuracy")

        if options.PHASE == 'train':
            print(self.logits.shape.as_list())
        print(self.logits_val.shape.as_list())
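A hypothetical options object for the class above (Model is an assumed name; the attribute names mirror the ones the constructor reads):

from types import SimpleNamespace

options = SimpleNamespace(NUM_CLASSES=5,
                          IMAGE_SIZE=224,
                          PHASE='test',
                          OPTIMIZER='sgd')
model = Model(options)  # 'Model' is an assumed class name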