def freeze_model(name):
    #checkpoints_dir = '/data/huang/behaviour/data/tfmodel'
    OUTPUT_PB_FILENAME = 'inception_resnet_v2_behaviour_371_9_25_{}k.pb'.format(name[0:3])
    NUM_CLASSES = 371
    tensorName_v4 = 'InceptionResnetV2/Logits/Predictions'

    with tf.Graph().as_default():
        
        image_placeholder = tf.placeholder(tf.float32, shape=(1,512,512,3), name='input_image')
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            _, probabilities = inception.inception_resnet_v2(image_placeholder,
                                                             num_classes = NUM_CLASSES,
                                                             is_training=False)
    
        #model_path = tf.train.latest_checkpoint(checkpoints_dir)
        model_path = '/data/huang/behaviour/data/tfmodel/model_backup/model.ckpt-{}'.format(name)
    
        init_fn = slim.assign_from_checkpoint_fn(
            model_path,
            slim.get_model_variables())
    
        with tf.Session() as sess:
            # Now call the initialization function within the session
            init_fn(sess)
            constant_graph = convert_variables_to_constants(sess, sess.graph_def, ["input_image",tensorName_v4])
            tf.train.write_graph(constant_graph, '.', OUTPUT_PB_FILENAME, as_text=False)
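
For reference, a minimal sketch of loading the frozen .pb produced above and running one image through it. run_frozen_graph is a hypothetical helper added here for illustration; the tensor names match the ones frozen above.

import tensorflow as tf

def run_frozen_graph(pb_path, image):
    """Load a frozen graph and run a (1, 512, 512, 3) float image through it."""
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        # Import with an empty name so tensor names stay as exported
        tf.import_graph_def(graph_def, name='')
        input_t = graph.get_tensor_by_name('input_image:0')
        probs_t = graph.get_tensor_by_name(
            'InceptionResnetV2/Logits/Predictions:0')
        with tf.Session(graph=graph) as sess:
            return sess.run(probs_t, feed_dict={input_t: image})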
Example #2
def classify_from_url(url):
    image_string = urllib.urlopen(url).read()
    image = tf.image.decode_jpeg(image_string, channels=3)
    processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
    processed_images  = tf.expand_dims(processed_image, 0)
    
    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        logits, _ = inception.inception_resnet_v2(processed_images, num_classes=1001, is_training=False)
    probabilities = tf.nn.softmax(logits)
    
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, checkpoints_filename),
        slim.get_model_variables(model_name))

    # NOTE: relies on a tf.Session `sess` created outside this snippet
    init_fn(sess)
    np_image, probabilities = sess.run([image, probabilities])
    probabilities = probabilities[0, 0:]
    sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
        
    plt.figure()
    plt.imshow(np_image.astype(np.uint8))
    plt.axis('off')
    plt.show()

    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(5):
        index = sorted_inds[i]
        # probabilities are in [0, 1]; scale to a percentage for display
        print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index]))
Example #3
    def __call__(self, image_input, training=False, keep_prob=1.0):
        """
        Runs the CNN producing the embeddings and the gradients.
        :param image_input: Image input to produce embeddings for. [batch_size, image_size, image_size, 1]
        :param training: A flag indicating training or evaluation
        :param keep_prob: A tf placeholder of type tf.float32 indicating the amount of dropout applied
        :return: Embeddings of size [batch_size, 2048]
        """
        activation_fn = tf.nn.relu

        with slim.arg_scope(
                inception.inception_resnet_v2_arg_scope(
                    weight_decay=FLAGS.weight_decay)):
            with tf.variable_scope("", reuse=self.reuse):
                with tf.variable_scope(None, 'InceptionResnetV2',
                                       [image_input]) as scope:
                    with slim.arg_scope([slim.batch_norm, slim.dropout],
                                        is_training=training):
                        net, end_points = inception.inception_resnet_v2_base(
                            image_input,
                            scope=scope,
                            activation_fn=activation_fn)
                        feature_map = end_points['PreAuxLogits']

                        self.reuse = True

        return feature_map
Example #4
    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                min_pooled = -tf.nn.max_pool(-x_input,
                                             ksize=[1, 10, 10, 1],
                                             strides=[1, 1, 1, 1],
                                             padding='SAME')
                max_pooled = tf.nn.max_pool(x_input,
                                            ksize=[1, 10, 10, 1],
                                            strides=[1, 1, 1, 1],
                                            padding='SAME')
                avg_pooled = (min_pooled + max_pooled) / 2
                # Keep a pixel only where the local dynamic range (max - min)
                # exceeds 0.3; otherwise replace it with the local midpoint
                compress_pooled = avg_pooled + (x_input - avg_pooled) * (
                    tf.sign(max_pooled - min_pooled - 0.3) + 1) / 2
                logits, end_points = inception.inception_resnet_v2(
                    compress_pooled,
                    num_classes=self.num_classes,
                    is_training=is_training,
                    reuse=reuse)

            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits
Example #5
  def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with tf.contrib.slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
      inception.inception_resnet_v2(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example #6
    def __call__(self, feat_a, feat_b, training=False, spatial_matching=True):
        """
        This module calculates the cosine distance between each of the support set embeddings and the target
        image embeddings.
        :param support_set: The embeddings of the support set images, tensor of shape [sequence_length, batch_size, 64]
        :param input_image: The embedding of the target image, tensor of shape [batch_size, 64]
        :param name: Name of the op to appear on the graph
        :param training: Flag indicating training or evaluation (True/False)
        :return: A tensor with cosine similarities of shape [batch_size, sequence_length, 1]
        """

        with slim.arg_scope(
                inception.inception_resnet_v2_arg_scope(
                    use_batch_norm=True, weight_decay=FLAGS.weight_decay)):
            with tf.variable_scope("", reuse=self.reuse):
                with slim.arg_scope([slim.batch_norm, slim.dropout],
                                    is_training=training):
                    eps = 1e-10

                    if spatial_matching:
                        concated_representation = tf.concat([feat_a, feat_b],
                                                            axis=3)
                        net = slim.conv2d(concated_representation,
                                          2048, [3, 3],
                                          scope='Match_1a_3x3')
                        net = slim.conv2d(net,
                                          2048, [3, 3],
                                          scope='Match_1b_3x3')
                        #                         net = slim.max_pool2d(net, [2, 2], scope='Match_pool2')
                        net = slim.flatten(net)
                    else:
                        net = tf.concat([feat_a, feat_b], axis=1)


                    # aux_logits = slim.dropout(aux_logits, 0.75, scope='Dropout_match', is_training=training)
                    net = slim.fully_connected(net,
                                               1024,
                                               activation_fn=tf.nn.relu,
                                               scope='Match_Prelogits1')
                    net = slim.dropout(net,
                                       0.50,
                                       scope='Dropout_match',
                                       is_training=training)
                    net = slim.fully_connected(net,
                                               1024,
                                               activation_fn=tf.nn.relu,
                                               scope='Match_Prelogits2')
                    net = slim.dropout(net,
                                       0.50,
                                       scope='Dropout_match',
                                       is_training=training)
                    net = slim.fully_connected(net,
                                               2,
                                               activation_fn=None,
                                               scope='Match_logits')
                    self.reuse = True
        return net
Example #7
  def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with tf.contrib.slim.arg_scope(
        inception.inception_resnet_v2_arg_scope(batch_norm_scale=True)):
      inception.inception_resnet_v2(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example #8
def inception_resnet_v2(inputs, is_training, opts):
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope(
            weight_decay=opts.weight_decay,
            batch_norm_decay=opts.batch_norm_decay,
            batch_norm_epsilon=opts.batch_norm_epsilon,
            activation_fn=tf.nn.relu)):
        return inception.inception_resnet_v2(
            inputs,
            num_classes=opts.num_classes,
            is_training=is_training,
            dropout_keep_prob=opts.dropout_keep_prob,
            reuse=None,
            create_aux_logits=opts.create_aux_logits,
            global_pool=opts.global_pool)
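
A minimal usage sketch for the wrapper above, assuming it is in scope. The opts fields mirror exactly those the wrapper reads; the concrete values here are illustrative assumptions, not defaults from the source.

from collections import namedtuple
import tensorflow as tf

Opts = namedtuple('Opts', ['weight_decay', 'batch_norm_decay',
                           'batch_norm_epsilon', 'num_classes',
                           'dropout_keep_prob', 'create_aux_logits',
                           'global_pool'])
# Illustrative values only
opts = Opts(weight_decay=4e-5, batch_norm_decay=0.9997,
            batch_norm_epsilon=0.001, num_classes=1001,
            dropout_keep_prob=0.8, create_aux_logits=True,
            global_pool=False)

images = tf.placeholder(tf.float32, (None, 299, 299, 3))
logits, end_points = inception_resnet_v2(images, is_training=False, opts=opts)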
Example #9
  def __call__(self, x_input, batch_size=None, is_training=False):
    """Constructs model and return probabilities for given input."""
    reuse = True if self.built else None
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
      with tf.variable_scope(self.ckpt):
        logits, end_points = inception.inception_resnet_v2(
            x_input, num_classes=self.num_classes, is_training=is_training,
            reuse=reuse)

      preds = tf.argmax(logits, axis=1)
    self.built = True
    self.logits = logits
    self.preds = preds
    return logits
Example #10
    def __call__(self, x_input):
        """Constructs the model and returns probabilities for the given input."""
        reuse = True if self.built else None
        x_input = image_normalize(x_input, normalization_method[4])
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            _, end_points = inception.inception_resnet_v2(
                x_input,
                num_classes=self.num_classes,
                is_training=False,
                reuse=reuse)
        self.built = True
        output = end_points['Predictions']
        # Strip off the extra reshape op at the output
        probs = output.op.inputs[0]
        return probs
Example #11
def freeze_model(name):
    checkpoints_dir = '/data/huang/behaviour/data/tfmodel'
    OUTPUT_PB_FILENAME = 'inception_resnet_v2_behaviour_309_4_10_{}k.pb'.format(
        name[0:3])
    NUM_CLASSES = 309
    tensorName_v4 = 'InceptionResnetV2/Logits/Predictions'

    image_size = inception.inception_resnet_v2.default_image_size

    with tf.Graph().as_default():

        input_image_t = tf.placeholder(tf.string, name='input_image')
        image = tf.image.decode_jpeg(input_image_t, channels=3)

        processed_image = inception_preprocessing.preprocess_for_eval(
            image, image_size, image_size, central_fraction=None)

        processed_images = tf.expand_dims(processed_image, 0)

        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            logits, _ = inception.inception_resnet_v2(processed_images,
                                                      num_classes=NUM_CLASSES,
                                                      is_training=False)
        # Apply softmax function to the logits (output of the last layer of the network)
        probabilities = tf.nn.softmax(logits)

        #model_path = tf.train.latest_checkpoint(checkpoints_dir)
        model_path = '/data/huang/behaviour/data/tfmodel/model_backup/model.ckpt-{}'.format(
            name)

        init_fn = slim.assign_from_checkpoint_fn(model_path,
                                                 slim.get_model_variables())

        with tf.Session() as sess:
            # Now call the initialization function within the session
            init_fn(sess)
            constant_graph = convert_variables_to_constants(
                sess, sess.graph_def,
                ["input_image", "DecodeJpeg", tensorName_v4])
            tf.train.write_graph(constant_graph,
                                 '.',
                                 OUTPUT_PB_FILENAME,
                                 as_text=False)
Example #12
    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                avg_pooled = tf.nn.avg_pool(x_input,
                                            ksize=[1, 6, 6, 1],
                                            strides=[1, 1, 1, 1],
                                            padding='SAME')
                logits, end_points = inception.inception_resnet_v2(
                    avg_pooled,
                    num_classes=self.num_classes,
                    is_training=is_training,
                    reuse=reuse)

            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits
Example #13
    def configure_inception_resnet(self):

        try:
            from nets import inception
            from datasets import dataset_utils
        except ImportError:
            import sys
            print(
                "Make sure tensorflow/models is installed and accessible in the environment"
            )
            print("export PYTHONPATH=/home/ubuntu/models/slim")
            sys.exit()

        image_size = inception.inception_resnet_v2.default_image_size

        self.crop_generator = deepprofiler.imaging.cropping.SingleImageCropGenerator(
            self.config, self.dset)
        # Setup pretrained model
        network_input = crop_transform(self.raw_crops, image_size)
        url = self.config["profiling"]["url"]
        checkpoint = self.config["profiling"]["checkpoint"]
        if not os.path.isfile(checkpoint):
            dataset_utils.download_and_uncompress_tarball(
                url, os.path.dirname(checkpoint))
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            _, self.endpoints = inception.inception_resnet_v2(
                network_input, num_classes=1001, is_training=False)
        init_fn = slim.assign_from_checkpoint_fn(checkpoint,
                                                 slim.get_model_variables())

        # Session configuration
        configuration = tf.ConfigProto()
        configuration.gpu_options.allow_growth = True
        configuration.gpu_options.visible_device_list = self.config[
            "profiling"]["gpu"]

        self.sess = tf.Session(config=configuration)
        init_fn(self.sess)
        self.crop_generator.start(self.sess)
Example #14
def profile(config, dset):

    crop_shape = (
        config["sampling"]["box_size"],      # height
        config["sampling"]["box_size"],      # width
        len(config["image_set"]["channels"]) # channels
    )

    crop_generator = deepprofiler.imaging.cropping.SingleImageCropGenerator(config, dset)
    num_channels = len(config["image_set"]["channels"])

    # Setup pretrained model 
    raw_crops = tf.placeholder(tf.float32, shape=(None, crop_shape[0], crop_shape[1], crop_shape[2]))
    network_input = crop_transform(raw_crops)
    url = config["profiling"]["url"]
    checkpoint = config["profiling"]["checkpoint"]
    if not os.path.isfile(checkpoint):
        dataset_utils.download_and_uncompress_tarball(url, os.path.dirname(checkpoint))
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        _, endpoints = inception.inception_resnet_v2(network_input, num_classes=1001, is_training=False)
    init_fn = slim.assign_from_checkpoint_fn(checkpoint, slim.get_model_variables())
   
    # Session configuration
    configuration = tf.ConfigProto()
    configuration.gpu_options.allow_growth = True
    configuration.gpu_options.visible_device_list = config["profiling"]["gpu"]
   
    sess = tf.Session(config=configuration)
    init_fn(sess)
    crop_generator.start(sess)


    def check(meta):
        output_file = config["profiling"]["output_dir"] + "/{}_{}_{}.npz"
        output_file = output_file.format( meta["Metadata_Plate"], meta["Metadata_Well"], meta["Metadata_Site"])

        # Check if features were computed before
        if os.path.isfile(output_file):
            print("Already done:", output_file)
            return False
        else:
            return True

    
    # Function to process a single image
    def extract_features(key, image_array, meta):
        output_file = config["profiling"]["output_dir"] + "/{}_{}_{}.npz"
        output_file = output_file.format( meta["Metadata_Plate"], meta["Metadata_Well"], meta["Metadata_Site"])

        batch_size = config["validation"]["minibatch"]
        image_key, image_names, outlines = dset.getImagePaths(meta)
        total_crops, pads = crop_generator.prepare_image(
                                   sess,
                                   image_array,
                                   meta,
                                   config["validation"]["sample_first_crops"]
                            )

        # Initialize data buffer
        data = np.zeros(shape=(num_channels, total_crops, num_features))
        b = 0
        start = tic()

        # Extract features in batches
        batches = []
        for batch in crop_generator.generate(sess):
            crops = batch[0]
            feats = sess.run(endpoints['PreLogitsFlatten'], feed_dict={raw_crops:crops})
            feats = np.reshape(feats, (num_channels, batch_size, num_features))
            data[:, b * batch_size:(b + 1) * batch_size, :] = feats
            b += 1
            batches.append(batch)

        # Remove paddings and concatenate features of all channels
        if pads > 0:
            data = data[:, :-pads, :]
        data = np.moveaxis(data, 0, 1)
        data = np.reshape(data, (data.shape[0], data.shape[1]*data.shape[2]))

        # Save features
        np.savez_compressed(output_file, f=data)
        # the padded rows were already removed from `data` above
        toc(image_key + " (" + str(data.shape[0]) + " cells)", start)

        # Save crops TODO: parameterize saving crops or a sample of them.
        if False:
            batch_data = {"total_crops": total_crops, "pads": pads, "batches": batches}
            with open(output_file.replace(".npz", ".pkl"), "wb") as batch_file:
                pickle.dump(batch_data, batch_file)

        
    dset.scan(extract_features, frame="all", check=check)
    print("Profiling: done")
Example #15
# Get the readable object class names
names = imagenet.create_readable_names_for_imagenet_labels()

# slim lives in tensorflow/models: https://github.com/tensorflow/models
slim = tf.contrib.slim
# Checkpoint file for the inception_resnet_v2 model
checkpoint_file = 'inception_resnet_v2/inception_resnet_v2_2016_08_30.ckpt'
# Filenames of the images to classify
sample_images = ['img.jpg', 'ps.jpg']

input_imgs = tf.placeholder("float", [None, image_size, image_size, 3])

# Load the model
sess = tf.Session()
# arg_scope configures ops that share the same scope settings
arg_scope = inception.inception_resnet_v2_arg_scope()

with slim.arg_scope(arg_scope):
    logits, end_points = inception.inception_resnet_v2(input_imgs,
                                                       is_training=False)

saver = tf.train.Saver()
saver.restore(sess, checkpoint_file)

for image in sample_images:
    reimg = Image.open(image).resize((image_size, image_size))
    reimg = np.array(reimg)
    reimg = reimg.reshape(-1, image_size, image_size, 3)

    plt.figure()
    p1 = plt.subplot(121)
Example #16
def export():

    with tf.Graph().as_default():

        # build inference model

        # imagenet labels
        names = imagenet.create_readable_names_for_imagenet_labels()

        names_tensor = tf.constant(names.values())

        names_lookup_table = tf.contrib.lookup.index_to_string_table_from_tensor(
            names_tensor)

        # input transformation
        serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

        # run inference
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            # inception resnet models
            logits, end_points = inception.inception_resnet_v2(
                images, num_classes=NUM_CLASSES + 1, is_training=False)
            # logits = tf.Print(logits, [logits])

        probs = tf.nn.softmax(logits)

        # transform output to topk result
        topk_probs, topk_indices = tf.nn.top_k(probs, NUM_TOP_CLASSES)

        topk_names = names_lookup_table.lookup(tf.to_int64(topk_indices))

        init_fn = slim.assign_from_checkpoint_fn(
            os.path.join(FLAGS.checkpoint_dir,
                         'inception_resnet_v2_2016_08_30.ckpt'),
            slim.get_model_variables('InceptionResnetV2'))

        # sess config
        config = tf.ConfigProto(
            # device_count = {
            #   'GPU': 0
            # },
            gpu_options={
                'allow_growth': 1,
                # 'per_process_gpu_memory_fraction': 0.01
            },
            allow_soft_placement=True,
            log_device_placement=False,
        )

        with tf.Session(config=config) as sess:

            init_fn(sess)

            # init on 2017.10.22
            # note: look into the graphdef for prelogits as image features
            # print('Graph Node Tensor Name:')
            # for node_tensor in tf.get_default_graph().as_graph_def().node:
            #   if str(node_tensor.name).startswith('InceptionResnetV2/Logits'):
            #     print str(node_tensor.name)
            prelogits = sess.graph.get_tensor_by_name(
                'InceptionResnetV2/Logits/Flatten/flatten/Reshape:0')
            # an optional alternative
            # prelogits = end_points['PreLogitsFlatten']

            # export inference model.
            output_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(str(FLAGS.model_version)))
            print 'Exporting trained model to', output_path
            builder = tf.saved_model.builder.SavedModelBuilder(output_path)

            # build the signature_def_map.
            predict_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                jpegs)
            classes_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                topk_names)
            scores_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                topk_probs)
            prelogits_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                prelogits)

            prediction_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={'images': predict_inputs_tensor_info},
                    outputs={
                        'classes': classes_output_tensor_info,
                        'scores': scores_output_tensor_info,
                        'prelogits': prelogits_output_tensor_info
                    },
                    method_name=tf.saved_model.signature_constants.
                    PREDICT_METHOD_NAME))

            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')

            builder.add_meta_graph_and_variables(
                sess, [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    'predict_images': prediction_signature,
                },
                legacy_init_op=legacy_init_op)

            builder.save()

            print 'Successfully exported model to %s' % FLAGS.output_dir
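
To sanity-check the export, a minimal sketch of reloading the SavedModel. load_exported_model is a hypothetical helper; export_path corresponds to the output_path built above.

import tensorflow as tf

def load_exported_model(export_path):
    """Reload the SavedModel written by export() and return a live session."""
    sess = tf.Session(graph=tf.Graph())
    with sess.graph.as_default():
        # Load the graph and variables tagged for serving
        tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], export_path)
    return sess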
Example #17
sorted_inds_list = []
names_list = []
prob_list = []

names_list.append(
    os.listdir(
        "/home/vb/Desktop/inception_resnet_v2/dataset/data_tag1/image/"))
with tf.Graph().as_default():

    image_input = tf.read_file(org_img_path)
    image = tf.image.decode_jpeg(image_input, channels=3)
    processed_image = inception_preprocessing.preprocess_image(
        image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)

    with slim1.arg_scope(inception.inception_resnet_v2_arg_scope()):
        logits, _ = inception.inception_resnet_v2(processed_images,
                                                  num_classes=10,
                                                  is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim1.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'model.ckpt-1'),
        slim1.get_model_variables('InceptionResnetV2'))

    with tf.Session() as sess:
        init_fn(sess)
        np_image, probabilities = sess.run([image, probabilities])
        prob_list.append(probabilities[0, 0:])
        sorted_inds_list.append([
            i[0] for i in sorted(enumerate(-prob_list[0]), key=lambda x: x[1])
        ])
Example #18
def profile(config, dset):
    # Variables and cropping comp. graph
    num_channels = len(config["image_set"]["channels"])
    num_classes = dset.numberOfClasses()
    input_vars = learning.training.input_graph(config)
    images = input_vars["labeled_crops"][0]
    labels = tf.one_hot(input_vars["labeled_crops"][1], num_classes)

    # Setup pretrained model
    crop_shape = input_vars["shapes"]["crops"][0]
    raw_crops = tf.placeholder(tf.float32,
                               shape=(None, crop_shape[0], crop_shape[1],
                                      crop_shape[2]))
    network_input = crop_transform(raw_crops)
    url = config["profiling"]["url"]
    checkpoint = config["profiling"]["checkpoint"]
    if not os.path.isfile(checkpoint):
        dataset_utils.download_and_uncompress_tarball(
            url, os.path.dirname(checkpoint))
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        _, endpoints = inception.inception_resnet_v2(network_input,
                                                     num_classes=1001,
                                                     is_training=False)
    init_fn = slim.assign_from_checkpoint_fn(checkpoint,
                                             slim.get_model_variables())

    # Session configuration
    configuration = tf.ConfigProto()
    configuration.gpu_options.allow_growth = True
    configuration.gpu_options.visible_device_list = config["profiling"]["gpu"]

    sess = tf.Session(config=configuration)
    init_fn(sess)

    def check(meta):
        output_file = config["profiling"]["output_dir"] + "/{}_{}_{}.npz"
        output_file = output_file.format(meta["Metadata_Plate"],
                                         meta["Metadata_Well"],
                                         meta["Metadata_Site"])

        # Check if features were computed before
        if os.path.isfile(output_file):
            print("Already done:", output_file)
            return False
        else:
            return True

    # Function to process a single image
    def extract_features(key, image_array, meta):
        output_file = config["profiling"]["output_dir"] + "/{}_{}_{}.npz"
        output_file = output_file.format(meta["Metadata_Plate"],
                                         meta["Metadata_Well"],
                                         meta["Metadata_Site"])

        # Prepare image and crop locations
        batch_size = config["training"]["minibatch"]
        image_key, image_names = dset.getImagePaths(meta)
        locations = [
            learning.cropping.getLocations(image_key, config, randomize=False)
        ]
        if len(locations[0]) == 0:
            print("Empty locations set:", str(key))
            return
        # Pad last batch with empty locations
        pads = batch_size - len(locations[0]) % batch_size
        zero_pads = np.zeros(shape=(pads, 2), dtype=np.int32)
        pad_data = pandas.DataFrame(columns=locations[0].columns,
                                    data=zero_pads)
        locations[0] = pandas.concat((locations[0], pad_data))

        # Prepare boxes, indices, labels and push the image to the queue
        labels_data = [meta[config["training"]["label_field"]]]
        boxes, box_ind, labels_data = learning.cropping.prepareBoxes(
            locations, labels_data, config)
        images_data = np.reshape(image_array, input_vars["shapes"]["batch"])

        sess.run(
            input_vars["enqueue_op"], {
                input_vars["image_ph"]: images_data,
                input_vars["boxes_ph"]: boxes,
                input_vars["box_ind_ph"]: box_ind,
                input_vars["labels_ph"]: labels_data
            })

        # Collect crops from the queue
        items = sess.run(input_vars["queue"].size())
        #TODO: move the channels to the last axis
        data = np.zeros(shape=(num_channels, len(locations[0]), num_features))
        b = 0
        start = tic()
        while items >= batch_size:
            # Compute features in a batch of crops
            crops = sess.run(images)
            feats = sess.run(endpoints['PreLogitsFlatten'],
                             feed_dict={raw_crops: crops})
            # TODO: move the channels to the last axis using np.moveaxis
            feats = np.reshape(feats, (num_channels, batch_size, num_features))
            data[:, b * batch_size:(b + 1) * batch_size, :] = feats
            items = sess.run(input_vars["queue"].size())
            b += 1

        # Save features
        # TODO: save data with channels in the last axis
        np.savez_compressed(output_file, f=data[:, :-pads, :])
        toc(image_key + " (" + str(data.shape[1] - pads) + " cells)", start)

    dset.scan(extract_features, frame="all", check=check)
    print("Profiling: done")
Example #19
    def __call__(self, Q, K, V, num_heads, training=False, scope=""):
        """
        This module calculates the cosine distance between each of the support set embeddings and the target
        image embeddings.
        :param support_set: The embeddings of the support set images, tensor of shape [sequence_length, batch_size, 64]
        :param input_image: The embedding of the target image, tensor of shape [batch_size, 64]
        :param name: Name of the op to appear on the graph
        :param training: Flag indicating training or evaluation (True/False)
        :return: A tensor with cosine similarities of shape [batch_size, sequence_length, 1]
        """

        d_model = V.get_shape().as_list()[-1]
        # integer division: slim.conv2d needs an int number of output channels
        d_key = d_model // num_heads
        d_value = d_model // num_heads

        batch_norm_params = {
            'decay': 0.9997,
            'epsilon': 0.001,
            'fused': None,  # Use fused batch norm if possible.
        }
        normalizer_fn = slim.batch_norm
        normalizer_params = batch_norm_params

        heads = []
        with slim.arg_scope(
                inception.inception_resnet_v2_arg_scope(
                    use_batch_norm=True, weight_decay=FLAGS.weight_decay)):
            with tf.variable_scope(scope, reuse=self.reuse):
                with slim.arg_scope([slim.batch_norm, slim.dropout],
                                    is_training=training):

                    for HeadIdx in range(num_heads):
                        query = slim.conv2d(Q,
                                            d_key, [1, 1],
                                            scope='QueryRep-' + str(HeadIdx))
                        query = tf.reshape(query, [
                            query.get_shape().as_list()[0], -1,
                            query.get_shape().as_list()[3]
                        ])

                        key = slim.conv2d(K,
                                          d_key, [1, 1],
                                          scope='KeyRep-' + str(HeadIdx))
                        key = tf.reshape(key, [
                            key.get_shape().as_list()[0], -1,
                            key.get_shape().as_list()[3]
                        ])

                        value = slim.conv2d(V,
                                            d_value, [1, 1],
                                            scope='ValueRep-' + str(HeadIdx))
                        value = tf.reshape(value, [
                            value.get_shape().as_list()[0], -1,
                            value.get_shape().as_list()[3]
                        ])

                        head = Attention(query, key, value)
                        heads.append(head)

                    heads = tf.concat(heads, axis=2)
                    # heads = slim.fully_connected(heads, d_model, activation_fn=tf.nn.relu,
                    #                              normalizer_fn=normalizer_fn, normalizer_params=normalizer_params)  # use conv2d?
        return heads
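
The Attention helper called above is not shown in this snippet. Below is a minimal scaled dot-product sketch consistent with how it is invoked (3-D [batch, positions, depth] tensors); it is a hypothetical implementation, not necessarily the author's.

def Attention(query, key, value):
    """softmax(Q K^T / sqrt(d_k)) V over [batch, positions, depth] tensors."""
    d_k = tf.cast(tf.shape(key)[-1], tf.float32)
    # Batched matmul: scores has shape [batch, query_positions, key_positions]
    scores = tf.matmul(query, key, transpose_b=True) / tf.sqrt(d_k)
    weights = tf.nn.softmax(scores)  # softmax over the last (key) axis
    return tf.matmul(weights, value)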
Example #20
def main(_):
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  num_classes = 1001

  # Check which adversarial images exceed max_epsilon
  # get original images
  origin_img_list=np.sort(glob.glob(FLAGS.origin_img_dir+"*.png"));
  origin_imgs=np.zeros((len(origin_img_list),FLAGS.image_height,FLAGS.image_width,3),dtype=float);
  for i in range(len(origin_img_list)):
    origin_imgs[i]=imread(origin_img_list[i],mode='RGB').astype(np.float);
  # get adv images
  adv_img_list=np.sort(glob.glob(FLAGS.input_dir+"*.png"));
  adv_imgs=np.zeros((len(adv_img_list),FLAGS.image_height,FLAGS.image_width,3),dtype=float);
  for i in range(len(adv_img_list)):
    adv_imgs[i]=imread(adv_img_list[i],mode='RGB').astype(np.float);
  epsilon_list=np.linalg.norm(np.reshape(abs(origin_imgs-adv_imgs),[-1,FLAGS.image_height*FLAGS.image_width*3]),ord=np.inf,axis=1);
  #print(epsilon_list);exit(1);
  over_epsilon_list=np.zeros((len(origin_img_list),2),dtype=object);
  cnt=0;
  for i in range(len(origin_img_list)):
    file_name=origin_img_list[i].split("/")[-1];
    file_name=file_name.split(".")[0];
    over_epsilon_list[i,0]=file_name;
    if(epsilon_list[i]>FLAGS.max_epsilon):
      over_epsilon_list[i,1]="1";
      cnt+=1;
  tf.logging.set_verbosity(tf.logging.INFO)

  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)

    if(FLAGS.checkpoint_file_name=="inception_v3.ckpt"):
      with slim.arg_scope(inception.inception_v3_arg_scope()):
        _, end_points = inception.inception_v3(
            x_input, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_v4.ckpt"):
      with slim.arg_scope(inception.inception_v4_arg_scope()):
        _, end_points = inception.inception_v4(
            x_input, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_resnet_v2_2016_08_30.ckpt"):
      with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        _, end_points = inception.inception_resnet_v2(
            x_input, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="resnet_v2_101.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v2.resnet_v2_101(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="resnet_v2_50.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v2.resnet_v2_50(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="resnet_v2_152.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v2.resnet_v2_152(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_v1.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(inception.inception_v1_arg_scope()):
        _, end_points = inception.inception_v1(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_v2.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(inception.inception_v2_arg_scope()):
        _, end_points = inception.inception_v2(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)

    # Resnet v1 and vgg are not working now
    elif(FLAGS.checkpoint_file_name=="vgg_16.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(vgg.vgg_arg_scope()):
        _, end_points = vgg.vgg_16(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['vgg_16/fc8'], 1)+1
    elif(FLAGS.checkpoint_file_name=="vgg_19.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(vgg.vgg_arg_scope()):
        _, end_points = vgg.vgg_19(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['vgg_19/fc8'], 1)+1
    elif(FLAGS.checkpoint_file_name=="resnet_v1_50.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        # use the preprocessed batch, matching the resnet_v1_101/152 branches
        _, end_points = resnet_v1.resnet_v1_50(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)+1
    elif(FLAGS.checkpoint_file_name=="resnet_v1_101.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v1.resnet_v1_101(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)+1
    elif(FLAGS.checkpoint_file_name=="resnet_v1_152.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v1.resnet_v1_152(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)+1
    
    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path+FLAGS.checkpoint_file_name,
        master=FLAGS.master)

    f=open(FLAGS.true_label,"r");
    t_label_list=np.array([i[:-1].split(",") for i in f.readlines()]);
    
    score=0;
    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filenames, images in load_images(FLAGS.input_dir, batch_shape):
          labels = sess.run(predicted_labels, feed_dict={x_input: images})
          for filename, label in zip(filenames, labels):
            f_name=filename.split(".")[0];
            t_label=int(t_label_list[t_label_list[:,0]==f_name,1][0]);
            if(t_label!=label):
              if(over_epsilon_list[over_epsilon_list[:,0]==f_name,1]!="1"):
                score+=1;
            #out_file.write('{0},{1}\n'.format(filename, label))
  print("Over max epsilon#: "+str(cnt));
  print(str(FLAGS.max_epsilon)+" max epsilon Score: "+str(score));
Example #21
def get_inf(img_path):
    checkpoints_dir = ckpt_dir

    FLAGS = tf.app.flags.FLAGS

    # All three aliases refer to the same module; kept to mirror the three graphs below
    slim1 = tf.contrib.slim
    slim2 = tf.contrib.slim
    slim3 = tf.contrib.slim

    image_size = inception.inception_resnet_v2.default_image_size
    f = open(tag_dir, "w")

    org_img_path = img_path

    sorted_inds_list = []
    names_list = []
    prob_list = []

    names_list.append(os.listdir(label_list1))
    with tf.Graph().as_default():
        image_input = tf.read_file(org_img_path)
        image = tf.image.decode_jpeg(image_input, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        with slim1.arg_scope(inception.inception_resnet_v2_arg_scope()):
            logits, _ = inception.inception_resnet_v2(processed_images,
                                                      num_classes=10,
                                                      is_training=False)
        probabilities = tf.nn.softmax(logits)

        init_fn = slim1.assign_from_checkpoint_fn(
            os.path.join(checkpoints_dir, 'model.ckpt-1'),
            slim1.get_model_variables('InceptionResnetV2'))

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            prob_list.append(probabilities[0, 0:])
            sorted_inds_list.append([
                i[0]
                for i in sorted(enumerate(-prob_list[0]), key=lambda x: x[1])
            ])

    names_list.append(os.listdir(label_list2))
    with tf.Graph().as_default():
        image_input = tf.read_file(org_img_path)
        image = tf.image.decode_jpeg(image_input, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        with slim2.arg_scope(inception.inception_resnet_v2_arg_scope()):
            logits, _ = inception.inception_resnet_v2(processed_images,
                                                      num_classes=7,
                                                      is_training=False)
        probabilities = tf.nn.softmax(logits)

        init_fn = slim2.assign_from_checkpoint_fn(
            os.path.join(checkpoints_dir, 'model.ckpt-2'),
            slim2.get_model_variables('InceptionResnetV2'))

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            prob_list.append(probabilities[0, 0:])
            sorted_inds_list.append([
                i[0]
                for i in sorted(enumerate(-prob_list[1]), key=lambda x: x[1])
            ])

    names_list.append(os.listdir(label_list3))
    with tf.Graph().as_default():
        image_input = tf.read_file(org_img_path)
        image = tf.image.decode_jpeg(image_input, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        with slim3.arg_scope(inception.inception_resnet_v2_arg_scope()):
            logits, _ = inception.inception_resnet_v2(processed_images,
                                                      num_classes=4,
                                                      is_training=False)
        probabilities = tf.nn.softmax(logits)

        init_fn = slim3.assign_from_checkpoint_fn(
            os.path.join(checkpoints_dir, 'model.ckpt-3'),
            slim3.get_model_variables('InceptionResnetV2'))

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            prob_list.append(probabilities[0, 0:])
            sorted_inds_list.append([
                i[0]
                for i in sorted(enumerate(-prob_list[2]), key=lambda x: x[1])
            ])

    ret_str = ''
    for n in range(len(names_list)):
        names_list[n].sort()
        index = sorted_inds_list[n][0]
        ret_str += '#%s' % names_list[n][index]

    f.write(ret_str)
    f.close()
Example #22
def extract_feat(frames_name):
    """
    Reference:
        1. Vasili's codes
        2. https://github.com/tensorflow/models/issues/429#issuecomment-277885861
    """

    slim_dir = "/home/xyang/workspace/models/research/slim"
    checkpoints_dir = slim_dir + "/pretrain"
    checkpoints_file = checkpoints_dir + '/inception_resnet_v2_2016_08_30.ckpt'
    batch_size = 256

    sys.path.append(slim_dir)
    from nets import inception
    import tensorflow as tf
    slim = tf.contrib.slim
    image_size = inception.inception_resnet_v2.default_image_size

    feat_conv = []
    feat_fc = []
    probs = []
    with tf.Graph().as_default():
        input_batch = tf.placeholder(dtype=tf.uint8,
                                     shape=(batch_size, 300, 300, 3))
        resized_images = tf.image.resize_images(
            tf.image.convert_image_dtype(input_batch, dtype=tf.float32),
            [image_size, image_size])
        preprocessed_images = tf.multiply(tf.subtract(resized_images, 0.5),
                                          2.0)

        # Create the model, use the default arg scope to configure
        # the batch norm parameters.
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            logits, endpoints = inception.inception_resnet_v2(
                preprocessed_images, is_training=False)
        pre_pool = endpoints['Conv2d_7b_1x1']
        pre_logits_flatten = endpoints['PreLogitsFlatten']
        probabilities = endpoints['Predictions']

        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, checkpoints_file)

            for i in range(0, len(frames_name), batch_size):
                print i, '/', len(frames_name)
                current_batch = np.zeros((batch_size, 300, 300, 3),
                                         dtype=np.uint8)
                for j in range(batch_size):
                    if i + j == len(frames_name):
                        j -= 1
                        break
                    img = Image.open(frames_name[i + j]).convert('RGB').resize(
                        (300, 300))
                    current_batch[j] = np.array(img)

                temp_conv, temp_fc, prob = sess.run(
                    [pre_pool, pre_logits_flatten, probabilities],
                    feed_dict={input_batch: current_batch})
                # feat_conv.append(temp_conv.astype('float32'))
                feat_fc.append(temp_fc[:j + 1].astype('float32'))
                probs.append(prob[:j + 1].astype('float32'))

    return np.concatenate(feat_fc, axis=0), np.concatenate(probs, axis=0)
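
A minimal usage sketch for extract_feat; the frame directory and output filename are illustrative assumptions.

import glob
import numpy as np

frames = sorted(glob.glob('/path/to/frames/*.jpg'))  # hypothetical path
feat_fc, probs = extract_feat(frames)
# One 1536-d PreLogits vector and one class distribution per frame
np.savez_compressed('frame_features.npz', fc=feat_fc, probs=probs)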
Example #23
def inference(images, isTrainingRun=True, isTrainingFromScratch=True):

    images = tf.to_float(images)

    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        # The first return value is logits, not probabilities; only end_points
        # is used below. is_training is pinned to False here (pass isTrainingRun
        # instead to train the backbone's batch norm)
        logits, end_points = inception.inception_resnet_v2(
            images, 1001, is_training=False)

    if isTrainingFromScratch:
        variables_to_restore = slim.get_variables_to_restore(exclude=[
            'InceptionResnetV2/AuxLogits', 'InceptionResnetV2/Logits'
        ])
        backbone_init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_file, variables_to_restore)

    with tf.name_scope('COMMON_ENTRANCE'):
        with tf.variable_scope('COMMON_ENTRANCE'):

            CME_GCN_1 = LayerBlocks.global_conv_module(
                end_points['Conv2d_7b_1x1'],
                end_points['Conv2d_7b_1x1'].get_shape()[3],
                name='CME_GCN_1',
                k=7)
            CME_Atrous_1 = LayerBlocks.atrousPyramid_small(
                end_points['Conv2d_7b_1x1'],
                name="CME_Atrous_1",
                allow_r16=False,
                training=isTrainingRun)

            Mixed_6a_conv = tf.contrib.layers.conv2d(end_points['Mixed_6a'],
                                                     164, (2, 2),
                                                     padding='VALID',
                                                     activation_fn=tf.nn.relu)
            Mixed_6a_dil = tf.contrib.layers.conv2d(Mixed_6a_conv,
                                                    64,
                                                    kernel_size=3,
                                                    rate=2,
                                                    padding='SAME')

            Mixed_5b_conv = tf.contrib.layers.conv2d(end_points['Mixed_5b'],
                                                     156, (4, 4),
                                                     padding='VALID',
                                                     activation_fn=tf.nn.relu)
            Mixed_5b_dil = tf.contrib.layers.conv2d(Mixed_5b_conv,
                                                    64,
                                                    kernel_size=3,
                                                    rate=2,
                                                    padding='SAME')

            CME_1 = tf.concat([CME_Atrous_1, CME_GCN_1], 3)
            CME_2 = tf.concat([Mixed_6a_conv, Mixed_6a_dil], 3)
            CME_3 = tf.concat([Mixed_5b_conv, Mixed_5b_dil], 3)

            print("end_points['Conv2d_7b_1x1']  : " +
                  str(end_points['Conv2d_7b_1x1'].get_shape()[:]))
            print("end_points['Mixed_6a']  : " +
                  str(end_points['Mixed_6a'].get_shape()[:]))
            print("end_points['Mixed_5b']  : " +
                  str(end_points['Mixed_5b'].get_shape()[:]))
            print("CME_GCN_1  : " + str(CME_GCN_1.get_shape()[:]))
            print("CME_Atrous_1  : " + str(CME_Atrous_1.get_shape()[:]))
            print("Mixed_6a_conv  : " + str(Mixed_6a_conv.get_shape()[:]))
            print("Mixed_6a_dil  : " + str(Mixed_6a_dil.get_shape()[:]))
            print("Mixed_5b_conv : " + str(Mixed_5b_conv.get_shape()[:]))
            print("Mixed_5b_dil  : " + str(Mixed_5b_dil.get_shape()[:]))
            print("CME_1  : " + str(CME_1.get_shape()[:]))
            print("CME_2  : " + str(CME_2.get_shape()[:]))
            print("CME_3  : " + str(CME_3.get_shape()[:]))

            CME_1 = tf.layers.conv2d(CME_1,
                                     1024, (1, 1),
                                     activation=tf.nn.relu,
                                     padding='VALID')
            print("CME_1 red. : " + str(CME_1.get_shape()[:]))

    with tf.name_scope('NEUTRAL_Branch'):
        with tf.variable_scope('NEUTRAL_Branch_1'):
            n_up_1 = LayerBlocks.up_project(CME_1,
                                            size=[3, 3, 1024, 96],
                                            id='n_2x_a',
                                            stride=1,
                                            training=isTrainingRun)
            n_up_2 = LayerBlocks.up_project(n_up_1,
                                            size=[3, 3, 96, 64],
                                            id='n_2x_b',
                                            stride=1,
                                            training=isTrainingRun)
            n_up_3 = LayerBlocks.up_project(n_up_2,
                                            size=[3, 3, 64, 32],
                                            id='n_2x_c',
                                            stride=1,
                                            training=isTrainingRun)
            n_up_4 = LayerBlocks.up_project(n_up_3,
                                            size=[3, 3, 32, 8],
                                            id='n_2x_d',
                                            stride=1,
                                            training=isTrainingRun)
            n_up_5 = LayerBlocks.up_project(n_up_4,
                                            size=[3, 3, 8, 4],
                                            id='n_2x_e',
                                            stride=1,
                                            training=isTrainingRun)

            print("n_up_1  : " + str(n_up_1.get_shape()[:]))
            print("n_up_2 : " + str(n_up_2.get_shape()[:]))
            print("n_up_3 : " + str(n_up_3.get_shape()[:]))
            print("n_up_4 : " + str(n_up_4.get_shape()[:]))
            print("n_up_5 : " + str(n_up_5.get_shape()[:]))

        with tf.variable_scope('NEUTRAL_Branch_mid'):
            n_up_1_mid = LayerBlocks.up_project(CME_2,
                                                size=[3, 3, 164, 16],
                                                id='n_2x_a_mid',
                                                stride=1,
                                                training=isTrainingRun)
            n_up_2_mid = LayerBlocks.up_project(n_up_1_mid,
                                                size=[3, 3, 16, 16],
                                                id='n_2x_b_mid',
                                                stride=1,
                                                training=isTrainingRun)
            n_up_3_mid = LayerBlocks.up_project(n_up_2_mid,
                                                size=[3, 3, 16, 8],
                                                id='n_2x_c_mid',
                                                stride=1,
                                                training=isTrainingRun)
            n_up_4_mid = LayerBlocks.up_project(n_up_3_mid,
                                                size=[3, 3, 8, 4],
                                                id='n_2x_d_mid',
                                                stride=1,
                                                training=isTrainingRun)

            print("n_up_1_mid  : " + str(n_up_1_mid.get_shape()[:]))
            print("n_up_2_mid  : " + str(n_up_2_mid.get_shape()[:]))
            print("n_up_3_mid  : " + str(n_up_3_mid.get_shape()[:]))
            print("n_up_4_mid  : " + str(n_up_4_mid.get_shape()[:]))

        with tf.variable_scope('NEUTRAL_Branch_high'):
            n_up_1_high = LayerBlocks.up_project(CME_3,
                                                 size=[3, 3, 156, 16],
                                                 id='n_2x_a_high',
                                                 stride=1,
                                                 training=isTrainingRun)
            n_up_2_high = LayerBlocks.up_project(n_up_1_high,
                                                 size=[3, 3, 16, 8],
                                                 id='n_2x_b_high',
                                                 stride=1,
                                                 training=isTrainingRun)
            n_up_3_high = LayerBlocks.up_project(n_up_2_high,
                                                 size=[3, 3, 8, 4],
                                                 id='n_2x_c_high',
                                                 stride=1,
                                                 training=isTrainingRun)

            print("n_up_1_high   : " + str(n_up_1_high.get_shape()[:]))
            print("n_up_2_high  : " + str(n_up_2_high.get_shape()[:]))
            print("n_up_3_high  : " + str(n_up_3_high.get_shape()[:]))

    with tf.name_scope('DEPTH_Branch'):
        with tf.variable_scope('DEPTH_Branch'):
            # Decoder pattern used throughout this branch: refine boundaries,
            # upsample 2x with a transposed convolution, then fuse with the
            # matching encoder stage by channel concatenation.
            s_up_0 = LayerBlocks.boundary_refine(CME_1,
                                                 name='BR_s_up_0',
                                                 training=isTrainingRun)
            s_up_0 = slim.convolution2d_transpose(s_up_0,
                                                  384, [3, 3], [2, 2],
                                                  activation_fn=tf.nn.relu)
            sem_mix_0 = tf.concat([s_up_0, CME_2], 3)

            s_up_1 = LayerBlocks.boundary_refine(sem_mix_0,
                                                 name='BR_s_up_1',
                                                 training=isTrainingRun)
            s_up_1 = slim.convolution2d_transpose(s_up_1,
                                                  64, [3, 3], [2, 2],
                                                  activation_fn=tf.nn.relu)

            # Dilated convolutions widen the receptive field (rates 2, 4, 8
            # at successive scales) without further downsampling.
            dil_s_up_1 = tf.contrib.layers.conv2d(s_up_1,
                                                  48,
                                                  kernel_size=3,
                                                  rate=2)
            sem_mix_1 = tf.concat([s_up_1, n_up_2, dil_s_up_1, CME_3], 3)

            s_up_2 = LayerBlocks.boundary_refine(sem_mix_1,
                                                 name='BR_s_up_2',
                                                 training=isTrainingRun)
            s_up_2 = slim.convolution2d_transpose(s_up_2,
                                                  64, [3, 3], [2, 2],
                                                  activation_fn=tf.nn.relu)

            dil_s_up_2 = tf.contrib.layers.conv2d(s_up_2,
                                                  32,
                                                  kernel_size=3,
                                                  rate=4)
            sem_mix_2 = tf.concat([s_up_2, n_up_3, dil_s_up_2], 3)

            s_up_3 = LayerBlocks.boundary_refine(sem_mix_2,
                                                 name='BR_s_up_3',
                                                 training=isTrainingRun)
            s_up_3 = slim.convolution2d_transpose(s_up_3,
                                                  64, [3, 3], [2, 2],
                                                  activation_fn=tf.nn.relu)

            dil_s_up_3 = tf.contrib.layers.conv2d(s_up_3,
                                                  32,
                                                  kernel_size=3,
                                                  rate=8)
            sem_mix_3 = tf.concat([s_up_3, n_up_4, dil_s_up_3], 3)

            s_up_4 = LayerBlocks.boundary_refine(sem_mix_3,
                                                 name='BR_s_up_4',
                                                 training=isTrainingRun)
            s_up_4 = slim.convolution2d_transpose(s_up_4,
                                                  64, [3, 3], [2, 2],
                                                  activation_fn=tf.nn.relu)

            dil_s_up_4 = tf.contrib.layers.conv2d(s_up_4,
                                                  32,
                                                  kernel_size=3,
                                                  rate=8)
            sem_mix_4 = tf.concat(
                [s_up_4, n_up_5, n_up_4_mid, n_up_3_high, dil_s_up_4], 3)
            print("sem_mix_4  : " + str(sem_mix_4.get_shape()[:]))

            # A 1x1 convolution collapses the fused feature stack to a single
            # depth channel; ReLU keeps the regressed depth non-negative.
            depth_endpoint = tf.layers.conv2d(sem_mix_4,
                                              1, (1, 1),
                                              activation=tf.nn.relu,
                                              padding='VALID')
            # Refine boundaries once more and clamp the prediction at 16896,
            # the upper bound this model places on encoded depth values.
            depth_endpoint = tf.minimum(
                LayerBlocks.boundary_refine(depth_endpoint,
                                            name='depth_endpoint',
                                            training=isTrainingRun), 16896)

            print("s_up_1  : " + str(s_up_1.get_shape()[:]))
            print("s_up_2 : " + str(s_up_2.get_shape()[:]))
            print("s_up_3 : " + str(s_up_3.get_shape()[:]))
            print("s_up_4 : " + str(s_up_4.get_shape()[:]))
            print("depth_endpoint: " + str(depth_endpoint.get_shape()[:]))

    if isTrainingFromScratch:
        return backbone_init_fn, depth_endpoint

    return depth_endpoint
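
The function hands back the raw depth map (and, when training from scratch, the backbone initializer as well). The original training loss is not shown; below is a minimal sketch of how the endpoint could be attached to a regression loss, assuming a berHu (reverse Huber) loss, a hypothetical build_network wrapper standing in for the function above, and an Adam optimizer.

import tensorflow as tf

def berhu_loss(depth_pred, depth_gt):
    # Reverse Huber loss, a common choice for depth regression
    # (an assumption; the original loss is not part of this snippet).
    abs_err = tf.abs(depth_pred - depth_gt)
    c = 0.2 * tf.reduce_max(abs_err)
    l2_branch = (tf.square(abs_err) + tf.square(c)) / (2.0 * c + 1e-8)
    return tf.reduce_mean(tf.where(abs_err <= c, abs_err, l2_branch))

# Hypothetical wiring; build_network stands in for the model function above.
# depth_gt = tf.placeholder(tf.float32, shape=(None, height, width, 1))
# init_fn, depth_pred = build_network(images, isTrainingRun=True,
#                                     isTrainingFromScratch=True)
# loss = berhu_loss(depth_pred, depth_gt)
# train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)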
Example #24
    from scipy import misc
    img = misc.imread('lena_299.png')  # deprecated in SciPy >= 1.2; imageio.imread replaces it
    print(img.shape)

    inputs = np.ones((1, 299, 299, 3), dtype=np.float32)
    inputs[0, 0, 0, 0] = -1
    #inputs[0] = img
    print(inputs.mean())
    print(inputs.std())
    # tf.pack was removed in TF 1.0; converting the array directly is equivalent
    inputs = tf.convert_to_tensor(inputs)
    # tensorflow normalization
    # https://github.com/tensorflow/models/blob/master/slim/preprocessing/inception_preprocessing.py#L273
    #inputs = tf.subtract(inputs, 0.5)
    #inputs = tf.multiply(inputs, 2.0)

    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        logits, _ = inception.inception_resnet_v2(inputs,
                                                  num_classes=1001,
                                                  is_training=False)

    with tf.Session() as sess:

        # Initialize model
        init_fn = slim.assign_from_checkpoint_fn(
            os.path.join(checkpoints_dir,
                         'inception_resnet_v2_2016_08_30.ckpt'),
            slim.get_model_variables('InceptionResnetV2'))

        init_fn(sess)

        # Display model variables
        for var in slim.get_model_variables():
            print(var.op.name, var.get_shape())
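        # Possible continuation (an assumption; the original snippet ends here):
        # run the restored network and report the top-1 class index.
        probabilities = sess.run(tf.nn.softmax(logits))
        top1 = probabilities[0].argmax()
        print('top-1 class index: %d, probability: %.4f'
              % (top1, probabilities[0][top1]))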
Example #25
def main(_):
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    ensemble_type = FLAGS.ensemble_type

    tf.logging.set_verbosity(tf.logging.INFO)

    checkpoint_path_list = [
        FLAGS.checkpoint_path_inception_v1, FLAGS.checkpoint_path_inception_v2,
        FLAGS.checkpoint_path_inception_v3, FLAGS.checkpoint_path_inception_v4,
        FLAGS.checkpoint_path_inception_resnet_v2,
        FLAGS.checkpoint_path_resnet_v1_101,
        FLAGS.checkpoint_path_resnet_v1_152,
        FLAGS.checkpoint_path_resnet_v2_101,
        FLAGS.checkpoint_path_resnet_v2_152, FLAGS.checkpoint_path_vgg_16,
        FLAGS.checkpoint_path_vgg_19
    ]
    normalization_method = [
        'default', 'default', 'default', 'default', 'global', 'caffe_rgb',
        'caffe_rgb', 'default', 'default', 'caffe_rgb', 'caffe_rgb'
    ]
    pred_list = []
    for idx, checkpoint_path in enumerate(checkpoint_path_list, 1):
        with tf.Graph().as_default():
            # When a single-model test index (1..11) is given, run only that
            # model; test 20 additionally skips inception_v3 (idx 3).
            if int(FLAGS.test_idx) == 20 and idx == 3:
                continue
            if int(FLAGS.test_idx) in range(1, 12) and int(FLAGS.test_idx) != idx:
                continue
            # Prepare graph
            _x_input = tf.placeholder(tf.float32, shape=batch_shape)
            if idx in [1, 2, 6, 7, 10, 11]:
                # These backbones expect 224x224 inputs; resize before feeding.
                x_input = tf.image.resize_images(_x_input, [224, 224])
            else:
                x_input = _x_input

            x_input = image_normalize(x_input, normalization_method[idx - 1])

            if idx == 1:
                with slim.arg_scope(inception.inception_v1_arg_scope()):
                    _, end_points = inception.inception_v1(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 2:
                with slim.arg_scope(inception.inception_v2_arg_scope()):
                    _, end_points = inception.inception_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 3:
                with slim.arg_scope(inception.inception_v3_arg_scope()):
                    _, end_points = inception.inception_v3(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 4:
                with slim.arg_scope(inception.inception_v4_arg_scope()):
                    _, end_points = inception.inception_v4(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 5:
                with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
                    _, end_points = inception.inception_resnet_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 6:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_101(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 7:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_152(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 8:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_101(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 9:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_152(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 10:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_16(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_16/fc8'])
            elif idx == 11:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_19(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_19/fc8'])

            #end_points = tf.reduce_mean([end_points1['Predictions'], end_points2['Predictions'], end_points3['Predictions'], end_points4['Predictions']], axis=0)

            #predicted_labels = tf.argmax(end_points, 1)

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=checkpoint_path,
                master=FLAGS.master)

            pred_in = []
            filenames_list = []
            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for filenames, images in load_images(FLAGS.input_dir,
                                                     batch_shape):
                    #if idx in [1,2,6,7,10,11]:
                    #  # 16x299x299x3
                    #  images = zoom(images, (1, 0.7491638795986622, 0.7491638795986622, 1), order=2)
                    filenames_list.extend(filenames)
                    end_points_dict = sess.run(end_points,
                                               feed_dict={_x_input: images})
                    if idx in [6, 7, 10, 11]:
                        # resnet_v1 and vgg output 1000 classes; prepend a zero
                        # background column to align with the 1001-class models.
                        end_points_dict['predictions'] = np.concatenate(
                            [np.zeros([FLAGS.batch_size, 1]),
                             end_points_dict['predictions'].reshape(-1, 1000)],
                            axis=1)
                    try:
                        pred_in.extend(end_points_dict['Predictions'].reshape(
                            -1, num_classes))
                    except KeyError:
                        pred_in.extend(end_points_dict['predictions'].reshape(
                            -1, num_classes))
            pred_list.append(pred_in)

    if ensemble_type == 'mean':
        # model_num x batch x class_num ==(np.mean)==> batch x class_num
        # ==(np.argmax)==> batch
        pred = np.mean(pred_list, axis=0)
        labels = np.argmax(pred, axis=1)
    elif ensemble_type == 'vote':
        # model_num x batch x class_num ==(np.argmax)==> model_num x batch
        # ==(np.median)==> batch; cast so the labels are written as integers
        pred = np.argmax(pred_list, axis=2)
        labels = np.median(pred, axis=0).astype(int)
    with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filename, label in zip(filenames_list, labels):
            out_file.write('{0},{1}\n'.format(filename, label))
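
The helper image_normalize used above is not defined in this example. Below is a minimal sketch under the assumption that the loader yields raw [0, 255] RGB pixels, 'default' means Inception-style scaling to [-1, 1], 'global' means per-image standardization, and 'caffe_rgb' means subtracting the Caffe ImageNet channel means:

import tensorflow as tf

# Assumed Caffe ImageNet channel means, RGB order.
_CAFFE_RGB_MEANS = [123.68, 116.779, 103.939]

def image_normalize(images, method):
    # Sketch of the undefined helper; method names follow the list above.
    if method == 'default':
        # Inception-style: map [0, 255] to [-1, 1].
        return (images / 255.0 - 0.5) * 2.0
    elif method == 'global':
        # Per-image standardization (zero mean, unit variance).
        return tf.map_fn(tf.image.per_image_standardization, images)
    elif method == 'caffe_rgb':
        # Subtract the channel means, keep the [0, 255] scale.
        return images - tf.constant(_CAFFE_RGB_MEANS, dtype=tf.float32)
    raise ValueError('unknown normalization method: %s' % method)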
Example #26
def main(opt):
    # Limit GPU memory use: reserve a small fraction and allow growth on demand.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.per_process_gpu_memory_fraction = 0.1
    config.gpu_options.allow_growth = True

    jpg_path = opt.image_path
    images_lists = []
    for subdir, dirs, files in os.walk(jpg_path):
        for f in files:
            f = f.strip()
            # Join against the directory being walked so images inside
            # subdirectories resolve to valid paths.
            images_lists.append(os.path.join(subdir, f))

    att_dir = os.path.join(opt.out_dir, opt.att_dir)
    fc_dir = os.path.join(opt.out_dir, opt.fc_dir)

    if not tf.gfile.Exists(fc_dir):
        tf.gfile.MakeDirs(fc_dir)
    if not tf.gfile.Exists(att_dir):
        tf.gfile.MakeDirs(att_dir)

    checkpoints_dir = opt.model_path

    slim = tf.contrib.slim
    image_size = inception.inception_resnet_v2.default_image_size

    tf_image = tf.placeholder(tf.string, None)
    image = tf.image.decode_jpeg(tf_image, channels=3)
    processed_image = inception_preprocessing.preprocess_image(
        image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)

    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        # The slim model returns (logits, end_points); take the final 8x8x1536
        # convolutional map as the attention features and the pooled pre-logits
        # vector as the fc features (standard slim endpoint names).
        _, end_points = inception.inception_resnet_v2(
            processed_images, num_classes=1001, is_training=False)
        tf_feats_att = end_points['Conv2d_7b_1x1']
        tf_feats_fc = end_points['PreLogitsFlatten']

    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_resnet_v2_2016_08_30.ckpt'),
        slim.get_model_variables('InceptionResnetV2'))

    with tf.Session(config=config) as sess:
        init_fn(sess)

        for idx, image_path in enumerate(images_lists):
            image_name = os.path.basename(image_path)
            image_id = get_image_id(image_name)

            time_start = time.time()

            # Read the raw JPEG bytes directly instead of routing through urllib.
            with tf.gfile.GFile(image_path, 'rb') as jpg_file:
                image_string = jpg_file.read()

            feat_conv, feat_fc = sess.run([tf_feats_att, tf_feats_fc],
                                          feed_dict={tf_image: image_string})
            feat_conv = np.squeeze(feat_conv)
            feat_fc = np.squeeze(feat_fc)

            np.save(os.path.join(fc_dir, str(image_id)), feat_fc)
            np.savez_compressed(os.path.join(att_dir, str(image_id)),
                                feat=feat_conv)

            time_end = time.time()
            print('{}  {}  {:.5f}'.format(idx, image_name,
                                          time_end - time_start))
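
The saved features can be read back with plain NumPy. A brief usage sketch (np.save appends '.npy' and np.savez_compressed appends '.npz'; the image id below is a placeholder, not taken from the original code):

import os
import numpy as np

image_id = '123456'  # placeholder; real ids come from get_image_id above
feat_fc = np.load(os.path.join(fc_dir, image_id + '.npy'))
feat_att = np.load(os.path.join(att_dir, image_id + '.npz'))['feat']
print(feat_fc.shape, feat_att.shape)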