Example 1
def test_network(img_path, label_path):
    x = tf.placeholder("float", shape=[None, 224, 224, 3], name='input')
    xscale = tf.subtract(tf.multiply(tf.div(x, 255.0), 2), 1.0)
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        logits, end_points = inception_v1.inception_v1(xscale,
                                                       num_classes=1001,
                                                       dropout_keep_prob=1.0,
                                                       is_training=False)
    predictions = tf.nn.softmax(logits, name="output")
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "inception_v1.ckpt")
        #ckpt_info("inception_v1.ckpt")

        #var_list = tf.global_variables()
        #print(var_list)
        constant_graph = tf.get_default_graph().as_graph_def()
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, constant_graph, ['output'])
        with tf.gfile.GFile("inception_v1.pb", "wb") as f:
            f.write(output_graph_def.SerializeToString())

        imgfloat = tf.cast(tf.image.decode_jpeg(tf.read_file(img_path),
                                                channels=3),
                           dtype=tf.float32)
        img = tf.image.resize_images(tf.expand_dims(imgfloat, 0), (224, 224),
                                     method=0)
        predictions_val = predictions.eval(feed_dict={x: img.eval()})
        predicted_classes = np.argmax(predictions_val, axis=1)

        with open(label_path) as f:
            labels = f.readlines()
        print(predicted_classes, labels[predicted_classes[0]])
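Once the graph has been frozen to inception_v1.pb as above, it can be reloaded for inference without the checkpoint. A minimal TF 1.x sketch (not part of the original example), assuming the tensor names 'input:0' and 'output:0' that follow from the placeholder and softmax names used here:

import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("inception_v1.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    inp = graph.get_tensor_by_name("input:0")
    out = graph.get_tensor_by_name("output:0")
    with tf.Session(graph=graph) as sess:
        # dummy batch; real images go in as raw pixels in [0, 255],
        # since the [-1, 1] scaling is baked into the frozen graph
        probs = sess.run(out, feed_dict={inp: np.zeros((1, 224, 224, 3), np.float32)})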
Example 2
def test_network(img_path, label_path):
    x = tf.placeholder("float", shape=[None, 224, 224, 3], name='input')
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        logits, end_points = inception_v1.inception_v1(x,
                                                       num_classes=1001,
                                                       dropout_keep_prob=1.0,
                                                       is_training=False)
    predictions = end_points["Predictions"]
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        saver.restore(sess, "inception_v1.ckpt")

        imgfloat = tf.cast(tf.image.decode_jpeg(tf.read_file(img_path),
                                                channels=3),
                           dtype=tf.float32)
        img = tf.subtract(
            tf.multiply(
                tf.div(
                    tf.image.resize_images(tf.expand_dims(imgfloat, 0),
                                           (224, 224),
                                           method=0), 255.0), 2), 1.0)
        predictions_val = predictions.eval(feed_dict={x: img.eval()})
        predicted_classes = np.argmax(predictions_val, axis=1)

        with open(label_path) as f:
            labels = f.readlines()
        print(predicted_classes, labels[predicted_classes[0]])
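The tf.image pipeline above is evaluated with img.eval() on every call; the same preprocessing can be done once in NumPy instead. A sketch assuming Pillow is available (not part of the original example):

import numpy as np
from PIL import Image

img = Image.open(img_path).convert("RGB").resize((224, 224), Image.BILINEAR)
arr = np.asarray(img, dtype=np.float32)[None, ...]  # shape (1, 224, 224, 3)
arr = arr / 255.0 * 2.0 - 1.0                        # same [-1, 1] scaling as the tf ops above
predictions_val = predictions.eval(feed_dict={x: arr})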
Example 3
 def testModelHasExpectedNumberOfParameters(self):
   batch_size = 5
   height, width = 224, 224
   inputs = random_ops.random_uniform((batch_size, height, width, 3))
   with arg_scope(inception_v1.inception_v1_arg_scope()):
     inception_v1.inception_v1_base(inputs)
   total_params, _ = model_analyzer.analyze_vars(
       variables_lib.get_model_variables())
   self.assertAlmostEqual(5607184, total_params)
Example 4
 def testModelHasExpectedNumberOfParameters(self):
     batch_size = 5
     height, width = 224, 224
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     with arg_scope(inception_v1.inception_v1_arg_scope()):
         inception_v1.inception_v1_base(inputs)
     total_params, _ = model_analyzer.analyze_vars(
         variables_lib.get_model_variables())
     self.assertAlmostEqual(5607184, total_params)
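model_analyzer.analyze_vars reports the total number of elements across the model variables; a roughly equivalent manual count, as a sketch using the same variables_lib module as the test above:

import numpy as np

total_params = sum(
    int(np.prod(v.get_shape().as_list()))
    for v in variables_lib.get_model_variables())
assert total_params == 5607184  # expected parameter count for inception_v1_base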
Example 5
def create_inception(image_input,
                     is_training,
                     scope="",
                     inception_out="Mixed_5c",
                     resnet_version=50,
                     cbn=None):
    """
    Create a resnet by overidding the classic batchnorm with conditional batchnorm
    :param image_input: placeholder with image
    :param is_training: are you using the resnet at training_time or test_time
    :param scope: tensorflow scope
    :param resnet_version: 50/101/152
    :param cbn: the cbn factory
    :return: the resnet output
    """

    # assert False, "\n" \
    #               "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
    #               "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
    # arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)

    # print("--- 1")
    arg_sc = inception_v1.inception_v1_arg_scope()

    # Pick the correct version of the resnet
    # if resnet_version == 50:
    #     current_resnet = resnet_v1.resnet_v1_50
    # elif resnet_version == 101:
    #     current_resnet = resnet_v1.resnet_v1_101
    # elif resnet_version == 152:
    #     current_resnet = resnet_v1.resnet_v1_152
    # else:
    #     raise ValueError("Unsupported resnet version")

    # inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out)
    # print("--- 2")
    inception_scope = inception_out
    # print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope))
    # print("--- 3")
    with slim.arg_scope(arg_sc):
        net, end_points = inception_v1.inception_v1(
            image_input, 1001)  # 1001 classes: 1000 ImageNet classes + 1 background class

    print("Net = ", net)
    # print("--- 4")

    if len(scope) > 0 and not scope.endswith("/"):
        scope += "/"
    # print("--- 5")
    # print(end_points)
    print(" Batch ", inception_scope)

    out = end_points[scope + inception_scope]
    print("-- out Use: {},output = {}".format(inception_scope, out))

    return out, end_points
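A hypothetical usage sketch of the factory above; the placeholder name and shape are assumptions, and with scope="" the Mixed_5c end point is looked up directly:

image_ph = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="image")
features, end_points = create_inception(image_ph,
                                         is_training=False,
                                         scope="",
                                         inception_out="Mixed_5c")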
Example 6
def test_network():
    x = tf.placeholder("float", shape=[None, 224, 224, 3], name='input')
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        logits, end_points = inception_v1.inception_v1(x,
                                                       num_classes=2,
                                                       dropout_keep_prob=1.0,
                                                       is_training=False)
    predictions = end_points["Predictions"]

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "train.ckpt")

        path = './picture/'
        w = 224
        h = 224
        c = 3
        cate = [path + d for d in os.listdir(path) if os.path.isdir(path + d)]
        imgs = []
        labels = []
        for idx, folder in enumerate(cate):
            for im in glob.glob(folder + '/*.jpg'):
                print('reading the image: %s' % (im))
                img = io.imread(im)
                img = transform.resize(img, (w, h, c))
                imgs.append(img)
                labels.append([1, 0] if idx == 0 else [0, 1])
                break  # only take the first image of the first class folder (quick check)
            break

        data = np.asarray(imgs, np.float32)
        label = np.asarray(labels, np.int32)

        predictions_val = predictions.eval(feed_dict={x: data})

        print(predictions_val)
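Note that skimage's transform.resize returns floats in [0, 1]; if train.ckpt was trained on Inception-style inputs scaled to [-1, 1], the batch would need the same scaling before being fed, e.g.:

data = data * 2.0 - 1.0  # only if the checkpoint expects inputs in [-1, 1]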
Example 7
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)

    return slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
        variables_to_restore)


x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
y_ = tf.placeholder(tf.int32, shape=[None], name='y_')
net_in = tl.layers.InputLayer(x, name='input_layer')
with slim.arg_scope(inception_v1_arg_scope()):
    network = tl.layers.SlimNetsLayer(
        layer=net_in,
        slim_layer=inception_v1,
        slim_args={
            'num_classes': 19,
            'is_training': True,
            'dropout_keep_prob': 0.5,
            'prediction_fn': slim.softmax,
            'spatial_squeeze': True,
            'reuse': None,
            'scope': 'InceptionV1'
        },
        name='InceptionV1'  # <-- the name should match the scope name in the ckpt model
    )
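slim.assign_from_checkpoint_fn returns a callable that restores the selected variables when handed a session. A minimal usage sketch; the enclosing function name get_init_fn is assumed here (only its tail is shown above):

init_fn = get_init_fn()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)  # loads the InceptionV1 weights from inception_v1.ckpt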
Example 8
 def build_graph(self):
     with arg_scope(inception_v1.inception_v1_arg_scope()):
         #with variable_scope.variable_scope(None, 'InceptionV1', [self.image, 21], reuse=None) as scope:
         with arg_scope([layers_lib.batch_norm, layers_lib.dropout],
                        is_training=True):
             logit, endpoints = inception_v1.inception_v1_base(self.image)
             net = endpoints['Mixed_3c']
             self.feature_map.append(
                 conv_layer(net, [3, 3, 480, 4 * (class_num + 4)],
                            [1, 1, 1, 1], 'FeatureMap_1', 'SSD',
                            self.is_training, self.reuse))
             net = endpoints['Mixed_4f']
             net = conv_layer(net, [3, 3, 832, 1024], [1, 1, 1, 1], 'conv6',
                              'SSD', self.is_training, self.reuse)
             net = conv_layer(net, [1, 1, 1024, 1024], [1, 1, 1, 1],
                              'conv7', 'SSD', self.is_training, self.reuse)
             self.feature_map.append(
                 conv_layer(net, [3, 3, 1024, 6 * (class_num + 4)],
                            [1, 1, 1, 1], 'FeatureMap_2', 'SSD',
                            self.is_training, self.reuse))
             net = conv_layer(net, [1, 1, 1024, 256], [1, 1, 1, 1],
                              'conv8_1', 'SSD', self.is_training,
                              self.reuse)
             net = conv_layer(net, [3, 3, 256, 512], [1, 2, 2, 1],
                              'conv8_2', 'SSD', self.is_training,
                              self.reuse)
             self.feature_map.append(
                 conv_layer(net, [3, 3, 512, 6 * (class_num + 4)],
                            [1, 1, 1, 1], 'FeatureMap_3', 'SSD',
                            self.is_training, self.reuse))
             net = conv_layer(net, [1, 1, 512, 128], [1, 1, 1, 1],
                              'conv9_1', 'SSD', self.is_training,
                              self.reuse)
             net = conv_layer(net, [3, 3, 128, 256], [1, 2, 2, 1],
                              'conv9_2', 'SSD', self.is_training,
                              self.reuse)
             self.feature_map.append(
                 conv_layer(net, [3, 3, 256, 6 * (class_num + 4)],
                            [1, 1, 1, 1], 'FeatureMap_4', 'SSD',
                            self.is_training, self.reuse))
             net = conv_layer(net, [1, 1, 256, 128], [1, 1, 1, 1],
                              'conv10_1', 'SSD', self.is_training,
                              self.reuse)
             net = conv_layer(net, [3, 3, 128, 256], [1, 1, 1, 1],
                              'conv10_2',
                              'SSD',
                              self.is_training,
                              self.reuse,
                              padding='VALID')
             self.feature_map.append(
                 conv_layer(net, [3, 3, 256, 6 * (class_num + 4)],
                            [1, 1, 1, 1], 'FeatureMap_5', 'SSD',
                            self.is_training, self.reuse))
             net = conv_layer(net, [1, 1, 256, 128], [1, 1, 1, 1],
                              'conv11_1', 'SSD', self.is_training,
                              self.reuse)
             net = conv_layer(net, [3, 3, 128, 256], [1, 1, 1, 1],
                              'conv11_2',
                              'SSD',
                              self.is_training,
                              self.reuse,
                              padding='VALID')
             self.feature_map.append(
                 conv_layer(net, [1, 1, 256, 6 * (class_num + 4)],
                            [1, 1, 1, 1], 'FeatureMap_6', 'SSD',
                            self.is_training, self.reuse))
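The conv_layer helper is project-specific and not shown in the snippet; a hypothetical minimal version matching the call sites above (kernel shape as [h, w, in, out], conv + batch norm + ReLU) could look like this, though the real implementation may differ:

def conv_layer(inputs, kernel_shape, strides, name, scope,
               is_training, reuse, padding='SAME'):
    # Hypothetical reconstruction, not the original project code.
    with tf.variable_scope(scope, reuse=reuse):
        with tf.variable_scope(name):
            w = tf.get_variable('weights', kernel_shape,
                                initializer=tf.glorot_uniform_initializer())
            net = tf.nn.conv2d(inputs, w, strides=strides, padding=padding)
            net = tf.layers.batch_normalization(net, training=is_training)
            return tf.nn.relu(net)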
Example 9
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)

    return slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
        variables_to_restore)

x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')
net_in = tl.layers.InputLayer(x, name='input_layer')
with slim.arg_scope(inception_v1_arg_scope()):
    network = tl.layers.SlimNetsLayer(
        layer=net_in,
        slim_layer=inception_v1,
        slim_args={
            'num_classes': 19,
            'is_training': True,
            'dropout_keep_prob': 0.5,
            'prediction_fn': slim.softmax,
            'spatial_squeeze': True,
            'reuse': None,
            'scope': 'InceptionV1'
        },
        name='InceptionV1'  # <-- the name should match the scope name in the ckpt model
    )

y = network.outputs
probs = tf.nn.softmax(y)
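To actually fine-tune the 19-class head, the usual next step is a loss on the logits y against the integer labels y_ plus a train op; a sketch, not part of the original snippet:

cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_))
correct = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct, tf.float32))
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)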