def testModelVariables(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     inputs = tf.random_uniform((batch_size, height, width, 3))
     vgg.vgg_19(inputs, num_classes)
     expected_names = [
         'vgg_19/conv1/conv1_1/weights',
         'vgg_19/conv1/conv1_1/biases',
         'vgg_19/conv1/conv1_2/weights',
         'vgg_19/conv1/conv1_2/biases',
         'vgg_19/conv2/conv2_1/weights',
         'vgg_19/conv2/conv2_1/biases',
         'vgg_19/conv2/conv2_2/weights',
         'vgg_19/conv2/conv2_2/biases',
         'vgg_19/conv3/conv3_1/weights',
         'vgg_19/conv3/conv3_1/biases',
         'vgg_19/conv3/conv3_2/weights',
         'vgg_19/conv3/conv3_2/biases',
         'vgg_19/conv3/conv3_3/weights',
         'vgg_19/conv3/conv3_3/biases',
         'vgg_19/conv3/conv3_4/weights',
         'vgg_19/conv3/conv3_4/biases',
         'vgg_19/conv4/conv4_1/weights',
         'vgg_19/conv4/conv4_1/biases',
         'vgg_19/conv4/conv4_2/weights',
         'vgg_19/conv4/conv4_2/biases',
         'vgg_19/conv4/conv4_3/weights',
         'vgg_19/conv4/conv4_3/biases',
         'vgg_19/conv4/conv4_4/weights',
         'vgg_19/conv4/conv4_4/biases',
         'vgg_19/conv5/conv5_1/weights',
         'vgg_19/conv5/conv5_1/biases',
         'vgg_19/conv5/conv5_2/weights',
         'vgg_19/conv5/conv5_2/biases',
         'vgg_19/conv5/conv5_3/weights',
         'vgg_19/conv5/conv5_3/biases',
         'vgg_19/conv5/conv5_4/weights',
         'vgg_19/conv5/conv5_4/biases',
         'vgg_19/fc6/weights',
         'vgg_19/fc6/biases',
         'vgg_19/fc7/weights',
         'vgg_19/fc7/biases',
         'vgg_19/fc8/weights',
         'vgg_19/fc8/biases',
     ]
     model_variables = [v.op.name for v in slim.get_model_variables()]
     self.assertSetEqual(set(model_variables), set(expected_names))
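The variable names asserted above are also the names under which the published ImageNet weights are stored, so they can be restored directly. A minimal sketch (the checkpoint path 'vgg_19.ckpt' is a placeholder, not part of the original example):

import tensorflow as tf
from nets import vgg

slim = tf.contrib.slim

# Build the graph once so the model variables exist under the 'vgg_19' scope.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(vgg.vgg_arg_scope()):
    vgg.vgg_19(images, num_classes=1000, is_training=False)

# Restore only the variables listed above from the pretrained checkpoint.
init_fn = slim.assign_from_checkpoint_fn('vgg_19.ckpt',
                                         slim.get_model_variables('vgg_19'))
with tf.Session() as sess:
    init_fn(sess)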
Example #3
 def testEndPoints(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     inputs = tf.random_uniform((batch_size, height, width, 3))
     _, end_points = vgg.vgg_19(inputs, num_classes)
     expected_names = [
         'vgg_19/conv1/conv1_1',
         'vgg_19/conv1/conv1_2',
         'vgg_19/pool1',
         'vgg_19/conv2/conv2_1',
         'vgg_19/conv2/conv2_2',
         'vgg_19/pool2',
         'vgg_19/conv3/conv3_1',
         'vgg_19/conv3/conv3_2',
         'vgg_19/conv3/conv3_3',
         'vgg_19/conv3/conv3_4',
         'vgg_19/pool3',
         'vgg_19/conv4/conv4_1',
         'vgg_19/conv4/conv4_2',
         'vgg_19/conv4/conv4_3',
         'vgg_19/conv4/conv4_4',
         'vgg_19/pool4',
         'vgg_19/conv5/conv5_1',
         'vgg_19/conv5/conv5_2',
         'vgg_19/conv5/conv5_3',
         'vgg_19/conv5/conv5_4',
         'vgg_19/pool5',
         'vgg_19/fc6',
         'vgg_19/fc7',
         'vgg_19/fc8'
     ]
     self.assertSetEqual(set(end_points.keys()), set(expected_names))
    def build_cnn(self):
        with tf.contrib.slim.arg_scope(vgg.vgg_arg_scope()):
            _, end_points = vgg.vgg_19(inputs=self.images)
            net = end_points['vgg_19/fc7']  # shape = [batch size, 1, 1, 4096]

        with tf.variable_scope('mlc'):
            net = tf.contrib.slim.dropout(net,
                                          self.dropout_keep_prob,
                                          is_training=self.is_training,
                                          scope='dropout7')
            net = tf.contrib.slim.conv2d(
                net,
                1024, [1, 1],
                activation_fn=tf.nn.relu,
                normalizer_fn=None,
                scope='fc8')  # shape = [batch size, 1, 1, 1024]
            net = tf.contrib.slim.dropout(net,
                                          self.dropout_keep_prob,
                                          is_training=self.is_training,
                                          scope='dropout8')
            net = tf.contrib.slim.conv2d(
                net,
                self.label_num, [1, 1],
                activation_fn=None,
                normalizer_fn=None,
                scope='fc9')  # shape = [batch size, 1, 1, 15]
            logits = tf.squeeze(net, [1, 2])  # shape = [batch size, 15]

        self.logits = logits
        self.predictions = tf.nn.sigmoid(logits)
        self.conv5_3_feats = end_points['vgg_19/conv5/conv5_3']
        print('cnn built.')
Example #6
 def testNoClasses(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = None
   with self.test_session():
     inputs = tf.random_uniform((batch_size, height, width, 3))
     net, end_points = vgg.vgg_19(inputs, num_classes)
     expected_names = [
         'vgg_19/conv1/conv1_1',
         'vgg_19/conv1/conv1_2',
         'vgg_19/pool1',
         'vgg_19/conv2/conv2_1',
         'vgg_19/conv2/conv2_2',
         'vgg_19/pool2',
         'vgg_19/conv3/conv3_1',
         'vgg_19/conv3/conv3_2',
         'vgg_19/conv3/conv3_3',
         'vgg_19/conv3/conv3_4',
         'vgg_19/pool3',
         'vgg_19/conv4/conv4_1',
         'vgg_19/conv4/conv4_2',
         'vgg_19/conv4/conv4_3',
         'vgg_19/conv4/conv4_4',
         'vgg_19/pool4',
         'vgg_19/conv5/conv5_1',
         'vgg_19/conv5/conv5_2',
         'vgg_19/conv5/conv5_3',
         'vgg_19/conv5/conv5_4',
         'vgg_19/pool5',
         'vgg_19/fc6',
         'vgg_19/fc7',
     ]
     self.assertSetEqual(set(end_points.keys()), set(expected_names))
     self.assertTrue(net.op.name.startswith('vgg_19/fc7'))
def test_vgg_19(img_dir):
    """
    Test VGG-19 with a single image.
    :param img_dir: Path of the image to be classified
    :return: classification result and probability of a single image
    """
    img = cv2.imread(img_dir)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (224, 224))
    img = img.reshape((1, 224, 224, 3))

    tf.reset_default_graph()
    inputs = tf.placeholder(name='input_images',
                            shape=[None, 224, 224, 3],
                            dtype=tf.float32)
    with slim.arg_scope(vgg_arg_scope()):
        _, _ = vgg_19(inputs, is_training=False)

    with tf.Session() as sess:
        tf.train.Saver().restore(sess, './models/vgg_19.ckpt')
        inputs = sess.graph.get_tensor_by_name('input_images:0')
        outputs = sess.graph.get_tensor_by_name('vgg_19/fc8/squeezed:0')
        pred = tf.argmax(tf.nn.softmax(outputs), axis=1)[0]
        prob = tf.reduce_max(tf.nn.softmax(outputs), axis=1)[0]

        pred, prob = sess.run([pred, prob], feed_dict={inputs: img})
        name = label_dict[pred + 1]

    print('Result of VGG-19:', name, prob)
    return name, prob
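Note that test_vgg_19 above feeds raw RGB pixel values. If the checkpoint is the published ImageNet vgg_19, it was trained on mean-subtracted inputs, so results are usually better after removing the per-channel means used by slim's vgg_preprocessing. A small sketch of that step, applied to img before the feed (the constants are the standard VGG RGB means):

import numpy as np

_R_MEAN, _G_MEAN, _B_MEAN = 123.68, 116.78, 103.94

def subtract_vgg_means(img):
    # img: RGB array of shape [1, 224, 224, 3]
    return img.astype(np.float32) - np.array(
        [_R_MEAN, _G_MEAN, _B_MEAN], dtype=np.float32)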
Example #9
def build_train_op(image_tensor, label_tensor, is_training):
    vgg_argscope = vgg_arg_scope(weight_decay=FLAGS.weight_decay)
    global_step = tf.get_variable(name="global_step",
                                  shape=[],
                                  dtype=tf.int32,
                                  trainable=False)
    with slim.arg_scope(vgg_argscope):
        logits, end_points = vgg_19(image_tensor,
                                    is_training=is_training,
                                    num_classes=10)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                       labels=label_tensor))
    accuracy = tf.reduce_sum(
        tf.cast(
            tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), label_tensor),
            tf.int32))
    end_points['loss'], end_points['accuracy'] = loss, accuracy
    if is_training:
        optimizer = tf.train.AdadeltaOptimizer(
            learning_rate=FLAGS.learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)
        return train_op, end_points
    else:
        return None, end_points
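A sketch of how build_train_op might be driven; the image/label tensors are assumed to come from an input pipeline and the step count is illustrative:

# Hypothetical driver for build_train_op.
train_op, end_points = build_train_op(image_tensor, label_tensor,
                                      is_training=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        _, loss_value = sess.run([train_op, end_points['loss']])
        if step % 100 == 0:
            print('step %d, loss %.4f' % (step, loss_value))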
 def testForward(self):
   batch_size = 1
   height, width = 224, 224
   with self.test_session() as sess:
     inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(inputs)
     sess.run(tf.global_variables_initializer())
     output = sess.run(logits)
     self.assertTrue(output.any())
Example #11
def extract_features(input_file_path):
  image_names = os.listdir(input_file_path)
  print('len image list', len(image_names))

  # Build absolute paths for every image in the input directory.
  image_list = [os.path.join(input_file_path, img) for img in image_names]
  print(image_list[:10])
  images = image_list

  slim = tf.contrib.slim

  # Get the image size that vgg_19 accepts
  image_size = vgg.vgg_19.default_image_size
  out_features_list = []

  total_count = 0
  batch = 0
  while total_count < len(images):
    batch += 1
    print('batch number', batch)

    preprocessed_images = list()

    with tf.Graph().as_default():
      # This allows for default parameters
      with slim.arg_scope(vgg.vgg_arg_scope()):

        # Read and preprocess up to 10 images per batch.
        for c in range(10):
          if total_count >= len(images):
            break
          print(total_count)
          print(images[total_count])
          image = tf.read_file(image_list[total_count])
          decoded_image = tf.image.decode_jpeg(image, channels=3)
          preprocessed_images.append(
              preproc.preprocess_image(decoded_image, image_size, image_size,
                                       is_training=True))
          total_count += 1

        stacked_images = tf.stack(preprocessed_images)
        print('stacked images', stacked_images)
        _, end_points = vgg.vgg_19(stacked_images, is_training=False)

      with tf.Session() as sess:
        print('inside tf sess')
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        # To obtain actual VGG activations, restore a vgg_19 checkpoint here
        # (saver.restore) and run a tensor from end_points instead of the
        # preprocessed input batch.
        out_features = sess.run(stacked_images)

    out_features_list.extend(out_features)

    print('accumulated features array')
    print(np.array(out_features_list).shape)
Example #13
def vgg_19(inputs):
    with slim.arg_scope(vgg.vgg_arg_scope()):
        logits, end_points = vgg.vgg_19(
            inputs,
            num_classes=None,
            is_training=False,
            fc_conv_padding='VALID',
            global_pool=True)
    return logits, end_points, vgg_19_ckpt_path
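The returned checkpoint path can then be used to load the pretrained weights before extracting the pooled features, e.g. (a sketch; images is a placeholder and image_batch a preprocessed NumPy batch supplied by the caller, while vgg_19_ckpt_path comes from the source module):

net, end_points, ckpt_path = vgg_19(images)
saver = tf.train.Saver(slim.get_model_variables('vgg_19'))
with tf.Session() as sess:
    saver.restore(sess, ckpt_path)
    # Global-pooled fc7 features, shape [batch, 1, 1, 4096].
    features = sess.run(net, feed_dict={images: image_batch})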
 def testFullyConvolutional(self):
   batch_size = 1
   height, width = 256, 256
   num_classes = 1000
   with self.test_session():
     inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
     self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, 2, 2, num_classes])
 def testBuild(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(inputs, num_classes)
     self.assertEquals(logits.op.name, 'vgg_19/fc8/squeezed')
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
Example #18
def vgg_19(inputs, is_training, opts):
    with slim.arg_scope(vgg.vgg_arg_scope(weight_decay=opts.weight_decay)):
        return vgg.vgg_19(
            inputs,
            num_classes=opts.num_classes,
            dropout_keep_prob=opts.dropout_keep_prob,
            spatial_squeeze=opts.spatial_squeeze,
            is_training=is_training,
            fc_conv_padding='VALID',
            global_pool=opts.global_pool)
Example #19
 def create_network(self):
     with tf.contrib.slim.arg_scope(vgg_arg_scope()):
         logits, end_points = vgg_19(self.img,
                                     num_classes=self.nb_class,
                                     is_training=self.is_training,
                                     fc_conv_padding='SAME',
                                     global_pool=True)
     self.logits = logits
     self.probabilities = tf.nn.sigmoid(self.logits)
     self.predictions = tf.cast(
         self.probabilities >= self.prediction_threshold, tf.float32)
Example #20
 def testEvaluation(self):
   batch_size = 2
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     eval_inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     predictions = tf.argmax(logits, 1)
     self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
Example #21
def eval(params):
    batch_size = params['batch_size']
    num_examples = len(params['test_files'][0])
    with tf.Graph().as_default():
        batch = dut.distorted_inputs(params,is_training=is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_19(batch[0], num_classes=params['n_output'], is_training=is_training)

        init_fn=ut.get_init_fn(slim,params)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']

        with tf.Session(config=config) as sess:
            # sess.run(tf.initialize_all_variables())
            sess.run(tf.initialize_local_variables())
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))

            init_fn(sess)
            num_iter = int(math.ceil(num_examples / batch_size))
            print('%s: Testing started.' % (datetime.now()))

            step = 0
            loss_lst=[]
            run_lst=[]
            run_lst.append(logits)
            [run_lst.append(lst) for lst in batch[1:len(batch)]]

            while step < num_iter and not coord.should_stop():
                try:
                    batch_res= sess.run(run_lst)
                except tf.errors.OutOfRangeError:
                    print ('Testing finished....%d'%step)
                    break
                if(params['write_est']==True):
                    ut.write_est(params,batch_res)
                est=batch_res[0]
                gt=batch_res[1]
                loss= ut.get_loss(params,gt,est)
                loss_lst.append(loss)
                s ='VAL --> batch %i/%i | error %f'%(step,num_iter,loss)
                ut.log_write(s,params)
                # joint_list=['/'.join(p1.split('/')[0:-1]).replace('joints','img').replace('.cdf','')+'/frame_'+(p1.split('/')[-1].replace('.txt','')).zfill(5)+'.png' for p1 in image_names]
                # print ('List equality check:')
                # print len(label_names) == len(set(label_names))
                # print sum(joint_list==label_names)==(len(est))
                # print(len(label_names))
                step += 1
            coord.request_stop()
            coord.join(threads)
            return np.mean(loss_lst)
Example #23
    def build_cnn(self):
        _, end_points = vgg.vgg_19(self.images,
                                   num_classes=1000,
                                   is_training=self.is_training)

        visual_feats = end_points[
            'vgg_19/conv5/conv5_4']  # [batch_size, 14, 14, 512]
        self.visual_feats = tf.reshape(visual_feats,
                                       [self.batch_size, 196, 512])

        print('cnn built.')
Example #24
 def testTrainEvalWithReuse(self):
   train_batch_size = 2
   eval_batch_size = 1
   train_height, train_width = 224, 224
   eval_height, eval_width = 256, 256
   num_classes = 1000
   with self.test_session():
     train_inputs = tf.random_uniform(
         (train_batch_size, train_height, train_width, 3))
     logits, _ = vgg.vgg_19(train_inputs)
     self.assertListEqual(logits.get_shape().as_list(),
                          [train_batch_size, num_classes])
     tf.get_variable_scope().reuse_variables()
     eval_inputs = tf.random_uniform(
         (eval_batch_size, eval_height, eval_width, 3))
     logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
                            spatial_squeeze=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [eval_batch_size, 2, 2, num_classes])
     logits = tf.reduce_mean(logits, [1, 2])
     predictions = tf.argmax(logits, 1)
     self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example #26
def run_steps(params, epoch_counter):
    with tf.Graph().as_default():
        num_examples = len(params['training_files'][0])
        number_of_steps = int(math.ceil(
            num_examples / params['batch_size'])) - 1
        number_of_steps = number_of_steps * (epoch_counter + 1)
        tf.logging.set_verbosity(tf.logging.INFO)
        batch = dut.distorted_inputs(params, is_training=is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, endpoint = vgg.vgg_19(batch[0],
                                          num_classes=params['n_output'],
                                          is_training=is_training)

        # # Create the model:
        # with slim.arg_scope(inception.inception_v2_arg_scope()):
        #     logits, _ = inception.inception_v2(batch[0], num_classes=params['n_output'], is_training=is_training)

        err = tf.sub(logits, batch[1])
        losses = tf.reduce_mean(tf.reduce_sum(tf.square(err), 1))
        reg_loss = slim.losses.get_total_loss()
        total_loss = losses + reg_loss
        tf.scalar_summary('losses/total_loss', total_loss)
        tf.scalar_summary('losses/losses', losses)
        tf.scalar_summary('losses/reg_loss', reg_loss)
        summary_writer = tf.train.SummaryWriter(params["sm"])

        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=params['lr'])

        train_op = slim.learning.create_train_op(total_loss,
                                                 optimizer,
                                                 summarize_gradients=True)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params[
            'per_process_gpu_memory_fraction']
        # Run the training:
        final_loss = learn.train(
            loss=losses,
            logits=logits,
            batch=batch,
            endpoint=endpoint,
            train_op=train_op,
            logdir=params["cp_file"],
            init_fn=ut.get_init_fn(slim, params),
            number_of_steps=number_of_steps,
            summary_writer=summary_writer,
            session_config=config,
        )
    return final_loss
Example #27
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    height, width = 224, 224
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    net, end_points  = vgg.vgg_19(inputs, is_training = False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split())
    run_model(net_outputs, argv[1])
Example #28
def get_vgg_model(x, is_training):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(
                            FLAGS.WEIGHT_DECAY),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d], padding='SAME'):
            logits, _ = vgg.vgg_19(x,
                                   num_classes=FLAGS.NUM_CLASSES,
                                   is_training=is_training,
                                   dropout_keep_prob=FLAGS.KEEP_PROB,
                                   spatial_squeeze=True,
                                   scope='vgg_19',
                                   fc_conv_padding='VALID',
                                   global_pool=True)
    return logits
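A sketch of wiring the returned logits into a softmax cross-entropy loss (labels is assumed to be an int32 tensor of class ids; FLAGS come from the surrounding project):

logits = get_vgg_model(x, is_training=True)
cross_entropy = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))
# The l2 regularizers set in the arg_scope populate this collection.
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = cross_entropy + tf.add_n(reg_losses)
train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss)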
Example #29
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    height, width = 224, 224
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    inputs = tf.identity(inputs, "input_node")
    net, end_points  = vgg.vgg_19(inputs, is_training = False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
    run_model(net_outputs, argv[1], 'vgg_19', argv[3] == 'True')
Example #30
 def __call__(self, x_input):
     """Constructs model and return probabilities for given input."""
     reuse = True if self.built else None
     x_input = image_normalize(x_input, normalization_method[10])
     x_input = tf.image.resize_images(x_input, [224, 224])
     with slim.arg_scope(vgg.vgg_arg_scope()):
         _, end_points = vgg.vgg_19(x_input,
                                    num_classes=1000,
                                    is_training=False)
     end_points['predictions'] = tf.nn.softmax(end_points['vgg_19/fc8'])
      end_points['predictions'] = tf.concat(
          [tf.zeros([tf.shape(x_input)[0], 1]),
           tf.reshape(end_points['predictions'], [-1, 1000])],
          axis=1)
     self.built = True
     output = end_points['predictions']
     return output
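The wrapper above is meant to be called like a function on a batch of images; a sketch of typical use (the class name ModelWrapper and the input size are assumptions):

model = ModelWrapper()           # the class defining __call__ above
x = tf.placeholder(tf.float32, [None, 299, 299, 3])
probs = model(x)                 # [batch, 1001] after the zero column is prepended
top1 = tf.argmax(probs, axis=1)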
Example #31
def extract_feature_vgg_19(checkpoints_dir, input_image, layer):
    image_size = vgg.vgg_19.default_image_size
    processed_image = vgg_preprocessing.preprocess_image(input_image,
                                                         image_size,
                                                         image_size,
                                                         is_training=False)

    processed_images = tf.expand_dims(processed_image, 0)

    with slim.arg_scope(vgg.vgg_arg_scope()):
        _, model_var = vgg.vgg_19(processed_images,
                                       num_classes=1000,
                                       is_training=False)

    init_fn = slim.assign_from_checkpoint_fn(os.path.join(checkpoints_dir, 'vgg_19.ckpt'),
                                             slim.get_model_variables('vgg_19'))

    with tf.Session() as sess:
        init_fn(sess)
        features = sess.run(model_var)
        fea=features['vgg_19/'+layer]

    return fea
Example #32
def main(_):
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    ensemble_type = FLAGS.ensemble_type

    tf.logging.set_verbosity(tf.logging.INFO)

    checkpoint_path_list = [
        FLAGS.checkpoint_path_inception_v1, FLAGS.checkpoint_path_inception_v2,
        FLAGS.checkpoint_path_inception_v3, FLAGS.checkpoint_path_inception_v4,
        FLAGS.checkpoint_path_inception_resnet_v2,
        FLAGS.checkpoint_path_resnet_v1_101,
        FLAGS.checkpoint_path_resnet_v1_152,
        FLAGS.checkpoint_path_resnet_v2_101,
        FLAGS.checkpoint_path_resnet_v2_152, FLAGS.checkpoint_path_vgg_16,
        FLAGS.checkpoint_path_vgg_19
    ]
    normalization_method = [
        'default', 'default', 'default', 'default', 'global', 'caffe_rgb',
        'caffe_rgb', 'default', 'default', 'caffe_rgb', 'caffe_rgb'
    ]
    pred_list = []
    for idx, checkpoint_path in enumerate(checkpoint_path_list, 1):
        with tf.Graph().as_default():
            if int(FLAGS.test_idx) == 20 and idx in [3]:
                continue
            if int(FLAGS.test_idx) in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
                                       ] and int(FLAGS.test_idx) != idx:
                continue
            # Prepare graph
            if idx in [1, 2, 6, 7, 10, 11]:
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = tf.image.resize_images(_x_input, [224, 224])
            else:
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = _x_input

            x_input = image_normalize(x_input, normalization_method[idx - 1])

            if idx == 1:
                with slim.arg_scope(inception.inception_v1_arg_scope()):
                    _, end_points = inception.inception_v1(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 2:
                with slim.arg_scope(inception.inception_v2_arg_scope()):
                    _, end_points = inception.inception_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 3:
                with slim.arg_scope(inception.inception_v3_arg_scope()):
                    _, end_points = inception.inception_v3(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 4:
                with slim.arg_scope(inception.inception_v4_arg_scope()):
                    _, end_points = inception.inception_v4(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 5:
                with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
                    _, end_points = inception.inception_resnet_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 6:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_101(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 7:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_152(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 8:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_101(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 9:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_152(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 10:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_16(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_16/fc8'])
            elif idx == 11:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_19(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_19/fc8'])

            #end_points = tf.reduce_mean([end_points1['Predictions'], end_points2['Predictions'], end_points3['Predictions'], end_points4['Predictions']], axis=0)

            #predicted_labels = tf.argmax(end_points, 1)

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=checkpoint_path,
                master=FLAGS.master)

            pred_in = []
            filenames_list = []
            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for filenames, images in load_images(FLAGS.input_dir,
                                                     batch_shape):
                    #if idx in [1,2,6,7,10,11]:
                    #  # 16x299x299x3
                    #  images = zoom(images, (1, 0.7491638795986622, 0.7491638795986622, 1), order=2)
                    filenames_list.extend(filenames)
                    end_points_dict = sess.run(end_points,
                                               feed_dict={_x_input: images})
                    if idx in [6, 7, 10, 11]:
                        end_points_dict['predictions'] = \
                                      np.concatenate([np.zeros([FLAGS.batch_size, 1]),
                                                      np.array(end_points_dict['predictions'].reshape(-1, 1000))],
                                                      axis=1)
                    try:
                        pred_in.extend(end_points_dict['Predictions'].reshape(
                            -1, num_classes))
                    except KeyError:
                        pred_in.extend(end_points_dict['predictions'].reshape(
                            -1, num_classes))
            pred_list.append(pred_in)

    if ensemble_type == 'mean':
        pred = np.mean(pred_list, axis=0)
        labels = np.argmax(
            pred, axis=1
        )  # model_num X batch X class_num ==(np.mean)==> batch X class_num ==(np.argmax)==> batch
    elif ensemble_type == 'vote':
        pred = np.argmax(
            pred_list, axis=2
        )  # model_num X batch X class_num ==(np.mean)==> batch X class_num ==(np.argmax)==> batch
        labels = np.median(pred, axis=0)
    with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filename, label in zip(filenames_list, labels):
            out_file.write('{0},{1}\n'.format(filename, label))





input_imgs = tf.placeholder("float", [None,None,3])
# Subtract the per-channel pixel means from every pixel
processed_image = _mean_image_subtraction(input_imgs,
                                          [_R_MEAN, _G_MEAN, _B_MEAN])

input_image = tf.expand_dims(processed_image, 0)
#print(input_image.shape)
with slim.arg_scope(vgg.vgg_arg_scope()):  # spatial_squeeze controls whether unnecessary spatial dimensions of the result are squeezed out
    logits, _ = vgg.vgg_19(input_image,
                           num_classes=1000,
                           is_training=False,
                           spatial_squeeze=False)


pred = tf.argmax(logits, dimension=3)

init_fn = slim.assign_from_checkpoint_fn(
    os.path.join(checkpoints_dir, 'vgg_19.ckpt'),
    slim.get_model_variables('vgg_19'))

with tf.Session() as sess:
    init_fn(sess)
    for image in sample_images:
        reimg = Image.open(image)
        plt.suptitle("Original image", fontsize=14, fontweight='bold')
        plt.imshow(reimg)  # display the image
    def __init__(self, num_classes, train_layers=None, weights_path='DEFAULT'):
        """Create the graph of the vgg19 model.
        """

        # Parse input arguments into class variables
        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = "./pre_trained_models/vgg_19.ckpt"
        else:
            self.WEIGHTS_PATH = weights_path
        self.train_layers = train_layers

        with tf.variable_scope("input"):
            self.image_size = vgg.vgg_19.default_image_size
            self.x_input = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, 3],
                name="x_input")
            self.y_input = tf.placeholder(tf.float32, [None, num_classes],
                                          name="y_input")
            self.learning_rate = tf.placeholder(tf.float32,
                                                name="learning_rate")
            self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")

        # train
        with arg_scope(vgg.vgg_arg_scope()):
            self.logits, _ = vgg.vgg_19(self.x_input,
                                        num_classes=num_classes,
                                        is_training=True,
                                        reuse=tf.AUTO_REUSE,
                                        dropout_keep_prob=self.keep_prob)

        # validation
        with arg_scope(vgg.vgg_arg_scope()):
            self.logits_val, _ = vgg.vgg_19(self.x_input,
                                            num_classes=num_classes,
                                            is_training=False,
                                            reuse=tf.AUTO_REUSE,
                                            dropout_keep_prob=self.keep_prob)

        with tf.name_scope("loss"):
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=self.logits, labels=self.y_input))
            self.loss_val = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=self.logits_val, labels=self.y_input))

        with tf.name_scope("train"):

            self.global_step = tf.Variable(0,
                                           name="global_step",
                                           trainable=False)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            var_list = [
                v for v in tf.trainable_variables()
                if v.name.split('/')[-2] in train_layers
                or v.name.split('/')[-3] in train_layers
            ]
            gradients = tf.gradients(self.loss, var_list)
            self.grads_and_vars = list(zip(gradients, var_list))
            optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)

            with tf.control_dependencies(update_ops):
                self.train_op = optimizer.apply_gradients(
                    grads_and_vars=self.grads_and_vars,
                    global_step=self.global_step)

        with tf.name_scope("probability"):
            self.probability = tf.nn.softmax(self.logits_val,
                                             name="probability")

        with tf.name_scope("prediction"):
            self.prediction = tf.argmax(self.logits_val, 1, name="prediction")

        with tf.name_scope("accuracy"):
            correct_prediction = tf.equal(self.prediction,
                                          tf.argmax(self.y_input, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                   "float"),
                                           name="accuracy")
Example #35
def main(_):
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  num_classes = 1001

  # max_epsilon over checking
  # get original images
  origin_img_list=np.sort(glob.glob(FLAGS.origin_img_dir+"*.png"));
  origin_imgs=np.zeros((len(origin_img_list),FLAGS.image_height,FLAGS.image_width,3),dtype=float);
  for i in range(len(origin_img_list)):
    origin_imgs[i]=imread(origin_img_list[i],mode='RGB').astype(np.float);
  # get adv images
  adv_img_list=np.sort(glob.glob(FLAGS.input_dir+"*.png"));
  adv_imgs=np.zeros((len(adv_img_list),FLAGS.image_height,FLAGS.image_width,3),dtype=float);
  for i in range(len(adv_img_list)):
    adv_imgs[i]=imread(adv_img_list[i],mode='RGB').astype(np.float);
  epsilon_list=np.linalg.norm(np.reshape(abs(origin_imgs-adv_imgs),[-1,FLAGS.image_height*FLAGS.image_width*3]),ord=np.inf,axis=1);
  #print(epsilon_list);exit(1);
  over_epsilon_list=np.zeros((len(origin_img_list),2),dtype=object);
  cnt=0;
  for i in range(len(origin_img_list)):
    file_name=origin_img_list[i].split("/")[-1];
    file_name=file_name.split(".")[0];
    over_epsilon_list[i,0]=file_name;
    if(epsilon_list[i]>FLAGS.max_epsilon):
      over_epsilon_list[i,1]="1";
      cnt+=1;
  tf.logging.set_verbosity(tf.logging.INFO)

  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)

    if(FLAGS.checkpoint_file_name=="inception_v3.ckpt"):
      with slim.arg_scope(inception.inception_v3_arg_scope()):
        _, end_points = inception.inception_v3(
            x_input, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_v4.ckpt"):
      with slim.arg_scope(inception.inception_v4_arg_scope()):
        _, end_points = inception.inception_v4(
            x_input, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_resnet_v2_2016_08_30.ckpt"):
      with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        _, end_points = inception.inception_resnet_v2(
            x_input, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="resnet_v2_101.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v2.resnet_v2_101(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="resnet_v2_50.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v2.resnet_v2_50(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="resnet_v2_152.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v2.resnet_v2_152(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_v1.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(inception.inception_v1_arg_scope()):
        _, end_points = inception.inception_v1(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)
    elif(FLAGS.checkpoint_file_name=="inception_v2.ckpt"):
      x_input2 = tf.image.resize_bilinear(x_input,[224,224],align_corners=False);
      with slim.arg_scope(inception.inception_v2_arg_scope()):
        _, end_points = inception.inception_v2(
            x_input2, num_classes=num_classes, is_training=False)
      predicted_labels = tf.argmax(end_points['Predictions'], 1)

    # Resnet v1 and vgg are not working now
    elif(FLAGS.checkpoint_file_name=="vgg_16.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(vgg.vgg_arg_scope()):
        _, end_points = vgg.vgg_16(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['vgg_16/fc8'], 1)+1
    elif(FLAGS.checkpoint_file_name=="vgg_19.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(vgg.vgg_arg_scope()):
        _, end_points = vgg.vgg_19(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['vgg_19/fc8'], 1)+1
    elif(FLAGS.checkpoint_file_name=="resnet_v1_50.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v1.resnet_v1_50(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)+1
    elif(FLAGS.checkpoint_file_name=="resnet_v1_101.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v1.resnet_v1_101(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)+1
    elif(FLAGS.checkpoint_file_name=="resnet_v1_152.ckpt"):
      x_input_list=tf.unstack(x_input,FLAGS.batch_size,0);
      for i in range(FLAGS.batch_size):
        x_input_list[i]=vgg_preprocessing.preprocess_image(x_input_list[i],224,224);
      x_input2=tf.stack(x_input_list,0);
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = resnet_v1.resnet_v1_152(
            x_input2, num_classes=num_classes-1, is_training=False)
      predicted_labels = tf.argmax(end_points['predictions'], 1)+1
    
    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path+FLAGS.checkpoint_file_name,
        master=FLAGS.master)

    f=open(FLAGS.true_label,"r");
    t_label_list=np.array([i[:-1].split(",") for i in f.readlines()]);
    
    score=0;
    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filenames, images in load_images(FLAGS.input_dir, batch_shape):
          labels = sess.run(predicted_labels, feed_dict={x_input: images})
          for filename, label in zip(filenames, labels):
            f_name=filename.split(".")[0];
            t_label=int(t_label_list[t_label_list[:,0]==f_name,1][0]);
            if(t_label!=label):
              if(over_epsilon_list[over_epsilon_list[:,0]==f_name,1]!="1"):
                score+=1;
            #out_file.write('{0},{1}\n'.format(filename, label))
  print("Over max epsilon#: "+str(cnt));
  print(str(FLAGS.max_epsilon)+" max epsilon Score: "+str(score));


resnetimg = gen(x_smalls2)
result = (resnetimg + 1) * 127.5
gen_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

y_pred = tf.maximum(result, 0)
y_pred = tf.minimum(y_pred, 255)

dbatch = tf.concat([images, result], 0)
rgbmean = rgbmeanfun(dbatch)

# VGG feature values
_, end_points = vgg.vgg_19(rgbmean,
                           num_classes=1000,
                           is_training=False,
                           spatial_squeeze=False)
conv54 = end_points['vgg_19/conv5/conv5_4']
print("vgg.conv5_4", conv54.shape)
fmap = tf.split(conv54, 2)

content_loss = tf.losses.mean_squared_error(fmap[0], fmap[1])
######################################


def Discriminator(dbatch, name="Discriminator"):
    with tf.variable_scope(name):
        net = slim.conv2d(dbatch, 64, 1, activation_fn=leaky_relu)

        ochannels = [64, 128, 128, 256, 256, 512, 512]
        stride = [2, 1]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
g = tf.get_default_graph()
#%%
with g.as_default():
    if is_save:
        opts = tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.ZLIB)
        feature_writer = tf.python_io.TFRecordWriter(feature_tfrecord_filename,
                                                     options=opts)

    img_input_ph = tf.placeholder(dtype=tf.float32,
                                  shape=[None, height, width, 3])
    with slim.arg_scope(vgg.vgg_arg_scope()):
        _, _ = vgg.vgg_19(img_input_ph, num_classes=1000, is_training=False)
        init_fn = slim.assign_from_checkpoint_fn(checkpoints_dir,
                                                 slim.get_model_variables())
        features = g.get_tensor_by_name('vgg_19/conv5/conv5_4/Relu:0')

    idx = 0
    init_fn(sess)
    while True:  #idx < 3125:
        try:
            processed_images_v, img_ids_v, labels_v = sess.run(
                [processed_images, img_ids, labels])
            features_v = sess.run(features, {img_input_ph: processed_images_v})
            print('batch no. {}'.format(idx))
            for idx_s in range(features_v.shape[0]):
                feature = features_v[idx_s, :, :, :]
                feature = np.reshape(feature, [196, 512])
Example #38
def compute_feature_of_batch_ts_with_cnn(file_path_of_ts, file_path_of_feature,
                                         cnn_model_name,
                                         file_path_of_pretrained_model):
    r'''
    compute feature of somme time series with pretrained CNN
    :param file_path_of_ts: file path of time series
    :param file_path_of_feature: file path of saving feature
    :param cnn_model_name: name of CNN model
    :param file_path_of_pretrained_model: file path of pretrained CNN
    :return: ''
    '''
    #tf.reset_default_graph()
    #read data
    data = pd.read_csv(file_path_of_ts)
    #data=data.sample(20)
    #change dataframe to list
    id_list = data.iloc[:, 0].tolist()
    data_list = change_dataframe_to_dict_(data)

    model = cnn_model_name
    checkpoint_file = file_path_of_pretrained_model

    # I only have these because I thought some take in size of (299,299), but maybe not
    if 'inception' in model: height, width, channels = 224, 224, 3
    if 'resnet' in model: height, width, channels = 224, 224, 3
    if 'vgg' in model: height, width, channels = 224, 224, 3

    if model == 'inception_resnet_v2': height, width, channels = 299, 299, 3

    x = tf.placeholder(tf.float32, shape=(1, height, width, channels))

    # load up model specific stuff
    if model == 'inception_v1':
        #from inception_v1 import *
        from nets import inception_v1

        arg_scope = inception_v1.inception_v1_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = inception_v1.inception_v1(x,
                                                           is_training=False,
                                                           num_classes=None)
            features = end_points['AvgPool_0a_7x7']
            # print('logits')
            # print(logits.shape)
            # print('features')
            # print(features.shape)
    elif model == 'inception_v2':
        #from inception_v2 import *
        from nets import inception_v2

        arg_scope = inception_v2.inception_v2_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = inception_v2.inception_v2(x,
                                                           is_training=False,
                                                           num_classes=None)
            features = end_points['AvgPool_1a']
    elif model == 'inception_v3':
        #from inception_v3 import *
        from nets import inception_v3

        arg_scope = inception_v3.inception_v3_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = inception_v3.inception_v3(x,
                                                           is_training=False,
                                                           num_classes=None)
            features = end_points['AvgPool_1a']
    elif model == 'inception_resnet_v2':
        #from inception_resnet_v2 import *
        from nets import inception_resnet_v2

        arg_scope = inception_resnet_v2.inception_resnet_v2_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = inception_resnet_v2.inception_resnet_v2(
                x, is_training=False, num_classes=1001)
            features = end_points['PreLogitsFlatten']
    elif model == 'resnet_v1_50':
        #from resnet_v1 import *

        from nets import resnet_v1

        arg_scope = resnet_v1.resnet_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = resnet_v1.resnet_v1_50(x,
                                                        is_training=False,
                                                        num_classes=1000)
            features = end_points['global_pool']
    elif model == 'resnet_v1_101':
        #from resnet_v1 import *
        from nets import resnet_v1

        arg_scope = resnet_v1.resnet_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = resnet_v1.resnet_v1_101(x,
                                                         is_training=False,
                                                         num_classes=1000)
            features = end_points['global_pool']
    elif model == 'vgg_16':
        #from vgg import *
        from nets import vgg

        arg_scope = vgg.vgg_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = vgg.vgg_16(x, is_training=False)
            features = end_points['vgg_16/fc8']
    elif model == 'vgg_19':
        #from vgg import *
        from nets import vgg

        arg_scope = vgg.vgg_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = vgg.vgg_19(x, is_training=False)
            features = end_points['vgg_19/fc8']
    #cpu_config = tf.ConfigProto(intra_op_parallelism_threads = 8, inter_op_parallelism_threads = 8, device_count = {'CPU': 3})
    #sess = tf.Session(config = cpu_config)
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_file)
    feature_list = []
    count_temp = 0

    for i in range(len(data_list)):
        count_temp = count_temp + 1
        #imaging ts
        ts_dict = data_list[i]
        ts = ts_dict['ts']
        id = ts_dict['id']
        new_ts = min_max_transform(ts)
        normalized = np.array(new_ts)
        fig, ax = plt.subplots()
        #plt.imshow(recurrence_plot.rec_plot(normalized), cmap=plt.cm.gray)
        plt.imshow(recurrence_plot.rec_plot(normalized))
        ax.set_xticks([])
        ax.set_yticks([])
        #print(id)
        path = "inception-v1/" + id + ".jpg"
        plt.savefig(path)
        plt.close(fig)
        #compute feature
        # #begin to compute features
        image = misc.imread(path)
        #from matplotlib.pyplot import imread
        #image=imread(path)
        # print('image')
        # print(image.size)
        image = misc.imresize(image, (height, width))
        image = np.expand_dims(image, 0)
        feature = np.squeeze(sess.run(features, feed_dict={x: image}))
        feature_list.append(feature)
        # print('feature-test')
        # print(feature)
        os.remove(path)
        if count_temp % 100 == 0:
            print(count_temp)
        #begin to process parellel result and write_to_csv
    feature_array = np.array(feature_list)

    feature_df = pd.DataFrame(feature_array)
    # print(feature_df.shape)
    # print(len(id_list))
    #add id
    feature_df.insert(loc=0, column='id', value=id_list)
    # print(feature_final_df.shape)
    # print(feature_final_df.head())
    feature_df.to_csv(file_path_of_feature, index=False)
    gc.collect()
Example #39
        lr = 0.0001

    return {images: imgs, labels: lbls, learning_rate: lr,
            keep_prob: keep_prob_per, keep_prob_data: keep_prob_data_per}

tf.reset_default_graph()

images = tf.placeholder(tf.float32,shape=[None,height_image,width_image,3])
labels = tf.placeholder(tf.float32,shape=[None,3])
learning_rate = tf.placeholder(tf.float32,shape=[])
keep_prob = tf.placeholder(tf.float32,shape=[])
keep_prob_data = tf.placeholder(tf.float32,shape=[])


with slim.arg_scope(vgg.vgg_arg_scope()):
    logits_drop_1 = tf.contrib.layers.dropout(images, keep_prob_data)
    vgg_logits, end_points = vgg.vgg_19(logits_drop_1, num_classes=3, dropout_keep_prob = 0.95, global_pool=True, is_training=True)

checkpoint_exclude_scopes=["vgg_19/fc8"]
#checkpoint_exclude_scopes=[]
exclusions = checkpoint_exclude_scopes
#review code!
variables_to_restore = []
for var in slim.get_model_variables():
    excluded = False
    for exclusion in exclusions:
        if var.op.name.startswith(exclusion):
            excluded = True
            break
    if not excluded:
        variables_to_restore.append(var)
#variables_to_restore = slim.get_variables_to_restore(exclude = checkpoint_exclude_scopes)
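The filtered list is typically handed to a Saver (or slim.assign_from_checkpoint_fn) so the pretrained weights are loaded while the excluded vgg_19/fc8 layer keeps its random initialization; a sketch assuming a local vgg_19.ckpt:

saver = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, 'vgg_19.ckpt')  # placeholder checkpoint path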