Example #1
    def _transform(self, dataset):
        if any([field.dataType == DoubleType() for field in dataset.schema]):
            logger.warning("Detected DoubleType columns in dataframe passed to transform(). In "
                           "Deep Learning Pipelines 1.0 and above, DoubleType columns can only be "
                           "fed to input tensors of type tf.float64. To feed dataframe data to "
                           "tensors of other types (e.g. tf.float32, tf.int32, tf.int64), use the "
                           "corresponding Spark SQL data types (FloatType, IntegerType, LongType).")

        graph_def = self._optimize_for_inference()
        input_mapping = self.getInputMapping()
        output_mapping = self.getOutputMapping()

        graph = tf.Graph()
        with tf.Session(graph=graph):
            analyzed_df = tfs.analyze(dataset)
            out_tnsr_op_names = [tfx.op_name(tnsr_name) for tnsr_name, _ in output_mapping]
            # Load graph
            tf.import_graph_def(graph_def=graph_def, name='', return_elements=out_tnsr_op_names)
            # Feed dict maps from placeholder name to DF column name
            feed_dict = {tfx.op_name(tnsr_name): col_name for col_name, tnsr_name in input_mapping}
            fetches = [tfx.get_tensor(tnsr_name, graph) for tnsr_name in out_tnsr_op_names]
            out_df = tfs.map_blocks(fetches, analyzed_df, feed_dict=feed_dict)
            # We still have to rename output columns
            for tnsr_name, new_colname in output_mapping:
                old_colname = tfx.op_name(tnsr_name, graph)
                if old_colname != new_colname:
                    out_df = out_df.withColumnRenamed(old_colname, new_colname)

        return out_df
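Example #1 relies on two conventions that tf.import_graph_def makes explicit: a tensor named "x:0" is output 0 of the op named "x", and importing with name='' keeps the original names so they can be used directly in feed/fetch maps. A minimal self-contained sketch of this (not part of the original example, assuming a TensorFlow 1.x environment):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, shape=[None], name='x')
    tf.identity(2.0 * x, name='y')

g2 = tf.Graph()
with g2.as_default():
    tf.import_graph_def(g.as_graph_def(), name='')  # empty name: original names are kept
with tf.Session(graph=g2) as sess:
    # feed and fetch by the original tensor names, as the transformer above does
    print(sess.run('y:0', feed_dict={'x:0': [1.0, 2.0]}))  # [2. 4.]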
Example #2
    def __init__(self, graph_path, target_size=(320, 240)):
        self.target_size = target_size

        # load graph
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        self.graph = tf.get_default_graph()
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        self.persistent_sess = tf.Session(graph=self.graph)

        # for op in self.graph.get_operations():
        #     print(op.name)

        self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
        self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')

        self.heatMat = self.pafMat = None

        # warm-up
        self.persistent_sess.run(
            self.tensor_output,
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)]
            }
        )
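In contrast to an empty name, the name='TfPoseEstimator' import above prefixes every op, which is why the tensors are looked up as 'TfPoseEstimator/...'. A minimal sketch of the prefixing behavior (illustrative, not from the original project, assuming TensorFlow 1.x):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    tf.placeholder(tf.float32, name='image')

with tf.Graph().as_default() as g2:
    tf.import_graph_def(g.as_graph_def(), name='TfPoseEstimator')
    # the imported placeholder now lives under the given name scope
    print(g2.get_tensor_by_name('TfPoseEstimator/image:0'))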
Example #3
def loadmodle():
	print(u"step2: model loading test")
	with tf.Session() as persisted_sess:
		print("---1: load graph")  # load the computation graph
		with gfile.FastGFile("/tmp/load/test.pb", 'rb') as f:
			graph_def = tf.GraphDef()
			graph_def.ParseFromString(f.read())
			persisted_sess.graph.as_default()
			tf.import_graph_def(graph_def, name='')  # import the graph definition

		print("---2: map variables")
		persisted_result = persisted_sess.graph.get_tensor_by_name("saved1_result:0")  # look up this tensor
		tf.add_to_collection(tf.GraphKeys.VARIABLES, persisted_result)  # add the tensor to the set of variables to restore

		# restore the data
		print("---3: load data")
		try:
			saver = tf.train.Saver(tf.all_variables())  # 'Saver' misnomer! Better: Persister!  # restore the variables
		except Exception as e:
			print(str(e))
		saver.restore(persisted_sess, "checkpoint.data")  # reload the variables' data into the tensors


		# rerun the computation
		print(persisted_result.eval())
		print("DONE")
  def tfliteInvoke(self, graph, test_inputs, outputs):
    tf.reset_default_graph()
    # Turn the input into a placeholder with batch size 1
    tflite_input = tf.placeholder(
        "float", [1, self.time_steps, self.n_input], name="INPUT_IMAGE_LITE")
    tf.import_graph_def(graph, name="", input_map={"INPUT_IMAGE": tflite_input})
    with tf.Session() as sess:
      curr = sess.graph_def
      curr = convert_op_hints_to_stubs(graph_def=curr)

    curr = optimize_for_inference_lib.optimize_for_inference(
        curr, ["INPUT_IMAGE_LITE"], ["OUTPUT_CLASS"],
        [tf.float32.as_datatype_enum])

    tflite = tf.lite.toco_convert(
        curr, [tflite_input], [outputs], allow_custom_ops=False)

    interpreter = tf.lite.Interpreter(model_content=tflite)

    try:
      interpreter.allocate_tensors()
    except ValueError:
      assert False

    input_index = (interpreter.get_input_details()[0]["index"])
    interpreter.set_tensor(input_index, test_inputs)
    interpreter.invoke()
    output_index = (interpreter.get_output_details()[0]["index"])
    result = interpreter.get_tensor(output_index)
    # Reset all variables so it will not pollute other inferences.
    interpreter.reset_all_variables()
    return result
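The tfliteInvoke helper above uses the legacy tf.lite.toco_convert entry point. For comparison, here is a minimal end-to-end sketch with the higher-level TFLiteConverter API; this is an alternative flow (assuming a TF 1.13+ environment), not code from the original test:

import numpy as np
import tensorflow as tf

with tf.Graph().as_default(), tf.Session() as sess:
    x = tf.placeholder(tf.float32, [1, 4], name='x')
    y = tf.nn.relu(x, name='y')
    converter = tf.lite.TFLiteConverter.from_session(sess, [x], [y])
    tflite_model = converter.convert()

interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]['index']
output_index = interpreter.get_output_details()[0]['index']
interpreter.set_tensor(input_index, np.array([[-1.0, 0.0, 1.0, 2.0]], dtype=np.float32))
interpreter.invoke()
print(interpreter.get_tensor(output_index))  # [[0. 0. 1. 2.]]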
Example #5
  def testInvalidInputForInputMap(self):
    with tf.Graph().as_default():
      with self.assertRaises(TypeError) as e:
        tf.import_graph_def(self._MakeGraphDef(''),
                            input_map=[tf.constant(5.0)])
      self.assertEqual('input_map must be a dictionary mapping strings to '
                       'Tensor objects.', str(e.exception))
예제 #6
0
def main(unused_args):
  if not tf.gfile.Exists(FLAGS.input):
    print("Input graph file '" + FLAGS.input + "' does not exist!")
    return -1

  known_modes = ["round", "quantize", "eightbit", "weights", "test",
                 "weights_rounded"]
  if not any(FLAGS.mode in s for s in known_modes):
    print("mode is '" + FLAGS.mode + "', not in " + ", ".join(known_modes) +
          ".")
    return -1

  tf_graph = tf.GraphDef()
  with tf.gfile.Open(FLAGS.input, "rb") as f:
    data = f.read()
    tf_graph.ParseFromString(data)

  graph = tf.Graph()
  with graph.as_default():
    tf.import_graph_def(tf_graph, input_map={}, name="")

  rewriter = GraphRewriter(tf_graph, FLAGS.mode)

  output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))

  f = tf.gfile.FastGFile(FLAGS.output, "wb")
  f.write(output_graph.SerializeToString())

  return 0
Example #7
def run_graph_def(graph_def, input_map, outputs):
  graph = tf.Graph()
  with graph.as_default():
    tf.import_graph_def(graph_def, input_map={}, name="")
  with tf.Session(graph=graph) as sess:
    results = sess.run(outputs, feed_dict=input_map)
  return results
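Note that run_graph_def's input_map parameter is passed to sess.run as a feed_dict keyed by tensor name; tf.import_graph_def itself always receives an empty input_map here. A hypothetical usage sketch, assuming TensorFlow 1.x:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, name='x')
    tf.multiply(x, 3.0, name='y')

# feed 'x:0' and fetch 'y:0' by their original names; prints [6.0]
print(run_graph_def(g.as_graph_def(), {'x:0': 2.0}, ['y:0']))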
Example #8
def pbtxt_to_graphdef(filename):
  with open(filename, 'r') as f:
    graph_def = tf.GraphDef()
    file_content = f.read()
    text_format.Merge(file_content, graph_def)
    tf.import_graph_def(graph_def, name='')
    tf.train.write_graph(graph_def, 'pbtxt/', 'protobuf.pb', as_text=False)
Example #9
    def __init__(self):
        # Now load the Inception model from file. The way TensorFlow
        # does this is confusing and requires several steps.

        # Create a new TensorFlow computational graph.
        self.graph = tf.Graph()

        # Set the new graph as the default.
        with self.graph.as_default():

            # TensorFlow graphs are saved to disk as so-called Protocol Buffers
            # aka. proto-bufs, which is a file format that works on multiple
            # platforms. In this case it is saved as a binary file.

            # Open the graph-def file for binary reading.
            path = os.path.join(data_dir, path_graph_def)
            with tf.gfile.FastGFile(path, 'rb') as file:
                # The graph-def is a saved copy of a TensorFlow graph.
                # First we need to create an empty graph-def.
                graph_def = tf.GraphDef()

                # Then we load the proto-buf file into the graph-def.
                graph_def.ParseFromString(file.read())

                # Finally we import the graph-def to the default TensorFlow graph.
                tf.import_graph_def(graph_def, name='')

                # Now self.graph holds the Inception model from the proto-buf file.

            # Get a reference to the tensor for inputting images to the graph.
            self.input = self.graph.get_tensor_by_name(self.tensor_name_input_image)

            # Get references to the tensors for the commonly used layers.
            self.layer_tensors = [self.graph.get_tensor_by_name(name + ":0") for name in self.layer_names]
Example #10
  def testDefaultAttrsRemoved(self):
    producer_op_list = op_def_pb2.OpList()
    text_format.Merge("""
      op {
        name: 'OpWithFutureDefaultAttr'
        attr { name: 'default_int' type: 'int' default_value { i: 456 } }
      }
    """, producer_op_list)
    # Attr only in producer_op_list with default value gets removed.
    with tf.Graph().as_default():
      a = tf.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'OpWithFutureDefaultAttr'
                 attr { key: 'default_int' value { i: 456 } } }
          """),
          return_elements=["A"], producer_op_list=producer_op_list)
      with self.assertRaisesRegexp(ValueError, "No attr named 'default_int'"):
        a[0].get_attr("default_int")

    # Attr only in producer_op_list with non-default value is preserved.
    with tf.Graph().as_default():
      a = tf.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'OpWithFutureDefaultAttr'
                 attr { key: 'default_int' value { i: 987 } } }
          """),
          return_elements=["A"], producer_op_list=producer_op_list)
      self.assertEqual(987, a[0].get_attr("default_int"))
Example #11
def graphdef_to_pbtxt(filename): 
  with gfile.FastGFile(filename,'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')
    tf.train.write_graph(graph_def, 'pbtxt/', 'protobuf.pbtxt', as_text=True)
  return
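Example #8 and Example #11 are inverses: one parses a text-format GraphDef and writes it as binary, the other parses a binary GraphDef and writes it as text. The tf.import_graph_def call is incidental in both; the conversion itself only needs the GraphDef, as in this minimal round-trip sketch (assuming TF 1.x):

import tensorflow as tf
from google.protobuf import text_format

def pb_to_pbtxt_and_back(pb_path):
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())      # binary -> GraphDef
    txt = text_format.MessageToString(graph_def)  # GraphDef -> text
    restored = tf.GraphDef()
    text_format.Merge(txt, restored)              # text -> GraphDef
    assert restored == graph_def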
Example #12
def test_i2v():
    """Loads the i2v network and applies it to a test image.
    """
    with tf.Session() as sess:
        net = get_i2v_model()
        tf.import_graph_def(net['graph_def'], name='i2v')
        g = tf.get_default_graph()
        names = [op.name for op in g.get_operations()]
        x = g.get_tensor_by_name(names[0] + ':0')
        softmax = g.get_tensor_by_name(names[-3] + ':0')

        from skimage import data
        img = preprocess(data.coffee())[np.newaxis]
        res = np.squeeze(softmax.eval(feed_dict={x: img}))
        print([(res[idx], net['labels'][idx])
               for idx in res.argsort()[-5:][::-1]])

        """Let's visualize the network's gradient activation
        when backpropagated to the original input image.  This
        is effectively telling us which pixels contribute to the
        predicted class or given neuron"""
        pools = [name for name in names if 'pool' in name.split('/')[-1]]
        fig, axs = plt.subplots(1, len(pools))
        for pool_i, poolname in enumerate(pools):
            pool = g.get_tensor_by_name(poolname + ':0')
            pool.get_shape()
            neuron = tf.reduce_max(pool, 1)
            saliency = tf.gradients(neuron, x)
            neuron_idx = tf.arg_max(pool, 1)
            this_res = sess.run([saliency[0], neuron_idx],
                                feed_dict={x: img})

            grad = this_res[0][0] / np.max(np.abs(this_res[0]))
            axs[pool_i].imshow((grad * 128 + 128).astype(np.uint8))
            axs[pool_i].set_title(poolname)
Example #13
	def __init__(self, proxy_map):
		super(SpecificWorker, self).__init__(proxy_map)
		self.timer.timeout.connect(self.compute)
		self.Period = 100
		self.timer.start(self.Period)

		# SIFT feature extractor
		self.feature_extractor = cv2.xfeatures2d.SIFT_create()

		# Create a dense grid of keypoints
		self.keypoints=list()
		for i in range(5,IMAGE_SIZE,12):
			for j in range(5,IMAGE_SIZE,12):
				self.keypoints.append(cv2.KeyPoint(i,j,12))

		# Create a tensorflow session
		self.sess=tf.Session()

		# Read the frozen graph from the model file
		with gfile.FastGFile(MODEL_FILE,'rb') as f:
			graph_def = tf.GraphDef()
			graph_def.ParseFromString(f.read())
			self.sess.graph.as_default()
			tf.import_graph_def(graph_def, name='')

			# Get input and output tensors from graph
			self.x_input = self.sess.graph.get_tensor_by_name("input:0")
			self.output = self.sess.graph.get_tensor_by_name("output:0")
			self.dsift = self.sess.graph.get_tensor_by_name("sift:0")
Example #14
    def __init__(self, model):
        detection_graph = tf.Graph()

        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(model, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        self.sess = tf.Session(graph=detection_graph)
Example #15
def strip_and_freeze_until(fetches, graph, sess=None, return_graph=False):
    """
    Create a static view of the graph by

    * Converting all variables into constants
    * Removing graph elements not reachacble to `fetches`

    :param graph: tf.Graph, the graph to be frozen
    :param fetches: list, graph elements representing the outputs of the graph
    :param return_graph: bool, if set True, return the graph function object
    :return: GraphDef, the GraphDef object with cleanup procedure applied
    """
    graph = validated_graph(graph)
    should_close_session = False
    if not sess:
        sess = tf.Session(graph=graph)
        should_close_session = True

    gdef_frozen = tf.graph_util.convert_variables_to_constants(
        sess,
        graph.as_graph_def(add_shapes=True),
        [op_name(graph, tnsr) for tnsr in fetches])

    if should_close_session:
        sess.close()

    if return_graph:
        g = tf.Graph()
        with g.as_default():
            tf.import_graph_def(gdef_frozen, name='')
        return g
    else:
        return gdef_frozen
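The freeze-then-reimport pattern in strip_and_freeze_until recurs throughout these examples. A minimal standalone version (a sketch, assuming TensorFlow 1.x): variables are baked into constants, so the re-imported graph runs without any variable initialization.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    v = tf.Variable(2.0, name='v')
    x = tf.placeholder(tf.float32, name='x')
    tf.multiply(v, x, name='y')
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    frozen = tf.graph_util.convert_variables_to_constants(
        sess, g.as_graph_def(), ['y'])  # output op names, not tensor names

with tf.Graph().as_default() as g2:
    tf.import_graph_def(frozen, name='')
with tf.Session(graph=g2) as sess:
    print(sess.run('y:0', feed_dict={'x:0': 21.0}))  # 42.0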
Example #16
  def build_graph_from_proto(self, graph_def_file, saver_def_file,
                             checkpoint_path):
    """Builds the inference graph from serialized GraphDef and SaverDef protos.

    Args:
      graph_def_file: File containing a serialized GraphDef proto.
      saver_def_file: File containing a serialized SaverDef proto.
      checkpoint_path: Checkpoint file or a directory containing a checkpoint
        file.

    Returns:
      restore_fn: A function such that restore_fn(sess) loads model variables
        from the checkpoint file.
    """
    # Load the Graph.
    tf.logging.info("Loading GraphDef from file: %s", graph_def_file)
    graph_def = tf.GraphDef()
    with tf.gfile.FastGFile(graph_def_file, "rb") as f:
      graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name="")

    # Load the Saver.
    tf.logging.info("Loading SaverDef from file: %s", saver_def_file)
    saver_def = tf.train.SaverDef()
    with tf.gfile.FastGFile(saver_def_file, "rb") as f:
      saver_def.ParseFromString(f.read())
    saver = tf.train.Saver(saver_def=saver_def)

    return self._create_restore_fn(checkpoint_path, saver)
Example #17
File: model.py Project: forin-xyz/FoolNLTK
def load_graph(path):
    with tf.gfile.GFile(path, mode='rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="prefix")
    return graph
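A self-contained usage sketch for load_graph above, assuming TensorFlow 1.x; a tiny graph is serialized to a temporary file first so the example runs end to end. Because the import uses name="prefix", every lookup needs the 'prefix/' namespace:

import os
import tempfile
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, name='x')
    tf.add(x, 1.0, name='y')

path = os.path.join(tempfile.mkdtemp(), 'tiny.pb')
with tf.gfile.GFile(path, 'wb') as f:
    f.write(g.as_graph_def().SerializeToString())

graph = load_graph(path)
with tf.Session(graph=graph) as sess:
    print(sess.run('prefix/y:0', feed_dict={'prefix/x:0': 41.0}))  # 42.0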
Example #18
  def testVersionLow(self):
    with tf.Graph().as_default():
      pat = (r"^GraphDef version -1 is no longer supported: TensorFlow \S+ "
             r"needs %d <= version <= %d.  Please regenerate your graph.$" %
             (tf.GRAPH_DEF_VERSION_MIN, tf.GRAPH_DEF_VERSION_MAX))
      with self.assertRaisesRegexp(ValueError, pat):
        tf.import_graph_def(self._MakeGraphDef("", version=-1))
Example #19
  def testVersionHigh(self):
    with tf.Graph().as_default():
      pat = (r"^GraphDef version \d+ is not yet supported: TensorFlow \S+ "
             r"needs %d <= version <= %d.  Please upgrade TensorFlow.$" %
             (tf.GRAPH_DEF_VERSION_MIN, tf.GRAPH_DEF_VERSION_MAX))
      with self.assertRaisesRegexp(ValueError, pat):
        tf.import_graph_def(self._MakeGraphDef("", version=1 << 30))
Example #20
def load_graph(frozen_model_dir):
    """Load frozen tensorflow graph into the default graph.

    Args:
        frozen_model_dir: location of protobuf file containing frozen graph.

    Returns:
        tf.Graph object imported from frozen_model_path.
    """

    # Parse the frozen graph definition into a GraphDef object.
    frozen_file = os.path.join(frozen_model_dir, "frozen_model.pb")
    with tf.gfile.GFile(frozen_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Import the graph def into a fresh graph and return it. Since no `name`
    # is given, imported ops get the default 'import/' name scope.
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            op_dict=None,
            producer_op_list=None)
    return graph
Example #21
def testmodel2():
    with tf.Session() as persisted_sess:
        print("load graph")  # load the computation graph
        with gfile.FastGFile("write_graph_model.meta", "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            persisted_sess.graph.as_default()
            tf.import_graph_def(graph_def, name="")  # import the graph definition

            print("map variables")
            predict = persisted_sess.graph.get_tensor_by_name("prediction:0")  # look up this tensor
            tf.add_to_collection(tf.GraphKeys.VARIABLES, predict)  # add the tensor to the set of variables to restore
            try:
                saver = tf.train.Saver(tf.all_variables())  # 'Saver' misnomer! Better: Persister!  # restore the variables
            except Exception as e:
                print(str(e))

            # restore the data
            print("load data")
            saver.restore(persisted_sess, "saver_checkpoint")  # reload the variables' data into the tensors

            # rerun the computation
            test = test_samples[1,].reshape(1, 784)
            mark = np.diag([1] * 4)
            prev = persisted_sess.run(predict, feed_dict={x: test, y_: mark, keep_prob: 1.0})
            print(u"[prev]:", chr(prev.tolist().index(1) + 65))
            print(u"[true]:", chr(test_labels[1,].tolist().index(1) + 65))
Example #22
def main(_):
  # a = tf.constant(5,name="a")
  # b = tf.constant(15,name="b")
  # c = tf.add(a,b,name="c")
  # p = tf.Print(c,[c])
    
  # sess.run(p)
  with tf.device('/cpu:0'):
    t = read_tensor_from_image_file("/home/dek/makerfaire-booth/2018/burger/machine/data/all.299/burgers/burger_000156.png")

  graph_def = tf.GraphDef()
  with tf.Graph().as_default() as graph:
    model_path = '/home/dek/tensorflow/tensorflow/examples/label_image/data/inception_v3_2016_08_28_frozen.pb'
    
    print('Model path: ', model_path)
    with open(model_path, "rb") as f:
      graph_def.ParseFromString(f.read())
    with graph.as_default():
      tf.import_graph_def(graph_def)
    input_op = graph.get_operation_by_name('import/input')
    output_op = graph.get_operation_by_name('import/InceptionV3/Predictions/Reshape_1')
    sess = tf.Session("grpc://localhost:2222")
    results = sess.run(output_op.outputs[0], {
      input_op.outputs[0]: t
    })
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    label_file = "/home/dek/tensorflow/tensorflow/examples/label_image/data/imagenet_slim_labels.txt"
    labels = load_labels(label_file)
    for i in top_k:
      print(labels[i], results[i])
Example #23
def Import(sess):
    with gfile.FastGFile("../models/producttype/graph.pb",'rb') as f:
        graph_def = tf.GraphDef()
        content = f.read()
        graph_def.ParseFromString(content)
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
Example #24
def main(_):
    labels = [line.rstrip() for line in tf.gfile.GFile(FLAGS.output_labels)]

    with tf.gfile.FastGFile(FLAGS.output_graph, 'rb') as fp:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(fp.read())
        tf.import_graph_def(graph_def, name='')

    with tf.Session() as sess:
        logits = sess.graph.get_tensor_by_name('final_result:0')
        image = tf.gfile.FastGFile(sys.argv[1], 'rb').read()
        prediction = sess.run(logits, {'DecodeJpeg/contents:0': image})

    # print('=== prediction result ===')
    # top_result = int(np.argmax(prediction[0]))
    # name = labels[top_result]
    # score = prediction[0][top_result]
    # print('%s (%.2f%%)' % (name, score * 100))

    print('=== prediction results ===')
    for i in range(len(labels)):
        name = labels[i]
        score = prediction[0][i]
        print('%s (%.2f%%)' % (name, score * 100))

    if FLAGS.show_image:
        img = mpimg.imread(sys.argv[1])
        plt.imshow(img)
        plt.show()
Example #25
    def classify(self, path, resize_height, resize_width):
        """ Resizes the passed image to indicated dimensions and estimates its
            VP using the graph stored self.filename.
        """ 
        self.info("Manually classifying the image in " + str(path))
        # Load freezed graph from file.
        graph_def = tf.GraphDef()
        with open(self.filename, 'rb') as f:
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def)

        predictions = []
        with tf.Session() as sess:
            # Load output node to use for predictions.
            output_node_processed = sess.graph.get_tensor_by_name('import/output_processed:0')
            # Iterate files from directory.
            start_time = time.time()
            # Read image 
            img = cv.imread(path, 1)
            # Process image that will be evaluated by the model.
            img_pred = imresize(img, [resize_height, resize_width], 'bilinear')
            img_pred = img_pred.astype(np.float32)
            img_pred = np.multiply(img_pred, 1.0 / 256.0)
            img_pred = img_pred.flatten()
            # Compute prediction point.
            predictions = output_node_processed.eval(
                feed_dict = {
                    'import/input_images:0': img_pred,
                    'import/keep_prob:0': 1.0
                }
            )
            predictions = np.round(predictions).astype(int)
            self.info('Predicted Point Processed: (' + str(int(round(predictions[0][0]))) + ', ' + str(int(round(predictions[0][1]))) + ')')
        return predictions
Example #26
def build_prepro_graph(inception_path):
    global input_layer, output_layer
    with open(inception_path, 'rb') as f:
        fileContent = f.read()

    graph_def = tf.GraphDef()
    graph_def.ParseFromString(fileContent)
    tf.import_graph_def(graph_def)
    graph = tf.get_default_graph()

    input_layer = graph.get_tensor_by_name("import/InputImage:0")
    output_layer = graph.get_tensor_by_name(
        "import/InceptionV4/Logits/AvgPool_1a/AvgPool:0")

    input_file = tf.placeholder(dtype=tf.string, name="InputFile")
    image_file = tf.read_file(input_file)
    jpg = tf.image.decode_jpeg(image_file, channels=3)
    png = tf.image.decode_png(image_file, channels=3)
    output_jpg = tf.image.resize_images(jpg, [299, 299]) / 255.0
    output_jpg = tf.reshape(
        output_jpg, [
            1, 299, 299, 3], name="Preprocessed_JPG")
    output_png = tf.image.resize_images(png, [299, 299]) / 255.0
    output_png = tf.reshape(
        output_png, [
            1, 299, 299, 3], name="Preprocessed_PNG")
    return input_file, output_jpg, output_png
Example #27
    def __init__(self):
        logger.info('Loading Tensorflow Detection API')

        weights_path = get_file(config.SSD_INCEPTION_FILENAME, config.SSD_INCEPTION_URL,
                                cache_dir=os.path.abspath(config.WEIGHT_PATH),
                                cache_subdir='models')

        extract_path = weights_path.replace('.tar.gz', '')
        if not os.path.exists(extract_path):
            tar = tarfile.open(weights_path, "r:gz")
            tar.extractall(path=os.path.join(config.WEIGHT_PATH, 'models'))
            tar.close()
        pb_path = os.path.join(extract_path, self.PB_NAME)

        self.graph = tf.Graph()
        with self.graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(pb_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
        self.categories = label_map_util.convert_label_map_to_categories(self.label_map,
                                                                         max_num_classes=self.NUM_CLASSES,
                                                                         use_display_name=True)
        self.category_index = label_map_util.create_category_index(self.categories)
Example #28
def _get_expected_result(gin, local_features):
    """
    Runs the graph in the :py:obj:`TFInputGraph` object to compute the expected results.
    :param gin: a :py:obj:`TFInputGraph`
    :return: expected results in a NumPy array
    """
    graph = tf.Graph()
    with tf.Session(graph=graph) as sess, graph.as_default():
        # Build test graph and transformers from here
        tf.import_graph_def(gin.graph_def, name='')

        # Build the results
        _results = []
        for row in local_features:
            fetches = [tfx.get_tensor(tnsr_name, graph)
                       for tnsr_name, _ in _output_mapping.items()]
            feed_dict = {}
            for colname, tnsr_name in _input_mapping.items():
                tnsr = tfx.get_tensor(tnsr_name, graph)
                feed_dict[tnsr] = np.array(row[colname])[np.newaxis, :]

            curr_res = sess.run(fetches, feed_dict=feed_dict)
            _results.append(np.ravel(curr_res))

        expected = np.hstack(_results)

    return expected
Example #29
def get_layer_names(model='inception'):
    """Retun every layer's index and name in the given model.

    Parameters
    ----------
    model : str, optional
        Which model to load. Must be one of: ['inception'], 'i2v_tag', 'i2v',
        'vgg16', or 'vgg_face'.

    Returns
    -------
    names : list of tuples
        The index and layer's name for every layer in the given model.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        if model == 'inception':
            net = inception.get_inception_model()
        elif model == 'vgg_face':
            net = vgg16.get_vgg_face_model()
        elif model == 'vgg16':
            net = vgg16.get_vgg_model()
        elif model == 'i2v':
            net = i2v.get_i2v_model()
        elif model == 'i2v-tag':
            net = i2v.get_i2v_tag_model()

        tf.import_graph_def(net['graph_def'], name='net')
        names = [(i, op.name) for i, op in enumerate(g.get_operations())]
        return names
Example #30
  def __init__(self, name, input):
    self.name = name
    with open("models/vgg16.tfmodel", mode='rb') as f:
      fileContent = f.read()
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(fileContent)
    tf.import_graph_def(graph_def, input_map={"images": input}, name=self.name)
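The input_map argument used above grafts an existing tensor onto the imported graph: the VGG graph's "images" placeholder is replaced by the caller's input tensor. A minimal sketch of the mechanism (illustrative, assuming TensorFlow 1.x):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, name='x')
    tf.square(x, name='y')

with tf.Graph().as_default():
    my_input = tf.constant(3.0)  # replaces the imported 'x' placeholder
    y, = tf.import_graph_def(g.as_graph_def(),
                             input_map={'x': my_input},
                             return_elements=['y:0'], name='net')
    with tf.Session() as sess:
        print(sess.run(y))  # 9.0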
Example #31

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")

from utils import label_map_util

from utils import visualization_utils as vis_util

# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


# ## Helper code
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)


# # Detection for images, not the streaming
Example #32
def import_tensorflow_computation(comp, name='fn'):
    """Creates a `computation_module.ComputationModule` from a TF computation.

  WARNING: This helper function is under construction, and most capabilities are
  not implemented at this stage:

  * The parameter and result of `comp` can only be a single tensor. Named
    tuples, sequences, or functional types are not currently supported.

  * Only tensorflow code can be imported.

  TODO(b/153499219): Add support for named tuples, sequences, and functions.

  Args:
    comp: An instance of a `pb.Computation` with TensorFlow code to import.
    name: An optional `str` name of the (single) function in the IREE module.

  Returns:
    An instance of `Module` with the imported function present.

  Raises:
    TypeError: If arguments are of the wrong types, e.g., if `comp` is not a
      TensorFlow computation.
  """
    py_typecheck.check_type(comp, pb.Computation)
    type_spec = type_serialization.deserialize_type(comp.type)
    if not type_spec.is_function():
        type_spec = computation_types.FunctionType(None, type_spec)

    # TODO(b/153499219): Replace this with a recursive check of the signature
    # after relaxing the type restrictions and introducing nested structures.
    py_typecheck.check_type(type_spec.result, computation_types.TensorType)
    if type_spec.parameter is not None:
        py_typecheck.check_type(type_spec.parameter,
                                computation_types.TensorType)

    which_computation = comp.WhichOneof('computation')
    if which_computation != 'tensorflow':
        raise TypeError('Expected a TensorFlow computation, found {}.'.format(
            which_computation))

    output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
        comp.tensorflow.result)
    if type_spec.parameter is not None:
        input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
            comp.tensorflow.parameter)
    else:
        input_tensor_names = []

    graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def)
    init_op = comp.tensorflow.initialize_op
    return_elements = input_tensor_names + output_tensor_names
    if init_op:
        graph_def = tensorflow_utils.add_control_deps_for_init_op(
            graph_def, init_op)
        return_elements.append(init_op)

    with tf.Graph().as_default() as graph:
        # TODO(b/153499219): See if we can reintroduce uniquify_shared_names().
        # Right now, it causes loader breakage, and unclear if still necessary.
        import_results = tf.import_graph_def(graph_def,
                                             input_map={},
                                             return_elements=return_elements,
                                             name='')

    if init_op:
        initializer = import_results[-1]
        import_results.pop()
    else:
        initializer = None

    inputs = import_results[0:len(input_tensor_names)]
    outputs = import_results[len(input_tensor_names):]

    with graph.as_default():
        # TODO(b/153499219): Find a way to reflect the nested parameter and result
        # structure here after relaxing the restrictions.
        if inputs:
            assert len(inputs) < 2
            input_dict = {
                'parameter':
                tf.compat.v1.saved_model.utils.build_tensor_info(inputs[0])
            }
        else:
            input_dict = {}
        assert len(outputs) == 1
        output_dict = {
            'result':
            tf.compat.v1.saved_model.utils.build_tensor_info(outputs[0])
        }
        sig_def = tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
            inputs=input_dict, outputs=output_dict, method_name=name)
        with tempfile.TemporaryDirectory() as model_dir:
            builder = tf.compat.v1.saved_model.Builder(model_dir)
            with tf.compat.v1.Session(graph=graph) as sess:
                builder.add_meta_graph_and_variables(
                    sess, ['unused'],
                    signature_def_map={name: sig_def},
                    legacy_init_op=initializer,
                    strip_default_attrs=True)
                builder.save()
            iree_module = iree_compiler.tf_signature_def_saved_model_to_compiler_module(
                model_dir,
                saved_model_tags=set(['unused']),
                exported_names=[name])
            return computation_module.ComputationModule(
                iree_module, name, type_spec)
Example #33
def _write_saved_model(saved_model_path, frozen_graph_def, inputs, outputs,
                       label_index_map):
    """Writes SavedModel to disk.

  If checkpoint_path is not None bakes the weights into the graph thereby
  eliminating the need of checkpoint files during inference. If the model
  was trained with moving averages, setting use_moving_averages to true
  restores the moving averages, otherwise the original set of variables
  is restored.

  Args:
    saved_model_path: Path to write SavedModel.
    frozen_graph_def: tf.GraphDef holding frozen graph.
    inputs: The input image tensor to use for detection.
    outputs: A tensor dictionary containing the outputs of a DetectionModel.
  """
    with tf.Graph().as_default() as graph:
        with session.Session() as sess:

            tf.import_graph_def(frozen_graph_def, name='')

            # Make detection signature same as the merged detection classification model
            default_names = [
                "detection_scores", "detection_boxes", "detection_classes"
            ]

            def map_new_name(prefix_names):
                import re
                prefix = ""
                for name in prefix_names:
                    t = re.findall("|".join([x + "$" for x in default_names]),
                                   name)
                    if len(t) > 0:
                        prefix = re.sub(t[0] + "$", "", name)
                        break
                names = [re.sub("^" + prefix, "", nm) for nm in prefix_names]
                return names

            original_names = outputs.keys()
            mapped_names = map_new_name(original_names)
            outputs = {
                name_: graph.get_tensor_by_name(name + ":0")
                for name, name_ in zip(original_names, mapped_names)
            }

            detection_score_threshold = 0.5
            valid = tf.greater_equal(
                tf.reshape(outputs["detection_scores"], [-1]),
                detection_score_threshold)
            num = tf.reduce_sum(tf.cast(valid, tf.int32))
            outputs['detection_boxes'] = tf.slice(outputs['detection_boxes'],
                                                  [0, 0, 0], [-1, num, -1],
                                                  name='detection_boxes')
            outputs['detection_scores'] = tf.slice(outputs['detection_scores'],
                                                   [0, 0], [-1, num],
                                                   name='detection_scores')
            outputs['detection_classes'] = tf.slice(
                outputs['detection_classes'], [0, 0], [-1, num],
                name='detection_classes')

            index_label_map = {v: k for k, v in label_index_map.items()}
            classification_class_names = [
                index_label_map.get(i, "Unkown")
                for i in range(1 + max(index_label_map.keys()))
            ]
            classification_class_names = tf.constant(
                classification_class_names)
            detection_classes = tf.reshape(
                tf.cast(outputs['detection_classes'], dtype=tf.int64), [-1])
            outputs['detection_classes_names'] = tf.gather(
                classification_class_names,
                detection_classes,
                name="detection_classes_names")

            builder = tf.saved_model.builder.SavedModelBuilder(
                saved_model_path)

            tensor_info_inputs = {
                'inputs': tf.saved_model.utils.build_tensor_info(inputs)
            }
            tensor_info_outputs = {
                'boxes':
                tf.saved_model.utils.build_tensor_info(
                    outputs['detection_boxes']),
                'scores':
                tf.saved_model.utils.build_tensor_info(
                    outputs['detection_scores']),
                'classes':
                tf.saved_model.utils.build_tensor_info(
                    outputs['detection_classes_names']),
            }

            detection_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs=tensor_info_inputs,
                    outputs=tensor_info_outputs,
                    method_name=signature_constants.PREDICT_METHOD_NAME))

            builder.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    "detection_only_with_names": detection_signature,
                },
            )
            builder.save()
Example #34
def apply_saved_model(model_dir,
                      inputs,
                      tags,
                      signature_name=None,
                      output_keys_in_signature=None):
    """Applies a SavedModel to some `Tensor`s.

  Applies a SavedModel to `inputs`. The SavedModel is specified with
  `model_dir`, `tags` and `signature_name`. Note that the SavedModel will be
  converted to an all-constants graph.

  Note: This API can only be used when TF2 is disabled or
  `tft_beam.Context.force_tf_compat_v1=True`.

  Args:
    model_dir: A path containing a SavedModel.
    inputs: A dict whose keys are the names from the input signature and whose
        values are `Tensor`s. If there is only one input in the model's input
        signature then `inputs` can be a single `Tensor`.
    tags: The tags specifying which metagraph to load from the SavedModel.
    signature_name: Specify signature of the loaded model. The default value
        None can be used if there is only one signature in the MetaGraphDef.
    output_keys_in_signature: A list of strings which should be a subset of
        the outputs in the signature of the SavedModel. The returned `Tensor`s
        will correspond to specified output `Tensor`s, in the same order. The
        default value None can be used if there is only one output from
        signature.

  Returns:
    A `Tensor` or list of `Tensor`s representing the application of the
        SavedModel.

  Raises:
    ValueError: if
    `inputs` is invalid type, or
    `signature_name` is None but the SavedModel contains multiple signatures, or
    `inputs` do not match the signature inputs, or
    `output_keys_in_signature` is not a subset of the signature outputs.
  """
    # Load model, get graph, inputs and outputs.
    loaded_graph = tf.compat.v1.Graph()
    loaded_initializer_op_names = []

    with loaded_graph.as_default():
        sess = tf.compat.v1.Session()
        meta_graph = tf.compat.v1.saved_model.load(sess,
                                                   export_dir=model_dir,
                                                   tags=tags)
        loaded_initializer_op_names = [
            op.name for op in tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.TABLE_INITIALIZERS)
        ]

        if signature_name:
            signature = meta_graph.signature_def[signature_name]
        elif len(meta_graph.signature_def) > 1:
            raise ValueError(
                'The SavedModel contains multiple signatures (%r) but signature_name '
                'was not specified.' % (meta_graph.signature_def.keys(), ))
        else:
            signature = next(six.itervalues(meta_graph.signature_def))

    # Generate mapping from tensors in the graph to the input tensors.
    if isinstance(inputs, dict):
        if set(signature.inputs.keys()) != set(inputs.keys()):
            raise ValueError(
                'The keys in `inputs` (%r) do not match inputs of the SavedModel '
                '(%r).' % (inputs.keys(), signature.inputs.keys()))
        input_name_to_tensor_map = {
            signature.inputs[key].name: inputs[key]
            for key in inputs.keys()
        }
    elif len(signature.inputs) != 1:
        raise ValueError(
            'The SavedModel does not have exactly one input (had inputs %r) but '
            '`inputs` was not a dict.' % (signature.inputs.keys(), ))
    else:
        input_name_to_tensor_map = {
            next(six.itervalues(signature.inputs)).name: inputs
        }

    # Get output tensor names.
    if output_keys_in_signature:
        if not set(output_keys_in_signature) <= set(signature.outputs.keys()):
            raise ValueError(
                'output_keys_in_signature (%r) is not a subset of outputs of the '
                'SavedModel (%r).' %
                (output_keys_in_signature, signature.outputs.keys()))

        output_tensor_names = [
            signature.outputs[key].name for key in output_keys_in_signature
        ]
        output_single_tensor = False
    elif len(signature.outputs) != 1:
        raise ValueError(
            'The SavedModel does not have exactly one output (had outputs %r) but '
            'output_keys_in_signature was not specified.' %
            (signature.outputs.keys(), ))
    else:
        output_tensor_names = [next(six.itervalues(signature.outputs)).name]
        output_single_tensor = True

    # Convert_variables_to_constants() requires op name.
    output_op_names = [
        loaded_graph.get_tensor_by_name(tensor_name).op.name
        for tensor_name in output_tensor_names
    ]
    constant_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess, loaded_graph.as_graph_def(),
        output_op_names + loaded_initializer_op_names)
    sess.close()

    returned_elements = tf.import_graph_def(
        constant_graph_def,
        input_map=input_name_to_tensor_map,
        return_elements=output_tensor_names + loaded_initializer_op_names)
    returned_output_tensors = returned_elements[:len(output_tensor_names)]
    returned_initializer_ops = returned_elements[len(output_tensor_names):]

    for initializer_op in returned_initializer_ops:
        tf.compat.v1.add_to_collection(
            tf.compat.v1.GraphKeys.TABLE_INITIALIZERS, initializer_op)

    if output_single_tensor:
        assert len(output_tensor_names) == 1
        return returned_output_tensors[0]
    else:
        return returned_output_tensors
Example #35
def apply_function_with_checkpoint(fn,
                                   inputs,
                                   checkpoint,
                                   include=None,
                                   exclude=None):
    """Applies a tensor-in-tensor-out function with variables to some `Tensor`s.

  Variable values are loaded from the given checkpoint path. Note that the
  input_tensor_func, together with the checkpoint, will be converted to an
  all-constants graph, so ops requiring graph collections, such as table lookup
  (which requires a table init op being added to TABLE_INITIALIZERS collection),
  are not supported.

  Note: This API can only be used when TF2 is disabled or
  `tft_beam.Context.force_tf_compat_v1=True`.

  Args:
    fn: A tensor-in-tensor-out function that may contain variables.
    inputs: A list of `Tensor`s to apply `fn` to.
    checkpoint: The checkpoint path to load variables from.
    include: An optional list/tuple of scope strings for filtering which
        variables from the VARIABLES collection to include. If None, all
        variables will be included.
    exclude: An optional list/tuple of scope strings for filtering which
        variables from the VARIABLES collection to exclude. If None, no
        variables will be excluded.

  Returns:
    A `Tensor` or list of `Tensor`s representing the application of `fn`.

  Raises:
    ValueError: if the input tensor-in-tensor-out function adds to
        TABLE_INITIALIZERS collections.
  """
    loaded_graph = tf.compat.v1.Graph()
    with loaded_graph.as_default():
        input_placeholders = [
            tf.compat.v1.placeholder(dtype=tensor.dtype,
                                     shape=tensor.shape,
                                     name=tensor.op.name) for tensor in inputs
        ]
        output = fn(*input_placeholders)
        if isinstance(output, tf.Tensor):
            output_tensors = [output]
            output_single_tensor = True
        else:
            output_tensors = output
            output_single_tensor = False

        # TODO(qimingj/kestert): Copy table initializers to the composed graph.
        if tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.TABLE_INITIALIZERS):
            raise ValueError('Models with table init ops are not supported.')

        vars_to_restore = _get_variables_to_restore(include=include,
                                                    exclude=exclude)
        saver = tf.compat.v1.train.Saver(vars_to_restore)
        with tf.compat.v1.Session() as sess:
            saver.restore(sess, checkpoint)
            output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
                sess, loaded_graph.as_graph_def(),
                [tensor.op.name for tensor in output_tensors])

    input_map = {tensor.name: tensor for tensor in inputs}
    output_tensors = tf.import_graph_def(
        output_graph_def,
        input_map=input_map,
        return_elements=[tensor.name for tensor in output_tensors])

    if output_single_tensor:
        assert len(output_tensors) == 1
        return output_tensors[0]
    else:
        return output_tensors
Example #36
def create_graph():
    # Creates graph from a pb file
    with tf.gfile.FastGFile(constant.PB_FILE_NAME, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
Example #37
def img_de(IMAGE_NAME,MODEL_NAME):
    # Grab path to current working directory
    CWD_PATH = os.getcwd()

    # Path to frozen detection graph .pb file, which contains the model that is used
    # for object detection.
    PATH_TO_CKPT = os.path.join(MODEL_NAME,'frozen_inference_graph.pb')

    # Path to label map file
    PATH_TO_LABELS = os.path.join(CWD_PATH,'training','object-detection.pbtxt')

    # Path to image
    PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)

    # Number of classes the object detector can identify
    NUM_CLASSES = 5

    # Load the label map.
    # Label maps map indices to category names, so that when our convolution
    # network predicts `5`, we know that this corresponds to `king`.
    # Here we use internal utility functions, but anything that returns a
    # dictionary mapping integers to appropriate string labels would be fine
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    # Load the Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    # Define input and output tensors (i.e. data) for the object detection classifier

    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Output tensors are the detection boxes, scores, and classes
    # Each box represents a part of the image where a particular object was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

    # Number of objects detected
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Load image using OpenCV and
    # expand image dimensions to have shape: [1, None, None, 3]
    # i.e. a single-column array, where each item in the column has the pixel RGB value
    image = cv2.imread(PATH_TO_IMAGE)
    image_expanded = np.expand_dims(image, axis=0)

    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: image_expanded})

    # Draw the results of the detection (aka 'visualize the results')

    vis_util.visualize_boxes_and_labels_on_image_array(
        image,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.60)

    # All the results have been drawn on image. Now display the image.
    cv2.imshow('Object detector', image)

    # Press any key to close the image
    cv2.waitKey(0)

    # Clean up
    cv2.destroyAllWindows()
Example #38
import tensorflow as tf
import utils


with open("../vgg16.tfmodel", mode='rb') as f:
  fileContent = f.read()

graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)

images = tf.placeholder("float", [None, 224, 224, 3])

tf.import_graph_def(graph_def, input_map={ "images": images })
print "graph loaded from disk"

graph = tf.get_default_graph()

cat = utils.load_image("cat.jpg")

with tf.Session() as sess:
  init = tf.initialize_all_variables()
  sess.run(init)
  print "variables initialized"

  batch = cat.reshape((1, 224, 224, 3))
  assert batch.shape == (1, 224, 224, 3)

  feed_dict = { images: batch }

  prob_tensor = graph.get_tensor_by_name("import/prob:0")
  prob = sess.run(prob_tensor, feed_dict=feed_dict)
Example #39
    def init_from_frozen_graphdef(self, config):
        frozen_graph_path = os.path.join(config.model_dir, 'frozen_graph.pb')
        # If the file doesn't exist, create it.
        if not os.path.exists(frozen_graph_path):
            logging.warning(
                'The frozen graph does not exist; using \'init_from_config\' instead '
                'and creating a frozen graph for next use.')
            self.init_from_config(config)
            saver = tf.train.Saver()
            save_dir = '/tmp/graph-{}'.format(os.getpid())
            os.mkdir(save_dir)
            save_path = '{}/ckpt'.format(save_dir)
            saver.save(sess=self.sess, save_path=save_path)

            with tf.Session(graph=tf.Graph()) as sess:
                clear_devices = True
                output_node_names = ['loss_sum', 'predictions']
                # We import the meta graph in the current default Graph
                saver = tf.train.import_meta_graph(save_path + '.meta',
                                                   clear_devices=clear_devices)

                # We restore the weights
                saver.restore(sess, save_path)

                # We use a built-in TF helper to export variables to constants
                output_graph_def = tf.graph_util.convert_variables_to_constants(
                    sess,  # The session is used to retrieve the weights
                    tf.get_default_graph().as_graph_def(
                    ),  # The graph_def is used to retrieve the nodes
                    output_node_names  # The output node names are used to select the useful nodes
                )

                # Finally we serialize and dump the output graph to the filesystem
                with tf.gfile.GFile(frozen_graph_path, "wb") as f:
                    f.write(output_graph_def.SerializeToString())
                    logging.info("%d ops in the final graph." %
                                 len(output_graph_def.node))

                # Remove temp files.
                os.system('rm -rf ' + save_dir)
        else:
            sess_config = tf.ConfigProto()
            sess_config.gpu_options.allow_growth = True
            sess_config.allow_soft_placement = True
            self.sess = tf.Session(config=sess_config)
            self.data_reader = DataReader(config)

            # We load the protobuf file from the disk and parse it to retrieve the
            # unserialized graph_def
            with tf.gfile.GFile(frozen_graph_path, "rb") as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())

            # Import the graph_def into the current default graph.
            tf.import_graph_def(graph_def)
            graph = tf.get_default_graph()
            self.model = AttrDict()

            def collect_placeholders(prefix):
                ret = []
                idx = 0
                while True:
                    try:
                        ret.append(
                            graph.get_tensor_by_name('import/{}_{}:0'.format(
                                prefix, idx)))
                        idx += 1
                    except KeyError:
                        return tuple(ret)

            self.model['src_pls'] = collect_placeholders('src_pl')
            self.model['dst_pls'] = collect_placeholders('dst_pl')
            self.model['predictions'] = graph.get_tensor_by_name(
                'import/predictions:0')
Example #40
def main():
    print("starting program . . .")

    if not checkIfNecessaryPathsAndFilesExist():
        return
    # end if

    # get a list of classifications from the labels file
    classifications = []
    # for each line in the label file . . .
    for currentLine in tf.gfile.GFile(RETRAINED_LABELS_TXT_FILE_LOC):
        # remove the carriage return
        classification = currentLine.rstrip()
        # and append to the list
        classifications.append(classification)
    # end for

    # show the classifications to prove out that we were able to read the label file successfully
    print("classifications = " + str(classifications))

    # load the graph from file
    with tf.gfile.FastGFile(RETRAINED_GRAPH_PB_FILE_LOC, 'rb') as retrainedGraphFile:
        # instantiate a GraphDef object
        graphDef = tf.GraphDef()
        # read in retrained graph into the GraphDef object
        graphDef.ParseFromString(retrainedGraphFile.read())
        # import the graph into the current default Graph, note that we don't need to be concerned with the return value
        _ = tf.import_graph_def(graphDef, name='')
    # end with

    # if the test image directory listed above is not valid, show an error message and bail
    if not os.path.isdir(TEST_IMAGES_DIR):
        print("the test image directory does not seem to be a valid directory, check file / directory paths")
        return
    # end if

    with tf.Session() as sess:
        # for each file in the test images directory . . .
        for fileName in os.listdir(TEST_IMAGES_DIR):
            # if the file does not end in .jpg or .jpeg (case-insensitive), continue with the next iteration of the for loop
            if not (fileName.lower().endswith(".jpg") or fileName.lower().endswith(".jpeg")):
                continue
            # end if

            # show the file name on std out
            print(fileName)

            # get the file name and full path of the current image file
            imageFileWithPath = os.path.join(TEST_IMAGES_DIR, fileName)
            # attempt to open the image with OpenCV
            openCVImage = cv2.imread(imageFileWithPath)

            # if we were not able to successfully open the image, continue with the next iteration of the for loop
            if openCVImage is None:
                print("unable to open " + fileName + " as an OpenCV image")
                continue
            # end if

            # get the final tensor from the graph
            finalTensor = sess.graph.get_tensor_by_name('final_result:0')

            # convert the OpenCV image (numpy array) to a TensorFlow image
            # note: cv2.imread() returns BGR channel order, while the retrained graph
            # expects RGB, so strictly the channels should also be reversed here
            tfImage = np.array(openCVImage)[:, :, 0:3]
            
            # run the network to get the predictions
            predictions = sess.run(finalTensor, {'DecodeJpeg:0': tfImage})

            # sort predictions from most confident to least confident
            sortedPredictions = predictions[0].argsort()[-len(predictions[0]):][::-1]
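
            # a worked example of the descending-sort idiom above:
            # np.array([0.1, 0.7, 0.2]).argsort() -> [0, 2, 1] (ascending);
            # reversed with [::-1] -> [1, 2, 0], i.e. indices from most to
            # least confident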

            print("---------------------------------------")

            # keep track of whether we're on the first pass through the next for loop, so we can show more info
            # about the first prediction, which is the most likely one (they were sorted descending above)
            onMostLikelyPrediction = True
            # for each prediction . . .
            for prediction in sortedPredictions:
                strClassification = classifications[prediction]

                # if the classification (obtained from the directory name) ends with the letter "s", remove the "s" to change from plural to singular
                if strClassification.endswith("s"):
                    strClassification = strClassification[:-1]
                # end if

                # get the confidence (i.e. the score) for this prediction
                confidence = predictions[0][prediction]

                # if we're on the first (most likely) prediction, state what the object appears to be and show a % confidence to two decimal places
                if onMostLikelyPrediction:
                    # get the score as a %
                    scoreAsAPercent = confidence * 100.0
                    # show the result to std out
                    print("the object appears to be a " + strClassification + ", " + "{0:.2f}".format(scoreAsAPercent) + "% confidence")
                    # write the result on the image
                    writeResultOnImage(openCVImage, strClassification + ", " + "{0:.2f}".format(scoreAsAPercent) + "% confidence")
                    # finally we can show the OpenCV image
                    cv2.imshow(fileName, openCVImage)
                    # mark that we've shown the most likely prediction at this point, so the additional
                    # information in this if statement does not show again for this image
                    onMostLikelyPrediction = False
                # end if

                # for any prediction, show the confidence as a ratio to five decimal places
                print(strClassification + " (" +  "{0:.5f}".format(confidence) + ")")
            # end for

            # pause until a key is pressed so the user can see the current image (shown above) and the prediction info
            cv2.waitKey()
            # after a key is pressed, close the current window to prep for the next time around
            cv2.destroyAllWindows()
        # end for
    # end with

    # write the graph to file so we can view with TensorBoard
    tfFileWriter = tf.summary.FileWriter(os.getcwd())
    tfFileWriter.add_graph(sess.graph)
    tfFileWriter.close()
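
    # the saved graph can then be inspected by pointing TensorBoard at the
    # current directory, e.g.:  tensorboard --logdir .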
예제 #41
0
uid_to_human = {}
for uid, line in enumerate(lines):
    line = line.strip('\n')
    uid_to_human[uid] = line


def id_to_string(node_id):
    if node_id not in uid_to_human:
        return ''
    return uid_to_human[node_id]


with tf.gfile.GFile('retrain/output_graph.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')  # the name argument must be supplied
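    # with name='' the imported ops keep their original names; the default
    # would prefix them with 'import/' and 'final_result:0' below would not resolve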

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')

    for root, dirs, files in os.walk('retrain/data/test/'):
        for file in files:
            # read the image file
            image_data = tf.gfile.FastGFile(os.path.join(root, file),
                                            'rb').read()
            predictions = sess.run(softmax_tensor,
                                   {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)

            image_path = os.path.join(root, file)
def adam_attack_cross(dataset, label): 

    LEARNING_RATE = 0.01
    label = [label]
    shape = (16000,)
    
    graphfile = "** VICTIM GRAPH FILE **"  # placeholder left by the author: path to the victim's frozen graph
    global global_mfcc
       
    new_graph = tf.compat.v1.Graph()
    with new_graph.as_default():
    
        #load new tensors
    
        #added_vec = tf.constant(np.zeros((1, 73, 40), dtype = np.float32))
        data_in = tf.compat.v1.placeholder(tf.float32, shape=(16000,))
        data_reshap = tf.reshape(data_in, (16000, 1))
        truth_label = tf.compat.v1.placeholder(tf.float32, shape=(1,12))
        mfc_mod = tf.Variable(np.zeros((1, 98, 40), dtype=np.float32))

        #Load subgraph to produce mfcc 
        with tf.io.gfile.GFile(graphfile, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
            pruned_graph = tf.compat.v1.graph_util.extract_sub_graph(graph_def, ["AudioSpectrogram", "Mfcc/sample_rate", "Mfcc"])
            import1 = tf.import_graph_def(pruned_graph, name='', input_map={"decoded_sample_data":data_reshap})
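
        # the pruned sub-graph computes only the spectrogram/MFCC front-end;
        # input_map feeds the reshaped waveform into its 'decoded_sample_data' input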
            
        pre_mfcc = new_graph.get_tensor_by_name('Mfcc:0')
        #intermediate = tf.concat([added_vec, mfc_mod], 1)
        #attack = pre_mfcc + intermediate
        
        attack= pre_mfcc + mfc_mod
        
        with tf.io.gfile.GFile(graphfile, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
            import2 = tf.import_graph_def(graph_def, name='', input_map={"Mfcc:0":attack})
            
        #print(new_graph.get_operations())
  
        #load existing tensors
        victim_output = new_graph.get_tensor_by_name('labels_softmax:0')
        
        # Does not actually connect to the graph; it would just create a new node leading nowhere.
        #rando2 = sample_data + rando

        #print(victim_output)
        
        reshape_op = new_graph.get_operation_by_name("Reshape_3")      
        # print(reshape_op)
        
        #calculate delta
        mod_sqr = tf.math.square(mfc_mod)
        delta = tf.compat.v1.reduce_sum(mod_sqr)
        
        #calculate loss 
        max_arg = tf.math.argmax(truth_label[0])
        
        # #Mean Squared Error
        #difference = tf.compat.v1.losses.mean_squared_error(truth_label, victim_output)
        #mod_loss = -1 * difference
        
        # #Inverse Max Element 
        difference = truth_label[0][max_arg] - victim_output[0][max_arg]
        mod_loss = 1/difference 
        
        total_loss = delta + 1000 * mod_loss
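
        # in words: keep the perturbation small (delta = ||mfc_mod||^2) while
        # pushing down the victim's confidence in the true label; the factor
        # 1000 weights misclassification against perturbation size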
        
        with open("new_graph_ops.txt", "w+") as file:
              for op in new_graph.get_operations():
                  file.write(str(op))

        #setup optimizer
        optimizer = tf.compat.v1.train.AdamOptimizer(LEARNING_RATE)
        train = optimizer.minimize(total_loss, var_list=mfc_mod)
        
        
        init_op = tf.compat.v1.global_variables_initializer()


        with tf.compat.v1.Session() as sess2:  
            sess2.run([init_op])
            for x in range(10):
                count = 0
                for datum1 in dataset:
                    count = count + 1 
                    pre_mfc, vec, att, loss, softmax, d, ml, _ = sess2.run([pre_mfcc, mfc_mod, attack, total_loss, victim_output, delta, difference, train], {data_in: datum1, truth_label: label})
                #opt = modifier.eval()
                # print("Training set: " + str(x))
                # print("pre_mfcc value is " + str(pre_mfc))
                # print("mfcc vector value is " + str(vec))
                # print("The Softmax is " + str(softmax))
                # print("The attacking array is" + str(att))
                # print("Delta is " + str(d))
                # print("Difference of confidence is " + str(ml))
                # print("Total loss is " + str(loss) + '\n\n')
                    print("This is step " + str(x) + " with count " + str(count) + " and the loss is: " + str(loss))
                
                global_mfcc = pre_mfc
                
                
            #last_loss = total_loss.eval()
            
       #print(global_mfcc)
        return vec, d
예제 #43
0
print("reject_poor_quality_plates: {}".format(reject_poor_quality_plates))

# initialize the model
model = tf.Graph()

# create a context manager that makes this model the default one for
# execution
with model.as_default():
    # initialize the graph definition
    graphDef = tf.GraphDef()

    # load the graph from disk
    with tf.gfile.GFile(conf["model"], "rb") as f:
        serializedGraph = f.read()
        graphDef.ParseFromString(serializedGraph)
        tf.import_graph_def(graphDef, name="")

# load the class labels from disk
labelMap = label_map_util.load_labelmap(conf["labels"])
categories = label_map_util.convert_label_map_to_categories(
    labelMap, max_num_classes=conf["num_classes"], use_display_name=True)
categoryIdx = label_map_util.create_category_index(categories)

# open the logFile, create if it does not exist, otherwise open in append mode
logFilePath = "{}/{}".format(conf["output_image_path"], conf["log_file_name"])
if not os.path.isdir(conf["output_image_path"]):
    os.makedirs(conf["output_image_path"])
# "a" mode appends and also creates the file if it does not exist
logFile = open(logFilePath, "a")
예제 #44
0
    "Size of tiles. Decrease if out of GPU memory. Increase if bad utilization."
)

FLAGS = tf.app.flags.FLAGS

# creating TensorFlow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph,
                             config=tf.ConfigProto(log_device_placement=False))
#graph_def = tf.GraphDef.FromString(open(FLAGS.model).read()) # ORIGINAL
graph_def = tf.GraphDef.FromString(open(FLAGS.model,
                                        "rb").read())  # SPECIFY BINARY
t_input = tf.placeholder(np.float32, name='input')  # define the input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input': t_preprocessed})
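
# input_map rewires the graph's 'input' node to t_preprocessed, so callers feed
# raw images via t_input and the mean subtraction above happens inside the graph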

## This block of code shows us available layers;
#print("--- Available Layers: ---")
#layers = []
#for name in (op.name for op in graph.get_operations()):
#  layer_shape = graph.get_tensor_by_name(name+':0').get_shape()
#  if not layer_shape.ndims: continue
#  layers.append((name, int(layer_shape[-1])))
#  print(name, "Features/Channels: ", int(layer_shape[-1]))
#print('Number of layers', len(layers))
#print('Total number of feature channels:', sum((layer[1] for layer in layers)))
#print('Chosen layer: ')
#print(graph.get_operation_by_name(FLAGS.layer))

예제 #45
0
def run(video_source, path_object_model, path_encoder_model, path_labels,
        min_score_thresh, nms_max_overlap, max_cosine_distance, nn_budget,
        display, time_profile):
    """Run multi-target tracker on a particular sequence.

    Parameters
    ----------
    video_source : str
        Path to the video source to process.
    path_object_model : str
        Path to object recognition model.
    path_encoder_model : str
        Path to encoder model.
    path_labels : str
        Path to object labels.
    min_score_thresh : float
        Detection confidence threshold. Disregard all detections that have
        a confidence lower than this value.
    nms_max_overlap: float
        Maximum detection overlap (non-maxima suppression threshold).
    max_cosine_distance : float
        Gating threshold for cosine distance metric (object appearance).
    nn_budget : Optional[int]
        Maximum size of the appearance descriptor gallery. If None, no budget
        is enforced.
    display : bool
        If True, show visualization of intermediate tracking results.
    time_profile : bool
        If True, show timing information.
    """
    def timeit(method):
        def timed(*args, **kw):
            ts = timer()
            result = method(*args, **kw)
            te = timer()

            if time_profile:
                print('%r %2.3f sec' % (method.__name__, te - ts))
            return result

        return timed

    # Open video stream
    cap = cv2.VideoCapture(video_source)
    frame_count = 0
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Deep SORT stuff
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    if not os.path.exists(path_encoder_model):
        print("%s: No such file or directory" % path_encoder_model)
        sys.exit(1)
    encoder = generate_detections.create_box_encoder(path_encoder_model)

    # Object detection

    # ## Check if model exist otherwise download it
    OBJECT_MODEL_PATH = os.path.join(path_object_model, '')
    OBJECT_MODEL_FILE = os.path.join(OBJECT_MODEL_PATH,
                                     'frozen_inference_graph.pb')

    if not os.path.exists(OBJECT_MODEL_PATH):

        DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

        DOWNLOAD_FILE = str.split(OBJECT_MODEL_PATH, '/')[-2] + '.tar.gz'

        DOWNLOAD_TO = os.path.join(str.split(OBJECT_MODEL_PATH, '/')[0], '')

        print('Model \"%s\" not on disk' % OBJECT_MODEL_PATH)
        print('Download it from %s' % (DOWNLOAD_BASE + DOWNLOAD_FILE))

        opener = urllib.request.URLopener()
        opener.retrieve(os.path.join(DOWNLOAD_BASE, DOWNLOAD_FILE),
                        os.path.join(DOWNLOAD_TO, DOWNLOAD_FILE))

        # Extract the frozen model from the tar file
        print('Extract frozen tensorflow model')
        tar_file = tarfile.open(os.path.join(DOWNLOAD_TO, DOWNLOAD_FILE))
        for file in tar_file.getmembers():
            file_name = os.path.basename(file.name)
            if 'frozen_inference_graph.pb' in file_name:
                tar_file.extract(file, DOWNLOAD_TO)

    # ## Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(OBJECT_MODEL_FILE, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        # NOTE: create the session without a `with` block; it must remain open
        # because object_detection() below calls sess.run() much later
        sess = tf.Session()
        # Get handles to input and output tensors
        # Definite input and output Tensors for detection_graph
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name(
            'detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name(
            'detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name(
            'num_detections:0')

        tensor_list = [
            detection_boxes, detection_scores, detection_classes,
            num_detections
        ]

    # ## Loading label map
    # Label maps map indices to category names, so that when our convolution
    # network predicts `5`, we know that this corresponds to `airplane`.
    # Here we use internal utility functions, but anything that returns a
    # dictionary mapping integers to appropriate string labels would be fine
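    # a minimal hand-rolled equivalent of the utility calls below (the entries
    # shown here are illustrative):
    # category_index = {1: {'id': 1, 'name': 'person'}, 3: {'id': 3, 'name': 'car'}}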
    if not os.path.exists(path_labels):
        print("%s: No such file or directory" % path_labels)
        sys.exit(1)

    label_map = label_map_util.load_labelmap(path_labels)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=90, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    #
    # ## Select some category to display
    # 1 : person
    # 2 : bicycle
    # 3 : car
    # 4 : motorcycle
    # 6 : bus
    # 8 : truck
    #idx_to_keep = [1,2,3,4,6,8]
    #category_index = { i: category_index[i] for i in idx_to_keep}

    # end of initialization

    # # Detection
    @timeit
    def object_detection(image, graph):

        (boxes, scores, classes,
         num) = sess.run(tensor_list,
                         feed_dict={image_tensor: np.expand_dims(image, 0)})

        mask = scores > min_score_thresh
        classes = classes[mask]
        boxes = boxes[mask]
        scores = scores[mask]

        return (classes, boxes, scores)

    @timeit
    def extract_features(image, boxes):

        image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
        im_width, im_height = image_pil.size
        detections = []

        for box in boxes:

            ymin, xmin, ymax, xmax = box
            (left, right, bottom, top) = (xmin * im_width, xmax * im_width,
                                          ymin * im_height, ymax * im_height)

            detections.append(
                np.array([left, bottom, right - left, top - bottom]))
            #scores.append(score)

        detections = np.array(detections)

        features = encoder(image, detections)

        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(detections, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])

        scores = np.array([d.confidence for d in detections])

        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)

        detections = [detections[i] for i in indices]

        return detections

    @timeit
    def tracking(detections):
        tracker.predict()
        tracker.update(detections)
        return tracker

    @timeit
    def frame_callback():
        ret, frame_np = cap.read()

        # Resize frame of video to 1/4 size for faster processing
        #frame_np = cv2.resize(frame_np, (0, 0), fx=0.25, fy=0.25)

        # Skip bad read frames
        if not ret:
            return

        # Do things here
        if time_profile:
            t_obj_start = timer()

        # Actual detection.
        tf_classes, tf_boxes, tf_scores = object_detection(
            frame_np, detection_graph)

        # Do things here
        if time_profile:
            t_obj_stop = timer()
            t_feat_start = timer()

        detections = extract_features(frame_np, tf_boxes)

        # Update tracker.
        tracker = tracking(detections)

        for track, tf_class, tf_score in zip(tracker.tracks, tf_classes,
                                             tf_scores):

            bbox = track.to_tlbr()

            if display:

                h, w, _ = frame_np.shape
                thick = int((h + w) / 300.)

                cv2.rectangle(
                    frame_np, (int(bbox[0]), int(bbox[1])),
                    (int(bbox[2]), int(bbox[3])),
                    visualization.create_unique_color_uchar(track.track_id,
                                                            hue_step=0.41),
                    thick)
                #(255,255,255), thick)

                cv2.putText(
                    frame_np,
                    str('id: %i, class: %s, score: %.2f' %
                        (track.track_id, category_index[tf_class]['name'],
                         tf_score)), (int(bbox[0]), int(bbox[1]) - 12), 0,
                    1e-3 * h, (255, 0, 0), int(thick / 3))

                cv2.imshow('object detection',
                           cv2.resize(frame_np, (800, 450)))
                #cv2.imshow('object detection', frame_np)

    while True:

        print('Frame %i, %s' % (frame_count, datetime.now()))

        frame_callback()

        frame_count += 1

        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
예제 #46
0
def test_model(dataset, paths, device):
    """The main function for executing network testing. It loads the specified
       dataset iterator and optimized saliency model. By default, when no model
       checkpoint is found locally, the pretrained weights will be downloaded.
       Testing only works for models trained on the same device as specified in
       the config file.

    Args:
        dataset (str): Denotes the dataset that was used during training.
        paths (dict): A dictionary with all path elements.
        device (str): Represents either "cpu" or "gpu".
    """

    iterator = data.get_dataset_iterator("test", dataset, paths["data"])

    next_element, init_op = iterator

    input_images, original_shape, file_path = next_element

    graph_def = tf.GraphDef()

    model_name = "model_%s_%s.pb" % (dataset, device)

    if os.path.isfile(paths["best"] + model_name):
        with tf.gfile.Open(paths["best"] + model_name, "rb") as file:
            graph_def.ParseFromString(file.read())
    else:
        if not os.path.isfile(paths["weights"] + model_name):
            download.download_pretrained_weights(paths["weights"],
                                                 model_name[:-3])

        with tf.gfile.Open(paths["weights"] + model_name, "rb") as file:
            graph_def.ParseFromString(file.read())

    [predicted_maps] = tf.import_graph_def(graph_def,
                                           input_map={"input": input_images},
                                           return_elements=["output:0"])

    jpeg = data.postprocess_saliency_map(predicted_maps[0], original_shape[0])

    print(">> Start testing with %s %s model..." % (dataset.upper(), device))

    with tf.Session() as sess:
        sess.run(init_op)

        while True:
            try:
                output_file, path = sess.run([jpeg, file_path])
            except tf.errors.OutOfRangeError:
                break

            path = path[0][0].decode("utf-8")

            filename = os.path.basename(path)
            filename = os.path.splitext(filename)[0]
            filename += ".jpeg"

            os.makedirs(paths["images"], exist_ok=True)

            with open(paths["images"] + filename, "wb") as file:
                file.write(output_file)
예제 #47
0
pb_file_path = './mobilenet_multiregression.pb'

# input image dimensions
img_rows, img_cols = 224, 224

img_path = 'dog.jpg'
img = cv2.imread(img_path)
img = cv2.resize(img, (224, 224))
x = np.expand_dims(img, axis=0)

with tf.Graph().as_default():
    output_graph_def = tf.GraphDef()

    with open(pb_file_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        op = sess.graph.get_operations()
        tensor_name = [m.values() for m in op]
        print(tensor_name)

        input_x = sess.graph.get_tensor_by_name("input_1:0")
        print(input_x)
        out_score0 = sess.graph.get_tensor_by_name("output_node0:0")
        out_score11 = sess.graph.get_tensor_by_name("output_node11:0")
        # print out_score
예제 #48
0
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    if not FLAGS.frozen_pb:
        raise ValueError('You must supply the frozen pb with --frozen_pb')
    if not FLAGS.output_node_name:
        raise ValueError(
            'You must supply the output node name with --output_node_name')
    if not FLAGS.output_dir:
        raise ValueError(
            'You must supply the output directory with --output_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    tfrecords = prepare_tfrecords(FLAGS.dataset_name, FLAGS.dataset_dir,
                                  FLAGS.dataset_split_name)

    if FLAGS.max_num_batches:
        num_batches = FLAGS.max_num_batches
    else:
        num_records = sum(
            [len(list(tf.python_io.tf_record_iterator(r))) for r in tfrecords])
        num_batches = int(math.ceil(num_records / float(FLAGS.batch_size)))

    tf.logging.info('Load GraphDef from frozen_pb {}'.format(FLAGS.frozen_pb))
    graph_def = load_graph_def(FLAGS.frozen_pb)

    tf.logging.info('Quantize Graph')
    with tf.Session() as sess:
        tf.import_graph_def(graph_def, name='')
        quantized_graph = qg.create_training_graph(sess.graph)
        quantized_inf_graph = qg.create_eval_graph(sess.graph)
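        # assuming `qg` is tf.contrib.quantize (as the create_training_graph /
        # create_eval_graph names suggest): these calls rewrite the graph with
        # fake-quantization nodes to simulate quantized inference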

    # Initialize `iterator` with training data.
    with tf.Session(graph=quantized_graph) as sess:
        tf.logging.info('Prepare dataset')
        with tf.name_scope("dataset"):
            filenames = tf.placeholder(tf.string, shape=[None])
            dataset = prepare_dataset(filenames,
                                      FLAGS.dataset_name,
                                      FLAGS.input_size,
                                      batch_size=FLAGS.batch_size)
            iterator = dataset.make_initializable_iterator()
            next_batch = iterator.get_next()

        tf.logging.info('Prepare metrics')
        lbls, preds, accuracy, acc_update_op = prepare_metrics(
            FLAGS.dataset_name)

        tf.logging.info('Prepare Saver')
        saver = tf.train.Saver()

        if FLAGS.summary_dir:
            tf.logging.info('Prepare summary writer')
            summary_writer = tf.summary.FileWriter(FLAGS.summary_dir)

        # initialize
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(iterator.initializer, feed_dict={filenames: tfrecords})

        graph = sess.graph

        # get x and y
        x = graph.get_tensor_by_name('{}:0'.format(FLAGS.input_node_name))
        y = graph.get_tensor_by_name('{}:0'.format(FLAGS.output_node_name))

        # summary all min/max variables
        # print(graph.get_collection('variables')[3].eval())
        for var in graph.get_collection('variables'):
            # summary names may not contain ':', so strip the tensor suffix
            tf.summary.scalar(var.name.replace(':', '_'), var)
        summaries = tf.summary.merge_all()

        for step in range(num_batches):
            images, labels = sess.run(next_batch)
            ys = sess.run(y, feed_dict={x: images})
            sess.run(acc_update_op, feed_dict={lbls: labels, preds: ys})
            summary = sess.run(summaries)
            if FLAGS.summary_dir:
                summary_writer.add_summary(summary, step)

        print('Accuracy: [{:.4f}]'.format(sess.run(accuracy)))
        if FLAGS.summary_dir:
            summary_writer.add_graph(graph)

        # save graph and ckpts
        saver.save(sess, os.path.join(FLAGS.output_dir, "model.ckpt"))
        # tf.train.write_graph(graph, FLAGS.output_dir, 'quantor.pb', as_text=False)
        tf.train.write_graph(quantized_inf_graph,
                             FLAGS.output_dir,
                             'quantor.pb',
                             as_text=False)
예제 #49
0
image_path = sys.argv[1]

# Read the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()

# Loads label file, strips off carriage return
label_lines = [
    line.rstrip() for line in tf.gfile.GFile("logs/output_labels.txt")
]

# Unpersists graph from file
with tf.gfile.FastGFile("logs/output_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    _ = tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    # Feed the image_data as input to the graph and get first prediction
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')

    predictions = sess.run(softmax_tensor, \
             {'DecodeJpeg/contents:0': image_data})

    # Sort to show labels of first prediction in order of confidence
    top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]

    for node_id in top_k:
        human_string = label_lines[node_id]
        score = predictions[0][node_id]
        print('%s (score = %.5f)' % (human_string, score))
예제 #50
0
def load_graph(filename):
    """Unpersists graph from file as default graph."""
    with tf.gfile.FastGFile(filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
def RunObjectRecognitionModel():
    pathToCheck = os.path.join(os.getcwd(), Model().ModelName, Model().Graph)

    if not os.path.exists(pathToCheck):
        print("Downloading Model...please wait...")
        Model().Download()

    GraphPath = Model().GetCKPT_Path()

    detection_graph = tf.Graph()

    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(GraphPath, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    LabelsPath = Model().GetLabelsPath()
    label_map = label_map_util.load_labelmap(LabelsPath)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    TEST_IMAGE_PATHS = _LoadImages()

    with detection_graph.as_default():
        # NOTE: create the session without a `with` block; sess.run() is
        # called further below, after this block would have closed it
        sess = tf.Session(graph=detection_graph)
        # Definite input and output Tensors for detection_graph
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name(
            'detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name(
            'detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        return_dict = {}
        # reuse the image list loaded above instead of calling _LoadImages() again
        for image_n, image_path in enumerate(TEST_IMAGE_PATHS):
            image = Image.open(image_path)
            # the array based representation of the image will be used later in
            # order to prepare the result image with boxes and labels on it.
            image_np = load_image_into_numpy_array(image)
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes,
             num) = sess.run([
                 detection_boxes, detection_scores, detection_classes,
                 num_detections
             ],
                             feed_dict={image_tensor: image_np_expanded})

            img_number, avg_score, angle = FindDetectedObjects(
                category_index, np.squeeze(boxes), np.squeeze(classes),
                np.squeeze(scores), image_path, 'person')

            # `avg_score > 0.0 or avg_score != 0` reduces to this single check
            if avg_score != 0.0:
                return_dict[img_number] = {"Score": avg_score, "Angle": angle}
            # Visualization of the results of a detection.
            # vis_util.visualize_boxes_and_labels_on_image_array(
            #     image_np,
            #     np.squeeze(boxes),
            #     np.squeeze(classes).astype(np.int32),
            #     np.squeeze(scores),
            #     category_index,
            #     use_normalized_coordinates=True,
            #     line_thickness=4)
            # plt.imshow(image_np)
            # plt.show()

        print(return_dict)
        return return_dict
예제 #52
0
def models_yuce(con):
    detection_graph = tf.Graph()  # load the object-localization model
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile('frozen_inference_graph.pb', 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    c1, c2 = con
    c1.close()  # the main process sends data on conn1 and the child must receive on conn2, so close conn1 here; leaving it open would block the program
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    with detection_graph.as_default():
        with tf.Session(config=config) as sess:
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {
                output.name
                for op in ops for output in op.outputs
            }
            tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph(
                    ).get_tensor_by_name(tensor_name)
            image_tensor = tf.get_default_graph().get_tensor_by_name(
                'image_tensor:0')
            while True:
                try:  # exception handling: exit when an exception occurs
                    tf_list = c2.recv()
                    value = 0  # 0 = left, 1 = right
                    plate_dict = False
                    for image_np_expanded in tf_list[0]:  # run each image of a violation group one at a time
                        # print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()))
                        output_dict = sess.run(
                            tensor_dict,
                            feed_dict={image_tensor: image_np_expanded})
                        # print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()))
                        output_dict['num_detections'] = int(
                            output_dict['num_detections'][0])
                        output_dict['detection_classes'] = output_dict[
                            'detection_classes'][0].astype(np.uint8)
                        output_dict['detection_boxes'] = output_dict[
                            'detection_boxes'][0]
                        output_dict['detection_scores'] = output_dict[
                            'detection_scores'][0]
                        image2 = Image.fromarray(tf_list[1][value])

                        car_list, plate_list, carwei_list = [], [], []
                        car_list_scores, carwei_list_scores = [], []
                        # collect the first car and the first license plate of each image, if present
                        for i in range(output_dict['num_detections']):
                            if output_dict['detection_scores'][i] < 0.5:
                                break
                            if output_dict['detection_classes'][
                                    i] == 2 and not plate_list:
                                plate_list.append(
                                    output_dict['detection_boxes'][i])
                            if output_dict['detection_classes'][i] == 1:
                                car_list.append(
                                    np.append(
                                        output_dict['detection_boxes'][i],
                                        np.ones(1)))
                                car_list_scores.append(
                                    output_dict['detection_scores'][i])
                            if output_dict['detection_classes'][i] == 3:
                                carwei_list.append(
                                    np.append(
                                        output_dict['detection_boxes'][i],
                                        np.zeros(1)))
                                carwei_list_scores.append(
                                    output_dict['detection_scores'][i])
                        if car_list or carwei_list:
                            # print('1 car_list', car_list)
                            # print('1 carwei_list', carwei_list)
                            if car_list and carwei_list:
                                # print(car_list_scores, carwei_list_scores)
                                car_num = np.r_[car_list, carwei_list]
                                car_scores = np.append(car_list_scores,
                                                       carwei_list_scores)
                            elif not carwei_list:
                                car_num = np.array(car_list)
                                car_scores = np.array(car_list_scores)
                            else:
                                car_num = np.array(carwei_list)
                                car_scores = np.array(carwei_list_scores)
                            car_list, carwei_list = [], []
                            b = np.array([
                                image2.size[1], image2.size[0], image2.size[1],
                                image2.size[0], 1.
                            ],
                                         dtype=float)
                            car_num = car_num * b
                            b = py_cpu_nms(car_num, car_scores)
                            num = 0
                            # print(b)
                            for i in b:
                                if car_num[i][-1]:
                                    car_list.append(car_num[i])
                                else:
                                    carwei_list.append(car_num[i])
                                num += 1
                                if num == 2:
                                    break
                            # print('2 car_list', car_list)
                            # print('2 carwei_list', carwei_list)
                        if carwei_list and not car_list:  # only the car rear is visible
                            with open('result_chaosu.txt', 'a') as f:
                                f.write('{}#1#E0607 \n'.format(
                                    tf_list[2].split(".")[0]))  # car rear: code 607
                            save_img(tf_list[3], tf_list[2],
                                     "{}.jpg".format(tf_list[2].split(".")[0]),
                                     tf_list[4][1])
                            plate_dict = False
                            break
                        elif not car_list:
                            with open('result_chaosu.txt', 'a') as f:
                                if not value:
                                    f.write('{}#1#E0604 \n'.format(
                                        tf_list[2].split(".")[0]))  # incomplete car body: code 604
                                else:
                                    f.write('{}#1#E0606 \n'.format(
                                        tf_list[2].split(".")[0]))  # incomplete car body: code 606
                            save_img(tf_list[3], tf_list[2],
                                     "{}.jpg".format(tf_list[2].split(".")[0]),
                                     tf_list[4][1])
                            plate_dict = False
                            break
                        elif not plate_list:
                            with open('result_chaosu.txt', 'a') as f:
                                if not value:
                                    f.write('{}#1#E0603 \n'.format(
                                        tf_list[2].split(".")[0]))  # incomplete car body: code 603
                                else:
                                    f.write('{}#1#E0605 \n'.format(
                                        tf_list[2].split(".")[0]))  # incomplete car body: code 605
                            save_img(tf_list[3], tf_list[2],
                                     "{}.jpg".format(tf_list[2].split(".")[0]),
                                     tf_list[4][1])
                            plate_dict = False
                            break
                        elif len(car_list) + len(carwei_list) == 2:
                            if car_list and carwei_list:
                                car_num = np.r_[car_list, carwei_list]
                            elif not carwei_list:
                                car_num = np.array(car_list)
                            else:
                                car_num = np.array(carwei_list)
                            car_1 = (car_num[0][2] -
                                     car_num[0][0]) / 2 + car_num[0][0]
                            car_2 = (car_num[1][2] -
                                     car_num[1][0]) / 2 + car_num[1][0]
                            if abs(car_1 - car_2) < 200:
                                with open('result_chaosu.txt', 'a') as f:
                                    f.write('{}#1#E0602 \n'.format(
                                        tf_list[2].split(".")[0]))  # cars side by side: code 602
                                save_img(
                                    tf_list[3], tf_list[2],
                                    "{}.jpg".format(tf_list[2].split(".")[0]),
                                    tf_list[4][1])
                                plate_dict = False
                                break

                        plate_dict = True
                        value += 1
                    if plate_dict:
                        with open('result_chaosu.txt', 'a') as f:
                            f.write('{}#0#0 \n'.format(
                                tf_list[2].split(".")[0]))
                        save_img(tf_list[3], tf_list[2],
                                 "{}.jpg".format(tf_list[2].split(".")[0]),
                                 tf_list[4][0])
                except EOFError:
                    break
예제 #53
0
def main(_):
    # read all the images
    image_lists = create_image_lists(VALIDATION_PERCENTAGE, TEST_PERCENTAGE)
    n_classes = len(image_lists.keys())

    # load the trained inception-v3 model
    with gfile.FastGFile(os.path.join(MODEL_DIR, MODEL_FILE), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # import the inception-v3 model and get back the data-input tensor and the bottleneck-layer output tensor
    bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(
        graph_def,
        return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME])

    # define the new network input
    bottleneck_input = tf.placeholder(
        tf.float32, [None, BOTTLENECK_TENSOR_SIZE],
        name='BottleneckInputPlaceholder')

    # define the new ground-truth input
    ground_truth_input = tf.placeholder(
        tf.float32, [None, n_classes], name='GroundTruthInput')

    # add one fully connected layer to solve the new image-classification problem
    with tf.name_scope('final_training_ops'):
        weights = tf.Variable(
            tf.truncated_normal(
                [BOTTLENECK_TENSOR_SIZE, n_classes], stddev=0.1))
        biases = tf.Variable(tf.zeros([n_classes]))
        logits = tf.matmul(bottleneck_input, weights) + biases
        final_tensor = tf.nn.softmax(logits)

    # define the cross-entropy loss
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=ground_truth_input)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
        cross_entropy_mean)

    # compute the accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(
            tf.argmax(final_tensor, 1), tf.argmax(ground_truth_input, 1))
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # training loop
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(STEPS):
            # fetch one batch of training data each step
            train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(
                sess, n_classes, image_lists, BATCH, 'training',
                jpeg_data_tensor, bottleneck_tensor)
            sess.run(
                train_step,
                feed_dict={
                    bottleneck_input: train_bottlenecks,
                    ground_truth_input: train_ground_truth
                })

            # test the accuracy on the validation set
            if i % 100 == 0 or i + 1 == STEPS:
                validation_bottlenecks, validation_ground_truth = get_random_cached_bottlenecks(
                    sess, n_classes, image_lists, BATCH, 'validation',
                    jpeg_data_tensor, bottleneck_tensor)
                validation_accuracy = sess.run(
                    evaluation_step,
                    feed_dict={
                        bottleneck_input: validation_bottlenecks,
                        ground_truth_input: validation_ground_truth
                    })
                print(
                    'Step %d : Validation accuracy on random sampled %d examples = %.1f%%'
                    % (i, BATCH, validation_accuracy * 100))

        # finally, test the accuracy on the test set
        test_bottlenecks, test_ground_truth = get_test_bottlenecks(
            sess, image_lists, n_classes, jpeg_data_tensor, bottleneck_tensor)
        test_accuracy = sess.run(
            evaluation_step,
            feed_dict={
                bottleneck_input: test_bottlenecks,
                ground_truth_input: test_ground_truth
            })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
    tf.reset_default_graph()

    # Config loader
    config = tf.ConfigProto()
    #jit_level = tf.OptimizerOptions.ON_1
    jit_level = 0
    config.graph_options.optimizer_options.global_jit_level = jit_level

    with tf.Session(graph=tf.Graph(), config=config) as sess:

        # Load graph
        gd = tf.GraphDef()
        with tf.gfile.Open(FRONZEN_GRAPH_FILE, 'rb') as f:
            data = f.read()
            gd.ParseFromString(data)
        tf.import_graph_def(gd, name='')

        #print(len(sess.graph.get_operations()))

        # Read in tensors
        keep_prob = sess.graph.get_tensor_by_name('keep_prob:0')
        image_pl = sess.graph.get_tensor_by_name('image_input:0')
        logits = sess.graph.get_tensor_by_name('Reshape:0')

        # Get video
        vid_name = args.file.split('.')[0]
        output = vid_name + "_out.mp4"
        clip = VideoFileClip(args.file)
        if args.length is not None:
            clip = clip.subclip(0, args.length)
        orig_fps = clip.fps
예제 #55
0
File: file.py  Project: subtitlegerms/drmly
def deep_dream(model, output_path, input_image=noise):
    # take input args chosen for parameters
    iter_num = int(args.iterations)
    octave_num = int(args.octaves)
    layer = str(args.layer)
    octave_scale = float(args.ocscale)
    print("layer is : ", layer)
    """implement of deep dream"""
    # define graph
    graph = tf.Graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(graph=graph, config=config)

    # load model
    with tf.gfile.FastGFile(model, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # define input
    X = tf.placeholder(tf.float32, name="input")
    X2 = tf.expand_dims(X - imagenet_mean, 0)
    tf.import_graph_def(graph_def, {"input": X2})

    print("\n", iter_num, "\n")

    count = 1
    # count the frames already written to the output directory
    output_count = len([
        name for name in os.listdir(args.input + '/output')
        if os.path.isfile(os.path.join(args.input, 'output', name))
    ])
    count = output_count + 1
    rl = str(args.randomlayer)
    rle = int(args.randomlayerevery)
    print("what is args.randomlayer: " + rl, "done every ", str(rle), "frames")
    file_list = sorted(glob.iglob(args.input + '/*.png'), key=numericalSort)
    for image_file in file_list:
        if count < output_count:  # hack to finish a partially completed job
            count += 1
            continue
        if rle != 0 and count % rle == 0:  # only randomize every n frames
            print("random layer in ", str(rl), " frames")
            if "nc" in rl:
                layer = random.choice(no_conv)
                print("layer is noconv", layer)
            elif "random" in rl:
                layer = random.choice(layer_names)
                print("layer is rl ", layer)
            elif "c" in rl:
                layer = random.choice(layer_conv)
                print("random layer choice is ", layer)
        # L2 and gradient
        channel = int(args.channel)

        def T(layer):
            '''Helper for getting layer output tensor'''
            return graph.get_tensor_by_name("import/%s:0" % layer)

        t_obj = T(layer)[:, :, :, channel]
        loss = tf.reduce_mean(t_obj)
        gradient = tf.gradients(loss, X)[0]
        # increase iterations this run if doing linear increase
        if int(args.linear) > 0 and iter_num < 500:
            iter_num += int(args.linear)
            print("increase iter_num to ", iter_num)
        iw = int(args.itwaver)
        r = bool(random.getrandbits(1))
        if iw > 0:
            if iter_num > iw:
                if r and iter_num < 500:
                    iter_num += iw
                else:
                    iter_num -= iw
            else:
                if not r:
                    iter_num += iw
        # ow = int(args.ocwaver)
        # r = bool(random.getrandbits(1))
        # if ow > 0:
        #     if octave_num > ow:
        #         if r and octave_num < 8:
        #                 octave_num += ow
        #         else:
        #             octave_num -=ow
        #     else:
        #         if not r:
        #             octave_num += ow

        image = np.float32(cv2.imread(image_file))
        octaves = []
        output_path = args.input + "/output/" + str(count) + ".png"
        print(output_path)
        count += 1

        # tranforming TF function

        def tffunc(*argtypes):
            placeholders = list(map(tf.placeholder, argtypes))

            def wrap(f):
                out = f(*placeholders)

                def wrapper(*args, **kw):
                    return out.eval(dict(zip(placeholders, args)),
                                    session=kw.get('session'))

                return wrapper

            return wrap
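
        # tffunc turns a graph-building function into one that runs on numpy
        # arrays: it creates a placeholder per argtype, traces f once, and the
        # returned wrapper feeds the arrays and eval()s the resulting tensor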

        def resize(image, size):
            """resize image in nparray"""
            image = tf.expand_dims(image, 0)
            print("called resize with channel: ", args.channel)
            return tf.image.resize_bilinear(image, size)[0, :, :, :]

        resize = tffunc(np.float32, np.int32)(resize)

        for i in range(octave_num - 1):
            size = np.shape(image)[:2]
            narrow_size = np.int32(np.float32(size) / octave_scale)
            # down-sampling then up-sampling acts as a smoothing filter; the diff preserves the detail that would otherwise be lost
            down = resize(image, narrow_size)
            diff = image - resize(down, size)
            image = down
            octaves.append(diff)

        def cal_gradient(image, gradient):
            """cal gradient"""
            # generate offset and shift to smooth tile edge
            shift_x, shift_y = np.random.randint(tile_size, size=2)
            image_shift = np.roll(np.roll(image, shift_x, 1), shift_y, 0)
            total_gradient = np.zeros_like(image)
            # calculate gradient for each region
            for y in range(0, max(image.shape[0] - tile_size // 2, tile_size),
                           tile_size):
                for x in range(0,
                               max(image.shape[1] - tile_size // 2, tile_size),
                               tile_size):
                    region = image_shift[y:y + tile_size, x:x + tile_size]
                    total_gradient[y:y + tile_size,
                                   x:x + tile_size] = sess.run(
                                       gradient, {X: region})
            return np.roll(np.roll(total_gradient, -shift_x, 1), -shift_y, 0)

        for i in range(octave_num):
            print("octave num %s/%s..." % (i + 1, octave_num))
            if i > 0:
                # restore image except original image
                diff = octaves[-i]
                image = resize(image, diff.shape[:2]) + diff
            for j in range(iter_num):
                # gradient ascent
                g_ = cal_gradient(image, gradient)
                # large learning rate for small g_
                image += g_ * (learning_rate / (np.abs(g_).mean() + 1e-7))
        cv2.imwrite(output_path, image)
    # close the session only after all frames are processed; closing it inside
    # the loop would break the second iteration
    sess.close()
예제 #56
0
    def __init__(self, graph_def, labels):
        super(TFObjectDetection, self).__init__(labels)
        self.graph = tf.compat.v1.Graph()
        with self.graph.as_default():
            input_data = tf.compat.v1.placeholder(tf.float32, [1, None, None, 3], name='Placeholder')
            tf.import_graph_def(graph_def, input_map={"Placeholder:0": input_data}, name="")
    def run(self):
        self.mprint('start--------')
        self.mprint('net model path: {}'.format(self.netModelPath))

        ckpt_path = os.path.join(self.netModelPath, 'frozen_inference_graph.pb')

        detection_graph = tf.Graph()
        sess = tf.Session(graph=detection_graph)

        with detection_graph.as_default():
            # initial
            self.mprint('initial graph')
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(ckpt_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            self.mprint('load ckpt done')
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

            with tf.Session() as sess:
                # call loop--------------------------
                self.mprint('graph initial done, enter call loop')
                while (self.isRun):
                    images_list = self.inQueue.get()  # block
                    if images_list == END_FLAG:
                        break

                    # prepare numpy.array images batch
                    num_imgs = len(images_list)
                    image_batch_np = np.zeros((num_imgs, input_image_rows, input_image_cols, 3))
                    t0 = time.time()
                    for k in range(num_imgs):
                        image_batch_np[k] = cv.resize(images_list[k],
                                                      (input_image_cols, input_image_rows),
                                                      interpolation=cv.INTER_AREA)[:, :, ::-1]
                    t1 = time.time()
                    self.mprint('resize images time cost {}s'.format(t1 - t0))

                    # Actual detection.
                    time_start = time.time()
                    output_dict = run_inference_for_image_batch(image_batch_np, sess, tensor_dict, image_tensor)
                    time_end = time.time()
                    self.mprint('inference image batch time cost: {}s'.format(time_end - time_start))

                    # boxes_batch[k] is [[xmin,ymin,xmax,ymax,class_id,probability], [...],...]
                    boxes_batch = []
                    for idx_batch in range(num_imgs):
                        boxes = []
                        im_rows = images_list[idx_batch].shape[0]
                        im_cols = images_list[idx_batch].shape[1]
                        for i in range(output_dict['detection_boxes'][idx_batch].shape[0]):
                            if output_dict['detection_scores'][idx_batch] is None \
                                    or output_dict['detection_scores'][idx_batch][i] > score_thresh:
                                box = output_dict['detection_boxes'][idx_batch][i].tolist()
                                (box[0], box[2], box[1], box[3]) = (int(box[1] * im_cols), int(box[3] * im_cols),
                                                                    int(box[0] * im_rows), int(box[2] * im_rows))
                                box.append(
                                    int(round(output_dict['detection_classes'][idx_batch][i])))  # box[4]: class id
                                box.append(output_dict['detection_scores'][idx_batch][i])  # box[5]: probability
                                boxes.append(box)
                        boxes_batch.append(boxes)
                    self.mprint('result: {}'.format(boxes_batch))
                    self.outQueue.put(boxes_batch)
                # call loop end--------------------------

        self.mprint('over------')
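
For context, a minimal driver for this worker could look like the sketch below. It is illustration only: the Detector class name and its constructor are assumptions, while inQueue, outQueue, and END_FLAG come from the snippet above.

# Hypothetical usage sketch -- only inQueue/outQueue/END_FLAG appear in the
# snippet above; the Detector name and constructor are assumptions.
import cv2 as cv
from multiprocessing import Queue

in_q, out_q = Queue(), Queue()
worker = Detector(inQueue=in_q, outQueue=out_q)  # hypothetical constructor
worker.start()

frame = cv.imread('test.jpg')   # BGR image, as the worker expects
in_q.put([frame])               # submit a batch of one image
boxes_batch = out_q.get()       # [[xmin, ymin, xmax, ymax, class_id, probability], ...] per image
print(boxes_batch[0])

in_q.put(END_FLAG)              # terminate the worker's call loop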
    #dictionaries_path = '../build/26_NN_dictionaries'
    #model_name =
    #output_path = '../build/37_predicted_f0_steps'

    #dictionary_title1 = 'sign_labels_dictionary'
    #dictionary_title2 = 'magn_labels_dictionary'

    # Load tensorflow model
    with tf.gfile.GFile(model_name, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    with tf.Graph().as_default() as graph:
        # name=None applies the default 'import/' prefix used by the lookups below
        tf.import_graph_def(graph_def, name=None)

    # Load tensorflow variables
    WV = graph.get_tensor_by_name('import/WV:0')
    X2 = graph.get_tensor_by_name('import/X2:0')
    keep_prob1 = graph.get_tensor_by_name('import/keep_prob1:0')
    n_frames1 = graph.get_tensor_by_name('import/n_frames1:0')
    seq_len1 = graph.get_tensor_by_name('import/seq_len1:0')
    BiRNN = graph.get_tensor_by_name('import/BiRNN:0')
    cutoff = graph.get_tensor_by_name('import/cutoff:0')
    X4 = graph.get_tensor_by_name('import/X4:0')
    X5 = graph.get_tensor_by_name('import/X5:0')
    n_frames2 = graph.get_tensor_by_name('import/n_frames2:0')
    Y2_ = graph.get_tensor_by_name('import/Y2_:0')
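
Feeding these handles is not shown in the snippet; a hedged sketch under assumed input shapes (the feature dimension, frame count, and all feed values are invented for illustration) might look like:

# Sketch only: the tensor handles come from the snippet above; every feed
# value and shape below is an assumption.
import numpy as np
import tensorflow as tf

with tf.Session(graph=graph) as sess:
    birnn_out = sess.run(BiRNN, feed_dict={
        X2: np.zeros((1, 100, 40), dtype=np.float32),  # assumed (batch, frames, features)
        keep_prob1: 1.0,                               # no dropout at inference
        n_frames1: 100,                                # assumed scalar frame count
        seq_len1: [100],                               # assumed per-example lengths
    })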
Example #59
0
def freeze_graph_with_def_protos(input_graph_def,
                                 input_saver_def,
                                 input_checkpoint,
                                 output_node_names,
                                 restore_op_name,
                                 filename_tensor_name,
                                 clear_devices,
                                 initializer_nodes,
                                 optimize_graph=True,
                                 variable_names_blacklist=''):
    """Converts all variables in a graph and checkpoint into constants."""
    del restore_op_name, filename_tensor_name  # Unused by updated loading code.

    # 'input_checkpoint' may be a prefix if we're using Saver V2 format
    if not saver_lib.checkpoint_exists(input_checkpoint):
        raise ValueError('Input checkpoint "' + input_checkpoint +
                         '" does not exist!')

    if not output_node_names:
        raise ValueError(
            'You must supply the name of a node to --output_node_names.')

    # Remove all the explicit device specifications for this node. This helps to
    # make the graph more portable.
    if clear_devices:
        for node in input_graph_def.node:
            node.device = ''

    with tf.Graph().as_default():
        tf.import_graph_def(input_graph_def, name='')

        if optimize_graph:
            logging.info('Graph Rewriter optimizations enabled')
            # Compare parsed version components: a plain string comparison
            # would mis-order versions such as '1.10.0' < '1.5.0'.
            if tuple(int(v) for v in tf.__version__.split('.')[:2]) >= (1, 5):
                rewrite_options = rewriter_config_pb2.RewriterConfig(
                    layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
            else:
                rewrite_options = rewriter_config_pb2.RewriterConfig(
                    optimize_tensor_layout=True)
            rewrite_options.optimizers.append('pruning')
            rewrite_options.optimizers.append('constfold')
            rewrite_options.optimizers.append('layout')
            graph_options = tf.GraphOptions(rewrite_options=rewrite_options,
                                            infer_shapes=True)
        else:
            logging.info('Graph Rewriter optimizations disabled')
            graph_options = tf.GraphOptions()
        config = tf.ConfigProto(graph_options=graph_options)
        with session.Session(config=config) as sess:
            if input_saver_def:
                saver = saver_lib.Saver(saver_def=input_saver_def)
                saver.restore(sess, input_checkpoint)
            else:
                var_list = {}
                reader = pywrap_tensorflow.NewCheckpointReader(
                    input_checkpoint)
                var_to_shape_map = reader.get_variable_to_shape_map()
                for key in var_to_shape_map:
                    try:
                        tensor = sess.graph.get_tensor_by_name(key + ':0')
                    except KeyError:
                        # This tensor doesn't exist in the graph (for example it's
                        # 'global_step' or a similar housekeeping element) so skip it.
                        continue
                    var_list[key] = tensor
                saver = saver_lib.Saver(var_list=var_list)
                saver.restore(sess, input_checkpoint)
                if initializer_nodes:
                    sess.run(initializer_nodes)

            variable_names_blacklist = (variable_names_blacklist.split(',')
                                        if variable_names_blacklist else None)
            output_graph_def = graph_util.convert_variables_to_constants(
                sess,
                input_graph_def,
                output_node_names.split(','),
                variable_names_blacklist=variable_names_blacklist)

    return output_graph_def
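
A call-site sketch for the function above, with the loading code, every path, and every node name assumed for illustration:

# Hypothetical invocation -- all paths and node names are placeholders.
graph_def = tf.GraphDef()
with tf.gfile.GFile('graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

frozen = freeze_graph_with_def_protos(
    input_graph_def=graph_def,
    input_saver_def=None,                  # fall back to the checkpoint-reader path
    input_checkpoint='model.ckpt',         # Saver V2 prefix
    output_node_names='detection_boxes,detection_scores',
    restore_op_name='',                    # unused by this implementation
    filename_tensor_name='',               # unused as well
    clear_devices=True,
    initializer_nodes=None,
    optimize_graph=False)

with tf.gfile.GFile('frozen.pb', 'wb') as f:
    f.write(frozen.SerializeToString())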
Example #60
0
    def __init__(self, rho, sigma, image_shape, kernel_shape, denoiser='./models/map_color/optimizedMAPdenoiser.pb'):
        """
        Initializes the MAP Deblurer.

        Parameters
        ----------
        rho - optimization parameter (optimal 1 / sigma_dae**2), where sigma_dae is the sigma used for training the MAP denoiser
        sigma - standard deviation of the noise used to degrade the image
        image_shape - shape of the image to deblur
        kernel_shape - shape of the blur kernel
        denoiser - path to the frozen denoiser protobuf file
        """
        tf.reset_default_graph()

        # Load denoiser
        with tf.gfile.GFile(denoiser, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        self.rho = rho
        self.sigma = sigma
        self.image_shape = image_shape

        half = [kernel_shape[0] // 2, kernel_shape[1] // 2]
        self.valid_shape = (image_shape[0] - (kernel_shape[0] - 1), image_shape[1] - (kernel_shape[1] - 1))
        self.kernel_shape = kernel_shape

        self.image_ph = tf.placeholder(tf.float32, (None,) + image_shape, "blurry_image")
        self.kernel_sig_ph = tf.placeholder(tf.complex64, image_shape[:2], "blur_kernel")  # conjugated and flipped version of the kernel that was used to degrade the image

        self.y = tf.get_variable("y", initializer=self.image_ph, validate_shape=False)
        self.x_hat = tf.get_variable("x_hat", initializer=self.image_ph, validate_shape=False)
        self.z_hat = tf.get_variable("z_hat", initializer=self.image_ph, validate_shape=False)
        self.lam = tf.get_variable("lam", initializer=tf.zeros(tf.shape(self.image_ph)), validate_shape=False)

        self.kernel_sig = tf.expand_dims(self.kernel_sig_ph, axis=0)
        self.kernel_sig = tf.expand_dims(self.kernel_sig, axis=0)

        # self.kernel_sig = tf.pad(self.kernel_sig, paddings)

        self.kernel_sig = tf.get_variable("kernel_sig", initializer=self.kernel_sig, validate_shape=False)

        self.rev_kernel_sig = tf.math.conj(tf.reverse(self.kernel_sig, axis=(0, 1)))
        self.rev_kernel_sig = tf.get_variable("rev_kernel_sig", initializer=self.rev_kernel_sig, validate_shape=False)

        self.denominator = tf.cast((tf.abs(self.kernel_sig) ** 2 / self.sigma ** 2) + self.rho, tf.complex64)
        self.denominator = tf.get_variable("denominator", initializer=self.denominator, validate_shape=False)

        # image = tf.cast(tf.transpose(self.image_ph, (0,3,1,2)),tf.complex64)

        # paddings = [[0,0], [0,0], [half[0], half[0]], [half[1], half[1]]]

        # if padding_mode == "WRAP":
        #    image = wrap_pad(image, half)
        # else:
        #    image = tf.pad(image, paddings, mode=padding_mode)

        # self.ul = tf.get_variable("ul", initializer=self.ul, validate_shape=False)

        x_tilde = tf.cast(tf.transpose(self.z_hat - self.lam, (0, 3, 1, 2)), tf.complex64)
        # if padding_mode == "WRAP":
        #    x_tilde = wrap_pad(x_tilde, half)
        # else:
        #    x_tilde = tf.pad(x_tilde, paddings, mode=padding_mode)

        x_hat = tf.cast(tf.transpose(self.x_hat[:, :, :, :], (0, 3, 1, 2)), tf.complex64)
        y_est = tf.real(tf.spectral.ifft2d(tf.spectral.fft2d(x_hat) * self.rev_kernel_sig))
        y = tf.transpose(self.y, (0, 3, 1, 2))

        mask = tf.ones((1, 1) + self.valid_shape, dtype=tf.float32)
        mask = tf.pad(mask, [[0, 0], [0, 0], [half[0], half[0]], [half[1], half[1]]])

        # keep observed pixels inside the valid region, synthesized ones outside;
        # (1.0 - mask) stays inside the TF graph instead of applying np.abs to a tensor
        y_est = y * mask + y_est * (1.0 - mask)
        self.y_est = tf.transpose(y_est, (0, 2, 3, 1))

        self.ul = self.kernel_sig * tf.spectral.fft2d(tf.cast(y_est, tf.complex64)) / self.sigma ** 2

        x_hat = tf.real(tf.spectral.ifft2d((self.ul + self.rho * tf.spectral.fft2d(x_tilde)) / self.denominator))
        # x_hat = x_hat[:,:,half[0]:-half[0],half[1]:-half[1]]
        x_hat = tf.transpose(x_hat, (0, 2, 3, 1))

        self.eq1 = self.x_hat.assign(x_hat)

        # eq3
        self.eq3 = self.lam.assign(self.lam + (self.x_hat - self.z_hat))

        # eq2
        x_tilde = (self.x_hat + self.lam) / 255.0  # rescale to [0, 1] for the denoiser
        self.denoised = tf.import_graph_def(
            graph_def,
            input_map={"clean_image": x_tilde},
            return_elements=["denoised:0"])

        self.eq2 = self.z_hat.assign(self.denoised[0] * 255.0)

        self.init = tf.global_variables_initializer()
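
The three assign ops define one ADMM / half-quadratic-splitting round: a Fourier-domain data-fidelity update (eq1), a learned-denoiser prior step (eq2), and a dual update (eq3). A companion method along the following lines could drive them; it is not part of the source, and the iteration count and the precomputed kernel FFT passed in are assumptions.

    def deblur(self, blurry_image, kernel_fft, n_iter=30):
        # Hypothetical driver, not in the source. Assumes blurry_image is an
        # (H, W, C) float32 array and kernel_fft the 2-D FFT of the conjugated,
        # flipped kernel at image resolution, matching kernel_sig_ph.
        with tf.Session() as sess:
            # The variables are initialized from placeholders
            # (validate_shape=False), so the feeds go to the initializer itself.
            sess.run(self.init, feed_dict={
                self.image_ph: blurry_image[None],
                self.kernel_sig_ph: kernel_fft,
            })
            for _ in range(n_iter):
                sess.run(self.eq1)  # x-update: Fourier-domain data fidelity
                sess.run(self.eq2)  # z-update: frozen MAP denoiser prior
                sess.run(self.eq3)  # dual update on the multiplier lam
            return sess.run(self.x_hat)[0]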