Code example #1
def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    input_name = "file_reader"
    output_name = "normalized"
    file_reader = tf.read_file(file_name, input_name)
    if file_name.endswith(".png"):
        image_reader = tf.image.decode_png(file_reader,
                                           channels=3,
                                           name='png_reader')
    elif file_name.endswith(".gif"):
        image_reader = tf.squeeze(
            tf.image.decode_gif(file_reader, name='gif_reader'))
    elif file_name.endswith(".bmp"):
        image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
    else:
        image_reader = tf.image.decode_jpeg(file_reader,
                                            channels=3,
                                            name='jpeg_reader')
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander,
                                       [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    # Run the graph once and release the session when done
    with tf.Session() as sess:
        result = sess.run(normalized)

    return result
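For reference, a minimal usage sketch of the function above; the file name is a placeholder and TensorFlow 1.x graph mode (imported as `tf`) is assumed:

# Hypothetical call; "example.jpg" is a placeholder path.
tensor = read_tensor_from_image_file("example.jpg",
                                     input_height=299,
                                     input_width=299)
print(tensor.shape)  # expected: (1, 299, 299, 3)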
Code example #2
def visualize_tfrecords(path_to_tfrecord, num_vids, num_skip_frames):
    """Visualizes TFRecords in given path.

  Args:
    path_to_tfrecord: string, Path to TFRecords. Provide search pattern in
    string.
    num_vids: integer, Number of videos to visualize.
    num_skip_frames: integer, Number of frames to skip while visualzing.
  """
    tfrecord_files = glob.glob(path_to_tfrecord)
    tfrecord_files.sort()
    sess = tf.Session()
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    dataset = tf.data.TFRecordDataset(tfrecord_files)
    dataset = dataset.map(decode)
    dataset = dataset.batch(1)

    iterator = dataset.make_one_shot_iterator()
    next_batch = iterator.get_next()

    sess.run(init_op)

    for _ in range(num_vids):

        # Fetch a new batch from the dataset
        batch_videos, batch_names = sess.run(next_batch)
        tf.logging.info('Class label = %d', batch_names[0])
        for frame_idx in range(0, len(batch_videos[0]), num_skip_frames):
            plt.imshow(batch_videos[0, frame_idx])
            plt.pause(0.1)
            plt.clf()
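The snippet above maps a `decode` function that is not shown. A minimal sketch of what such a parser could look like, assuming a hypothetical TFRecord schema with a raw-encoded video tensor and an integer class label (the feature names, dtypes, and frame size below are assumptions, not the project's actual schema):

# Hypothetical decode function for the dataset.map(decode) call above.
# Feature names, dtypes, and the frame size are assumptions.
def decode(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        features={
            'video': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    video = tf.decode_raw(features['video'], tf.uint8)
    video = tf.reshape(video, [-1, 224, 224, 3])  # frames x H x W x C
    return video, features['label']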
Code example #3
File: nsynth.py  Project: Mbah-Javis/datasets
def start_bundle(self):
    self._calc_loudness = tf.function(
        _calc_loudness,
        input_signature=[
            tf.TensorSpec(shape=[_NUM_SECS * _AUDIO_RATE],
                          dtype=tf.float32)
        ])
    self._sess = None if tf.executing_eagerly() else tf.Session()
Code example #4
File: coco_utils.py  Project: runchida/Bachelorarbeit
    def __call__(self):
        with tf.Graph().as_default():
            dataset = self._build_pipeline()
            groundtruth = dataset.make_one_shot_iterator().get_next()

            with tf.Session() as sess:
                for _ in range(self._num_examples):
                    groundtruth_result = sess.run(groundtruth)
                    yield groundtruth_result
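A hedged usage sketch for the generator above: assuming an instance of the surrounding (unnamed) class is bound to `generator`, calling it yields one evaluated groundtruth example per step:

# Hypothetical consumption loop; `generator` stands in for an instance of
# the class that defines __call__ above.
for groundtruth in generator():
    # groundtruth holds the numpy values produced by sess.run(groundtruth)
    print(type(groundtruth))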
Code example #5
y = y.reshape(-1, 1)

# Convert the output labels to one-hot encoding format
y = np.eye(y.shape[1] + 1)[y]
y = np.array([item[0] for item in y])

X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=.4, random_state=42)

num_outputs = y.shape[1]
num_inputs = X.shape[1]

learning_rate = 0.001
epochs = 3000

with tf.Session() as sess:
    x = tf.placeholder(shape=[None, num_inputs], dtype=tf.float32)
    y = tf.placeholder(shape=[None, num_outputs], dtype=tf.float32)

    w = tf.Variable(tf.random.normal(shape=[num_inputs, num_outputs]),
                    dtype=tf.float32,
                    name='w')
    b = tf.Variable(tf.random.normal(shape=[num_outputs]),
                    dtype=tf.float32,
                    name='b')

    y_hat = tf.nn.sigmoid(tf.matmul(x, w) + b)

    loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_hat), axis=1))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    match = tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_hat, 1)),
                    tf.float32)
Code example #6
            output_layer = args.output_layer

        graph = load_graph(model_file)

        t = read_tensor_from_image_file(file_name,
                                        input_height=input_height,
                                        input_width=input_width,
                                        input_mean=input_mean,
                                        input_std=input_std)

        input_name = "import/" + input_layer
        output_name = "import/" + output_layer
        input_operation = graph.get_operation_by_name(input_name)
        output_operation = graph.get_operation_by_name(output_name)

        with tf.Session(graph=graph) as sess:
            start = time.time()
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: t})
            end = time.time()
        results = np.squeeze(results)

        top_k = results.argsort()[-5:][::-1]
        labels = load_labels(label_file)

        print('\nEvaluation time (1-image): {:.3f}s\n'.format(end - start))

        for i in top_k:
            if labels[i] == "bad canal images":
                if results[i] >= 0.60:
                    shutil.copy2(singlefile, "result/bad")