Example #1
def define_batch_buffer(shape, capacity, varname):

    # each server holds three values: xi, ai, and alpha
    server_packed_shape = (3, len(m),) + tuple(shape)

    # the crypto producer holds just one value: a
    cryptoprovider_packed_shape = (len(m),) + tuple(shape)

    with tf.device(SERVER_0):
        queue_0 = tf.FIFOQueue(
            capacity=capacity,
            dtypes=[INT_TYPE],
            shapes=[server_packed_shape],
            name='buffer_{}_0'.format(varname),
        )

    with tf.device(SERVER_1):
        queue_1 = tf.FIFOQueue(
            capacity=capacity,
            dtypes=[INT_TYPE],
            shapes=[server_packed_shape],
            name='buffer_{}_1'.format(varname),
        )

    with tf.device(CRYPTO_PRODUCER):
        queue_cp = tf.FIFOQueue(
            capacity=capacity,
            dtypes=[INT_TYPE],
            shapes=[cryptoprovider_packed_shape],
            name='buffer_{}_cp'.format(varname),
        )

    return (queue_0, queue_1, queue_cp)
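A minimal usage sketch for the buffer above, assuming (as the function does) that INT_TYPE, the moduli list m, and the device strings SERVER_0 / SERVER_1 / CRYPTO_PRODUCER exist at module scope; the concrete values below are placeholders:

import tensorflow as tf

INT_TYPE = tf.int64                               # placeholder assumption
m = [89702869, 78489023, 69973811]                # hypothetical share moduli
SERVER_0 = '/job:server0/task:0'                  # hypothetical device strings
SERVER_1 = '/job:server1/task:0'
CRYPTO_PRODUCER = '/job:crypto_producer/task:0'

queue_0, queue_1, queue_cp = define_batch_buffer((128, 10), capacity=8,
                                                 varname='w')

# a producer pushes packed shares; each consumer pops on its own device
packed_shares = tf.zeros((3, len(m), 128, 10), dtype=INT_TYPE)
push_op = queue_0.enqueue(packed_shares)
pop_op = queue_0.dequeue()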
Example #2
def causal_linear(x, n_inputs, n_outputs, name, filter_length, rate,
                  batch_size):
    """Applies dilated convolution using queues.

  Assumes a filter_length of 3.

  Args:
    x: The [mb, time, channels] tensor input.
    n_inputs: The input number of channels.
    n_outputs: The output number of channels.
    name: The variable scope to provide to W and biases.
    filter_length: The length of the convolution, assumed to be 3.
    rate: The rate or dilation
    batch_size: Non-symbolic value for batch_size.

  Returns:
    y: The output of the operation
    (init_1, init_2): Initialization operations for the queues
    (push_1, push_2): Push operations for the queues
  """
    assert filter_length == 3

    # create queue
    q_1 = tf.FIFOQueue(rate,
                       dtypes=tf.float32,
                       shapes=(batch_size, 1, n_inputs))
    q_2 = tf.FIFOQueue(rate,
                       dtypes=tf.float32,
                       shapes=(batch_size, 1, n_inputs))
    init_1 = q_1.enqueue_many(tf.zeros((rate, batch_size, 1, n_inputs)))
    init_2 = q_2.enqueue_many(tf.zeros((rate, batch_size, 1, n_inputs)))
    state_1 = q_1.dequeue()
    push_1 = q_1.enqueue(x)
    state_2 = q_2.dequeue()
    push_2 = q_2.enqueue(state_1)

    # get pretrained weights
    w = tf.get_variable(
        name=name + '/W',
        shape=[1, filter_length, n_inputs, n_outputs],
        dtype=tf.float32,
    )
    b = tf.get_variable(name=name + '/biases',
                        shape=[n_outputs],
                        dtype=tf.float32)
    w_q_2 = tf.slice(w, [0, 0, 0, 0], [-1, 1, -1, -1])
    w_q_1 = tf.slice(w, [0, 1, 0, 0], [-1, 1, -1, -1])
    w_x = tf.slice(w, [0, 2, 0, 0], [-1, 1, -1, -1])

    # perform op w/ cached states
    y = tf.nn.bias_add(
        tf.matmul(state_2[:, 0, :], w_q_2[0][0]) +
        tf.matmul(state_1[:, 0, :], w_q_1[0][0]) +
        tf.matmul(x[:, 0, :], w_x[0][0]),
        b,
    )

    y = tf.expand_dims(y, 1)
    return y, (init_1, init_2), (push_1, push_2)
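A hedged sketch of driving the layer above during incremental generation (assumes TF1.x graph mode; the layer sizes and the number of steps below are illustrative):

import numpy as np
import tensorflow as tf

batch_size, n_ch = 1, 16
x = tf.placeholder(tf.float32, [batch_size, 1, n_ch])
y, init_ops, push_ops = causal_linear(x, n_ch, n_ch, name='dilated_1',
                                      filter_length=3, rate=2,
                                      batch_size=batch_size)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(init_ops)                 # fill both queues with zeros once
    sample = np.zeros((batch_size, 1, n_ch), np.float32)
    for _ in range(4):                 # one timestep per iteration
        out = sess.run([y, push_ops[0], push_ops[1]],
                       feed_dict={x: sample})[0]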
Example #3
 def __init__(self,
              X,
              Y,
              multi_inputs=False,
              batch_size=32,
              shuffle=True,
              capacity=None):
     # Handle multiple inputs
     if not multi_inputs:
         X = [X]
     if not capacity:
         capacity = batch_size * 8
     X = [np.array(x) for x in X]
     self.X = X
     self.Xlen = len(X[0])
     Y = np.array(Y)
     self.Y = Y
     # Create X placeholders
     self.tensorX = [
         tf.placeholder(dtype=tf.float32,
                        shape=[None] +
                        list(utils.get_incoming_shape(x)[1:])) for x in X
     ]
     # Create Y placeholders
     self.tensorY = tf.placeholder(dtype=tf.float32,
                                   shape=[None] +
                                   list(utils.get_incoming_shape(Y)[1:]))
     # FIFO Queue for feeding data
     self.queue = tf.FIFOQueue(dtypes=[x.dtype for x in self.tensorX] +
                               [self.tensorY.dtype],
                               capacity=capacity)
     self.enqueue_op = self.queue.enqueue(self.tensorX + [self.tensorY])
     self.batch_size = batch_size
     self.multi_inputs = multi_inputs
     self.shuffle = shuffle
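The constructor above only builds the graph; a hedged sketch of the feeder loop that would run enqueue_op on a background thread (the method name and the random sampling policy are assumptions; np is already imported at module level since __init__ uses np.array):

 def _feed_thread(self, sess, coord):
     while not coord.should_stop():
         if self.shuffle:
             idx = np.random.randint(0, self.Xlen, self.batch_size)
         else:
             idx = np.arange(self.batch_size) % self.Xlen
         feed = {ph: x[idx] for ph, x in zip(self.tensorX, self.X)}
         feed[self.tensorY] = self.Y[idx]
         sess.run(self.enqueue_op, feed_dict=feed)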
Example #4
def batch_inputs(feature_map, data_files, height=2048, width=2448,
                 batch_size=1, is_train=True, num_readers=1, num_preprocess_threads=4):
    # feature_map: the feature spec that maps the proto fields.
    # data_files: a Python list of TFRecord file paths.
    # batch_size: the size of one batch.
    # is_train: the DataProvider behaves differently for train and test;
    #     in particular, test does not need a looping (shuffled) file queue.
    # num_readers: the number of readers per thread.
    # num_preprocess_threads: the number of data-preprocessing threads.
    with tf.name_scope('reader_defination'):
        # Create the filename queue: shuffled for training, sequential for testing.
        if is_train:
            filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16)
        else:
            filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1)
        # Use at least one reader.
        num_readers = 1 if num_readers < 1 else num_readers
        
        if num_readers > 1:
            # Define the size of the example buffer pool.
            examples_per_shard = 1024
            min_queue_examples = examples_per_shard * 16
            if is_train:
                examples_queue = tf.RandomShuffleQueue(capacity=min_queue_examples + 3 * batch_size,
                                                       min_after_dequeue=min_queue_examples,
                                                       dtypes=[tf.string])
            else:
                examples_queue = tf.FIFOQueue(capacity=examples_per_shard + 3 * batch_size, 
                                              dtypes=[tf.string])
            
            # With multiple readers, manage them through a shared examples queue.
            enqueue_ops = []
            for _ in range(num_readers):
                reader = tf.TFRecordReader()
                _, value = reader.read(filename_queue)
                enqueue_ops.append(examples_queue.enqueue([value]))
            
            tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
            example_serialized = examples_queue.dequeue()
        else:
            reader = tf.TFRecordReader()
            _, example_serialized = reader.read(filename_queue)
        
        samples = []
        for _ in range(num_preprocess_threads):
            features = tf.parse_single_example(example_serialized, feature_map)
            samples.append([image_processing(features['image/encoded'], height, width), features['image/format']])
            
        batch_data = tf.train.batch_join(samples, batch_size=batch_size,
                                         capacity=2 * num_preprocess_threads * batch_size)
                
        data = tf.reshape(batch_data[0], [batch_size, -1])
        label = tf.reshape(batch_data[1], [batch_size])
        return (data, label)
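A hedged usage sketch for batch_inputs (the feature spec, file name, and label dtype are illustrative, and image_processing is assumed to be defined elsewhere in the module; num_readers=1 keeps the example off the large shuffle buffer):

feature_map = {
    'image/encoded': tf.FixedLenFeature([], tf.string),
    'image/format': tf.FixedLenFeature([], tf.int64),
}
data, label = batch_inputs(feature_map, ['train-00000-of-00001.tfrecord'],
                           batch_size=4, is_train=True, num_readers=1)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        batch_data, batch_labels = sess.run([data, label])
    finally:
        coord.request_stop()
        coord.join(threads)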
Example #5
 def _build(self):
     """Returns a tuple containing observation and target one-hot tensors."""
     q = tf.FIFOQueue(
         self._queue_capacity, [self._dtype, self._dtype],
         shapes=[[self._num_steps, self._batch_size, self._vocab_size]] * 2)
     obs, target = tf.py_func(self._get_batch, [], [tf.int32, tf.int32])
     obs = self._one_hot(obs)
     target = self._one_hot(target)
     enqueue_op = q.enqueue([obs, target])
     obs, target = q.dequeue()
     tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op]))
     return SequenceDataOpsNoMask(obs, target)
Example #6
 def __init__(self, batch_env):
     super(_MemoryWrapper, self).__init__(batch_env)
     infinity = 10000000
     meta_data = list(zip(*_rollout_metadata(batch_env)))
     # In the memory wrapper we collect neither pdfs nor the value function,
     # so we only need the first 4 entries of meta_data.
     shapes = meta_data[0][:4]
     dtypes = meta_data[1][:4]
     self.speculum = tf.FIFOQueue(infinity, shapes=shapes, dtypes=dtypes)
     observs_shape = batch_env.observ.shape
     # TODO(piotrmilos): possibly retrieve the observation type for batch_env
     self._observ = tf.Variable(tf.zeros(observs_shape, self.observ_dtype),
                                trainable=False)
Example #7
 def tf_ops(self, capacity=32):
     im = tf.placeholder(tf.float32, shape=self.image_shape)
     label = tf.placeholder(tf.int32, shape=self.label_shape)
     if self.image_shape is None or self.label_shape is None:
         shapes = None
     else:
         shapes = [self.image_shape, self.label_shape]
     queue = tf.FIFOQueue(capacity, [tf.float32, tf.int32], shapes=shapes)
     enqueue_op = queue.enqueue([im, label])
     fqr = FeedingQueueRunner(queue, [enqueue_op],
                              feed_fns=[self.feed(im, label).__next__])
     tf.train.add_queue_runner(fqr)
     return queue.dequeue()
Example #8
    def init_queue(self):
        queue_types = []
        queue_shapes = []
        self.placeholders = []
        for (name, shape) in self.data_names_and_shapes:
            if name in self.data_types:
                types = self.data_types[name]
            else:
                types = tf.float32
            queue_shapes.append([self.batch_size] + shape)
            queue_types.append(types)
            self.placeholders.append(tf.placeholder(types, [self.batch_size] + shape, name='placeholder_' + name))

        self.queue = tf.FIFOQueue(self.queue_size, queue_types, queue_shapes)
        self.enqueue = self.queue.enqueue(self.placeholders)
Example #9
            def build():
                queue_dtypes = [tf.float32, tf.int32, tf.string]
                queue_names = ['image', 'bboxes', 'filename']

                queue = tf.FIFOQueue(capacity=3,
                                     dtypes=queue_dtypes,
                                     names=queue_names,
                                     name='fifo_queue')
                filename = tf.cast('filename_test', tf.string)
                filename = tf.train.limit_epochs([filename], num_epochs=2)

                data = {
                    'image': tf.random_uniform([600, 800, 3], maxval=255),
                    'bboxes': tf.constant([[0, 0, 30, 30, 0]]),
                    'filename': filename
                }
                enqueue_ops = [queue.enqueue(data)] * 2
                tf.train.add_queue_runner(
                    tf.train.QueueRunner(queue, enqueue_ops))

                return queue.dequeue()
Example #10
    def _build(self):
        # Find split file from which we are going to read.
        split_path = os.path.join(self._dataset_dir,
                                  '{}.tfrecords'.format(self._split))
        if not tf.gfile.Exists(split_path):
            raise InvalidDataDirectory(
                '"{}" does not exist.'.format(split_path))
        # String input producer allows for a variable number of files to read
        # from. We just know we have a single file.
        filename_queue = tf.train.string_input_producer(
            [split_path], num_epochs=self._num_epochs, seed=self._seed)

        # Define reader to parse records.
        reader = tf.TFRecordReader()
        _, raw_record = reader.read(filename_queue)

        values, dtypes, names = self.read_record(raw_record)

        if self._random_shuffle:
            queue = tf.RandomShuffleQueue(capacity=100,
                                          min_after_dequeue=0,
                                          dtypes=dtypes,
                                          names=names,
                                          name='tfrecord_random_queue',
                                          seed=self._seed)
        else:
            queue = tf.FIFOQueue(capacity=100,
                                 dtypes=dtypes,
                                 names=names,
                                 name='tfrecord_fifo_queue')

        # Generate queueing ops for QueueRunner.
        enqueue_ops = [queue.enqueue(values)] * self._total_queue_ops
        self.queue_runner = tf.train.QueueRunner(queue, enqueue_ops)

        tf.train.add_queue_runner(self.queue_runner)

        return queue.dequeue()
Example #11
    def __init__(self, dataset, num_threads, queue_size, batch_size):
        self._dataset = dataset
        self._num_threads = num_threads
        self._queue_size = queue_size
        self._batch_size = batch_size

        datatypes = 2*['float32']
        shapes = 2*[self._dataset.shape]

        batch_shape = [None]+list(self._dataset.shape)
        
        # one placeholder per queue component (x and y)
        self._placeholders = [
            tf.placeholder(dtype=tf.float32, shape=batch_shape),
            tf.placeholder(dtype=tf.float32, shape=batch_shape)
        ]

        self._queue = tf.FIFOQueue(self._queue_size, datatypes, shapes=shapes)
        self.x, self.y = self._queue.dequeue_up_to(self._batch_size)
        self.enqueue_op = self._queue.enqueue_many(self._placeholders)

        self._coordinator = tf.train.Coordinator()

        self._threads = []
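The constructor leaves self._threads empty; a hedged sketch of the methods that would fill and drain it (the dataset's next_batch interface is an assumption; threading is assumed imported at module level):

    def _enqueue_thread(self, sess):
        while not self._coordinator.should_stop():
            x_batch, y_batch = self._dataset.next_batch()  # hypothetical API
            sess.run(self.enqueue_op,
                     feed_dict={self._placeholders[0]: x_batch,
                                self._placeholders[1]: y_batch})

    def start_threads(self, sess):
        self._threads = [
            threading.Thread(target=self._enqueue_thread, args=(sess,))
            for _ in range(self._num_threads)
        ]
        for thread in self._threads:
            thread.daemon = True
            thread.start()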
Example #12
  def testImageIsNotZerothOutputOfOp(self):
    # Throughout the framework, we assume that the 0th output of each op is the
    # only one of interest. One exception that often happens is when the input
    # image comes from a queue or from a staging op. Then the image is one of
    # the outputs of the dequeue (or staging) op, not necessarily the 0th one.
    # Here we test that the BilinearNetworkRegularizer deals correctly with this
    # case.

    # Create an input op where the image is output number 1, not 0.
    # TODO(g1) Move this mechanism to add_concat_model_stub, possibly using
    # tf.split to produce an op where the image is not the 0th output image
    # (instead of FIFOQueue).
    image = add_concat_model_stub.image_stub()
    non_image_tensor = tf.zeros(shape=(41,))
    queue = tf.FIFOQueue(
        capacity=1,
        dtypes=(tf.float32,) * 2,
        shapes=(non_image_tensor.shape, image.shape))

    # Pass the image (output[1]) to the network.
    with arg_scope(self._batch_norm_scope()):
      output_op = add_concat_model_stub.build_model(queue.dequeue()[1])

    # Create OpHandler dict for test.
    op_handler_dict = collections.defaultdict(
        grouping_op_handler.GroupingOpHandler)
    op_handler_dict.update({
        'FusedBatchNormV3':
            batch_norm_source_op_handler.BatchNormSourceOpHandler(0.1),
        'Conv2D':
            output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
        'ConcatV2':
            concat_op_handler.ConcatOpHandler(),
    })

    # Create OpRegularizerManager and NetworkRegularizer for test.
    manager = orm.OpRegularizerManager([output_op], op_handler_dict)
    calculator = cc.CostCalculator(manager, resource_function.flop_function)

    # Calculate expected FLOP cost.
    expected_alive_conv1 = sum(add_concat_model_stub.expected_alive()['conv1'])
    conv1_op = tf.get_default_graph().get_operation_by_name('conv1/Conv2D')
    conv1_coeff = resource_function.flop_coeff(conv1_op)
    num_channels = 3
    expected_cost = conv1_coeff * num_channels * expected_alive_conv1

    with self.session():
      tf.global_variables_initializer().run()
      # Set gamma values to replicate aliveness in add_concat_model_stub.
      name_to_var = {v.op.name: v for v in tf.global_variables()}
      gamma1 = name_to_var['conv1/BatchNorm/gamma']
      gamma1.assign([0, 1, 1, 0, 1, 0, 1]).eval()
      gamma4 = name_to_var['conv4/BatchNorm/gamma']
      gamma4.assign([0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0]).eval()

      queue.enqueue((non_image_tensor, image)).run()
      self.assertEqual(expected_cost,
                       calculator.get_cost([conv1_op]).eval())
      # for 0/1 assignments, cost and reg_term are equal:
      self.assertEqual(expected_cost,
                       calculator.get_regularization_term([conv1_op]).eval())
Example #13
def arbitrary_style_image_inputs(style_dataset_file,
                                 batch_size=None,
                                 image_size=None,
                                 center_crop=True,
                                 shuffle=True,
                                 augment_style_images=False,
                                 random_style_image_size=False,
                                 min_rand_image_size=128,
                                 max_rand_image_size=300):
    """Loads a batch of random style image given the path of tfrecord dataset.

  This method does not return pre-compute Gram matrices for the images like
  style_image_inputs. But it can provide data augmentation. If
  augment_style_images is equal to True, then style images will randomly
  modified (eg. changes in brightness, hue or saturation) for data
  augmentation. If random_style_image_size is set to True then all images
  in one batch will be resized to a random size.
  Args:
    style_dataset_file: str, path to the tfrecord dataset of style files.
    batch_size: int. If provided, batches style images. Defaults to None.
    image_size: int. The images will be resized bilinearly so that the smallest
        side has size image_size. Defaults to None.
    center_crop: bool. If True, center-crops to [image_size, image_size].
        Defaults to False.
    shuffle: bool, whether to shuffle style files at random. Defaults to False.
    augment_style_images: bool. Wheather to augment style images or not.
    random_style_image_size: bool. If this value is True, then all the style
        images in one batch will be resized to a random size between
        min_rand_image_size and max_rand_image_size.
    min_rand_image_size: int. If random_style_image_size is True, this value
        specifies the minimum image size.
    max_rand_image_size: int. If random_style_image_size is True, this value
        specifies the maximum image size.

  Returns:
    4-D tensor of shape [1, ?, ?, 3] with values in [0, 1] for the style
    image (with random changes for data augmentation if
    augment_style_image_size is set to true), and 0-D tensor for the style
    label, 4-D tensor of shape [1, ?, ?, 3] with values in [0, 1] for the style
    image without random changes for data augmentation.

  Raises:
    ValueError: if center cropping is requested but no image size is provided,
        or if batch size is specified but center-cropping or
        augment-style-images is not requested,
        or if both augment-style-images and center-cropping are requested.
  """
    if center_crop and image_size is None:
        raise ValueError('center-cropping requires specifying the image size.')
    if center_crop and augment_style_images:
        raise ValueError(
            'When augment_style_images is true images will be randomly cropped.'
        )
    if batch_size is not None and not center_crop and not augment_style_images:
        raise ValueError(
            'batching requires same image sizes (Set center-cropping or '
            'augment_style_images to true)')

    with tf.name_scope('style_image_processing'):
        # Force all input processing onto CPU in order to reserve the GPU for the
        # forward inference and back-propagation.
        with tf.device('/cpu:0'):
            filename_queue = tf.train.string_input_producer(
                [style_dataset_file],
                shuffle=False,
                capacity=1,
                name='filename_queue')
            if shuffle:
                examples_queue = tf.RandomShuffleQueue(
                    capacity=64,
                    min_after_dequeue=32,
                    dtypes=[tf.string],
                    name='random_examples_queue')
            else:
                examples_queue = tf.FIFOQueue(capacity=64,
                                              dtypes=[tf.string],
                                              name='fifo_examples_queue')
            reader = tf.TFRecordReader()
            _, value = reader.read(filename_queue)
            enqueue_ops = [examples_queue.enqueue([value])]
            tf.train.queue_runner.add_queue_runner(
                tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
            example_serialized = examples_queue.dequeue()
            features = tf.parse_single_example(
                example_serialized,
                features={
                    'label': tf.FixedLenFeature([], tf.int64),
                    'image_raw': tf.FixedLenFeature([], tf.string)
                })
            image = tf.image.decode_jpeg(features['image_raw'])
            image.set_shape([None, None, 3])
            label = features['label']

            if image_size is not None:
                image_channels = int(image.shape[2])
                if augment_style_images:
                    image_orig = image
                    image = tf.image.random_brightness(image, max_delta=0.8)
                    image = tf.image.random_saturation(image,
                                                       lower=0.5,
                                                       upper=1.5)
                    image = tf.image.random_hue(image, max_delta=0.2)
                    image = tf.image.random_flip_left_right(image)
                    image = tf.image.random_flip_up_down(image)
                    random_larger_image_size = tf.random_uniform(
                        [],
                        minval=image_size + 2,
                        maxval=image_size + 200,
                        dtype=tf.int32)
                    image = _aspect_preserving_resize(
                        image, random_larger_image_size)
                    image = tf.random_crop(
                        image, size=[image_size, image_size, image_channels])
                    image.set_shape([image_size, image_size, image_channels])

                    image_orig = _aspect_preserving_resize(
                        image_orig, image_size + 2)
                    image_orig = _central_crop([image_orig], image_size,
                                               image_size)[0]
                    image_orig.set_shape([image_size, image_size, 3])
                elif center_crop:
                    image = _aspect_preserving_resize(image, image_size + 2)
                    image = _central_crop([image], image_size, image_size)[0]
                    image.set_shape([image_size, image_size, image_channels])
                    image_orig = image
                else:
                    image = _aspect_preserving_resize(image, image_size)
                    image_orig = image

            image = tf.to_float(image) / 255.0
            image_orig = tf.to_float(image_orig) / 255.0

            if batch_size is None:
                image = tf.expand_dims(image, 0)
            else:
                [image, image_orig,
                 label] = tf.train.batch([image, image_orig, label],
                                         batch_size=batch_size)

            if random_style_image_size:
                # Selects a random size for the style images and resizes all the images
                # in the batch to that size.
                image = _aspect_preserving_resize(
                    image,
                    tf.random_uniform([],
                                      minval=min_rand_image_size,
                                      maxval=max_rand_image_size,
                                      dtype=tf.int32))

            return image, label, image_orig
Example #14
def style_image_inputs(style_dataset_file,
                       batch_size=None,
                       image_size=None,
                       square_crop=False,
                       shuffle=True):
    """Loads a batch of random style image given the path of tfrecord dataset.

  Args:
    style_dataset_file: str, path to the tfrecord dataset of style files.
        The dataset is produced via the create_style_dataset.py script and is
        made of Example protobufs with the following features:
        * 'image_raw': byte encoding of the JPEG string of the style image.
        * 'label': integer identifier of the style image in [0, N - 1], where
              N is the number of examples in the dataset.
        * 'vgg_16/<LAYER_NAME>': Gram matrix at layer <LAYER_NAME> of the VGG-16
              network (<LAYER_NAME> in {conv,pool}{1,2,3,4,5}) for the style
              image.
    batch_size: int. If provided, batches style images. Defaults to None.
    image_size: int. The images will be resized bilinearly so that the smallest
        side has size image_size. Defaults to None.
    square_crop: bool. If True, square-crops to [image_size, image_size].
        Defaults to False.
    shuffle: bool, whether to shuffle style files at random. Defaults to True.

  Returns:
    If batch_size is defined, a 4-D tensor of shape [batch_size, ?, ?, 3] with
    values in [0, 1] for the style image, and 1-D tensor for the style label.

  Raises:
    ValueError: if center cropping is requested but no image size is provided,
        or if batch size is specified but center-cropping is not requested.
  """
    vgg_layers = [
        'vgg_16/conv1', 'vgg_16/pool1', 'vgg_16/conv2', 'vgg_16/pool2',
        'vgg_16/conv3', 'vgg_16/pool3', 'vgg_16/conv4', 'vgg_16/pool4',
        'vgg_16/conv5', 'vgg_16/pool5'
    ]

    if square_crop and image_size is None:
        raise ValueError('center-cropping requires specifying the image size.')
    if batch_size is not None and not square_crop:
        raise ValueError('batching requires center-cropping.')

    with tf.name_scope('style_image_processing'):
        filename_queue = tf.train.string_input_producer([style_dataset_file],
                                                        shuffle=False,
                                                        capacity=1,
                                                        name='filename_queue')
        if shuffle:
            examples_queue = tf.RandomShuffleQueue(
                capacity=64,
                min_after_dequeue=32,
                dtypes=[tf.string],
                name='random_examples_queue')
        else:
            examples_queue = tf.FIFOQueue(capacity=64,
                                          dtypes=[tf.string],
                                          name='fifo_examples_queue')
        reader = tf.TFRecordReader()
        _, value = reader.read(filename_queue)
        enqueue_ops = [examples_queue.enqueue([value])]
        tf.train.queue_runner.add_queue_runner(
            tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
        example_serialized = examples_queue.dequeue()
        features = tf.parse_single_example(
            example_serialized,
            features={
                'label': tf.FixedLenFeature([], tf.int64),
                'image_raw': tf.FixedLenFeature([], tf.string),
                'vgg_16/conv1': tf.FixedLenFeature([64, 64], tf.float32),
                'vgg_16/pool1': tf.FixedLenFeature([64, 64], tf.float32),
                'vgg_16/conv2': tf.FixedLenFeature([128, 128], tf.float32),
                'vgg_16/pool2': tf.FixedLenFeature([128, 128], tf.float32),
                'vgg_16/conv3': tf.FixedLenFeature([256, 256], tf.float32),
                'vgg_16/pool3': tf.FixedLenFeature([256, 256], tf.float32),
                'vgg_16/conv4': tf.FixedLenFeature([512, 512], tf.float32),
                'vgg_16/pool4': tf.FixedLenFeature([512, 512], tf.float32),
                'vgg_16/conv5': tf.FixedLenFeature([512, 512], tf.float32),
                'vgg_16/pool5': tf.FixedLenFeature([512, 512], tf.float32)
            })
        image = tf.image.decode_jpeg(features['image_raw'])
        label = features['label']
        gram_matrices = [features[vgg_layer] for vgg_layer in vgg_layers]
        image.set_shape([None, None, 3])

        if image_size:
            if square_crop:
                image = _aspect_preserving_resize(image, image_size + 2)
                image = _central_crop([image], image_size, image_size)[0]
                image.set_shape([image_size, image_size, 3])
            else:
                image = _aspect_preserving_resize(image, image_size)

        image = tf.to_float(image) / 255.0

        if batch_size is None:
            image = tf.expand_dims(image, 0)
        else:
            image_label_gram_matrices = tf.train.batch([image, label] +
                                                       gram_matrices,
                                                       batch_size=batch_size)
            image, label = image_label_gram_matrices[:2]
            gram_matrices = image_label_gram_matrices[2:]

        gram_matrices = dict(
            (vgg_layer, gram_matrix)
            for vgg_layer, gram_matrix in zip(vgg_layers, gram_matrices))
        return image, label, gram_matrices
Example #15
    def __init__(self,
                 network_name,
                 checkpoint_path,
                 batch_size,
                 image_size=None):
        self._network_name = network_name
        self._checkpoint_path = checkpoint_path
        self._batch_size = batch_size
        self._image_size = image_size
        self._layer = {}

        self._global_step = tf.train.get_or_create_global_step()

        # Retrieve the function that returns logits and endpoints
        # (num_classes is assumed to be defined at module scope)
        self._network_fn = nets_factory.get_network_fn(self._network_name,
                                                       num_classes=num_classes,
                                                       is_training=False)

        # Retrieve the model scope from network factory
        self._model_scope = nets_factory.arg_scopes_map[self._network_name]

        # Fetch the default image size
        self._image_size = self._network_fn.default_image_size
        self._filename_queue = tf.FIFOQueue(100000, [tf.string],
                                            shapes=[[]],
                                            name="filename_queue")
        self._pl_image_files = tf.placeholder(tf.string,
                                              shape=[None],
                                              name="image_file_list")
        self._enqueue_op = self._filename_queue.enqueue_many(
            [self._pl_image_files])
        self._num_in_queue = self._filename_queue.size()

        self._batch_from_queue, self._batch_filenames = self._preproc_image_batch(
            self._batch_size, num_threads=4)

        #self._image_batch = tf.placeholder_with_default(
        #        self._batch_from_queue, shape=[self._batch_size, _STRIDE, self._image_size, self._image_size, 3])
        self._image_batch = tf.placeholder(
            tf.float32, [batch_size, _STRIDE, image_size, image_size, 3])

        # Retrieve the logits and network endpoints (for extracting activations)
        # Note: endpoints is a dictionary with endpoints[name] = tf.Tensor
        self._logits, self._endpoints = self._network_fn(self._image_batch)

        # Find the checkpoint file
        checkpoint_path = self._checkpoint_path
        if tf.gfile.IsDirectory(self._checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(self._checkpoint_path)

        # Load pre-trained weights into the model (use the resolved
        # checkpoint_path so that a directory argument also works)
        variables_to_restore = slim.get_variables_to_restore()
        restore_fn = slim.assign_from_checkpoint_fn(checkpoint_path,
                                                    variables_to_restore)

        # Start the session and load the pre-trained weights
        self._sess = tf.Session()
        restore_fn(self._sess)

        # Local variables initializer, needed for queues etc.
        self._sess.run(tf.local_variables_initializer())

        # Managing the queues and threads
        self._coord = tf.train.Coordinator()
        self._threads = tf.train.start_queue_runners(coord=self._coord,
                                                     sess=self._sess)
Example #16
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()


def read_and_push_instance(filename_queue, instance_queue):
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(filename_queue)
    x1, x2, target = tf.decode_csv(value, record_defaults=[[-1.], [-1.], [-1]])
    features = tf.stack([x1, x2])
    enqueue_instance = instance_queue.enqueue([features, target])
    return enqueue_instance


filename_queue = tf.FIFOQueue(capacity=10, dtypes=[tf.string], shapes=[()])
filename = tf.placeholder(tf.string)
enqueue_filename = filename_queue.enqueue([filename])
close_filename_queue = filename_queue.close()

instance_queue = tf.RandomShuffleQueue(capacity=10,
                                       min_after_dequeue=2,
                                       dtypes=[tf.float32, tf.int32],
                                       shapes=[[2], []],
                                       name="instance_q",
                                       shared_name="shared_instance_q")

minibatch_instances, minibatch_targets = instance_queue.dequeue_up_to(2)

read_and_enqueue_ops = [
    read_and_push_instance(filename_queue, instance_queue) for i in range(5)
]
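A hedged sketch of the driver loop that typically follows (the file name and iteration count are illustrative; assumes the CSV holds a header line plus enough x1,x2,target rows to fill a few minibatches):

import threading

def run_enqueue(sess, op):
    # read until the closed filename queue is exhausted
    try:
        while True:
            sess.run(op)
    except tf.errors.OutOfRangeError:
        pass

with tf.Session() as sess:
    sess.run(enqueue_filename, feed_dict={filename: 'my_data.csv'})
    sess.run(close_filename_queue)
    threads = [threading.Thread(target=run_enqueue, args=(sess, op))
               for op in read_and_enqueue_ops]
    for t in threads:
        t.daemon = True
        t.start()
    for _ in range(3):
        print(sess.run([minibatch_instances, minibatch_targets]))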
Example #17
    def __init__(self, mc):
        self.div_f = 1.0
        self.mc = mc
        # dropout keep probability in (0, 1]; usually set to 0.5 in the
        # training phase and 1.0 in the evaluation phase
        self.keep_prob = 0.5 if mc.IS_TRAINING else 1.0

        # image batch input
        self.ph_image_input = tf.placeholder(
            tf.float32, [mc.BATCH_SIZE, mc.IMAGE_HEIGHT, mc.IMAGE_WIDTH, 3],
            name='image_input')
        # A tensor where an element is 1 if the corresponding box is
        # "responsible" for detecting an object and 0 otherwise.
        self.ph_input_mask = tf.placeholder(tf.float32,
                                            [mc.BATCH_SIZE, mc.ANCHORS, 1],
                                            name='box_mask')
        # Tensor used to represent bounding box deltas.
        self.ph_box_delta_input = tf.placeholder(
            tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, 4], name='box_delta_input')
        # Tensor used to represent bounding box coordinates.
        self.ph_box_input = tf.placeholder(tf.float32,
                                           [mc.BATCH_SIZE, mc.ANCHORS, 4],
                                           name='box_input')
        # Tensor used to represent labels
        self.ph_labels = tf.placeholder(
            tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES], name='labels')

        # IoU between predicted anchors and ground-truth boxes
        self.ious = tf.Variable(initial_value=np.zeros(
            (mc.BATCH_SIZE, mc.ANCHORS)),
                                trainable=False,
                                name='iou',
                                dtype=tf.float32)

        self.FIFOQueue = tf.FIFOQueue(
            capacity=mc.QUEUE_CAPACITY,
            dtypes=[tf.float32] * 5,
            shapes=[[mc.IMAGE_HEIGHT, mc.IMAGE_WIDTH, 3],
                    [mc.ANCHORS, 1],
                    [mc.ANCHORS, 4],
                    [mc.ANCHORS, 4],
                    [mc.ANCHORS, mc.CLASSES]],
        )

        self.enqueue_op = self.FIFOQueue.enqueue_many([
            self.ph_image_input, self.ph_input_mask, self.ph_box_delta_input,
            self.ph_box_input, self.ph_labels
        ])

        self.image_input, self.input_mask, self.box_delta_input, \
            self.box_input, self.labels = tf.train.batch(
                self.FIFOQueue.dequeue(), batch_size=mc.BATCH_SIZE,
                capacity=mc.QUEUE_CAPACITY)

        # model parameters
        self.model_params = []

        # model size counter: list of (layer name, parameter size) tuples
        self.model_size_counter = []
        # flop counter: list of (layer name, flop count) tuples
        self.flop_counter = []
        # activation counter: list of (layer name, output activations) tuples
        self.activation_counter = []
        self.activation_counter.append(
            ('input', mc.IMAGE_WIDTH * mc.IMAGE_HEIGHT * 3))
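A hedged sketch of feeding this model's queue (the helper name and the batch tuple layout are assumptions; the arrays must match the placeholder shapes above):

def enqueue_batch(sess, model, batch):
    # batch: numpy arrays shaped like the five placeholders above
    images, masks, deltas, boxes, labels = batch
    sess.run(model.enqueue_op, feed_dict={
        model.ph_image_input: images,
        model.ph_input_mask: masks,
        model.ph_box_delta_input: deltas,
        model.ph_box_input: boxes,
        model.ph_labels: labels,
    })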