Example 1
def read_cnnHAR(filename_queue):
    class CNNHARRecord(object):
        pass

    result = CNNHARRecord()

    # Read one CSV line per example from the filename_queue.
    reader = tf.TextLineReader()
    result.key, value = reader.read(filename_queue)

    # One float default per CSV column: a label column followed by
    # SIGNAL_SIZE * channels signal columns.
    record_defaults = [[1.0] for col in range(SIGNAL_SIZE * channels + 1)]

    record_bytes = tf.decode_csv(value, record_defaults=record_defaults)
    # Columns 1 .. SIGNAL_SIZE * channels hold the signal values, which we cast
    # to float32. The slice must cover all signal columns so the reshape below
    # to [SIGNAL_SIZE, channels] has the right number of elements.
    result.signal = tf.cast(
        tf.strided_slice(record_bytes, [1], [SIGNAL_SIZE * channels + 1]),
        tf.float32)
    result.signal = tf.reshape(result.signal, [SIGNAL_SIZE, channels])
    # Column 0 holds the label; subtract 1 so labels run from 0 to NUM_CLASSES - 1,
    # matching the logits.
    result.label = tf.cast(
        tf.strided_slice(record_bytes, [0], [1]) - 1, tf.float32)
    result.label = tf.reshape(result.label, [1, 1])

    return result
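The reader above only defines graph ops; in a TF 1.x pipeline something still has to supply the filename queue and batch the parsed examples. A minimal usage sketch, assuming TensorFlow 1.x queue runners and a hypothetical CSV file har_train.csv whose rows contain a label followed by SIGNAL_SIZE * channels values (SIGNAL_SIZE, channels and NUM_CLASSES are module constants in the original project):

import tensorflow as tf

filename_queue = tf.train.string_input_producer(['har_train.csv'])
example = read_cnnHAR(filename_queue)
signals, labels = tf.train.shuffle_batch(
    [example.signal, example.label],
    batch_size=32, capacity=1000, min_after_dequeue=500)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    batch_x, batch_y = sess.run([signals, labels])
    coord.request_stop()
    coord.join(threads)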
Example 2
def read_cifar10(filename_queue):
    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()

    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes

    reader = tf.FixedLengthRecordReader(record_bytes)
    result.key, value = reader.read(filename_queue)

    record_bytes = tf.decode_raw(value, tf.uint8)
    result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]),
                           tf.int32)
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    return result
Example 3
def check_cam_coherence(path):
    """Check the coherence of a camera path."""
    cam_gt = path + 'cam0_gt.visim'
    cam_render = path + 'cam0.render'
    lines = tf.string_split([tf.read_file(cam_render)], '\n').values
    lines = lines[3:]
    lines = tf.strided_slice(lines, [0], [lines.shape_as_list()[0]], [2])
    fields = tf.reshape(tf.string_split(lines, ' ').values, [-1, 10])
    timestamp_from_render, numbers = tf.split(fields, [1, 9], -1)
    numbers = tf.strings.to_number(numbers)
    eye, lookat, up = tf.split(numbers, [3, 3, 3], -1)
    up_vector = tf.nn.l2_normalize(up - eye)
    lookat_vector = tf.nn.l2_normalize(lookat - eye)
    rotation_from_lookat = lookat_matrix(up_vector, lookat_vector)

    lines = tf.string_split([tf.read_file(cam_gt)], '\n').values
    lines = lines[1:]
    fields = tf.reshape(tf.string_split(lines, ',').values, [-1, 8])
    timestamp_from_gt, numbers = tf.split(fields, [1, 7], -1)
    numbers = tf.strings.to_number(numbers)
    position, quaternion = tf.split(numbers, [3, 4], -1)
    rotation_from_quaternion = from_quaternion(quaternion)

    assert tf.reduce_all(tf.equal(timestamp_from_render, timestamp_from_gt))
    assert tf.reduce_all(tf.equal(eye, position))
    so3_diff = (tf.trace(
        tf.matmul(rotation_from_lookat,
                  rotation_from_quaternion,
                  transpose_a=True)) - 1) / 2
    tf.assert_near(so3_diff, tf.ones_like(so3_diff))
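The final lines compare the two rotation estimates through the trace identity: for rotation matrices R1 and R2, (trace(R1^T R2) - 1) / 2 equals the cosine of the angle of the relative rotation, so values near 1 mean the look-at and quaternion rotations agree. A small NumPy check of that identity (illustration only, not part of the original pipeline):

import numpy as np

def rot_z(theta):
    # Rotation by theta radians about the z axis.
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

r1 = rot_z(0.3)
r2 = rot_z(0.3 + 0.1)  # differs from r1 by a 0.1 rad rotation
cos_diff = (np.trace(r1.T @ r2) - 1.0) / 2.0
print(np.isclose(cos_diff, np.cos(0.1)))  # True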
Example 4
 def build_graph(parameters):
     """Build graph for stride_slice test."""
     input_tensor = tf.compat.v1.placeholder(
         dtype=parameters["dtype"],
         name="input",
         shape=parameters["input_shape"])
     if parameters["constant_indices"]:
         begin = parameters["begin"]
         end = parameters["end"]
         strides = parameters["strides"]
         tensors = [input_tensor]
     else:
         begin = tf.compat.v1.placeholder(dtype=parameters["index_type"],
                                          name="begin",
                                          shape=[len(parameters["begin"])])
         end = tf.compat.v1.placeholder(dtype=parameters["index_type"],
                                        name="end",
                                        shape=[len(parameters["end"])])
         strides = None
         if parameters["strides"] is not None:
             strides = tf.compat.v1.placeholder(
                 dtype=parameters["index_type"],
                 name="strides",
                 shape=[len(parameters["strides"])])
         tensors = [input_tensor, begin, end]
         if strides is not None:
             tensors.append(strides)
     out = tf.strided_slice(input_tensor,
                            begin,
                            end,
                            strides,
                            begin_mask=parameters["begin_mask"],
                            end_mask=parameters["end_mask"])
     return tensors, [out]
Example 5
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    """Iterate on the raw PTB data.

    This chunks up raw_data into batches of examples and returns Tensors that
    are drawn from these batches.

    Args:
      raw_data: one of the raw data outputs from ptb_raw_data.
      batch_size: int, the batch size.
      num_steps: int, the number of unrolls.
      name: the name of this operation (optional).

    Returns:
      A pair of Tensors, each shaped [batch_size, num_steps]. The second element
      of the tuple is the same data time-shifted to the right by one.

    Raises:
      tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
    """
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data,
                                        name="raw_data",
                                        dtype=tf.int32)

        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0:batch_size * batch_len],
                          [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(
            epoch_size,
            message="epoch_size == 0, decrease batch_size or num_steps")
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")

        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps])
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1])
        y.set_shape([batch_size, num_steps])
        return x, y
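What the two strided_slice calls deliver is easier to see on a toy corpus. The NumPy sketch below mirrors the same index arithmetic for batch i = 0 (values are arbitrary stand-ins for word ids):

import numpy as np

raw_data = np.arange(20)
batch_size, num_steps = 2, 3
batch_len = raw_data.size // batch_size          # 10
data = raw_data[:batch_size * batch_len].reshape(batch_size, batch_len)

i = 0
x = data[:, i * num_steps:(i + 1) * num_steps]          # [[0 1 2], [10 11 12]]
y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]  # [[1 2 3], [11 12 13]]
# y is x shifted one position to the right in the corpus, as the docstring says.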
Example 6
 def residual(self, x, shortcut, out_filters, stride, type='B'):
     in_shape = shortcut.get_shape()
     pad = int(x.get_shape()[3] - in_shape[3])
     if pad != 0 or type == 'C':
         if type == 'A':
             shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape,
                                         strides=[1, stride, stride, 1])
             shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])
         else:
             shortcut = self.conv(shortcut, 1, stride, out_filters)
             shortcut = self.norm(shortcut)
     x = shortcut + x
     x = self.relu(x)
     return x
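For the parameter-free type-'A' shortcut, tf.strided_slice does the spatial downsampling and tf.pad zero-fills the extra output channels. A standalone shape sketch with made-up sizes (NHWC input, stride 2, 16 -> 32 filters), just to show the bookkeeping:

import tensorflow as tf

shortcut = tf.zeros([8, 32, 32, 16])                       # NHWC block input
down = tf.strided_slice(shortcut, [0, 0, 0, 0], [8, 32, 32, 16],
                        strides=[1, 2, 2, 1])              # -> [8, 16, 16, 16]
padded = tf.pad(down, [[0, 0], [0, 0], [0, 0], [0, 16]])   # -> [8, 16, 16, 32]
print(down.shape, padded.shape)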
Example 7
    def get_cost(self, cost_annotation, cost_y, a_m, y_m):
        timesteps = tf.shape(cost_y)[0]
        batch_size = tf.shape(cost_y)[1]
        emb_y = tf.nn.embedding_lookup(self.embed_matrix,
                                       tf.reshape(cost_y, [-1]))
        emb_y = tf.reshape(emb_y, [timesteps, batch_size, self.word_dim])
        emb_pad = tf.fill((1, batch_size, self.word_dim), 0.0)
        emb_shift = tf.concat([
            emb_pad,
            tf.strided_slice(emb_y, [0, 0, 0], [-1, batch_size, self.word_dim],
                             [1, 1, 1])
        ],
                              axis=0)
        new_emb_y = emb_shift
        anno_mean = tf.reduce_sum(cost_annotation * a_m[:, :, :, None],
                                  axis=[1, 2]) / tf.reduce_sum(
                                      a_m, axis=[1, 2])[:, None]
        h_0 = tf.tensordot(anno_mean, self.Wa2h,
                           axes=1) + self.ba2h  # [batch, hidden_dim]
        h_0 = tf.tanh(h_0)

        ret = self.parser.get_ht_ctx(new_emb_y, h_0, cost_annotation, a_m, y_m)
        h_t = ret[0]  # h_t of all timesteps [timesteps, batch, word_dim]
        c_t = ret[1]  # c_t of all timesteps [timesteps, batch, context_dim]

        y_t_1 = new_emb_y  # shifted y | [1:] = [:-1]
        logit_gru = tf.tensordot(h_t, self.Wh, axes=1) + self.bh
        logit_ctx = tf.tensordot(c_t, self.Wc, axes=1) + self.bc
        logit_pre = tf.tensordot(y_t_1, self.Wy, axes=1) + self.by
        logit = logit_pre + logit_ctx + logit_gru
        shape = tf.shape(logit)
        logit = tf.reshape(logit, [shape[0], -1, shape[2] // 2, 2])
        logit = tf.reduce_max(logit, axis=3)

        logit = tf.layers.dropout(inputs=logit,
                                  rate=0.2,
                                  training=self.training)

        logit = tf.tensordot(logit, self.Wo, axes=1) + self.bo
        logit_shape = tf.shape(logit)
        logit = tf.reshape(logit, [-1, logit_shape[2]])
        cost = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=logit,
            labels=tf.one_hot(tf.reshape(cost_y, [-1]), depth=self.target_dim))

        cost = tf.multiply(cost, tf.reshape(y_m, [-1]))
        cost = tf.reshape(cost, [shape[0], shape[1]])
        cost = tf.reduce_sum(cost, axis=0)
        cost = tf.reduce_mean(cost)
        return cost
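The emb_shift construction above is the usual teacher-forcing shift: prepend one zero step and drop the last step, so that position t is conditioned on the embedding of symbol t - 1. A NumPy illustration with timesteps=3, batch=1, word_dim=2 (values arbitrary):

import numpy as np

emb_y = np.array([[[1., 1.]], [[2., 2.]], [[3., 3.]]])  # [timesteps, batch, dim]
emb_pad = np.zeros((1, 1, 2))
emb_shift = np.concatenate([emb_pad, emb_y[:-1]], axis=0)
# emb_shift == [[[0. 0.]], [[1. 1.]], [[2. 2.]]], i.e. emb_shift[1:] == emb_y[:-1]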
Example 8
    def read_data_set(self, filename_queue):
        record = Record()
        reader = tf.FixedLengthRecordReader(record_bytes=self.record_bytes)
        file_name, value = reader.read(filename_queue)

        byte_record = tf.decode_raw(value, tf.uint8)

        image_id = tf.strided_slice(byte_record, [0], [ID_BYTES])
        image_label = tf.strided_slice(byte_record, [ID_BYTES],
                                       [ID_BYTES + LABEL_BYTES])
        array_image = tf.strided_slice(byte_record, [ID_BYTES + LABEL_BYTES],
                                       [self.record_bytes])

        depth_major_image = tf.reshape(array_image,
                                       [self.depth, self.height, self.width])
        record.image = tf.transpose(depth_major_image, [1, 2, 0])

        # => height, width, depth order for augment
        # record.image = tf.reshape(array_image, [self.height, self.width, self.depth])

        record.id = image_id
        record.label = image_label

        return record
Example 9
def read_cifar10(filename_queue):
    """从CIFAR10数据集种读取数据
    @param filename_queue: 要读取的文件名队列
    @return: 某个对象,具有以下字段
             height:图片高度
             width:图片宽度
             depth:图片深度
             key: 一个描述当前抽样数据的文件名和记录数地标量字符串
             label: 一个int32类型的标签, 取值0...9
             uint8image: 一个[height, width, depth]维度的图像数据
    """
    # 建立一个空类, 方便数据的结构化储存
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()

    label_bytes = 1  # 2 for CIFAR-100
    result.height = 32
    result.width = 32
    result.depth = 3
    image_byte = result.height * result.width * result.depth
    record_bytes = label_bytes + image_byte

    # tf.FixedLengthRecordReader reads a fixed number of bytes per record; each
    # call continues reading from where the previous read left off.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)
    # decode_raw converts the string into a uint8 tensor.
    record_bytes = tf.decode_raw(value, tf.uint8)
    # tf.strided_slice(input, begin, end, strides=None) extracts the data in [begin, end).
    result.label = tf.cast(tf.strided_slice(
        record_bytes, [0], [label_bytes]), tf.int32)
    depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes], [label_bytes+image_byte]),
                             [result.depth, result.height, result.width])
    # convert from [depth, height, width] to [height, width, depth]
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
Example 10
 def subsequence(self, stride):
     return ViewSequence(
         self.scene_id, self.sequence_id,
         tf.strided_slice(self.timestamp, [0], [self.length()],
                          strides=[stride]),
         tf.strided_slice(self.rgb, [0], [self.length()], strides=[stride]),
         tf.strided_slice(self.pano, [0], [self.length()],
                          strides=[stride]),
         tf.strided_slice(self.depth, [0], [self.length()],
                          strides=[stride]),
         tf.strided_slice(self.normal, [0], [self.length()],
                          strides=[stride]),
         tf.strided_slice(self.pose, [0], [self.length()],
                          strides=[stride]),
         tf.strided_slice(self.intrinsics, [0], [self.length()],
                          strides=[stride]),
         tf.strided_slice(self.resolution, [0], [self.length()],
                          strides=[stride]))
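Each strided_slice in this method simply keeps every stride-th entry along the first axis, i.e. tensor[::stride]. A quick check of that equivalence (TF 1.x session style assumed):

import numpy as np
import tensorflow as tf

frames = tf.constant(np.arange(10).reshape(10, 1))             # 10 "frames"
every_third = tf.strided_slice(frames, [0], [10], strides=[3])
with tf.Session() as sess:
    print(sess.run(every_third)[:, 0])  # [0 3 6 9]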
Example 11
 def build_graph(parameters):
     """Build graph for shape_stride_slice test."""
     input_tensor = tf.compat.v1.placeholder(
         dtype=parameters["dtype"],
         name="input",
         shape=parameters["dynamic_input_shape"])
     begin = parameters["begin"]
     end = parameters["end"]
     strides = parameters["strides"]
     tensors = [input_tensor]
     out = tf.strided_slice(tf.shape(input_tensor),
                            begin,
                            end,
                            strides,
                            begin_mask=parameters["begin_mask"],
                            end_mask=parameters["end_mask"])
     return tensors, [out]
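Here strided_slice operates on the shape vector rather than on the tensor itself, which is how a subset of (possibly dynamic) dimensions can be picked out at graph-construction time. A minimal sketch of the same idea (the placeholder shape is made up):

import tensorflow as tf

images = tf.compat.v1.placeholder(tf.float32, shape=[None, 224, 224, 3])
hw = tf.strided_slice(tf.shape(images), [1], [3])  # height and width: [224, 224]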
Example 12
  def build_graph(parameters):
    """Build graph for stride_slice test."""
    input_tensor = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["constant_indices"]:
      begin = parameters["begin"]
      end = parameters["end"]
      strides = parameters["strides"]
      tensors = [input_tensor]
    else:
      begin = tf.compat.v1.placeholder(
          dtype=parameters["index_type"],
          name="begin",
          shape=[len(parameters["begin"])])
      end = tf.compat.v1.placeholder(
          dtype=parameters["index_type"],
          name="end",
          shape=[len(parameters["end"])])
      strides = None
      if parameters["strides"] is not None:
        strides = tf.compat.v1.placeholder(
            dtype=parameters["index_type"],
            name="strides",
            shape=[len(parameters["strides"])])
      tensors = [input_tensor, begin, end]
      if strides is not None:
        tensors.append(strides)

    kwargs = {}
    if parameters.get("ellipsis_mask", None):
      kwargs.update({"ellipsis_mask": parameters["ellipsis_mask"]})
    if parameters.get("new_axis_mask", None):
      kwargs.update({"new_axis_mask": parameters["new_axis_mask"]})

    out = tf.strided_slice(
        input_tensor,
        begin,
        end,
        strides,
        begin_mask=parameters["begin_mask"],
        end_mask=parameters["end_mask"],
        shrink_axis_mask=parameters["shrink_axis_mask"],
        **kwargs)
    return tensors, [out]
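The masks forwarded here correspond one-to-one to Python slice syntax: a set bit in begin_mask ignores that begin entry (like writing :stop), a set bit in end_mask ignores that end entry (like start:), and a set bit in shrink_axis_mask takes a single index on that axis and drops the axis. A standalone illustration on a concrete tensor (not part of the test harness):

import tensorflow as tf

t = tf.reshape(tf.range(24), [2, 3, 4])
a = tf.strided_slice(t, [0, 0, 1], [2, 3, 3], [1, 1, 1])   # t[:, :, 1:3]
b = tf.strided_slice(t, [0, 0, 1], [2, 3, 3], [1, 1, 1],
                     end_mask=0b100)                       # t[:, :, 1:]
c = tf.strided_slice(t, [1, 0, 0], [2, 3, 4], [1, 1, 1],
                     shrink_axis_mask=0b001)               # t[1], shape [3, 4]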
Example 13
def feedforward_evaluate(layers,
                         x,
                         use_skip_connections=False,
                         hidden_are_factored=False):
    """Evaluates `layers` as a feedforward neural network on `x`.

  Args:
    layers: The neural network layers (`tf.Tensor` -> `tf.Tensor` callables).
    x: The array-like input to evaluate. Must be trivially convertible to a
      matrix (tensor rank <= 2).
    use_skip_connections: Whether or not to use skip connections between layers.
      If the layer input has too few features to be added to the layer output,
      then the end of input is padded with zeros. If it has too many features,
      then the input is truncated.
    hidden_are_factored: Whether or not hidden logical layers are factored into
      two separate linear transformations stored as adjacent elements of
      `layers`.

  Returns:
    The `tf.Tensor` evaluation result.

  Raises:
    ValueError: If `x` has a rank greater than 2.
  """
    x = tensor_to_matrix(x)
    i = 0
    while i < len(layers) - 1:
        y = layers[i](x)
        i += 1
        if hidden_are_factored:
            y = layers[i](y)
            i += 1
        if use_skip_connections:
            my_num_features = x.shape[1].value
            padding = y.shape[1].value - my_num_features
            if padding > 0:
                zeros = tf.zeros([tf.shape(x)[0], padding])
                x = tf.concat([x, zeros], axis=1)
            elif padding < 0:
                x = tf.strided_slice(x, [0, 0],
                                     [tf.shape(x)[0], y.shape[1].value])
            y = x + y
        x = y
    return layers[-1](x)
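The skip-connection bookkeeping above either zero-pads x on the feature axis (when the next layer is wider) or truncates it with strided_slice (when it is narrower) before adding it to the layer output. A sketch of the truncation branch with illustrative shapes:

import tensorflow as tf

x = tf.zeros([5, 8])   # 5 examples, 8 features entering the layer
y = tf.zeros([5, 6])   # layer output is narrower, so x must be truncated
x_trunc = tf.strided_slice(x, [0, 0], [tf.shape(x)[0], 6])  # -> [5, 6]
skip = x_trunc + y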
Example 14
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(tf.equal(tf.rank(image), 3),
                               ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(tf.greater_equal(original_shape[0], crop_height),
                       tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.strided_slice instead of crop_to_bounding box as it accepts tensors
    # to define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.strided_slice(image,
                                 offsets,
                                 offsets + cropped_shape,
                                 strides=tf.ones_like(offsets))
    return tf.reshape(image, cropped_shape)
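A minimal usage sketch with hypothetical values: crop a 10x12 RGB image to 4x5 starting at row 2, column 3. Because the offsets may be tensors, strided_slice is used rather than tf.image.crop_to_bounding_box (TF 1.x assumed, since _crop uses tf.to_int32):

import tensorflow as tf

image = tf.zeros([10, 12, 3])
patch = _crop(image,
              offset_height=tf.constant(2), offset_width=tf.constant(3),
              crop_height=4, crop_width=5)  # runtime shape [4, 5, 3]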
Example 15
def read_cifar10(filename_queue):
  """Reads and parses examples from CIFAR10 data files.

  Recommendation: if you want N-way read parallelism, call this function
  N times.  This will give you N independent Readers reading different
  files & positions within those files, which will give better mixing of
  examples.

  Args:
    filename_queue: A queue of strings with the filenames to read from.

  Returns:
    An object representing a single example, with the following fields:
      height: number of rows in the result (32)
      width: number of columns in the result (32)
      depth: number of color channels in the result (3)
      key: a scalar string Tensor describing the filename & record number
        for this example.
      label: an int32 Tensor with the label in the range 0..9.
      uint8image: a [height, width, depth] uint8 Tensor with the image data
  """

  class CIFAR10Record(object):
    pass

  result = CIFAR10Record()

  # Dimensions of the images in the CIFAR-10 dataset.
  # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
  # input format.
  label_bytes = 1  # 2 for CIFAR-100
  result.height = 32
  result.width = 32
  result.depth = 3
  image_bytes = result.height * result.width * result.depth
  # Every record consists of a label followed by the image, with a
  # fixed number of bytes for each.
  record_bytes = label_bytes + image_bytes

  # Read a record, getting filenames from the filename_queue.  No
  # header or footer in the CIFAR-10 format, so we leave header_bytes
  # and footer_bytes at their default of 0.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  result.key, value = reader.read(filename_queue)

  # Convert from a string to a vector of uint8 that is record_bytes long.
  record_bytes = tf.decode_raw(value, tf.uint8)

  # The first bytes represent the label, which we convert from uint8->int32.
  result.label = tf.cast(
      tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

  # The remaining bytes after the label represent the image, which we reshape
  # from [depth * height * width] to [depth, height, width].
  depth_major = tf.reshape(
      tf.strided_slice(record_bytes, [label_bytes],
                       [label_bytes + image_bytes]),
      [result.depth, result.height, result.width])
  # Convert from [depth, height, width] to [height, width, depth].
  result.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return result
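The queue-based reader above is the classic TF 1.x input pattern. On versions that ship tf.data, the same record layout can be parsed without queues; a rough equivalent sketch (the file name is a placeholder, and tf.io.decode_raw may be spelled tf.decode_raw on older releases):

import tensorflow as tf

record_bytes = 1 + 32 * 32 * 3
dataset = tf.data.FixedLengthRecordDataset(['data_batch_1.bin'], record_bytes)

def parse(record):
    raw = tf.io.decode_raw(record, tf.uint8)
    label = tf.cast(raw[0], tf.int32)
    image = tf.transpose(tf.reshape(raw[1:], [3, 32, 32]), [1, 2, 0])
    return image, label

dataset = dataset.map(parse).batch(128)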
Example 16
def preprocess_targets(targets, word2int, batch_size):
    left_side = tf.fill([batch_size, 1], word2int['<SOS>'])
    right_side = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1])
    preprocessed_targets = tf.concat([left_side, right_side], 1)
    return preprocessed_targets
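The strided_slice here drops the last token of every target sequence and the concat prepends an <SOS> id, producing the decoder's shifted input. The same transformation in NumPy on a toy batch (sos_id = 1 stands in for word2int['<SOS>']):

import numpy as np

targets = np.array([[5, 6, 7, 8], [9, 10, 11, 12]])
sos_id = 1
left = np.full((targets.shape[0], 1), sos_id)
right = targets[:, :-1]                 # what the strided_slice keeps
print(np.concatenate([left, right], axis=1))
# [[ 1  5  6  7]
#  [ 1  9 10 11]]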
Example 17
 def strided_slice(x):
     return tf.strided_slice(x, **parameters)
Example 18
def input_producer(raw_data,
                   batch_size,
                   num_steps,
                   shuffle=False,
                   randomize=False,
                   random_len=False):
    """Produces graph-based input for Penn Treebank.

  Args:
    raw_data: np tensor of size [num_words].
    batch_size: self-explained.
    num_steps: number of BPTT steps.
    shuffle: whether to shuffle sentences.
    randomize: use random segments instead of the continuous corpus.
    random_len: random sequence len.

  Returns:
    If `random_len` is set, return op that represents whether we have reached
      the end of a sequence.
    Otherwise, return number of batches in an epoch.
  """

    num_batches_per_epoch = (
        (np.size(raw_data) // batch_size) - 1) // num_steps
    raw_data = tf.convert_to_tensor(raw_data, name='raw_data', dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0:batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    with tf.device('/cpu:0'):
        epoch_size = tf.identity(epoch_size, name='epoch_size')

        if random_len:
            start_idx = tf.Variable(0,
                                    name='start_idx',
                                    dtype=tf.int32,
                                    trainable=False)
            base_bptt = tf.cond(
                tf.random_uniform(shape=(), minval=0., maxval=1.) < 0.95,
                lambda: tf.cast(num_steps, dtype=tf.float32),
                lambda: tf.cast(num_steps, dtype=tf.float32) / 2.)
            seq_len = tf.random.truncated_normal(shape=(),
                                                 mean=base_bptt,
                                                 stddev=5.,
                                                 dtype=tf.float32)
            seq_len = tf.cast(seq_len, dtype=tf.int32)
            seq_len = tf.minimum(seq_len, num_steps + 20)  # seq_len <= num_steps + 20
            seq_len = tf.minimum(seq_len, batch_len - start_idx - 1)
            end_idx = start_idx + seq_len

            x = data[:, start_idx:end_idx]
            y = data[:, start_idx + 1:end_idx + 1]

            with tf.control_dependencies([x, y]):
                with tf.control_dependencies([tf.assign(start_idx, end_idx)]):
                    should_reset = tf.greater_equal(end_idx, batch_len - 3)

            reset_start_idx = tf.assign(start_idx, 0)
            return (x, y, num_batches_per_epoch, reset_start_idx, should_reset,
                    base_bptt)

        if randomize:
            # A scalar offset, so that [0, i] below packs into a rank-1 begin index.
            i = tf.random_uniform([],
                                  minval=0,
                                  maxval=batch_len - num_steps,
                                  dtype=tf.int32)
            x = tf.strided_slice(data, [0, i], [batch_size, i + num_steps])
            y = tf.strided_slice(data, [0, i + 1],
                                 [batch_size, i + num_steps + 1])
        else:
            i = tf.train.range_input_producer(epoch_size,
                                              shuffle=shuffle).dequeue()
            x = tf.strided_slice(data, [0, i * num_steps],
                                 [batch_size, (i + 1) * num_steps])
            y = tf.strided_slice(data, [0, i * num_steps + 1],
                                 [batch_size, (i + 1) * num_steps + 1])
        x.set_shape([batch_size, num_steps])
        y.set_shape([batch_size, num_steps])

        return x, y, num_batches_per_epoch
Example 19
def redundant_lom(label, radius, scope='redundant_lom'):
    """Convert label tensor into redundant LOM representation.

  Args:
    label: Tensor with dimensions batch, z, y, x, channels.  Channels should be
           flat.
    radius: 3-sequence of z, y, x LOM radii.
    scope: TF scope for ops.

  Returns:
    Tensor with dimensions batch, z, y, x, lomz, lomy, lomx.  Unfortunately,
    rank 7 tensors are not supported by many TF ops.  Use the helpers below to
    flatten / unflatten either the ZYX or LOM dims.

  Raises:
    ValueError: if input tensor is wrong shape.

  The LOM generated is smaller in z, y, x by 2 * radius.  Each z, y, x location
  has a full complement of lomz, lomy, lomx entries, which means that all the
  affinities except the edges are doubly represented, once at each terminal node
  voxel.

  TODO(phli): Benchmark alternative implementations.
  """
    if len(label.shape_as_list()) != 5:
        raise ValueError(
            'Input tensor must have dimensions batch, z, y, x, channels.')
    if label.shape_as_list()[4] != 1:
        raise ValueError('Input tensor must have single channel.')

    with tf.name_scope(scope):

        # Central crop to be compared to offset crops.
        core_start = [0] + list(radius) + [0]
        core_shape = list(label.shape_as_list())
        core_shape[1] -= 2 * radius[0]
        core_shape[2] -= 2 * radius[1]
        core_shape[3] -= 2 * radius[2]

        core_end = tf.add(core_start, core_shape)
        core = tf.strided_slice(label, core_start, core_end)
        core = tf.reshape(core, core_shape, name='lom_core')

        # Offset crops.  Currently this clobbers the flat channel dimension with the
        # LOMs.
        # TODO(phli): Would be nice to replace this with extract_patches, but that
        # hasn't been exposed in the TF api.
        shifts = []
        dims = lom_dims(radius)
        for z in range(dims[0]):
            for y in range(dims[1]):
                for x in range(dims[2]):
                    shifts.append(
                        tf.reshape(tf.strided_slice(
                            label, (0, z, y, x, 0),
                            tf.add((0, z, y, x, 0), core_shape)),
                                   core_shape,
                                   name='slice_lom_shift'))
        shift_tensor = tf.concat(shifts, 4, name='concat_lom_shifts')

        lom = tf.logical_and(tf.equal(core, shift_tensor),
                             core > 0,
                             name='compute_redunant_lom')
        return unravel_lom_dims(lom, radius)