Example #1
def downsample2x(tensor, interpolation='linear', axes=None):
    if struct.isstruct(tensor):
        return struct.map(lambda s: downsample2x(s, interpolation, axes),
                          tensor,
                          recursive=False)

    if interpolation.lower() != 'linear':
        raise ValueError('Only linear interpolation supported')
    rank = spatial_rank(tensor)
    if axes is None:
        axes = range(rank)
    tensor = math.pad(
        tensor, [[0, 0]] +
        [([0, 1] if
          (dim % 2) != 0 and _contains_axis(axes, ax, rank) else [0, 0])
         for ax, dim in enumerate(tensor.shape[1:-1])] + [[0, 0]], 'replicate')
    for axis in axes:
        upper_slices = tuple([(slice(1, None, 2) if i == axis else slice(None))
                              for i in range(rank)])
        lower_slices = tuple([(slice(0, None, 2) if i == axis else slice(None))
                              for i in range(rank)])
        tensor_sum = tensor[(slice(None), ) + upper_slices +
                            (slice(None), )] + tensor[(slice(None), ) +
                                                      lower_slices +
                                                      (slice(None), )]
        tensor = tensor_sum / 2
    return tensor
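For intuition, the linear 2x downsampling above amounts to averaging adjacent pairs of cells along each spatial axis. A minimal NumPy sketch of the same idea in 1D (the array x is hypothetical, not part of the library):

import numpy as np

x = np.arange(8, dtype=float).reshape(1, 8, 1)   # (batch, spatial, channels), hypothetical data
down = 0.5 * (x[:, 0::2, :] + x[:, 1::2, :])     # average neighbouring pairs -> shape (1, 4, 1)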
Example #2
def l1_loss(tensor: Tensor, batch_norm=True, reduce_batches=True):
    """
    get L1 loss

    Args:
      tensor: Tensor: 
      batch_norm:  (Default value = True)
      reduce_batches:  (Default value = True)

    Returns:

    """
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(
            l1_loss(tensor, batch_norm, reduce_batches)
            for tensor in all_tensors)
    if reduce_batches:
        total_loss = math.sum_(math.abs(tensor))
    else:
        total_loss = math.sum_(math.abs(tensor),
                               dim=list(range(1, len(tensor.shape))))
    if batch_norm and reduce_batches:
        batch_size = tensor.shape.sizes[0]
        return math.divide_no_nan(total_loss, math.to_float(batch_size))
    else:
        return total_loss
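For intuition, the numerical effect of the two flags, sketched with plain NumPy on a hypothetical 2-entry batch (not the library's implementation):

import numpy as np

x = np.array([[1.0, -2.0], [3.0, -4.0]])   # hypothetical batch of 2 entries
total = np.abs(x).sum()                    # reduce_batches=True            -> 10.0
per_entry = np.abs(x).sum(axis=1)          # reduce_batches=False           -> [3., 7.]
normalized = total / x.shape[0]            # batch_norm with reduce_batches -> 5.0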
Example #3
def downsample2x(tensor, interpolation='linear'):
    if struct.isstruct(tensor):
        return struct.map(lambda s: downsample2x(s, interpolation),
                          tensor,
                          recursive=False)

    if interpolation.lower() != 'linear':
        raise ValueError('Only linear interpolation supported')
    dims = range(spatial_rank(tensor))
    tensor = math.pad(tensor,
                      [[0, 0]] + [([0, 1] if (dim % 2) != 0 else [0, 0])
                                  for dim in tensor.shape[1:-1]] + [[0, 0]],
                      'SYMMETRIC')
    for dimension in dims:
        upper_slices = tuple([
            (slice(1, None, 2) if i == dimension else slice(None))
            for i in dims
        ])
        lower_slices = tuple([
            (slice(0, None, 2) if i == dimension else slice(None))
            for i in dims
        ])
        tensor_sum = tensor[(slice(None), ) + upper_slices +
                            (slice(None), )] + tensor[(slice(None), ) + lower_slices +
                                                      (slice(None), )]
        tensor = tensor_sum / 2
    return tensor
Example #4
def placeholder(shape,
                dtype=np.float32,
                basename=None,
                item_condition=struct.VARIABLES):
    if struct.isstruct(dtype):

        def placeholder_map(trace):
            shape, dtype = trace.value
            return tf.placeholder(dtype, shape, _tf_name(trace, basename))

        zipped = struct.zip([shape, dtype],
                            leaf_condition=is_static_shape,
                            item_condition=item_condition)
        return struct.map(placeholder_map,
                          zipped,
                          leaf_condition=is_static_shape,
                          trace=True,
                          item_condition=item_condition)
    else:
        f = lambda trace: tf.placeholder(dtype, trace.value,
                                         _tf_name(trace, basename))
        return struct.map(f,
                          shape,
                          leaf_condition=is_static_shape,
                          trace=True,
                          item_condition=item_condition)
Example #5
 def test_flatten(self):
     for obj in generate_test_structs():
         flat = struct.flatten(obj)
         self.assertIsInstance(flat, list)
         self.assertGreater(len(flat), 0)
         for item in flat:
             self.assertTrue(not struct.isstruct(item),
                             'The result of flatten(%s) is not flat.' % obj)
Example #6
def build_keymap(args, kwargs):
    " maps key to (False, value) or (True, key); keys are integers for args and strings for kwargs "
    map = {}
    structs = []
    for i, value in enumerate(args):
        if struct.isstruct(value):
            map[i] = (True, len(structs))
            structs.append(value)
        else:
            map[i] = (False, value)
    for key, value in kwargs.items():
        if struct.isstruct(value):
            map[key] = (True, len(structs))
            structs.append(value)
        else:
            map[key] = (False, value)
    return structs, map
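A small illustration of the resulting mapping, assuming phiflow's struct module treats lists as structs and using the hypothetical value some_struct:

some_struct = [1.0, 2.0]                   # lists count as structs under struct.isstruct
structs, keymap = build_keymap((1, some_struct), {'a': 2})
# structs -> [some_struct]
# keymap  -> {0: (False, 1), 1: (True, 0), 'a': (False, 2)}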
Example #7
def l_n_loss(tensor, n, batch_norm=True):
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(l_n_loss(tensor, n, batch_norm) for tensor in all_tensors)
    total_loss = math.sum(tensor**n) / n
    if batch_norm:
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))
    else:
        return total_loss
Example #8
 def read(self, obj, frame=0):
     if struct.isstruct(obj):
         obj = _transform_for_writing(obj)
         names = struct.flatten(obj)
         if not np.all([isinstance(n, six.string_types) for n in names]):
             names = struct.names(obj)
         data = struct.map(lambda name: self.read_array(self._filename(name), frame), names)
         return data
     else:
         return self.read_array('unnamed', frame)
Example #9
def placeholder(shape, dtype=np.float32, basename='Placeholder'):
    if struct.isstruct(dtype):
        def placeholder_map(trace):
            shape, dtype = trace.value
            return tf.placeholder(dtype, shape, _tf_name(trace, basename))
        zipped = struct.zip([shape, dtype], leaf_condition=is_static_shape)
        return struct.map(placeholder_map, zipped, leaf_condition=is_static_shape, trace=True)
    else:
        def f(trace): return tf.placeholder(dtype, trace.value, _tf_name(trace, basename))
        return struct.map(f, shape, leaf_condition=is_static_shape, trace=True)
Example #10
 def write(self, obj, names=None, frame=0):
     if struct.isstruct(obj):
         obj = _transform_for_writing(obj)
         if names is None:
             names = struct.names(obj)
         values = struct.flatten(obj)
         names = struct.flatten(names)
         names = [_slugify_filename(name) for name in names]
         self.write_sim_frame(values, names, frame)
     else:
         name = str(names) if names is not None else 'unnamed'
         self.write_sim_frame([obj], [name], frame)
Example #11
def l1_loss(tensor, batch_norm=True, reduce_batches=True):
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(l1_loss(tensor, batch_norm, reduce_batches) for tensor in all_tensors)
    if reduce_batches:
        total_loss = math.sum(math.abs(tensor))
    else:
        total_loss = math.sum(math.abs(tensor), axis=list(range(1, len(tensor.shape))))
    if batch_norm and reduce_batches:
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))
    else:
        return total_loss
Example #12
def dataset_handle(shape, dtype, frames=None):
    """
Creates a single virtual TensorFlow dataset (iterator_handle) for the given struct.
The dataset is expected to hold contain all fields required for loading the obj given the current context item condition.
From the dataset, graph input tensors are derived and arranged into a struct of the same shape as obj.
If an integer is passed to frames, a list of such structs is created by unstacking the second-outer-most dimension of the dataset.
    :param shape: tensor shape or struct of tensor shapes
    :param dtype: data type of struct of data types matching shape
    :param frames: Number of frames contained in each example of the dataset. Expects shape (batch_size, frames, ...)
    :type frames: int or None
    :return: list of struct and placeholder.
     1. If frames=None: valid struct corresponding to obj. If frames>1: list thereof
     2. placeholder for a TensorFlow dataset iterator handle (dtype=string)
    :rtype: tuple
    """
    shapes = tuple(struct.flatten(shape, leaf_condition=is_static_shape))
    if struct.isstruct(dtype):
        dtypes = tuple(struct.flatten(dtype))
        assert len(dtypes) == len(shapes)
    else:
        dtypes = [dtype] * len(shapes)
    if frames is not None:
        shapes = tuple(
            [shape[0:1] + (frames, ) + shape[1:] for shape in shapes])
    # --- TF Dataset handle from string ---
    iterator_handle = tf.placeholder(tf.string,
                                     shape=[],
                                     name='dataset_iterator_handle')
    iterator = tf.data.Iterator.from_string_handle(iterator_handle,
                                                   output_types=dtypes,
                                                   output_shapes=shapes)
    next_element = iterator.get_next()
    # --- Create resulting struct by splitting `next_element`s ---
    if frames is None:
        next_element_list = list(next_element)
        next_struct = struct.map(lambda _: next_element_list.pop(0),
                                 shape,
                                 leaf_condition=is_static_shape)
    else:
        # --- Remap structures -> to `frames` long list of structs ---
        next_struct = []
        for frame_idx in range(frames):
            next_element_list = list(next_element)
            frame_struct = struct.map(
                lambda _: next_element_list.pop(0)[:, frame_idx, ...],
                shape,
                leaf_condition=is_static_shape)
            next_struct.append(frame_struct)
    return next_struct, iterator_handle
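A minimal sketch of the TF1 iterator-handle mechanism that dataset_handle builds on, assuming TensorFlow 1.x; the shapes and the in-memory dataset are hypothetical:

import numpy as np
import tensorflow as tf

# A string placeholder selects a concrete iterator at run time.
handle = tf.placeholder(tf.string, shape=[], name='dataset_iterator_handle')
iterator = tf.data.Iterator.from_string_handle(handle,
                                               output_types=tf.float32,
                                               output_shapes=tf.TensorShape([None, 64, 64, 1]))
next_element = iterator.get_next()

# Any dataset with matching types/shapes can be plugged in via its string handle.
data = np.random.rand(100, 64, 64, 1).astype(np.float32)
train_iterator = tf.data.Dataset.from_tensor_slices(data).batch(4).repeat().make_one_shot_iterator()

with tf.Session() as sess:
    train_handle = sess.run(train_iterator.string_handle())
    batch = sess.run(next_element, feed_dict={handle: train_handle})   # shape (4, 64, 64, 1)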
Example #13
def upsample2x(tensor, interpolation='linear'):
    if struct.isstruct(tensor):
        return struct.map(lambda s: upsample2x(s, interpolation), tensor, recursive=False)

    if interpolation.lower() != 'linear':
        raise ValueError('Only linear interpolation supported')
    dims = range(spatial_rank(tensor))
    vlen = tensor.shape[-1]
    spatial_dims = tensor.shape[1:-1]
    rank = spatial_rank(tensor)
    tensor = math.pad(tensor, _get_pad_width(rank), 'replicate')
    for dim in dims:
        lower, center, upper = _dim_shifted(tensor, dim, (-1, 0, 1))
        combined = math.stack([0.25 * lower + 0.75 * center, 0.75 * center + 0.25 * upper], axis=2 + dim)
        tensor = math.reshape(combined, [-1] + [spatial_dims[dim] * 2 if i == dim else tensor.shape[i + 1] for i in dims] + [vlen])
    return tensor
Example #14
def build_graph_input(obj, input_type='placeholder', frames=None):
    """
Create placeholders for tensors in the supplied state.
    :param obj: struct or StateProxy
    :param input_type: 'placeholder' or 'dataset_handle'
    :param frames: Number of input frames. If not None, returns a list of input structs.
    :return:
      1. Valid state containing or derived from the created placeholders or dataset handle
      2. For input_type='placeholder': dict mapping the placeholders to their default names (using struct.names). For 'dataset_handle': dict containing names, iterator_handle, shapes, dtypes and frames.
    """
    if isinstance(obj, StateProxy):
        obj = obj.state
    assert struct.isstruct(obj)
    # --- Shapes and names ---
    writable_obj = _transform_for_writing(obj)
    shape = _writing_staticshape(obj)
    names = struct.names(writable_obj)
    if input_type == 'placeholder':
        if frames is not None: raise NotImplementedError()
        with _unsafe():
            placeholders = placeholder(shape)
        graph_in = struct.map(
            lambda x: x,
            placeholders)  # validates fields, splits staggered tensors
        return graph_in, {placeholders: names}
    elif input_type == 'dataset_handle':
        with _unsafe():
            dtypes = struct.dtype(writable_obj)
            dataset_nodes, iterator_handle = dataset_handle(shape,
                                                            dtypes,
                                                            frames=frames)
        graph_in = struct.map(
            lambda x: x,
            dataset_nodes)  # validates fields, splits staggered tensors
        shapes = struct.flatten(struct.staticshape(dataset_nodes),
                                leaf_condition=is_static_shape)
        dtypes = struct.flatten(struct.dtype(dataset_nodes))
        return graph_in, {
            'names': struct.flatten(names),
            'iterator_handle': iterator_handle,
            'shapes': shapes,
            'dtypes': dtypes,
            'frames': frames
        }
    else:
        raise ValueError(input_type)
Example #15
def frequency_loss(tensor, frequency_falloff=100, reduce_batches=True):
    """
    Instead of minimizing each entry of the tensor, minimize the frequencies of the tensor, emphasizing lower frequencies over higher ones.

    :param tensor: typically actual - target
    :param frequency_falloff: large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally.
    :param reduce_batches: whether to reduce the batch dimension of the loss by summing the losses along the first dimension
    :return: scalar loss value
    """
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(
            frequency_loss(tensor, frequency_falloff, reduce_batches)
            for tensor in all_tensors)
    diff_fft = abs_square(math.fft(tensor))
    k = fftfreq(tensor.shape[1:-1], mode='absolute')
    weights = math.exp(-0.5 * k**2 * frequency_falloff**2)
    return l1_loss(diff_fft * weights, reduce_batches=reduce_batches)
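The frequency weighting described in the docstring decays as exp(-0.5 * k**2 * frequency_falloff**2), so the zero-frequency component keeps full weight while higher frequencies are suppressed. A small NumPy sketch of those weights for a hypothetical 1D signal of length 64:

import numpy as np

k = np.abs(np.fft.fftfreq(64))             # absolute frequencies of a length-64 signal
weights = np.exp(-0.5 * k**2 * 100**2)     # frequency_falloff = 100
# weights[0] == 1.0 (DC component); weights decay rapidly for higher frequencies.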
Example #16
def placeholder(shape, dtype=None, basename='Placeholder'):
    if struct.isstruct(dtype):

        def placeholder_map(trace):
            shape, dtype = trace.value
            return tf.placeholder(dtype, shape, _tf_name(trace, basename))

        zipped = struct.zip([shape, dtype], leaf_condition=is_static_shape)
        return struct.map(placeholder_map,
                          zipped,
                          leaf_condition=is_static_shape,
                          trace=True)
    else:

        def f(trace):
            return tf.placeholder(
                TF_BACKEND.precision_dtype if dtype is None else dtype,
                trace.value, _tf_name(trace, basename))

        return struct.map(f, shape, leaf_condition=is_static_shape, trace=True)
Example #17
def upsample2x(tensor, interpolation='linear'):
    if struct.isstruct(tensor):
        return struct.map(lambda s: upsample2x(s, interpolation),
                          tensor,
                          recursive=False)

    if interpolation.lower() != 'linear':
        raise ValueError('Only linear interpolation supported')
    dims = range(spatial_rank(tensor))
    vlen = tensor.shape[-1]
    spatial_dims = tensor.shape[1:-1]
    tensor = math.pad(tensor,
                      [[0, 0]] + [[1, 1]] * spatial_rank(tensor) + [[0, 0]],
                      'SYMMETRIC')
    for dim in dims:
        left_slices_1 = tuple([(slice(2, None) if i == dim else slice(None))
                               for i in dims])
        left_slices_2 = tuple([(slice(1, -1) if i == dim else slice(None))
                               for i in dims])
        right_slices_1 = tuple([(slice(1, -1) if i == dim else slice(None))
                                for i in dims])
        right_slices_2 = tuple([(slice(-2) if i == dim else slice(None))
                                for i in dims])
        left = 0.75 * tensor[(slice(None), ) + left_slices_2 +
                             (slice(None), )] + 0.25 * tensor[
                                 (slice(None), ) + left_slices_1 +
                                 (slice(None), )]
        right = 0.25 * tensor[(slice(None), ) + right_slices_2 +
                              (slice(None), )] + 0.75 * tensor[
                                  (slice(None), ) + right_slices_1 +
                                  (slice(None), )]
        combined = math.stack([right, left], axis=2 + dim)
        tensor = math.reshape(combined, [-1] + [
            spatial_dims[dim] * 2 if i == dim else tensor.shape[i + 1]
            for i in dims
        ] + [vlen])
    return tensor
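For intuition, the 0.25/0.75 weighting above is plain linear interpolation at quarter-cell offsets. A 1D NumPy sketch of the same stencil, using a hypothetical array x (not the library's implementation):

import numpy as np

x = np.arange(4, dtype=float)                       # hypothetical 1D field
padded = np.pad(x, 1, mode='symmetric')             # mirror the boundary values
center, prev, nxt = padded[1:-1], padded[:-2], padded[2:]
right = 0.25 * prev + 0.75 * center                 # sample at -1/4 cell
left = 0.75 * center + 0.25 * nxt                   # sample at +1/4 cell
up = np.stack([right, left], axis=-1).reshape(-1)   # interleave -> length 8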
Example #18
 def is_applicable(self, values):
     for value in values:
         if struct.isstruct(value):
             return True
     return False