Example #1
def _pool2d(pool_fn,
            input,
            pool_size,
            strides=(1, 1),
            channels_last=True,
            padding='same',
            name=None,
            default_name=None):
    input, _, data_format = validate_conv2d_input(input, channels_last)

    # check functional arguments
    padding = validate_enum_arg('padding',
                                str(padding).upper(), ['VALID', 'SAME'])
    strides = validate_conv2d_strides_tuple('strides', strides, channels_last)
    ksize = validate_conv2d_strides_tuple('pool_size', pool_size,
                                          channels_last)

    # call pooling
    with tf.name_scope(name, default_name=default_name):
        output, s1, s2 = flatten_to_ndims(input, 4)
        output = pool_fn(value=output,
                         ksize=ksize,
                         strides=strides,
                         padding=padding,
                         data_format=data_format)
        output = unflatten_from_ndims(output, s1, s2)
    return output
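
The helper above is generic over the pooling op. A minimal usage sketch (assuming TensorFlow 1.x and the tfsnippet helpers imported by the surrounding module) of how a max-pooling wrapper could be derived from it; the wrapper name is illustrative, not part of the original code:

import tensorflow as tf

def max_pool2d(input, pool_size, strides=(1, 1), channels_last=True,
               padding='same', name=None):
    # Bind `pool_fn` to tf.nn.max_pool; `_pool2d` validates the arguments,
    # flattens the input to 4-D, pools, and restores the original shape.
    return _pool2d(tf.nn.max_pool, input, pool_size, strides=strides,
                   channels_last=channels_last, padding=padding,
                   name=name, default_name='max_pool2d')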
Example #2
def load_cifar100(label_mode='fine', channels_last=True, x_shape=None,
                  x_dtype=np.float32, y_dtype=np.int32, normalize_x=False):
    """
    Load the CIFAR-100 dataset as NumPy arrays.

    Args:
        label_mode: One of {"fine", "coarse"}.
        channels_last (bool): Whether or not to place the channels axis
            last.  Default :obj:`True`.
        x_shape: Reshape each image into this shape.  Defaults to
            ``(32, 32, 3)`` if `channels_last` is :obj:`True`, otherwise
            to ``(3, 32, 32)``.
        x_dtype: Cast each image into this data type.  Default `np.float32`.
        y_dtype: Cast each label into this data type.  Default `np.int32`.
        normalize_x (bool): Whether or not to normalize x into ``[0, 1]``
            by dividing each pixel value by 255.  Default :obj:`False`.

    Returns:
        (np.ndarray, np.ndarray), (np.ndarray, np.ndarray): The
            ``(train_x, train_y), (test_x, test_y)`` arrays.
    """
    # check the arguments
    label_mode = validate_enum_arg('label_mode', label_mode, ('fine', 'coarse'))
    x_shape = _validate_x_shape(x_shape, channels_last)

    # fetch data
    path = CacheDir('cifar').download_and_extract(
        CIFAR_100_URI, hasher=hashlib.md5(), expected_hash=CIFAR_100_MD5)
    data_dir = os.path.join(path, CIFAR_100_CONTENT_DIR)

    # load the data
    path = os.path.join(data_dir, 'train')
    train_x, train_y = _load_batch(
        path, channels_last=channels_last, x_shape=x_shape,
        x_dtype=x_dtype, y_dtype=y_dtype, normalize_x=normalize_x,
        expected_batch_label='training batch 1 of 1',
        labels_key='{}_labels'.format(label_mode)
    )
    assert len(train_x) == len(train_y) == 50000

    path = os.path.join(data_dir, 'test')
    test_x, test_y = _load_batch(
        path, channels_last=channels_last, x_shape=x_shape,
        x_dtype=x_dtype, y_dtype=y_dtype, normalize_x=normalize_x,
        expected_batch_label='testing batch 1 of 1',
        labels_key='{}_labels'.format(label_mode)
    )
    assert len(test_x) == len(test_y) == 10000

    return (train_x, train_y), (test_x, test_y)
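
A brief usage sketch (assumes the function is importable from the surrounding module and that the CIFAR-100 archive can be downloaded into the cache directory); the shapes in the comments follow from the docstring and the assertions above:

(train_x, train_y), (test_x, test_y) = load_cifar100(
    label_mode='fine', channels_last=True, normalize_x=True)

print(train_x.shape)  # (50000, 32, 32, 3), float32 pixels in [0, 1]
print(test_y.shape)   # (10000,), int32 fine labels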
Example #3
    def __init__(self,
                 shift_and_scale_fn,
                 axis=-1,
                 value_ndims=1,
                 secondary=False,
                 scale_type='linear',
                 sigmoid_scale_bias=2.,
                 epsilon=1e-6,
                 name=None,
                 scope=None):
        """
        Construct a new :class:`CouplingLayer`.

        Args:
            shift_and_scale_fn ((tf.Tensor, int) -> (tf.Tensor, tf.Tensor or None)):
                A function which maps ``(x1, x2.shape[axis])`` to
                ``(shift, scale)`` (see above).  If `scale_type` is
                :obj:`None`, it should return :obj:`None` for `scale`.
                It should be a function
                that reuses a fixed variable scope, e.g., a template function
                derived by :func:`tf.make_template`, or an instance of
                :class:`tfsnippet.layers.BaseLayer`.
            axis (int): The feature axis along which to apply the
                transformation.
            value_ndims (int): Number of dimensions to be considered as the
                value dimensions.  `x.ndims - value_ndims == log_det.ndims`.
            secondary (bool): Whether or not this layer is a secondary layer.
                See :class:`tfsnippet.layers.CouplingLayer`.
            scale_type: One of {"exp", "sigmoid", "linear", None}.
                See :class:`tfsnippet.layers.CouplingLayer`.
            sigmoid_scale_bias (float or Tensor): Add this bias to the `scale`
                if ``scale_type == 'sigmoid'``.  See
                :class:`tfsnippet.layers.CouplingLayer` for the reason of
                adopting this bias.
            epsilon: Small float number to avoid dividing by zero or taking
                logarithm of zero.
        """
        self._shift_and_scale_fn = shift_and_scale_fn
        self._secondary = bool(secondary)
        self._scale_type = validate_enum_arg(
            'scale_type', scale_type, ['exp', 'sigmoid', 'linear', None])
        self._sigmoid_scale_bias = sigmoid_scale_bias
        self._epsilon = epsilon
        self._n_features = None  # type: int

        super(CouplingLayer, self).__init__(axis=int(axis),
                                            value_ndims=value_ndims,
                                            name=name,
                                            scope=scope)
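
The constructor's contract for `shift_and_scale_fn` is that it maps ``(x1, x2.shape[axis])`` to ``(shift, scale)`` while reusing a fixed variable scope. A hedged sketch of one way to satisfy this with :func:`tf.make_template` (TF 1.x API; the hidden layer size and scope names are illustrative, not part of the original code):

import tensorflow as tf

def _shift_and_scale(x1, n2):
    # Illustrative network: a shared hidden layer, then one head each for
    # `shift` and `scale`, both producing `n2` output features.
    h = tf.layers.dense(x1, 64, activation=tf.nn.relu)
    shift = tf.layers.dense(h, n2, name='shift')
    scale = tf.layers.dense(h, n2, name='scale')
    return shift, scale

# tf.make_template makes the created variables reusable across calls,
# as the constructor requires.
shift_and_scale_fn = tf.make_template('shift_and_scale', _shift_and_scale)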
Example #4
def get_deconv_output_length(input_length, kernel_size, strides, padding):
    """
    Get the output length of deconvolution at a specific dimension.

    Args:
        input_length: Input tensor length.
        kernel_size: The size of the kernel.
        strides: The stride of convolution.
        padding: One of {"same", "valid"}, case-insensitive.

    Returns:
        int: The output length of deconvolution.
    """
    padding = validate_enum_arg('padding',
                                str(padding).upper(), ['SAME', 'VALID'])
    output_length = input_length * strides
    if padding == 'VALID':
        output_length += max(kernel_size - strides, 0)
    return output_length
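
A quick worked check of the formula: with ``input_length=7``, ``kernel_size=3`` and ``strides=2``, "same" padding gives ``7 * 2 = 14``, while "valid" padding adds ``max(3 - 2, 0) = 1`` on top of that:

assert get_deconv_output_length(7, 3, 2, 'same') == 14
assert get_deconv_output_length(7, 3, 2, 'valid') == 15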
Example #5
def collect_outputs(outputs, inputs, data_flow, mode='concat', axis=0,
                    feed_dict=None, session=None):
    """
    Run TensorFlow nodes by mini-batch and collect outputs from each batch.

    Args:
        outputs (Iterable[tf.Tensor] or dict[str, tf.Tensor]): The output
            tensors to be computed.
        inputs (Iterable[tf.Tensor]): Input placeholders.
        data_flow (DataFlow): Data flow to feed the input placeholders.
        mode ({'concat', 'average'}): If "concat", the outputs from each
            mini-batch will be concatenated.  If "average", the output from
            each mini-batch must be a scalar, and this method will take the
            average of the per-batch outputs, weighted by the batch sizes.
        axis (int): The axis for concatenation.
        feed_dict: Optional, additional feed dict.
        session: The TensorFlow session.  If not specified, use the
            default session.

    Returns:
        tuple[np.ndarray] or dict[str, np.ndarray]: The collected outputs.
            Returns a dict if `outputs` is a dict, or a tuple otherwise.
    """
    mode = validate_enum_arg('mode', mode, ['concat', 'average'])
    session = session or get_default_session_or_error()

    if isinstance(outputs, (dict, OrderedDict)):
        output_keys = list(outputs)
        outputs = [tf.convert_to_tensor(outputs[k]) for k in output_keys]
    else:
        output_keys = None
        outputs = [tf.convert_to_tensor(o) for o in outputs]
    inputs = [tf.convert_to_tensor(i) for i in inputs]

    # check the shape of output tensors
    for i, o in enumerate(outputs):
        o_shape = o.get_shape()
        if mode == 'concat':
            if o_shape.ndims is not None and o_shape.ndims < 1:
                raise ValueError('`mode` is "concat", but the {}-th output '
                                 'is a scalar: {!r}'.format(i, o))
        else:
            if o_shape.ndims is not None and o_shape.ndims > 0:
                raise ValueError('`mode` is "average", but the {}-th output '
                                 'is not a scalar: {!r}'.format(i, o))

    collected = [[] for _ in range(len(outputs))]
    weights = []

    for batch in data_flow:
        weights.append(len(batch[0]))
        batch_feed_dict = merge_feed_dict(
            feed_dict,
            {k: v for (k, v) in zip(inputs, batch)}
        )
        batch_feed_dict = resolve_feed_dict(batch_feed_dict)
        for i, o in enumerate(session.run(outputs, feed_dict=batch_feed_dict)):
            collected[i].append(o)

    weights = np.asarray(weights, dtype=np.float32)
    for i, batches in enumerate(collected):
        if mode == 'average':
            stacked = np.stack(batches, axis=0)
            assert len(stacked.shape) == 1
            collected[i] = np.average(stacked, axis=0, weights=weights)
        else:
            collected[i] = np.concatenate(batches, axis=axis)

    if output_keys is not None:
        collected = dict(zip(output_keys, collected))
    else:
        collected = tuple(collected)
    return collected
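
A minimal usage sketch (assumes tfsnippet's ``DataFlow.arrays`` constructor and a default TensorFlow session; the placeholder and tensor names are illustrative):

import numpy as np
import tensorflow as tf
from tfsnippet import DataFlow

x_ph = tf.placeholder(tf.float32, [None, 4])
y_sum = tf.reduce_sum(x_ph, axis=-1)   # rank-1 output, one value per sample
y_mean = tf.reduce_mean(x_ph)          # scalar output

data = np.random.normal(size=[1000, 4]).astype(np.float32)
flow = DataFlow.arrays([data], batch_size=64)

with tf.Session().as_default():
    # 'concat' mode: per-batch outputs are concatenated along `axis`
    (sums,) = collect_outputs([y_sum], [x_ph], flow, mode='concat')
    # 'average' mode: scalar outputs are averaged, weighted by batch size
    (mean,) = collect_outputs([y_mean], [x_ph], flow, mode='average')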
Example #6
    def __init__(self,
                 axis=-1,
                 value_ndims=1,
                 initialized=False,
                 scale_type='exp',
                 bias_regularizer=None,
                 bias_constraint=None,
                 log_scale_regularizer=None,
                 log_scale_constraint=None,
                 scale_regularizer=None,
                 scale_constraint=None,
                 trainable=True,
                 epsilon=1e-6,
                 name=None,
                 scope=None):
        """
        Construct a new :class:`ActNorm` instance.

        Args:
            axis (int or Iterable[int]): The axis to apply ActNorm.
                Dimensions not in `axis` will be averaged out when computing
                the mean of activations. Default `-1`, the last dimension.
                All items of the `axis` should be covered by `value_ndims`.
            value_ndims (int): Number of value dimensions in both `x` and `y`.
                `x.ndims - value_ndims == log_det.ndims` and
                `y.ndims - value_ndims == log_det.ndims`.
            initialized (bool): Whether or not the variables have been
                initialized.  If :obj:`False`, the first input `x` in the
                forward pass will be used to initialize the variables.

                Normally, it should take the default value, :obj:`False`.
                Set it to :obj:`True` only if you're constructing a
                :class:`ActNorm` instance inside some reused variable scope.
            scale_type: One of {"exp", "linear"}.
                If "exp", ``y = (x + bias) * tf.exp(log_scale)``.
                If "linear", ``y = (x + bias) * scale``.
                Default is "exp".
            bias_regularizer: The regularizer for `bias`.
            bias_constraint: The constraint for `bias`.
            log_scale_regularizer: The regularizer for `log_scale`.
            log_scale_constraint: The constraint for `log_scale`.
            scale_regularizer: The regularizer for `scale`.
            scale_constraint: The constraint for `scale`.
            trainable (bool): Whether or not the variables are trainable.
            epsilon: Small float to avoid dividing by zero or taking
                logarithm of zero.
        """
        axis = validate_int_tuple_arg('axis', axis)
        self._scale_type = validate_enum_arg('scale_type', scale_type,
                                             ['exp', 'linear'])
        self._initialized = bool(initialized)
        self._bias_regularizer = bias_regularizer
        self._bias_constraint = bias_constraint
        self._log_scale_regularizer = log_scale_regularizer
        self._log_scale_constraint = log_scale_constraint
        self._scale_regularizer = scale_regularizer
        self._scale_constraint = scale_constraint
        self._trainable = bool(trainable)
        self._epsilon = epsilon

        super(ActNorm, self).__init__(axis=axis,
                                      value_ndims=value_ndims,
                                      name=name,
                                      scope=scope)
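
A hedged usage sketch; it assumes this class is :class:`tfsnippet.layers.ActNorm` and that, like other tfsnippet flows, it exposes a ``transform`` method returning ``(y, log_det)``:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])

# Normalize over the channel axis; value_ndims=3 treats (H, W, C) as the
# value dimensions, so log_det has shape [batch_size].
act_norm = ActNorm(axis=-1, value_ndims=3, scale_type='exp')
y, log_det = act_norm.transform(x)  # assumed flow API; y has the shape of x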