Example #1
    def __init__(
            self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last',
            name='meanpool3d'
    ):

        super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info(
            "MeanPool3d %s: filter_size:%s strides:%s padding:%s" %
            (name, str(filter_size), str(strides), str(padding))
        )

        self.outputs = tf.layers.average_pooling3d(
            prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name
        )

        self._add_layers(self.outputs)
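
A minimal usage sketch for the layer above (added for illustration; it assumes TensorFlow 1.x and the TensorLayer 1.x graph-building API, where every layer wraps a `prev_layer`):

    # Hypothetical usage sketch, not part of the original example.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 16, 16, 16, 3])  # (batch, depth, height, width, channels)
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.MeanPool3d(net, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', name='pool1')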
Example #2
def lo_regularizer(scale):
    """Lo regularization removes the neurons of current layer. The `o` represents `outputs`
    Returns a function that can be used to apply group lo regularization to weights.
    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.

    Parameters
    ----------
    scale : float
        A scalar multiplier `Tensor`. 0.0 disables the regularizer.

    Returns
    -------
    A function with signature `lo(weights, name=None)` that applies Lo regularization.

    Raises
    ------
    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.

    """

    if isinstance(scale, numbers.Integral):
        raise ValueError('scale cannot be an integer: %s' % scale)

    if isinstance(scale, numbers.Real):
        if scale < 0.:
            raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
        if scale >= 1.:
            raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale)
        if scale == 0.:
            logging.info('Scale of 0 disables regularizer.')
            return lambda _, name=None: None

    def lo(weights, name='lo_regularizer'):
        """Applies group column regularization to weights."""
        with tf.name_scope(name) as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            # if tf.__version__ <= '0.12':
            #     standard_ops_fn = standard_ops.mul
            # else:
            standard_ops_fn = standard_ops.multiply
            return standard_ops_fn(
                my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 0))),
                name=scope
            )

    return lo
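
A short usage sketch (illustrative only; it assumes the function is exposed as `tl.cost.lo_regularizer`, as in TensorLayer 1.x):

    # Hypothetical usage sketch: penalise whole output columns of a weight matrix.
    import tensorflow as tf
    import tensorlayer as tl

    W = tf.get_variable('W', shape=[784, 800], initializer=tf.truncated_normal_initializer(stddev=0.1))
    reg = tl.cost.lo_regularizer(0.001)(W)  # scalar tensor to add to the training cost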
Example #3
def set_gpu_fraction(gpu_fraction=0.3):
    """Set the GPU memory fraction for the application.

    Parameters
    ----------
    gpu_fraction : float
        Fraction of GPU memory to use, in the range (0, 1].

    References
    ----------
    - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`__

    """
    logging.info("[TL]: GPU MEM Fraction %f" % gpu_fraction)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    return sess
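
Usage is a one-liner; the returned session is then used for the rest of the program (illustrative sketch, assuming TensorFlow 1.x):

    # Hypothetical usage: reserve at most half of the GPU memory for this process.
    sess = set_gpu_fraction(gpu_fraction=0.5)
    # ... build and run the graph with `sess`, then:
    sess.close()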
Example #4
    def __init__(self,
                 prev_layer,
                 n_filter=32,
                 filter_size=5,
                 stride=1,
                 dilation_rate=1,
                 act=None,
                 padding='SAME',
                 data_format="channels_last",
                 W_init=tf.truncated_normal_initializer(stddev=0.02),
                 b_init=tf.constant_initializer(value=0.0),
                 W_init_args=None,
                 b_init_args=None,
                 name='conv1d'):
        super(Conv1d, self).__init__(prev_layer=prev_layer,
                                     act=act,
                                     W_init_args=W_init_args,
                                     b_init_args=b_init_args,
                                     name=name)

        logging.info(
            "Conv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s dilation_rate: %d"
            % (self.name, n_filter, filter_size, stride, padding,
               self.act.__name__ if self.act is not None else 'No Activation',
               dilation_rate))

        _conv1d = tf.layers.Conv1D(filters=n_filter,
                                   kernel_size=filter_size,
                                   strides=stride,
                                   padding=padding,
                                   data_format=data_format,
                                   dilation_rate=dilation_rate,
                                   activation=self.act,
                                   use_bias=(True if b_init else False),
                                   kernel_initializer=W_init,
                                   bias_initializer=b_init,
                                   name=name)

        # _conv1d.dtype = LayersConfig.tf_dtype   # unsupport, it will use the same dtype of inputs
        self.outputs = _conv1d(self.inputs)
        # new_variables = _conv1d.weights  # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
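
A minimal usage sketch for 1-D convolution (illustrative; assumes TensorFlow 1.x and the TensorLayer 1.x API):

    # Hypothetical usage sketch over (batch, width, channels) inputs.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 100, 1])
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.Conv1d(net, n_filter=32, filter_size=5, stride=2, act=tf.nn.relu, name='conv1d_1')
    # with 'SAME' padding and stride 2, net.outputs has shape (batch, 50, 32)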
Example #5
    def __init__(
        self,
        prev_layer,
        padding,
        name='zeropad3d',
    ):
        super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("ZeroPad3d   %s: padding:%s" % (name, str(padding)))

        if not isinstance(padding, (int, tuple)):
            raise AssertionError("padding should be an int or a tuple of ints")

        self.outputs = tf.keras.layers.ZeroPadding3D(
            padding=padding, name=name)(self.inputs)  # TODO: Stop using Keras

        self._add_layers(self.outputs)
Example #6
def unstack_layer(prev_layer, num=None, axis=0, name='unstack'):
    """
    A layer for unstacking the given dimension of a rank-R tensor into rank-(R-1) tensors; see `tf.unstack() <https://www.tensorflow.org/api_docs/python/tf/unstack>`__.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer
    num : int or None
        The length of dimension `axis`. Automatically inferred if None (the default).
    axis : int
        The dimension along which to unstack.
    name : str
        A unique layer name.

    Returns
    -------
    list of :class:`Layer`
        The list of layer objects unstacked from the input.

    """
    inputs = prev_layer.outputs
    with tf.variable_scope(name):
        outputs = tf.unstack(inputs, num=num, axis=axis)

    logging.info("UnStackLayer %s: num: %s axis: %d, n_outputs: %d" %
                 (name, num, axis, len(outputs)))

    net_new = []
    scope_name = tf.get_variable_scope().name
    if scope_name:
        full_name = scope_name + '/' + name
    else:
        full_name = name

    for i, _v in enumerate(outputs):
        n = Layer(prev_layer=prev_layer, name=full_name + str(i))
        n.outputs = outputs[i]
        # n.all_layers = list(layer.all_layers)
        # n.all_params = list(layer.all_params)
        # n.all_drop = dict(layer.all_drop)
        # n.all_layers.append(inputs)

        net_new.append(n)

    return net_new
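
A short usage sketch (illustrative; assumes TensorFlow 1.x and the TensorLayer 1.x API):

    # Hypothetical usage: split a (batch, 5, 10) tensor into 5 layers of shape (batch, 10).
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 5, 10])
    net = tl.layers.InputLayer(x, name='input')
    nets = unstack_layer(net, axis=1, name='unstack')  # a list of 5 Layer objects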
Example #7
def maxnorm_i_regularizer(scale):
    """Max-norm input regularization removes the neurons of previous layer.
    Returns a function that can be used to apply max-norm regularization to each row of weight matrix.
    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.

    Parameters
    ----------
    scale : float
        A scalar multiplier `Tensor`. 0.0 disables the regularizer.

    Returns
    ---------
    A function with signature `mn_i(weights, name=None)` that applies max-norm regularization.

    Raises
    ---------
    ValueError : If scale is negative or if scale is not a float.

    """

    if isinstance(scale, numbers.Integral):
        raise ValueError('scale cannot be an integer: %s' % scale)

    if isinstance(scale, numbers.Real):
        if scale < 0.:
            raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
        # if scale >= 1.:
        #   raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
        #                    scale)
        if scale == 0.:
            logging.info('Scale of 0 disables regularizer.')
            return lambda _, name=None: None

    def mn_i(weights, name='maxnorm_i_regularizer'):
        """Applies max-norm regularization to weights."""
        with tf.name_scope(name) as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            if tf.__version__ <= '0.12':
                standard_ops_fn = standard_ops.mul
            else:
                standard_ops_fn = standard_ops.multiply
            return standard_ops_fn(
                my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)), name=scope
            )

    return mn_i
Example #8
    def __init__(self, prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool2d'):

        if strides is None:
            strides = filter_size

        super(MeanPool2d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info(
            "MeanPool2d %s: filter_size:%s strides:%s padding:%s" %
            (name, str(filter_size), str(strides), str(padding))
        )

        self.outputs = tf.layers.average_pooling2d(
            self.inputs, filter_size, strides, padding=padding, data_format='channels_last', name=name
        )

        self._add_layers(self.outputs)
Example #9
    def restore_params(self, sess):
        logging.info("Restore pre-trained parameters")
        maybe_download_and_extract(
            'vgg16_weights.npz',
            'models',
            'http://www.cs.toronto.edu/~frossard/vgg16/',
            expected_bytes=553436134)
        npz = np.load(os.path.join('models', 'vgg16_weights.npz'))

        params = []
        for val in sorted(npz.items()):
            logging.info("  Loading params %s" % str(val[1].shape))
            params.append(val[1])
            if len(self.all_params) == len(params):
                break

        assign_params(sess, params, self.net)
        del params
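
A usage sketch for this method (illustrative; it assumes the method belongs to a pre-built VGG16 model class such as `tl.models.VGG16` in TensorLayer 1.x):

    # Hypothetical usage: build VGG16, then load the pre-trained weights.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = tl.models.VGG16(x)
    sess = tf.InteractiveSession()
    tl.layers.initialize_global_variables(sess)
    vgg.restore_params(sess)  # downloads vgg16_weights.npz on first use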
Example #10
    def __init__(
            self,
            prev_layer,
            init_scale=0.05,
            name='scale',
    ):
        super(ScaleLayer, self).__init__(prev_layer=prev_layer, name=name)
        logging.info("ScaleLayer  %s: init_scale: %f" % (name, init_scale))

        self.inputs = prev_layer.outputs

        with tf.variable_scope(name):
            # scale = tf.get_variable(name='scale_factor', init, trainable=True, )
            scale = tf.get_variable("scale", shape=[1], initializer=tf.constant_initializer(value=init_scale))
            self.outputs = self.inputs * scale

        self.all_layers.append(self.outputs)
        self.all_params.append(scale)
Example #11
    def __init__(self, prev_layer, scale=2, act=None, name='subpixel_conv1d'):

        super(SubpixelConv1d, self).__init__(prev_layer=prev_layer,
                                             act=act,
                                             name=name)

        logging.info(
            "SubpixelConv1d  %s: scale: %d act: %s" %
            (name, scale,
             self.act.__name__ if self.act is not None else '- No Activation'))

        self.inputs = prev_layer.outputs

        with tf.name_scope(name):
            self.outputs = self._apply_activation(
                self._PS(self.inputs, r=scale))

        self.all_layers.append(self.outputs)
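
A shape walk-through (illustrative; assumes the TensorLayer 1.x API): the width grows by `scale` while the channel dimension shrinks by the same factor.

    # Hypothetical usage sketch.
    import tensorflow as tf
    import tensorlayer as tl

    t = tf.placeholder(tf.float32, [10, 100, 4], name='signal')
    net = tl.layers.InputLayer(t, name='input')
    net = tl.layers.SubpixelConv1d(net, scale=2, name='subpixel')
    # net.outputs has shape (10, 200, 2)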
Example #12
    def __init__(
        self,
        prev_layer,
        rois,
        pool_height=2,
        pool_width=2,
        name='roipooling_layer',
    ):
        super(ROIPoolingLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("ROIPoolingLayer %s: (%d, %d)" %
                     (name, pool_height, pool_width))

        self.inputs = prev_layer.outputs

        self.outputs = roi_pooling(self.inputs, rois, pool_height, pool_width)

        self.all_layers.append(self.outputs)
Example #13
    def __init__(
        self,
        prev_layer,
        padding,
        name='zeropad1d',
    ):
        super(ZeroPad1d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("ZeroPad1d   %s: padding:%s" % (name, str(padding)))

        self.inputs = prev_layer.outputs

        if not isinstance(padding, (int, tuple, dict)):
            raise AssertionError("padding should be an int, a tuple of ints or a dict")

        self.outputs = tf.keras.layers.ZeroPadding1D(padding=padding,
                                                     name=name)(self.inputs)
        self.all_layers.append(self.outputs)
Example #14
    def __init__(
            self,
            prev_layer,
            padding=None,
            mode='CONSTANT',
            name='pad_layer',
    ):
        super(PadLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("PadLayer   %s: padding: %s mode: %s" % (self.name, list(padding), mode))

        if padding is None:
            raise Exception(
                "padding should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad"
            )

        self.outputs = tf.pad(self.inputs, paddings=padding, mode=mode, name=name)
        self._add_layers(self.outputs)
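
A usage sketch (illustrative; assumes the TensorLayer 1.x API). `padding` follows the `tf.pad` convention: one `[before, after]` pair per input dimension.

    # Hypothetical usage sketch.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 28, 28, 3])
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.PadLayer(net, padding=[[0, 0], [4, 4], [4, 4], [0, 0]], mode='CONSTANT', name='pad')
    # spatial size grows from 28x28 to 36x36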
Example #15
    def __init__(
        self,
        prev_layer,
        layer_class=None,
        args=None,
        name='time_distributed',
    ):
        super(TimeDistributedLayer, self).__init__(prev_layer=prev_layer,
                                                   name=name)
        logging.info("TimeDistributedLayer %s: layer_class:%s args:%s" %
                     (self.name, layer_class.__name__, args))

        if args is None:
            args = {}
        if not isinstance(args, dict):
            raise TypeError("'args' must be a dict.")

        self.inputs = prev_layer.outputs

        if not isinstance(self.inputs, tf.Tensor):
            self.inputs = tf.transpose(tf.stack(self.inputs), [1, 0, 2])

        input_shape = self.inputs.get_shape()

        timestep = input_shape[1]
        x = tf.unstack(self.inputs, axis=1)

        is_name_reuse = tf.get_variable_scope().reuse
        for i in range(0, len(x)):
            with tf.variable_scope(
                    name, reuse=(is_name_reuse if i == 0 else True)) as vs:
                net = layer_class(InputLayer(x[i], name=args['name'] + str(i)),
                                  **args)
                x[i] = net.outputs
                variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES,
                                              scope=vs.name)

        self.outputs = tf.stack(x, axis=1, name=name)

        # self.all_layers = list(layer.all_layers)
        # self.all_params = list(layer.all_params)
        # self.all_drop = dict(layer.all_drop)
        self.all_layers.append(self.outputs)
        self.all_params.extend(variables)
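
A usage sketch (illustrative; assumes the TensorLayer 1.x API). Note that `args` must contain a 'name' entry, since the code above uses it to name each per-timestep sub-layer.

    # Hypothetical usage: apply one DenseLayer (shared weights) to every timestep.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [32, 20, 100], name='encode_seqs')
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.TimeDistributedLayer(net, layer_class=tl.layers.DenseLayer,
                                         args={'n_units': 50, 'name': 'dense'}, name='time_dense')
    # net.outputs has shape (32, 20, 50)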
Example #16
    def __init__(
            self,
            prev_layer,
            slim_layer,
            slim_args=None,
            name='tfslim_layer',
    ):

        super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, name=name)
        logging.info("SlimNetsLayer %s: %s" % (name, slim_layer.__name__))

        self.inputs = prev_layer.outputs

        if slim_layer is None:
            raise ValueError("slim layer is None")
        if slim_args is None:
            slim_args = {}

        # with tf.variable_scope(name) as vs:
        #     net, end_points = slim_layer(self.inputs, **slim_args)
        #     slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        net, end_points = slim_layer(self.inputs, **slim_args)

        slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=name)
        if not slim_variables:
            logging.info(
                "No variables found under %s : the name of SlimNetsLayer should match the beginning of the ckpt file, see tutorial_inceptionV3_tfslim.py for more details"
                % name
            )

        self.outputs = net

        slim_layers = []
        for v in end_points.values():
            # tf.contrib.layers.summaries.summarize_activation(v)
            slim_layers.append(v)

        # self.all_layers = list(layer.all_layers)
        # self.all_params = list(layer.all_params)
        # self.all_drop = dict(layer.all_drop)

        self.all_layers.extend(slim_layers)
        self.all_params.extend(slim_variables)
Example #17
    def __init__(self, layers, name='mux_layer'):
        super(MultiplexerLayer, self).__init__(prev_layer=layers, name=name)

        self.n_inputs = len(layers)

        self.inputs = []

        for l in layers:
            self.inputs.append(l.outputs)

        try:  # TF1.0
            all_inputs = tf.stack(
                self.inputs, name=name
            )  # pack means concat a list of tensor in a new dim  # 1.2
        except Exception:
            all_inputs = tf.pack(
                self.inputs, name=name
            )  # pack means concat a list of tensor in a new dim  # 1.2

        logging.info("MultiplexerLayer %s: n_inputs:%d" %
                     (self.name, self.n_inputs))

        self.sel = tf.placeholder(tf.int32)
        self.outputs = tf.gather(all_inputs, self.sel,
                                 name=name)  # [sel, :, : ...] # 1.2

        # logging.info(self.outputs, vars(self.outputs))
        #         # tf.reshape(self.outputs, shape=)
        # exit()
        # # the same with ConcatLayer
        # self.all_layers = list(layers[0].all_layers)
        # self.all_params = list(layers[0].all_params)
        # self.all_drop = dict(layers[0].all_drop)
        #
        # for i in range(1, len(layers)):
        #     self.all_layers.extend(list(layers[i].all_layers))
        #     self.all_params.extend(list(layers[i].all_params))
        #     self.all_drop.update(dict(layers[i].all_drop))
        #
        # self.all_layers = list_remove_repeat(self.all_layers)
        # self.all_params = list_remove_repeat(self.all_params)
        # # self.all_drop = list_remove_repeat(self.all_drop)

        self.all_layers.append(self.outputs)
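
A usage sketch (illustrative; assumes the TensorLayer 1.x API): build two branches and choose one at run time through the integer placeholder `net.sel`.

    # Hypothetical usage sketch.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 784])
    net_in = tl.layers.InputLayer(x, name='input')
    net_a = tl.layers.DenseLayer(net_in, n_units=800, act=tf.nn.relu, name='branch_a')
    net_b = tl.layers.DenseLayer(net_in, n_units=800, act=tf.nn.relu, name='branch_b')
    net = tl.layers.MultiplexerLayer([net_a, net_b], name='mux')
    # feed {net.sel: 0} to use branch_a, {net.sel: 1} to use branch_b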
Example #18
    def __init__(
            self,
            prev_layer,
            axis,
            name='expand_dims',
    ):
        super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("ExpandDimsLayer  %s: axis:%d" % (name, axis))

        self.inputs = prev_layer.outputs

        with tf.variable_scope(name):
            try:  # TF12 TF1.0
                self.outputs = tf.expand_dims(self.inputs, axis=axis)
            except Exception:  # TF11
                self.outputs = tf.expand_dims(self.inputs, dim=axis)

        self.all_layers.append(self.outputs)
Example #19
    def __init__(
            self,
            prev_layer,
            ksize=(1, 2, 2, 1),
            strides=(1, 2, 2, 1),
            padding='SAME',
            pool=tf.nn.max_pool,
            name='pool_layer',
    ):
        super(PoolLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info(
            "PoolLayer   %s: ksize:%s strides:%s padding:%s pool:%s" %
            (name, str(ksize), str(strides), padding, pool.__name__)
        )

        self.outputs = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name)

        self._add_layers(self.outputs)
Example #20
def get_variables_with_name(name=None, train_only=True, verbose=False):
    """Get a list of TensorFlow variables by a given name scope.

    Parameters
    ----------
    name : str
        Get the variables that contain this name.
    train_only : boolean
        If True, only get the trainable variables.
    verbose : boolean
        If True, print the information of all variables.

    Returns
    -------
    list of Tensor
        A list of TensorFlow variables

    Examples
    --------
    >>> import tensorlayer as tl
    >>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)

    """
    if name is None:
        raise Exception("please input a name")

    logging.info("  [*] geting variables with %s" % name)

    # tvar = tf.trainable_variables() if train_only else tf.all_variables()
    if train_only:
        t_vars = tf.trainable_variables()

    else:
        t_vars = tf.global_variables()

    d_vars = [var for var in t_vars if name in var.name]

    if verbose:
        for idx, v in enumerate(d_vars):
            logging.info("  got {:3}: {:15}   {}".format(
                idx, v.name, str(v.get_shape())))

    return d_vars
Example #21
    def __init__(
            self,
            prev_layer,
            channel_shared=False,
            a_init=tf.constant_initializer(value=0.0),
            a_init_args=None,
            # restore = True,
            name="prelu_layer"):

        if a_init_args is None:
            a_init_args = {}

        super(PReluLayer, self).__init__(prev_layer=prev_layer, name=name)
        logging.info("PReluLayer %s: channel_shared:%s" %
                     (name, channel_shared))

        self.inputs = prev_layer.outputs

        if channel_shared:
            w_shape = (1, )
        else:
            w_shape = int(self.inputs.get_shape()[-1])

        # with tf.name_scope(name) as scope:
        with tf.variable_scope(name):
            alphas = tf.get_variable(name='alphas',
                                     shape=w_shape,
                                     initializer=a_init,
                                     dtype=LayersConfig.tf_dtype,
                                     **a_init_args)
            try:  # TF 1.0
                self.outputs = tf.nn.relu(self.inputs) + tf.multiply(
                    alphas, (self.inputs - tf.abs(self.inputs))) * 0.5
            except Exception:  # TF 0.12
                self.outputs = tf.nn.relu(self.inputs) + tf.mul(
                    alphas, (self.inputs - tf.abs(self.inputs))) * 0.5

        # self.all_layers = list(layer.all_layers)
        # self.all_params = list(layer.all_params)
        # self.all_drop = dict(layer.all_drop)

        self.all_layers.append(self.outputs)
        self.all_params.extend([alphas])
Example #22
    def __init__(
        self,
        layers,
        axis=1,
        name='stack',
    ):

        super(StackLayer, self).__init__(prev_layer=layers, name=name)

        logging.info("StackLayer %s: axis: %d" % (self.name, axis))

        self.outputs = tf.stack(self.inputs, axis=axis, name=name)

        # for i in range(1, len(layers)):
        #     self._add_layers(list(layers[i].all_layers))
        #     self._add_params(list(layers[i].all_params))
        #     self.all_drop.update(dict(layers[i].all_drop))

        self._add_layers(self.outputs)
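
A usage sketch (illustrative; assumes the TensorLayer 1.x API):

    # Hypothetical usage: stack three dense outputs along a new axis.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 30])
    net_in = tl.layers.InputLayer(x, name='input')
    net1 = tl.layers.DenseLayer(net_in, n_units=10, name='dense1')
    net2 = tl.layers.DenseLayer(net_in, n_units=10, name='dense2')
    net3 = tl.layers.DenseLayer(net_in, n_units=10, name='dense3')
    net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack')
    # net.outputs has shape (batch, 3, 10)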
Example #23
    def __init__(
            self,
            layers,
            fn,
            fn_args=None,
            act=None,
            name='elementwiselambda_layer',
    ):

        super(ElementwiseLambdaLayer, self).__init__(prev_layer=layers, act=act, fn_args=fn_args, name=name)
        logging.info("ElementwiseLambdaLayer %s" % self.name)

        with tf.variable_scope(name) as vs:
            self.outputs = self._apply_activation(fn(*self.inputs, **self.fn_args))

            variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        self._add_layers(self.outputs)
        self._add_params(variables)
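
A usage sketch (illustrative; assumes the TensorLayer 1.x API; `gated_sum` is a hypothetical helper, not part of the library). The custom function receives one tensor per input layer, in order:

    # Hypothetical usage: combine two layers with a custom element-wise function.
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, [None, 100])
    net_in = tl.layers.InputLayer(x, name='input')
    net_a = tl.layers.DenseLayer(net_in, n_units=64, name='branch_a')
    net_b = tl.layers.DenseLayer(net_in, n_units=64, name='branch_b')

    def gated_sum(a, b):  # illustrative helper
        return a + tf.nn.sigmoid(b) * b

    net = tl.layers.ElementwiseLambdaLayer([net_a, net_b], fn=gated_sum, name='gated_sum')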
Example #24
    def __init__(self,
                 prev_layer,
                 center=True,
                 scale=True,
                 act=None,
                 reuse=None,
                 variables_collections=None,
                 outputs_collections=None,
                 trainable=True,
                 begin_norm_axis=1,
                 begin_params_axis=-1,
                 name='layernorm'):

        super(LayerNormLayer, self).__init__(prev_layer=prev_layer,
                                             act=act,
                                             name=name)

        logging.info(
            "LayerNormLayer %s: act: %s" %
            (self.name,
             self.act.__name__ if self.act is not None else 'No Activation'))

        with tf.variable_scope(name) as vs:
            self.outputs = tf.contrib.layers.layer_norm(
                self.inputs,
                center=center,
                scale=scale,
                activation_fn=self.act,
                reuse=reuse,
                variables_collections=variables_collections,
                outputs_collections=outputs_collections,
                trainable=trainable,
                begin_norm_axis=begin_norm_axis,
                begin_params_axis=begin_params_axis,
                scope='var',
            )

            variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES,
                                          scope=vs.name)

        self._add_layers(self.outputs)
        self._add_params(variables)
Example #25
    def __init__(self, prev_layer, num=None, axis=0, name='unstack'):

        super(UnStackLayer, self).__init__(prev_layer=prev_layer, name=name)

        outputs = tf.unstack(self.inputs, num=num, axis=axis, name=name)

        logging.info("UnStackLayer %s: num: %s axis: %d, n_outputs: %d" %
                     (self.name, num, axis, len(outputs)))

        net_new = []

        for i, unstacked_dim in enumerate(outputs):
            layer = Layer(prev_layer=self, name=name + str(i))
            layer.outputs = unstacked_dim

            net_new.append(layer)

        self.outputs = net_new

        self._add_layers(net_new)
Example #26
    def __init__(
            self,
            prev_layer,
            keras_layer,
            keras_args=None,
            name='keras_layer',
    ):

        super(KerasLayer, self).__init__(prev_layer=prev_layer, keras_args=keras_args, name=name)

        logging.info("KerasLayer %s: %s" % (self.name, keras_layer))

        logging.warning("This API will be removed, please use LambdaLayer instead.")

        with tf.variable_scope(name) as vs:
            self.outputs = keras_layer(self.inputs, **self.keras_args)
            variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        self._add_layers(self.outputs)
        self._add_params(variables)
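
A usage sketch (illustrative; assumes the TensorLayer 1.x API). The wrapped callable receives the input tensor as its first argument, so a small helper function is used here; `dense_block` is a hypothetical helper, not part of the library:

    # Hypothetical usage sketch.
    import tensorflow as tf
    import tensorlayer as tl

    def dense_block(x, n_units=256):
        return tf.keras.layers.Dense(n_units, activation=tf.nn.relu)(x)

    t = tf.placeholder(tf.float32, [None, 784])
    net = tl.layers.InputLayer(t, name='input')
    net = tl.layers.KerasLayer(net, keras_layer=dense_block, keras_args={'n_units': 256}, name='keras_dense')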
Example #27
    def __init__(self,
                 prev_layer,
                 scale=2,
                 act=tf.identity,
                 name='subpixel_conv1d'):
        def _PS(I, r):
            X = tf.transpose(I, [2, 1, 0])  # (r, w, b)
            X = tf.batch_to_space_nd(X, [r], [[0, 0]])  # (1, r*w, b)
            X = tf.transpose(X, [2, 1, 0])
            return X

        super(SubpixelConv1d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info("SubpixelConv1d  %s: scale: %d act: %s" %
                     (name, scale, act.__name__))

        self.inputs = prev_layer.outputs
        with tf.name_scope(name):
            self.outputs = act(_PS(self.inputs, r=scale))

        self.all_layers.append(self.outputs)
Example #28
    def __init__(self,
                 prev_layer,
                 filter_size=3,
                 strides=2,
                 padding='valid',
                 data_format='channels_last',
                 name='maxpool1d'):
        super(MaxPool1d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("MaxPool1d %s: filter_size: %s strides: %s padding: %s" %
                     (self.name, str(filter_size), str(strides), str(padding)))

        self.outputs = tf.layers.max_pooling1d(self.inputs,
                                               filter_size,
                                               strides,
                                               padding=padding,
                                               data_format=data_format,
                                               name=name)

        self._add_layers(self.outputs)
Example #29
def get_wmt_enfr_dev_set(path):
    """Download the WMT en-fr dev set to the given directory unless it's already there."""
    filename = "dev-v2.tgz"
    dev_file = maybe_download_and_extract(filename,
                                          path,
                                          _WMT_ENFR_DEV_URL,
                                          extract=False)
    dev_name = "newstest2013"
    dev_path = os.path.join(path, dev_name)
    if not (gfile.Exists(dev_path + ".fr")
            and gfile.Exists(dev_path + ".en")):
        logging.info("Extracting tgz file %s" % dev_file)
        with tarfile.open(dev_file, "r:gz") as dev_tar:
            fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
            en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
            fr_dev_file.name = dev_name + ".fr"  # Extract without "dev/" prefix.
            en_dev_file.name = dev_name + ".en"
            dev_tar.extract(fr_dev_file, path)
            dev_tar.extract(en_dev_file, path)
    return dev_path
Example #30
    def __init__(
            self,
            prev_layer,
            depth_radius=None,
            bias=None,
            alpha=None,
            beta=None,
            name='lrn_layer',
    ):
        super(LocalResponseNormLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info(
            "LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" %
            (self.name, str(depth_radius), str(bias), str(alpha), str(beta))
        )

        with tf.variable_scope(name):
            self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)

        self._add_layers(self.outputs)