Code example #1
File: tf_VCRNN.py Project: ijleesw/VCRNN-tensorflow
    def call(self, inputs, state):
        """VCRNN with num_units cells."""
        add = math_ops.add
        sub = math_ops.subtract
        mult = math_ops.multiply

        # computing m_t
        m_t = add(math_ops.matmul(state, self._u),
                  math_ops.matmul(inputs, self._v))
        m_t = nn_ops.bias_add(m_t, self._b)
        m_t = math_ops.sigmoid(m_t)

        # add L1 loss
        ops.add_to_collection('L1 loss', math_ops.abs(m_t - self._m_target))

        # computing e_t (= thr)
        i = gen_math_ops._range(1, self._num_units + 1, 1)
        i = math_ops.cast(i, dtype=dtypes.float32)
        mtD = gen_array_ops.tile(mult(m_t[1], self._num_units),
                                 [self._num_units])
        thr = math_ops.sigmoid(mult(self._sharpness, sub(mtD, i)))
        thr = math_ops.round(add(thr, sub(0.5, self._epsilon)))
        ones = array_ops.ones_like(thr)
        thr_inv = sub(ones, thr)

        # computing h_t
        gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1),
                                      self._kernel)
        gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
        output = self._activation(gate_inputs)
        # keep the updated value on active units (thr == 1) and carry the
        # previous state on inactive units
        output = add(mult(output, thr), mult(state, thr_inv))

        return output, output
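
The e_t computation above is easier to see with concrete numbers. A minimal sketch (hypothetical values for D, sharpness, and epsilon; eager TF and the public tf.* API assumed) of the thresholding trick: a sharpened sigmoid over unit indices, shifted by 0.5 - epsilon and rounded, yields a {0, 1} mask that keeps roughly the first m_t * D units active.

import tensorflow as tf

D, sharpness, eps = 4, 20.0, 1e-3   # hypothetical hyperparameters
m_t = 0.6                           # activation fraction for this step
i = tf.range(1, D + 1, dtype=tf.float32)       # [1., 2., 3., 4.]
thr = tf.sigmoid(sharpness * (m_t * D - i))    # soft step centred at m_t * D
thr = tf.round(thr + (0.5 - eps))              # binarize -> [1., 1., 0., 0.]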
Code example #2
def image_to_input_size(version, images, antialias=False):
    """Resizes images to size `version_input_shape`. This will convert
    grayscale to RGB.

    Note: Should be run on CPU.

    Preprocessing as in: https://github.com/tensorflow/models/blob/1af55e018eebce03fb61bba9959a04672536107d/research/slim/preprocessing/vgg_preprocessing.py#L319

    Note: Does not preserve aspect ratio if resize is necessary.

    Args:
        version: A supported inception version. See `versions()`.
        images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
            Tensor of shape `[height, width, channels]`, where `channels` can
            be `1` or `3`.
        antialias: Whether to use an anti-aliasing filter when downsampling
            an image.

    Returns:
        A tensor of shape `[batch] + version_input_shape` or
        `version_input_shape`.
    """
    channels = images.shape[-1]
    assert channels == 1 or channels == 3

    info = version_info(version)
    batch_input_shape = info["input_shape"]
    input_shape = info["input_shape"][1:]

    if not (
        images.shape.is_compatible_with(batch_input_shape)
        or images.shape.is_compatible_with(input_shape)
    ):
        images = img_ops.resize_images_v2(
            images,
            size=input_shape[0:2],
            preserve_aspect_ratio=False,
            antialias=antialias,
        )
        if channels == 1:
            rank = array_ops.rank(images) - 1
            tile_shape = array_ops.concat(
                [array_ops.ones([rank], dtype=dtypes.int32), [3]], 0
            )
            images = gen_array_ops.tile(images, tile_shape)
    images = math_ops.cast(images, dtype=dtypes.float32)
    # _R_MEAN, _G_MEAN, _B_MEAN are the per-channel RGB means defined in the
    # linked vgg_preprocessing module
    images -= array_ops.reshape(
        constant_op.constant([_R_MEAN, _G_MEAN, _B_MEAN]), [1, 1, 3]
    )
    return images
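
The grayscale-to-RGB branch above is rank-agnostic: it builds a multiples vector of ones with a 3 in the channel slot, so the same code handles 3-D and 4-D inputs. A minimal sketch of that pattern (eager TF, public tf.* API assumed):

import tensorflow as tf

gray = tf.zeros([2, 224, 224, 1])   # batch of grayscale images
rank = tf.rank(gray) - 1
multiples = tf.concat([tf.ones([rank], dtype=tf.int32), [3]], 0)  # [1, 1, 1, 3]
rgb = tf.tile(gray, multiples)      # shape [2, 224, 224, 3]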
Code example #3
File: tensorflow.py Project: zhanpengfang/research
def _tile_batch(tensor, multiplier):
    """ Core single-tensor implementation of tile_batch. """
    tensor = ops.convert_to_tensor(tensor, name='t')
    shape_tensor = array_ops.shape(tensor)
    if tensor.shape.ndims is None:
        raise ValueError('tensor must have statically known rank')
    if tensor.shape.ndims == 0:             # We can't tile scalars (e.g. time)
        return tensor
    tiling = [1] * (tensor.shape.ndims + 1)
    tiling[1] = multiplier
    tiled_static_batch_size = (tensor.shape[0].value * multiplier
                               if tensor.shape[0].value is not None else None)
    tiled = gen_array_ops.tile(array_ops.expand_dims(tensor, 1), tiling)
    tiled = gen_array_ops.reshape(
        tiled,
        array_ops.concat(([shape_tensor[0] * multiplier], shape_tensor[1:]), 0))
    tiled.set_shape(tensor_shape.TensorShape([tiled_static_batch_size])
                    .concatenate(tensor.shape[1:]))
    return tiled
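
What _tile_batch computes, shown with hypothetical shapes: each batch entry is repeated `multiplier` times along a new second axis, then flattened back into the batch dimension, the usual setup for beam-search decoding.

import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])                 # shape [2, 2]
tiled = tf.tile(tf.expand_dims(t, 1), [1, 3, 1])  # shape [2, 3, 2]
tiled = tf.reshape(tiled, [2 * 3, 2])             # rows: [1,2],[1,2],[1,2],[3,4],...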
Code example #4
def image_to_input_size(version, images, antialias=False):
    """Resizes images to size `version_input_shape`. This will convert
    grayscale to RGB.

    Note: Should be run on CPU.

    Preprocessing as in: https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py#L253

    Args:
        version: A supported inception version. See `versions()`.
        images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
            Tensor of shape `[height, width, channels]`, where `channels` can
            be `1` or `3`.
        antialias: Whether to use an anti-aliasing filter when downsampling
            an image.

    Returns:
        A tensor of shape `[batch] + version_input_shape` or
        `version_input_shape`.
    """
    channels = images.shape[-1]
    assert channels == 1 or channels == 3

    info = version_info(version)
    batch_input_shape = info["input_shape"]
    input_shape = info["input_shape"][1:]

    images = img_ops.convert_image_dtype(images, dtype=dtypes.float32)

    if not (images.shape.is_compatible_with(batch_input_shape)
            or images.shape.is_compatible_with(input_shape)):
        images = img_ops.resize_images_v2(
            images,
            size=input_shape[0:2],
            preserve_aspect_ratio=False,
            antialias=antialias,
        )
        if channels == 1:
            rank = array_ops.rank(images) - 1
            tile_shape = array_ops.concat(
                [array_ops.ones([rank], dtype=dtypes.int32), [3]], 0)
            images = gen_array_ops.tile(images, tile_shape)
    # map [0, 1] floats to the [-1, 1] range expected by Inception
    images -= 0.5
    images *= 2.0
    return images
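
Unlike the VGG variant in code example #2, this version uses Inception-style scaling. A minimal sketch (eager TF assumed) of the dtype and range handling: convert_image_dtype rescales integer images to [0, 1] floats, and the final two lines shift that to [-1, 1].

import tensorflow as tf

u8 = tf.constant([[0, 127, 255]], dtype=tf.uint8)
f = tf.image.convert_image_dtype(u8, tf.float32)  # [0., ~0.498, 1.]
f = (f - 0.5) * 2.0                               # [-1., ~-0.004, 1.]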
Code example #5
    def __call__(self, inputs, state, scope=None):

        # score_j = v^T tanh(W_a h_{t-1} + U_a topic_j)
        dtype = inputs.dtype
        c_t, h_t = state  # h_t batch_size x hidden_size
        embedding_size = self.memory.shape[2].value
        with vs.variable_scope("topic_attention"):
            query_layer = layers_core.Dense(self.attention_size, dtype=dtype)
            memory_layer = layers_core.Dense(self.attention_size, dtype=dtype)
            v = vs.get_variable("attention_v", [self.attention_size], dtype=dtype)
            keys = memory_layer(self.memory)  # batch_size x num x attention_size
            processed_query = array_ops.expand_dims(query_layer(h_t), 1)  # batch_size, 1 , attention_size
            score = math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query), [2])
            score = nn_ops.softmax(score, axis=1)  # softmax
            score_tile = gen_array_ops.tile(array_ops.expand_dims(score, -1), [1, 1, embedding_size],
                                            name="weight")
            mt = math_ops.reduce_sum(self.memory * score_tile, axis=1)

        return self._cell(tf.concat([inputs, mt], axis=1), state)
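
The tile here only broadcasts the attention weights across the embedding axis so they can scale the memory elementwise. A minimal sketch with hypothetical shapes (eager TF assumed):

import tensorflow as tf

score = tf.constant([[0.2, 0.8]])                           # [batch=1, num=2]
memory = tf.ones([1, 2, 4])                                 # [batch, num, embedding]
score_tile = tf.tile(tf.expand_dims(score, -1), [1, 1, 4])  # [1, 2, 4]
mt = tf.reduce_sum(memory * score_tile, axis=1)             # [1, 4], all ones here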
Code example #6
    def __while_loop(self, b, a, d, n, seed):
        def __cond(w, e, bool_mask, b, a, d):
            return math_ops.reduce_any(bool_mask)

        def __body(w_, e_, bool_mask, b, a, d):
            # propose e ~ Beta((m - 1) / 2, (m - 1) / 2) and draw u for the
            # accept/reject test
            e = math_ops.cast(Beta((self.__mf - 1) / 2,
                                   (self.__mf - 1) / 2).sample(shape,
                                                               seed=seed),
                              dtype=self.dtype)

            u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)

            # transform the Beta draw into a candidate w and its test statistic
            w = (1 - (1 + b) * e) / (1 - (1 - b) * e)
            t = (2 * a * b) / (1 - (1 - b) * e)

            # accept a draw when (m - 1) * log(t) - t + d > log(u)
            accept = gen_math_ops.greater(
                ((self.__mf - 1) * math_ops.log(t) - t + d), math_ops.log(u))
            reject = gen_math_ops.logical_not(accept)

            # write newly accepted draws into w_/e_ and clear their mask bits
            w_ = array_ops.where(gen_math_ops.logical_and(bool_mask, accept),
                                 w, w_)
            e_ = array_ops.where(gen_math_ops.logical_and(bool_mask, accept),
                                 e, e_)
            bool_mask = array_ops.where(
                gen_math_ops.logical_and(bool_mask, accept), reject, bool_mask)

            return w_, e_, bool_mask, b, a, d

        shape = array_ops.concat([[n], self.batch_shape_tensor()[:-1], [1]], 0)
        b, a, d = [
            gen_array_ops.tile(array_ops.expand_dims(e, axis=0),
                               [n] + [1] * len(e.shape)) for e in (b, a, d)
        ]

        w, e, bool_mask, b, a, d = control_flow_ops.while_loop(
            __cond, __body, [
                array_ops.zeros_like(b, dtype=self.dtype),
                array_ops.zeros_like(b, dtype=self.dtype),
                array_ops.ones_like(b, dtypes.bool), b, a, d
            ])

        return e, w
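
The parameter tiling near the end of the example is a rank-agnostic way to prepend a sample dimension of size n. A minimal sketch (hypothetical shapes, eager TF assumed):

import tensorflow as tf

b = tf.zeros([5, 1])    # one parameter per batch entry
n = 3
b_tiled = tf.tile(tf.expand_dims(b, 0), [n] + [1] * len(b.shape))  # [3, 5, 1]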
Code example #7
    def __call__(self, inputs, state, scope=None):

        # score_j = v^T tanh(W_a h_{t-1} + U_a topic_j)
        c_t, h_t = state  # h_t batch_size x hidden_size
        dtype = inputs.dtype

        with vs.variable_scope("topic_attention"):

            # Attention
            keys = self.memory_layer(self.memory)  # batch_size x num x attention_size
            processed_query = array_ops.expand_dims(self.query_layer(h_t), 1)  # batch_size, 1 , attention_size
            score = self.coverage_vector * math_ops.reduce_sum(self.v * math_ops.tanh(keys + processed_query), [2])
            score = nn_ops.softmax(score, axis=1)  # softmax
            score_tile = gen_array_ops.tile(array_ops.expand_dims(score, -1), [1, 1, self.embedding_size],
                                            name="weight")
            mt = math_ops.reduce_sum(self.memory * score_tile, axis=1)

            # update coverage vector
            self.coverage_vector = self.coverage_vector - score / self.phi_res
        return self._cell(tf.concat([inputs, mt], axis=1), state)
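
The coverage update on the last line drains each topic's budget in proportion to the attention it just received, discouraging repeated attention to the same topic. A minimal sketch with hypothetical values:

import tensorflow as tf

coverage = tf.constant([1.0, 1.0])
score = tf.constant([0.9, 0.1])         # attention just paid to each topic
phi_res = tf.constant([2.0, 2.0])
coverage = coverage - score / phi_res   # -> [0.55, 0.95]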
Code example #8
    def __while_loop(self, b0, n, seed=0):
        def __cond(_w, _e, mask, _b):
            return math_ops.reduce_any(mask)

        def __body(w_, e_, mask, b):
            e = math_ops.cast(distributions.Beta((self.__mf - 1.0) / 2.0,
                                                 (self.__mf - 1.0) / 2.0).
                              sample(shape, seed=seed), dtype=self.dtype)

            u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)
            w = (1.0 - (1.0 + b) * e) / (1.0 - (1.0 - b) * e)
            x = (1.0 - b) / (1.0 + b)
            c = self.scale * x + (self.__mf - 1) * math_ops.log1p(-x**2)

            # clip so that log(1 - tmp) below stays finite
            tmp = tf.clip_by_value(x * w, 0, 1 - 1e-16)
            reject = gen_math_ops.less(((self.__mf - 1.0) * math_ops.log(1.0 - tmp) +
                                        self.scale * w - c),
                                       math_ops.log(u))
            accept = gen_math_ops.logical_not(reject)

            w_ = array_ops.where(gen_math_ops.logical_and(mask, accept), w, w_)
            e_ = array_ops.where(gen_math_ops.logical_and(mask, accept), e, e_)
            mask = array_ops.where(gen_math_ops.logical_and(mask, accept),
                                   reject, mask)

            return w_, e_, mask, b

        shape = array_ops.concat([[n], self.batch_shape_tensor()[:-1], [1]], 0)
        b0 = gen_array_ops.tile(array_ops.expand_dims(b0, axis=0),
                                [n] + [1] * len(b0.shape))

        w1, e1, bool_mask, b0 = control_flow_ops.while_loop(
            __cond, __body,
            [array_ops.zeros_like(b0, dtype=self.dtype),
             array_ops.zeros_like(b0, dtype=self.dtype),
             array_ops.ones_like(b0, dtypes.bool),
             b0])

        return e1, w1
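
The clip on tmp guards the log in the rejection test: log(1 - x*w) diverges as x*w approaches 1. A minimal sketch (float64 assumed; in float32, 1 - 1e-16 rounds back to 1.0):

import tensorflow as tf

xw = tf.constant([0.5, 1.0], dtype=tf.float64)
safe = tf.clip_by_value(xw, 0.0, 1.0 - 1e-16)
val = tf.math.log(1.0 - safe)   # finite for both entries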
Code example #9
File: memory_wrapper.py Project: yeliruan/CTEG
    def __call__(self, inputs, state, scope=None):
        """Run the input projection and then the cell."""
        dtype = inputs.dtype
        memory = array_ops.identity(self.memory)

        with vs.variable_scope("memory_projection"):
            c_t, h_t = state

            v = math_ops.tanh(nn_ops.xw_plus_b(h_t, self.w, self.b))
            if v.get_shape()[0] != self.batch_size:
                raise Exception("Beam search is not supported yet!")
            else:
                similarity = math_ops.matmul(
                    array_ops.expand_dims(v,
                                          1),  # batch_size, 1 , embedding_size
                    array_ops.transpose(memory, [0, 2, 1]))

                weight = nn_ops.softmax(
                    array_ops.squeeze(similarity)  # batch_size, topic_num
                )
                weight_tile = gen_array_ops.tile(
                    array_ops.expand_dims(weight, -1),
                    [1, 1, self.embedding_size],
                    name="weight")
                mt = math_ops.reduce_sum(memory * weight_tile, axis=1)

            # update memory
            if self.update_mem:
                gate = math_ops.matmul(
                    memory,
                    array_ops.expand_dims(inputs, axis=2))  # [batch_size, num, 1]
                gate = math_ops.sigmoid(
                    gen_array_ops.squeeze(gate))  # batch_size x num

                inputs_expand = gen_array_ops.tile(
                    array_ops.expand_dims(inputs, axis=1),
                    [1, self.mem_num, 1])  # batch_size x num x embedding

                uu_tile = gen_array_ops.tile(
                    array_ops.expand_dims(self.uu, axis=0),
                    [self.batch_size, 1, 1])  # batch_size x embedding x embedding

                vv_tile = gen_array_ops.tile(
                    array_ops.expand_dims(self.uv, axis=0),
                    [self.batch_size, 1, 1])  # batch_size x embedding x embedding

                candidate = math_ops.add(
                    math_ops.matmul(inputs_expand, uu_tile),
                    math_ops.matmul(memory, vv_tile))  # batch_size x num x embedding

                # convex combination: the gate interpolates between the old
                # memory and the candidate
                gate_tile = gen_array_ops.tile(array_ops.expand_dims(gate, 2),
                                               [1, 1, self.embedding_size])
                updated_mem = (1 - gate_tile) * memory + gate_tile * candidate
                self.memory = updated_mem

        with vs.variable_scope("attention_mechanism"):

            encoder_processed = self.memory_layer(
                self.encoder_outputs)  # map to attention size
            # [batch_size,  hidden_size] -> [batch_size, 1, attention_size]
            query_processed = array_ops.expand_dims(self.query_layer(c_t), 1)

            scores = math_ops.reduce_sum(
                self.attention_v *
                math_ops.tanh(encoder_processed + query_processed), [2])
            alpha = nn_ops.softmax(scores, axis=1)
            output_hidden_size = self.encoder_outputs.shape[2].value
            alpha_tile = gen_array_ops.tile(array_ops.expand_dims(alpha, -1),
                                            [1, 1, output_hidden_size],
                                            name="weight")
            # alpha_tile: batch_size x num x output_hidden_size
            weighted_sum = math_ops.reduce_sum(self.encoder_outputs *
                                               alpha_tile,
                                               axis=1)
        return self._cell(tf.concat([inputs, weighted_sum, mt], axis=1), state)
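
The memory update above is a convex combination gated per slot, the same pattern as a GRU update gate. A minimal sketch with hypothetical shapes:

import tensorflow as tf

memory = tf.zeros([2, 3, 4])       # batch x num x embedding
candidate = tf.ones([2, 3, 4])
gate = tf.fill([2, 3], 0.25)       # per-slot update strength
gate_tile = tf.tile(tf.expand_dims(gate, 2), [1, 1, 4])
updated = (1 - gate_tile) * memory + gate_tile * candidate   # all 0.25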
Code example #10
def _expand_to_minibatch(np_array, batch_size):
  """Tile arbitrarily-sized np_array to include new batch dimension."""
  tiles = [batch_size] + [1] * np_array.ndim
  return gen_array_ops.tile(np.expand_dims(np_array, 0), tiles)
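
A quick usage sketch (hypothetical values; tf.tile accepts NumPy inputs directly): a [2] array becomes a [batch_size, 2] tensor with the row repeated.

import numpy as np
import tensorflow as tf

arr = np.array([1.0, 2.0])
batched = tf.tile(np.expand_dims(arr, 0), [4, 1])   # shape [4, 2], 4 copies of arr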
Code example #11
File: tf_VCRNN.py Project: ijleesw/VCRNN-tensorflow
    def call(self, inputs, state):
        """VCGRU with nunits cells."""
        add = math_ops.add
        sub = math_ops.subtract
        mult = math_ops.multiply

        # computing m_t
        m_t = add(math_ops.matmul(state, self._u),
                  math_ops.matmul(inputs, self._v))
        m_t = nn_ops.bias_add(m_t, self._b)
        m_t = math_ops.sigmoid(m_t)

        # add L1 loss
        ops.add_to_collection('L1 loss', math_ops.abs(m_t - self._m_target))

        # computing e_t (= thr)
        i = gen_math_ops._range(1, self._num_units + 1, 1)
        i = math_ops.cast(i, dtype=dtypes.float32)
        mtD = gen_array_ops.tile(mult(m_t[1], self._num_units),
                                 [self._num_units])
        thr = math_ops.sigmoid(mult(self._sharpness, sub(mtD, i)))
        thr = math_ops.round(add(thr, sub(0.5, self._epsilon)))
        ones = array_ops.ones_like(thr)
        thr_inv = sub(ones, thr)

        # computing h_t: mask the inputs with thr, slicing or zero-padding
        # thr to match the input width
        if inputs.shape[1] < thr.shape[0]:
            _inputs = mult(inputs,
                           array_ops.slice(thr, [0], [inputs.shape[1]]))
        elif inputs.shape[1] > thr.shape[0]:
            _inputs = mult(
                inputs,
                array_ops.concat([
                    thr,
                    array_ops.zeros([inputs.shape[1] - thr.shape[0]])
                ], 0))
        else:
            _inputs = mult(inputs, thr)
        else:
            _inputs = mult(inputs, thr)
        _state = mult(state, thr)

        gate_inputs = math_ops.matmul(array_ops.concat([_inputs, _state], 1),
                                      self._gate_kernel)
        gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)

        value = math_ops.sigmoid(gate_inputs)
        r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

        _r_state = r * _state

        candidate = math_ops.matmul(array_ops.concat([_inputs, _r_state], 1),
                                    self._candidate_kernel)
        candidate = nn_ops.bias_add(candidate, self._candidate_bias)

        c = self._activation(candidate)
        new_h = u * _state + (1 - u) * c

        output = add(mult(new_h, thr), mult(state, thr_inv))
        return output, output
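
The input-masking branch above has to reconcile thr (length num_units) with the input width. A minimal sketch (hypothetical sizes, eager TF assumed) of both cases, zero-padding when the input is wider and slicing when it is narrower:

import tensorflow as tf

thr = tf.constant([1.0, 1.0, 0.0, 0.0])           # mask over 4 hidden units
padded = tf.concat([thr, tf.zeros([6 - 4])], 0)   # input width 6 -> [1,1,0,0,0,0]
sliced = tf.slice(thr, [0], [3])                  # input width 3 -> [1,1,0]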