Code example #1
File: tf_utils.py Project: ufal/neuralmonkey
def layer_norm(x: tf.Tensor, epsilon: float = 1e-6) -> tf.Tensor:
    """Layer normalize the tensor x, averaging over the last dimension.

    Implementation based on tensor2tensor.

    Arguments:
        x: The ``Tensor`` to normalize.
        epsilon: The smoothing parameter of the normalization.

    Returns:
        The normalized tensor.
    """
    with tf.variable_scope("LayerNorm"):
        gamma = get_variable(
            name="gamma",
            shape=[x.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.ones_initializer())
        beta = get_variable(
            name="beta",
            shape=[x.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.zeros_initializer())

        mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
        variance = tf.reduce_mean(
            tf.square(x - mean),
            axis=[-1],
            keepdims=True)
        norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
        return norm_x * gamma + beta
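A minimal usage sketch, assuming TF 1.x and that `get_variable` is the project's thin wrapper around `tf.get_variable`:

import tensorflow as tf

get_variable = tf.get_variable  # stand-in for neuralmonkey's wrapper

inputs = tf.placeholder(tf.float32, shape=[None, 20, 512])
normalized = layer_norm(inputs)  # normalizes each position over the 512-wide last axis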
Code example #2
File: tf_utils.py Project: ufal/neuralmonkey
def get_shape_list(x: tf.Tensor) -> List[Union[int, tf.Tensor]]:
    """Return list of dims, statically where possible.

    Compute the static shape of a tensor. Where a dimension is not static
    (e.g. the batch or time dimension), a symbolic Tensor is returned instead.

    Based on tensor2tensor.

    Arguments:
        x: The ``Tensor`` to process.

    Returns:
        A list of integers and Tensors.
    """
    x = tf.convert_to_tensor(x)

    # If unknown rank, return dynamic shape
    if x.get_shape().dims is None:
        return tf.shape(x)

    static = x.get_shape().as_list()
    shape = tf.shape(x)

    ret = []
    for i, dim in enumerate(static):
        if dim is None:
            dim = shape[i]
        ret.append(dim)
    return ret
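A quick sketch of the behavior under TF 1.x: with a partially known shape, static dimensions come back as Python ints and unknown ones as symbolic scalars.

x = tf.placeholder(tf.float32, shape=[None, 5])
dims = get_shape_list(x)
# dims[1] == 5 (a Python int); dims[0] is a scalar tf.Tensor holding the runtime batch size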
Code example #3
File: checking.py Project: ufal/neuralmonkey
def assert_shape(tensor: tf.Tensor,
                 expected_shape: List[Optional[int]]) -> None:
    """Check shape of a tensor.

    Args:
        tensor: Tensor to be checked.
        expected_shape: Expected shape where `None` means the same as in TF and
            `-1` means not checking the dimension.
    """

    shape_list = tensor.get_shape().as_list()

    if len(shape_list) != len(expected_shape):
        raise CheckingException(
            "Tensor '{}' with shape {} should have {} dimensions.".format(
                tensor.name, shape_list, len(expected_shape)))

    mismatching_dims = []
    for i, (real, expected) in enumerate(zip(shape_list, expected_shape)):
        if expected not in (real, -1):
            mismatching_dims.append(i)

    if mismatching_dims:
        expected_str = ", ".join(
            "?" if x == -1 else str(x) for x in expected_shape)
        raise CheckingException(
            ("Shape mismatch of {} in dimensions: {}. "
             "Shape was {}, but should be [{}]").format(
                 tensor.name,
                 ", ".join(str(d) for d in mismatching_dims),
                 shape_list, expected_str))
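Hypothetical usage, assuming CheckingException is in scope: None must match an unknown dimension, -1 skips the check, and concrete ints must match exactly.

logits = tf.placeholder(tf.float32, shape=[None, 10, 512])
assert_shape(logits, [None, -1, 512])  # passes: batch unknown, time dim ignored
assert_shape(logits, [None, 10, 256])  # raises CheckingException (512 != 256)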
Code example #4
File: scoring.py Project: Peratham/factorix
def multilinear_grad(emb: tf.Tensor, tuples: tf.Tensor, score=False) -> tf.Tensor:
    tuple_shape = [d.value for d in tuples.get_shape()]
    # if len(tuple_shape) > 2:
    #     n = np.prod(tuple_shape[:-1])
    #     tuples = tf.reshape(tuples, (n, -1))
    # n = tuples.get_shape()[0].value
    order = tuples.get_shape()[2].value
    rank = emb.get_shape()[-1].value
    if order == 2:
        if score:
            emb_sel = tf.gather(emb, tuples)
            grad_score = tf.reshape(tf.reverse(emb_sel, [False, False, True, False]), tuple_shape[:-1] + [2, rank])
            prod = tf.reduce_prod(emb_sel, 2)
            preds = tf.reshape(tf.reduce_sum(prod, 2), tuple_shape[:-1])
            return grad_score, preds
    raise NotImplementedError('Only order-2 tuples with score=True are implemented.')
Code example #5
File: sequence_split.py Project: ufal/neuralmonkey
def split_by_factor(
        tensor_3d: tf.Tensor, batch_size: tf.Tensor, factor: int) -> tf.Tensor:
    max_time = tf.shape(tensor_3d)[1]
    state_dim = tensor_3d.get_shape()[2].value

    if state_dim % factor != 0:
        raise ValueError((
            "Dimension of the tensor ({}) must be dividable by the given "
            "factor ({}).").format(state_dim, factor))

    return tf.reshape(
        tensor_3d, [batch_size, max_time * factor, state_dim // factor])
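A sketch of the effect under TF 1.x: a [batch, time, dim] tensor becomes [batch, time * factor, dim // factor], splitting each state vector into factor consecutive sub-states.

states = tf.placeholder(tf.float32, shape=[None, 50, 512])
batch_size = tf.shape(states)[0]
halved = split_by_factor(states, batch_size, factor=2)  # shape [batch, 100, 256]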
Code example #6
File: cnnText.py Project: easonnie/landOfflol
    def build_cnn(input: tf.Tensor, n_grams, n_features) -> tf.Tensor:
        max_seq_len = int(input.get_shape()[1])
        word_d = int(input.get_shape()[2])

        tran_input = tf.reshape(input, [-1, 1, max_seq_len, word_d])
        results = list()

        for n_gram, n_feature in zip(n_grams, n_features):
            with tf.name_scope("conv-maxpool-%s" % n_gram):
                # The original sampled from (0.05, 0.05), which makes the "random"
                # filter a constant; a symmetric range is presumably intended.
                filter = tf.to_float(
                    tf.Variable(tf.random_uniform([1, n_gram, word_d, n_feature], -0.05, 0.05, dtype=tf.float32)))
                f_result = tf.nn.conv2d(tran_input, filter, strides=[1, 1, 1, 1], padding='VALID')
                # batch_size * (valid_height)(1) * valid_length * n_feature

                b = tf.Variable(tf.constant(0.1, shape=[n_feature]), name="b")
                f_result = tf.nn.relu(tf.nn.bias_add(f_result, b))
                p_result = tf.reshape(
                    tf.nn.max_pool(f_result, ksize=[1, 1, max_seq_len - n_gram + 1, 1], strides=[1, 1, 1, 1],
                                   padding='VALID'), [-1, n_feature])
                results.append(p_result)
        return tf.concat(values=results, axis=1)  # 'concat_dim' was renamed to 'axis' in TF 1.0
Code example #7
File: computation_graph.py Project: thomasste/ugtsa
    def __input(self, node_input, tensor: tf.Tensor):
        if isinstance(node_input, list):
            concatenated_input = np.concatenate(
                [self.nodes[node_input_].output
                 for node_input_ in node_input])
            # Zero-pad the concatenated vector up to the tensor's last dimension.
            expanded_input = np.lib.pad(
                concatenated_input,
                (0, tensor.get_shape()[-1].value -
                    concatenated_input.shape[0]),
                'constant')
            return expanded_input
        return self.nodes[node_input].output
Code example #8
File: checking.py Project: ufal/neuralmonkey
def assert_same_shape(tensor_a: tf.Tensor, tensor_b: tf.Tensor) -> None:
    """Check if two tensors have the same shape."""

    shape_a = tensor_a.get_shape().as_list()
    shape_b = tensor_b.get_shape().as_list()

    if len(shape_a) != len(shape_b):
        raise CheckingException(
            ("Tensor '{}' has {} dimensions and tensor '{}' has {} "
             "dimension, but should have the same shape.").format(
                 tensor_a.name, len(shape_a), tensor_b.name, len(shape_b)))

    mismatching_dims = []
    for i, (size_a, size_b) in enumerate(zip(shape_a, shape_b)):
        if size_a != size_b:
            mismatching_dims.append(i)

    if mismatching_dims:
        raise CheckingException(
            ("Shape mismatch of '{}' and '{}' in dimensions: {}. "
             "Shapes were {} and {}").format(
                 tensor_a.name, tensor_b.name,
                 ", ".join(str(d) for d in mismatching_dims),
                 shape_a, shape_b))
Code example #9
File: coverage.py Project: ufal/neuralmonkey
    def get_energies(self, y: tf.Tensor, weights_in_time: tf.Tensor):
        # NOTE: `.size()` suggests weights_in_time is TensorArray-like rather
        # than a plain tf.Tensor as annotated.
        weight_sum = tf.cond(
            tf.greater(weights_in_time.size(), 0),
            lambda: tf.reduce_sum(weights_in_time, axis=0),
            lambda: 0.0)

        coverage = weight_sum / self.fertility * self.attention_mask
        coverage_exp = tf.expand_dims(tf.expand_dims(coverage, -1), -1)
        logits = tf.reduce_sum(
            self.similarity_bias_vector * tf.tanh(
                self.hidden_features + y
                + self.coverage_weights * coverage_exp),
            [2, 3])

        return logits
Code example #10
File: tf_util.py Project: BiggestOrg/BigAuto
def multi_conv2d_modified(inputs, filters: tf.Tensor, bias=None,
                          stride=list([1, 1, 1, 1]), padding='SAME', basis_rate=list([1, 3, 5]),
                          to_batch_norm=False, batch_norm_decay=0.997, is_training=True, activation_fn=None):
    _number_of_basis = len(basis_rate)
    # _filter_shape = tf.shape(filters)
    # _filter_center = tf.slice(filters, [1, 1, 0, 0], [1, 1, _filter_shape[2], _filter_shape[3]])

    if _number_of_basis < 2:
        raise ValueError('Number of basis_rate entries must be at least 2')

    input_shape = inputs.get_shape()
    output_channel = filters.get_shape()[-1]
    global_average_pooling = global_avg_pooling_layer(inputs, upsample=False)
    depth = 256
    selection_weights1 = kernels([input_shape[-1], depth],
                                 regularizer=slim.l2_regularizer(0.0001),
                                 name='rate_selection_weights1')
    selection_weights2 = kernels([depth, _number_of_basis],
                                 regularizer=slim.l2_regularizer(0.0001),
                                 name='rate_selection_weights2')

    global_avg_pooling_squeezed = tf.squeeze(global_average_pooling, axis=[1, 2])

    selection = tf.matmul(global_avg_pooling_squeezed, selection_weights1)
    selection = batch_norm(selection, is_training, batch_norm_decay)
    selection = tf.nn.relu(selection)

    selection = tf.matmul(selection, selection_weights2)
    selection = batch_norm(selection, is_training, batch_norm_decay)
    selection = tf.nn.relu(selection)

    selection = tf.transpose(selection, [1, 0])
    output = None
    for idx, r in enumerate(basis_rate):
        weighted = tf.einsum('nhwc,n->nhwc',
                             atrous_conv2d(inputs, filters, r, bias, padding, stride),
                             selection[idx])
        # Accumulate each selection-weighted basis response exactly once
        # (the original added the idx == 0 term twice).
        output = weighted if output is None else output + weighted

    if to_batch_norm:
        output = batch_norm(output, is_training, batch_norm_decay)

    if activation_fn is not None:
        output = activation_fn(output)

    return output
Code example #11
def optimize_linear(grad: tf.Tensor, eps: float, norm=np.inf) -> tf.Tensor:
    """
    Solves for the optimal input to a linear function under a norm constraint.
    Optimal_perturbation = argmax_{eta, ||eta||_{norm} < eps} dot(eta, grad)

    :param grad: tf tensor containing a batch of gradients
    :param eps: float scalar specifying size of constraint region
    :param norm: int specifying order of norm
    :returns: tf tensor containing optimal perturbation
    """

    # Convert the iterator returned by `range` into a list.
    axis = list(range(1, len(grad.get_shape())))
    avoid_zero_div = 1e-12
    if norm == np.inf:
        # Take sign of gradient
        optimal_perturbation = tf.sign(grad)
        # The following line should not change the numerical results. It
        # applies only because
        # `optimal_perturbation` is the output of a `sign` op, which has zero
        # derivative anyway.
        # It should not be applied for the other norms, where the perturbation
        # has a non-zero derivative.
        optimal_perturbation = tf.stop_gradient(optimal_perturbation)
    elif norm == 1:
        abs_grad = tf.abs(grad)
        sign = tf.sign(grad)
        max_abs_grad = tf.reduce_max(abs_grad, axis, keepdims=True)
        tied_for_max = tf.dtypes.cast(tf.equal(abs_grad, max_abs_grad),
                                      dtype=tf.float32)
        num_ties = tf.reduce_sum(tied_for_max, axis, keepdims=True)
        optimal_perturbation = sign*tied_for_max/num_ties
    elif norm == 2:
        square = tf.maximum(
            avoid_zero_div, tf.reduce_sum(tf.square(grad), axis,
                                          keepdims=True))
        optimal_perturbation = grad/tf.sqrt(square)
    else:
        raise NotImplementedError(
            "Only L-inf, L1 and L2 norms are currently implemented.")

    # Scale perturbation to be the solution for the norm=eps rather than
    # norm=1 problem
    scaled_perturbation = tf.multiply(eps, optimal_perturbation)
    return scaled_perturbation
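With norm=np.inf this reduces to the FGSM perturbation eps * sign(grad). A hedged sketch of one attack step (model_loss and x are hypothetical graph nodes):

grad = tf.gradients(model_loss, x)[0]
x_adv = x + optimize_linear(grad, eps=0.3, norm=np.inf)  # one FGSM step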
Code example #12
File: graph_utils.py Project: lunachy/ga
def SpatialAttention(x: tf.Tensor, name: str, k: int=1024):
    """
    空间注意力转移  https://www.e-learn.cn/content/qita/678740  与原始论文有区别
    :param x:  [batch_size, height, width, channel]
    :param name:
    :param k:
    :return:
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
        _, H, W, C = x.get_shape()
        w = tf.get_variable(name="attention_w", shape=[C, 1], dtype=tf.float32, initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable(initializer=tf.constant(0.0, dtype=tf.float32, shape=[1]), trainable=True, name='attention_b')
        # One attention probability per spatial position, shared by all channels.
        spatial_attention = tf.matmul(tf.reshape(x, [-1, C]), w) + b
        spatial_attention = tf.nn.sigmoid(tf.reshape(spatial_attention, [-1, H, W, 1]))  # batch_size, h, w, 1
        # Broadcast over the channel dimension. (The original tile-then-reshape
        # to [-1, H, W, C] scrambles the channel layout.)
        attention_x = tf.multiply(x=x, y=spatial_attention)
        return attention_x
Code example #13
File: losses.py Project: nanaya-tachibana/sknlp
    def call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
        if isinstance(y_pred, tf.RaggedTensor):
            y_pred = y_pred.to_tensor(y_pred.dtype.min)
        y_true = tf.cast(y_true, y_pred.dtype)
        y_pred = pad2shape(y_pred, tf.shape(y_true))

        sample_size = tf.reduce_prod(tf.shape(y_true)[:self.flatten_axis])
        y_true = tf.reshape(y_true, (sample_size, -1))
        y_pred = tf.reshape(y_pred, (sample_size, -1))
        y_pred = (1 - 2 * y_true) * y_pred
        y_pred_neg: tf.Tensor = y_pred - y_true * y_pred.dtype.max
        y_pred_pos: tf.Tensor = y_pred - (1 - y_true) * y_pred.dtype.max
        zeros = tf.zeros_like(y_pred[..., :1])  # supplies the 1 inside each logsumexp
        y_pred_neg = tf.concat([y_pred_neg, zeros], axis=-1)
        y_pred_pos = tf.concat([y_pred_pos, zeros], axis=-1)
        neg_loss = tf.math.reduce_logsumexp(y_pred_neg, axis=-1)
        pos_loss = tf.math.reduce_logsumexp(y_pred_pos, axis=-1)
        return neg_loss + pos_loss
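In our reading, this implements the multi-label categorical cross-entropy (the "ZLPR" loss popularized by Su Jianlin): with logits s and positive/negative label sets,

\mathcal{L} = \log\Big(1 + \sum_{i \in \Omega_{\text{neg}}} e^{s_i}\Big) + \log\Big(1 + \sum_{j \in \Omega_{\text{pos}}} e^{-s_j}\Big)

The appended zeros column supplies the two 1 terms, and the dtype.min/dtype.max masking removes entries of the opposite set from each logsumexp.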
Code example #14
def se_module(inputs: tf.Tensor, inner_features: int) -> tf.Tensor:
    input_features = inputs.get_shape().as_list()[-1]

    x = tf.reduce_mean(inputs, axis=[1, 2])
    x = tf.layers.dense(x,
                        inner_features,
                        kernel_initializer=tf.keras.initializers.he_normal(),
                        kernel_regularizer=tf.keras.regularizers.l2(
                            common.L2_REGULARIZATION))
    x = tf.nn.relu(x)
    x = tf.layers.dense(x,
                        input_features,
                        kernel_initializer=tf.keras.initializers.he_normal(),
                        kernel_regularizer=tf.keras.regularizers.l2(
                            common.L2_REGULARIZATION))
    x = tf.nn.sigmoid(x)
    x = tf.reshape(x, (-1, 1, 1, input_features))
    return x * inputs
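This is the squeeze-and-excitation pattern: global average pool, bottleneck dense layers, sigmoid gate, channel-wise rescaling. A hypothetical call (assuming common.L2_REGULARIZATION is importable):

feature_map = tf.placeholder(tf.float32, shape=[None, 28, 28, 64])
recalibrated = se_module(feature_map, inner_features=16)  # same shape as feature_map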
Code example #15
    def predict(self, position_indices_t: tf.Tensor):

        ndiffs = position_indices_t.get_shape().as_list()[0]

        if ndiffs == 0:
            return tf.zeros(shape=[], dtype='float32')

        batch_obj_views_t = tf.gather(self._obj_views_all_t, position_indices_t)
        batch_obj_views_t = fftshift_t(batch_obj_views_t)
        exit_waves_t = batch_obj_views_t * self.probe_cmplx_t

        out_wavefronts_t = propTF_t(exit_waves_t,
                                    reuse_transfer_function=True,
                                    transfer_function=self._transfer_function)
        amplitudes_t = tf.abs(out_wavefronts_t)
        if self.upsampling_factor > 1:
            amplitudes_t = self._downsample(amplitudes_t)
        return amplitudes_t
Code example #16
    def predict(self,
                position_indices_t: tf.Tensor,
                scope_name: str = "") -> tf.Tensor:

        scope_name = scope_name + "_predict" if scope_name else "predict"
        ndiffs = position_indices_t.get_shape().as_list()[0]

        with tf.name_scope(scope_name) as scope:
            if ndiffs == 0:
                return tf.zeros(shape=[], dtype='complex64', name=scope)

            batch_obj_views_t = tf.gather(self._obj_views_all_t,
                                          position_indices_t)
            batch_obj_views_t = fftshift_t(batch_obj_views_t)
            exit_waves_t = batch_obj_views_t * self.probe_cmplx_t

            farfield_waves_t = propFF_t(exit_waves_t)
            return farfield_waves_t  #tf.reshape(tf.stack([tf.real(farfield_waves_t), tf.imag(farfield_waves_t)]), [-1])
Code example #17
def map_func(filepath: tf.Tensor, label: tf.Tensor, processing=False):
    # - read file and assign label -
    fname = filepath.numpy().decode('utf-8')
    f = np.loadtxt(fname).astype('float32')
    lb = label
    lb.set_shape(lb.shape)

    # - processing if needed -
    if processing:
        f = f / f.max()
        # f_std = (f - f.min(axis=0)) / (f.max(axis=0) - f.min(axis=0))
        # f = f_std * (1 - 0) + 0
        # print(f.shape[:])
        f = np.reshape(f, (f.shape[0], f.shape[1], 1))
    f = tf.convert_to_tensor(f, dtype=tf.float32)
    f.set_shape(f.shape)

    return f, lb
Code example #18
File: rnn.py Project: oskopek/nlu
    def _attention_images_summary(alignments: tf.Tensor,
                                  prefix: str = "") -> tf.Operation:
        """Create attention image and attention summary."""
        # https://github.com/tensorflow/nmt/blob/master/nmt/attention_model.py
        # Reshape to (batch, tgt_seq_len, src_seq_len, 1)
        print("alignments", alignments.get_shape())
        attention_images = tf.expand_dims(tf.expand_dims(alignments, axis=-1),
                                          axis=-1)
        attention_images = tf.transpose(attention_images,
                                        perm=(0, 2, 1,
                                              3))  # make img horizontal
        # Scale to range [0, 255]
        attention_images *= 255
        attention_summary = tf.contrib.summary.image(
            f"{prefix}/attention_images", attention_images)
        return attention_summary
Code example #19
def displace(specimen: tf.Tensor) -> tf.Tensor:
    """
    Creates a mutated offspring by selecting a random gene (an index in the
    array) and inserting it at a random place in the same array.

    :param specimen: solution to mutate

    :return: mutated offspring
    """
    specimen = specimen.numpy()

    gene_id, placement = np.random.randint(0, len(specimen), 2)
    gene = specimen[gene_id]

    specimen = np.delete(specimen, gene_id)
    specimen = np.insert(specimen, placement, gene)

    return tf.convert_to_tensor(specimen)
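A sketch of a call under TF 2.x eager execution (the .numpy() call requires an eager tensor):

tour = tf.constant([0, 1, 2, 3, 4])
mutated = displace(tour)  # e.g. [0, 2, 3, 1, 4], depending on the random draw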
Code example #20
File: cv.py Project: zcy618/decompose
    def lowrankMask(self, X: Tensor):
        nFolds = np.array(self.nFolds)
        foldNumber = self.foldNumber

        M = np.array(X.get_shape().as_list())
        F = len(M)
        nValues = M//nFolds

        folds = np.zeros(np.prod(M))
        folds[foldNumber] = 1.
        folds = folds.reshape(M)
        foldNumbers = np.array(np.where(folds == 1.)).flatten()

        U = []
        for f in range(F):
            Uf = self.testMask(M[f], foldNumbers[f], nFolds[f], nValues[f])
            U.append(tf.constant(Uf))
        return U
Code example #21
def _get_kernel_regularizer(kernel_tensor: tf.Tensor) -> Union[None, tf.Tensor]:
    """
    Get a kernel regularizer of the same kind as attached to kernel_tensor
    :param kernel_tensor: Kernel tensor to check for regularization
    :return: A new kernel regularizer if kernel_tensor has regularization, None otherwise
    """
    kernel_regularizer = None
    for consumer in kernel_tensor.consumers():
        if consumer.type == 'L2Loss':
            # Try to see if there is a scale value associated with it
            try:
                l2_regularizer_mul = consumer.outputs[0].consumers()[0]
                scale_op = l2_regularizer_mul.inputs[0].op
                scale_val = scale_op.get_attr('value').float_val[0]
                kernel_regularizer = tf.contrib.layers.l2_regularizer(scale_val)
            except:     # pylint: disable=bare-except
                kernel_regularizer = tf.nn.l2_loss      # pylint: disable=no-member
    return kernel_regularizer
Code example #22
def _convolution(last_layer: tf.Tensor, last_n_channels: int, filter_size: int,
                 n_filters: int) -> tf.Tensor:
    """Applies convolution on a filter bank."""
    conv_w = tf.get_variable(
        "wieghts",
        shape=[filter_size, filter_size, last_n_channels, n_filters],
        initializer=tf.truncated_normal_initializer(stddev=.1))
    conv_b = tf.get_variable("biases",
                             shape=[n_filters],
                             initializer=tf.constant_initializer(.1))
    conv_activation = tf.nn.conv2d(last_layer, conv_w, [1, 1, 1, 1],
                                   "SAME") + conv_b
    # The output has n_filters channels (the original asserted filter_size here).
    assert_shape(conv_activation, [
        None,
        last_layer.get_shape()[1].value,
        last_layer.get_shape()[2].value, n_filters
    ])
    return tf.nn.relu(conv_activation)
Code example #23
def shuffle_block(inputs: tf.Tensor,
                  features: int,
                  is_training: bool,
                  stride: int,
                  groups: int = 8) -> tf.Tensor:
    if stride > 1:
        first_branch, second_branch = inputs, inputs
    else:
        first_branch, second_branch = tf.split(inputs,
                                               num_or_size_splits=2,
                                               axis=-1)
    input_features = inputs.get_shape().as_list()[-1]

    if stride > 1:
        first_branch = depthwise_convo_bn(first_branch, features, is_training,
                                          stride)
        first_branch = convo_bn_relu(first_branch, features, 1, is_training, 1)

    second_branch = convo_bn_relu(second_branch, features, 1, is_training, 1)
    second_branch = depthwise_convo_bn(second_branch, features, is_training,
                                       stride)
    if stride == 1:
        second_branch = convo_bn(second_branch, features, 1, is_training, 1)
    else:
        second_branch = convo_bn_relu(second_branch, features, 1, is_training,
                                      1)
    second_branch = se_module(second_branch, features // 2)

    if stride == 1:
        if input_features != features:
            inputs = projection_convo(inputs, features, 1)
        second_branch = inputs + second_branch
        second_branch = tf.nn.relu(second_branch)

    res = tf.concat([first_branch, second_branch], axis=-1)
    feats = res.get_shape().as_list()[-1]
    input_shape = tf.shape(res)

    res = tf.reshape(res, (input_shape[0], input_shape[1], input_shape[2],
                           groups, feats // groups))
    res = tf.transpose(res, perm=[0, 1, 2, 4, 3])
    res = tf.reshape(res,
                     (input_shape[0], input_shape[1], input_shape[2], feats))
    return res
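The tail of shuffle_block is the ShuffleNet-style channel shuffle: reshape the channels into groups, transpose, and flatten back. A small NumPy sketch of the same permutation:

import numpy as np

feats, groups = 8, 4
channels = np.arange(feats)  # [0 1 2 3 4 5 6 7]
shuffled = channels.reshape(groups, feats // groups).T.reshape(feats)  # [0 2 4 6 1 3 5 7]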
Code example #24
    def _fetch(
            self, tensor: tf.Tensor,
            data_dict: typing.Optional[utils.DataDict] = None,
            batch_size: int = 4096, noisy: bool = False,
            progress_bar: bool = False, random_seed: int = config._USE_GLOBAL
    ) -> np.ndarray:
        if data_dict is None:
            return self.sess.run(tensor)
        if noisy:
            random_seed = config.RANDOM_SEED \
                if random_seed == config._USE_GLOBAL else random_seed
            random_state = np.random.RandomState(seed=random_seed)
        result_shape = tensor.get_shape().as_list()
        if result_shape[0] is None:
            result_shape[0] = data_dict.shape[0]
        result = np.empty(result_shape)

        @utils.minibatch(batch_size, desc="fetch", use_last=True,
                         progress_bar=progress_bar)
        def _fetch_minibatch(data_dict, result):
            feed_dict = {self.training_flag: False}
            if "exprs" in data_dict and "library_size" in data_dict:
                x = data_dict["exprs"]
                normalized_x = self.prob_module._normalize(x, data_dict["library_size"])
                feed_dict.update({
                    self.x: utils.densify(x),
                    self.noisy_x: self.prob_module._add_noise(
                        utils.densify(normalized_x), random_state
                    ) if noisy else utils.densify(normalized_x),
                    self.library_size: data_dict["library_size"]
                })
                # Tensorflow random samplers are fixed after creation,
                # making it impossible to re-seed and generate reproducible
                # results, so we use numpy samplers instead.
                # Also, local RandomState object is used to ensure thread-safety.
            for module in [self.latent_module, self.prob_module, *self.rmbatch_modules]:
                try:
                    feed_dict.update(module._build_feed_dict(data_dict))
                except Exception:
                    pass
            result[:] = self.sess.run(tensor, feed_dict=feed_dict)

        _fetch_minibatch(data_dict, result)
        return result
Code example #25
def write_tensor_as_bin(tensor: tf.Tensor, output_path: str):
    """Write tensor as a binary file.
  Uses big endian for compatibility with whizzscooters/raven-android tests.
  """
    if tensor.dtype == tf.float32:
        tensor.numpy().flatten().astype(">f4").tofile(output_path)
    elif tensor.dtype == tf.int32:
        tensor.numpy().flatten().astype(">i4").tofile(output_path)
    elif tensor.dtype == tf.uint8:
        # uint8 needs the unsigned ">u1" format (the original wrote signed ">i1").
        tensor.numpy().flatten().astype(">u1").tofile(output_path)
    else:
        raise NotImplementedError('Saving for %s is not implemented.' %
                                  (tensor.dtype))
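A round-trip sketch under TF 2.x eager mode (numpy imported as np):

t = tf.constant([1.5, -2.0], dtype=tf.float32)
write_tensor_as_bin(t, "/tmp/t.bin")
back = np.fromfile("/tmp/t.bin", dtype=">f4")  # array([ 1.5, -2. ])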
Code example #26
File: graph_utils.py Project: lunachy/ga
def Basic2dConv(x: tf.Tensor,
                d_out: int,
                name: str,
                ksize: tuple = (3, 3),
                stride: tuple = (1, 1),
                active=None,
                trainable: bool = True,
                use_bias: bool = True,
                dtype=tf.float32,
                padding: str = 'SAME'):
    """
    卷积层
    :param x tensor
    :param d_out int 卷积核数目
    :param ksize list 卷积核尺寸
    :param active 激活函数
    :param trainable
    :param padding SAME或者VALID
    """
    d_in = x.get_shape()[-1].value
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
        kernel = tf.get_variable(
            name="w",
            shape=[ksize[0], ksize[1], d_in, d_out],
            dtype=dtype,
            initializer=tf.contrib.layers.xavier_initializer_conv2d(),
            trainable=trainable)

        conv = tf.nn.conv2d(x,
                            kernel, [1, stride[0], stride[1], 1],
                            padding=padding)
        if use_bias:
            bias = tf.get_variable(initializer=tf.constant(0.0,
                                                           dtype=dtype,
                                                           shape=[d_out]),
                                   trainable=trainable,
                                   name='b')
            conv_plus_bias = tf.nn.bias_add(conv, bias)
        else:
            conv_plus_bias = conv
        if active is not None:
            return active(conv_plus_bias, name="active")
        else:
            return conv_plus_bias
Code example #27
File: KERAS_LIKE.py Project: vincentvigon/neural
    def __call__(self, X: tf.Tensor):
        """(batch, h, w, channels)"""
        input_shape = X.get_shape().as_list()

        W = kernel_var(
            shape=(self.kernel_size, self.kernel_size,
                   input_shape[3], self.filters),
            type=self.kernel_initializer,
            dtype=X.dtype,
            name="W_" + self.name)

        self.res = tf.nn.conv2d(
            X, W, strides=[1, self.strides[0], self.strides[1], 1],
            padding=self.padding)

        if self.use_bias:
            self.res += bias_var(shape=(self.filters,),
                                 type=self.bias_initializer,
                                 dtype=X.dtype,
                                 name="B_" + self.name)

        if self.activation is not None:
            self.res = activation(self.res, self.activation, name=self.name)

        return self.res
Code example #28
File: HRNet.py Project: revygabor/onlab
def basic_block(input_layer: tf.Tensor, name):
    """
    Creates the basic block for HRNet (Conv, BN, ReLU, Conv, BN, Add(residual), ReLU)
    :param name: name of the layers in the block
    :param input_layer: input layer of the basic block
    :return: output layer of basic block
    """
    _, _, _, n_filters = input_layer.get_shape()
    x = conv_2d(inputs=input_layer,
                filters=n_filters,
                kernel_size=3,
                activation=leaky_relu,
                name=name + '_0')
    x = conv_2d(inputs=x, filters=n_filters, kernel_size=3, name=name + '_1')

    x = tf.keras.layers.Add(name=name + '_add')([input_layer, x])
    x = leaky_relu(x, name=name + '_leaky_ReLU')

    return x
Code example #29
    def process_image_train(self, img_path: tf.Tensor, purpose):
        """ * Callback function for tf.data.Dataset.map to process each image file path.
            * For each image file path, it returns a corresponding (image, label) pair.
            * This is the parent function that wraps all other processing helpers.
            * This is the processing for the training data specifically.

            Params:
                img_path - tf.Tensor, representing the path to an image file
                purpose - "train" for training; "val" for validation; "test" for testing
            Returns:
                img, label - tuple of (tf.float32, tf.uint8) arrays representing the image and label arrays
        """

        label_path = self.get_label_path(img_path, purpose)

        input_image = self.read_npy(img_path.numpy(), img_channels=4)
        input_label = self.read_npy(label_path.numpy(), label=True)

        #input_image = self.read_nifti(img_path.numpy(), img_channels=4)
        #input_label = self.read_nifti(label_path.numpy(), label=True)

        # Make label binary for tumor region in question
        if self.tumor_region:
            input_label = tf.where(
                input_label >= tf.constant(TUMOR_REGIONS[self.tumor_region],
                                           dtype=tf.int32),
                tf.constant(1, dtype=tf.int32), tf.constant(0, dtype=tf.int32))

        # Fetch random image patch
        image_patch, label_patch = self.get_random_patch(
            input_image, input_label)

        weight_map = self.get_weight_map(image_patch, label_patch)  # computed but currently unused

        # Normalize image patch AFTER creating the patch
        image_patch = self.normalize(image_patch)

        if purpose == "train":
            # Augment Data
            image_patch, label_patch = self.augment_patch(
                image_patch, label_patch)

        return image_patch, label_patch
Code example #30
File: distributional.py Project: zhangtjtongxue/acme
def l2_project(  # pylint: disable=invalid-name
    Zp: tf.Tensor,
    P: tf.Tensor,
    Zq: tf.Tensor,
) -> tf.Tensor:
    """Project distribution (Zp, P) onto support Zq under the L2-metric over CDFs.

  This projection works for any support Zq.
  Let Kq be len(Zq) and Kp be len(Zp).

  Args:
    Zp: (batch_size, Kp) Support of distribution P
    P:  (batch_size, Kp) Probability values for P(Zp[i])
    Zq: (Kq,) Support to project onto

  Returns:
    L2 projection of (Zp, P) onto Zq.
  """

    # Asserts that Zq has no leading dimension of size 1.
    if Zq.get_shape().ndims > 1:
        Zq = tf.squeeze(Zq, axis=0)

    # Extracts vmin and vmax and construct helper tensors from Zq.
    vmin, vmax = Zq[0], Zq[-1]
    d_pos = tf.concat([Zq, vmin[None]], 0)[1:]
    d_neg = tf.concat([vmax[None], Zq], 0)[:-1]

    # Clips Zp to be in new support range (vmin, vmax).
    clipped_zp = tf.clip_by_value(Zp, vmin, vmax)[:, None, :]
    clipped_zq = Zq[None, :, None]

    # Gets the distance between atom values in support.
    d_pos = (d_pos - Zq)[None, :, None]  # Zq[i+1] - Zq[i]
    d_neg = (Zq - d_neg)[None, :, None]  # Zq[i] - Zq[i-1]

    delta_qp = clipped_zp - clipped_zq  # Zp[j] - Zq[i]

    d_sign = tf.cast(delta_qp >= 0., dtype=P.dtype)
    delta_hat = (d_sign * delta_qp / d_pos) - (
        (1. - d_sign) * delta_qp / d_neg)
    P = P[:, None, :]
    return tf.reduce_sum(tf.clip_by_value(1. - delta_hat, 0., 1.) * P, 2)
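This is the categorical projection used in distributional RL (e.g. C51). A hedged sketch: projecting a two-atom distribution onto a three-atom support keeps the mass on the coinciding atoms.

Zp = tf.constant([[0.0, 1.0]])     # (batch=1, Kp=2) atom locations
P = tf.constant([[0.5, 0.5]])      # probabilities on those atoms
Zq = tf.constant([0.0, 0.5, 1.0])  # target support, Kq=3
projected = l2_project(Zp, P, Zq)  # -> [[0.5, 0.0, 0.5]]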
Code example #31
    def apply(self, x: tf.Tensor) -> tf.Tensor:
        channels_in = x.get_shape().as_list()[-1]

        if self._stride == 1 and channels_in == self._channels:
            skip_connection = x
        else:
            skip_connection = self._conv_fn(x, num_outputs=self._channels, kernel_size=self._extra_dim + (1, 1),
                                            stride=self._extra_dim+(self._stride, self._stride), scope='skip')
        with tf.variable_scope('c1'):
            x = self._conv_fn(x, num_outputs=self._channels, kernel_size=self._extra_dim+(3, 3),
                              stride=self._extra_dim+(self._stride, self._stride), scope='c1')
            x = self._bn_fn(x)
            x = self._ln_fn(x)
        with tf.variable_scope('c2'):
            x = self._conv_fn(x, num_outputs=self._channels, kernel_size=self._extra_dim+(3, 3), stride=1, scope='c2')
            x = self._bn_fn(x)
            x = self._ln_fn(x)
        x += skip_connection
        return x
Code example #32
    def predict(self, position_indices_t: tf.Tensor):
        ndiffs = position_indices_t.get_shape().as_list()[0]

        if ndiffs == 0:
            return tf.zeros(shape=[], dtype='float32')

        batch_rc_positions_indices = tf.gather(self._full_rc_positions_indices_t, position_indices_t)
        batch_obj_views_t = tf.gather(self._obj_views_all_t, batch_rc_positions_indices[:, 1])
        batch_phase_modulations_t = tf.gather(self._probe_phase_modulations_all_t, batch_rc_positions_indices[:, 0])

        exit_waves_t = batch_obj_views_t * self.probe_cmplx_t * batch_phase_modulations_t
        exit_waves_proj_t = fftshift_t(tf.reduce_sum(exit_waves_t, axis=-3))

        out_wavefronts_t = propFF_t(exit_waves_proj_t)
        amplitudes_t = tf.abs(out_wavefronts_t)
        if self.upsampling_factor > 1:
            amplitudes_t = self._downsample(amplitudes_t)
        return amplitudes_t
Code example #33
    def _tensordot_axes(
            a: tf.Tensor,
            axes: AXES_TYPE) -> Tuple[AXES_ENTRY_TYPE, AXES_ENTRY_TYPE]:
        """Generates two sets of contraction axes for the two tensor arguments."""
        a_shape = a.get_shape()
        if isinstance(axes, tf.compat.integral_types):
            if axes < 0:
                raise ValueError("'axes' must be at least 0.")
            if a_shape.ndims is not None:
                if axes > a_shape.ndims:
                    raise ValueError(
                        "'axes' must not be larger than the number of "
                        "dimensions of tensor %s." % a)
                return (list(range(a_shape.ndims - axes,
                                   a_shape.ndims)), list(range(axes)))
            rank = tf.rank(a)
            return (tf.range(rank - axes, rank,
                             dtype=tf.int32), tf.range(axes, dtype=tf.int32))
        if isinstance(axes, (list, tuple)):
            if len(axes) != 2:
                raise ValueError("'axes' must be an integer or have length 2.")
            a_axes = axes[0]
            b_axes = axes[1]
            if isinstance(a_axes, tf.compat.integral_types) and \
                isinstance(b_axes, tf.compat.integral_types):
                a_axes = [a_axes]
                b_axes = [b_axes]
            # NOTE: This fails if either a_axes or b_axes is a Tensor.
            if len(a_axes) != len(b_axes):
                raise ValueError(
                    "Different number of contraction axes 'a' and 'b', %s != %s."
                    % (len(a_axes), len(b_axes)))

            # The contraction indices do not need to be permuted.
            # Sort axes to avoid unnecessary permutations of a.
            # NOTE: This fails if a_axes or b_axes contains Tensors.
            # pylint: disable=len-as-condition
            if len(a_axes) > 0:
                a_axes, b_axes = list(zip(*sorted(zip(a_axes, b_axes))))

            return a_axes, b_axes
        axes = tf.convert_to_tensor(axes, name="axes", dtype=tf.int32)
        return axes[0], axes[1]
Code example #34
File: image.py Project: kzajac97/AWD
def tensor_to_image(tensor: tf.Tensor, width: int, height: int, channels: int = 3) -> np.ndarray:
    """
    Convert tensor representation of an image into displayable array

    :param tensor: tensor with image content
    :param height: target image height
    :param width: target image width
    :param channels: number of channels in an image

    :return: ND numpy array with image content
    """
    tensor = tensor.reshape((height, width, channels))
    # Remove zero-center by mean pixel
    tensor[:, :, 0] += 103.939
    tensor[:, :, 1] += 116.779
    tensor[:, :, 2] += 123.68

    tensor = tensor[:, :, ::-1]
    return np.clip(tensor, 0, 255).astype('uint8')
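The added constants (103.939, 116.779, 123.68) are the ImageNet per-channel BGR means used by VGG-style preprocessing, and the final [:, :, ::-1] flips BGR back to RGB. Note the function actually expects a flat NumPy array despite the tf.Tensor annotation (it calls .reshape and uses item assignment); a hypothetical call:

img = tensor_to_image(generated.numpy(), width=224, height=224)  # `generated` is a hypothetical 224*224*3 model output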
Code example #35
def map_colorspace(images: tf.Tensor) -> tf.Tensor:
    """
    TensorFlow graph function which converts images from RGB to CIE Lab colorspace.
    This is essentially a wrapper for rgb_to_cielab to be usable in a graph.

    Args:
        images: 3 or 4 tensor. See rgb_to_cielab for detailed information.

    Returns:
        Converted images, see rgb_to_cielab for detailed information.
    """
    [
        images_lab,
    ] = tf.py_function(rgb_to_cielab, [images], [tf.float32])

    # Make sure shape information is correct after py_function call
    images_lab.set_shape(images.get_shape())

    return images_lab
Code example #36
def blaze_block(x: tf.Tensor, filters, mid_channels=None, stride=1, phase_train=True):
    # input is n, h, w, c
    mid_channels = mid_channels or x.get_shape()[3]
    assert stride in [1, 2]
    use_pool = stride > 1
    # tensorflow way to implement pad size = 2
    pad_x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]], mode='CONSTANT')
    conv1 = tf.layers.separable_conv2d(pad_x, filters=mid_channels, kernel_size=(5, 5), strides=stride, padding='VALID')
    bn1 = tf.layers.batch_normalization(conv1, training=phase_train)
    conv2 = tf.layers.conv2d(bn1, filters=filters, kernel_size=1, strides=1, padding='SAME')
    bn2 = tf.layers.batch_normalization(conv2, training=phase_train)

    if use_pool:
        shortcut = tf.layers.max_pooling2d(x, pool_size=stride, strides=stride, padding='SAME')
        shortcut = tf.layers.conv2d(shortcut, filters=filters, kernel_size=1, strides=1, padding='SAME')
        shortcut = tf.layers.batch_normalization(shortcut, training=phase_train)
        shortcut = tf.nn.relu(shortcut)
        return tf.nn.relu(bn2 + shortcut)
    return tf.nn.relu(bn2 + x)
Code example #37
def perceptron_layer(x: Tensor, out_dim, batch_norm=False, activation_fn=tf.sigmoid, name="hidden_layer"):
    in_dim = x.get_shape().as_list()[1]
    with tf.variable_scope(name) as v_scope:
        w = tf.Variable(initial_value=tf.random_normal((in_dim, out_dim)))
        b = tf.Variable(initial_value=tf.random_normal((out_dim,)))
        prod = tf.einsum('ij,jk->ik', x, w)
        b_scope = None  # defined up front so the check below cannot raise NameError
        if batch_norm:
            prod, b_scope = batch_normalization(prod, out_dim, b)
        else:
            prod = prod + b

        total_v_coll = v_scope.global_variables()
        if b_scope:
            total_v_coll.append(b_scope.global_variables())

        if activation_fn is not None:
            return activation_fn(prod), total_v_coll
        return prod, total_v_coll
Code example #38
def perceptronLayer(x: Tensor, out_dim, batch_norm=False, activation_fn=tf.sigmoid, name="hidden_layer"):
    in_dim = x.get_shape().as_list()[1]
    with tf.variable_scope(name):
        w = tf.Variable(initial_value=tf.random_normal((in_dim, out_dim)))
        b = tf.Variable(initial_value=tf.random_normal((out_dim,)))
        prod = tf.einsum('ij,jk->ik', x, w)
        if batch_norm:
            with tf.variable_scope('batch_norm'):
                mean, var = tf.nn.moments(prod, axes=0)
                v = tf.Variable(tf.ones([out_dim]))
                prod = tf.div_no_nan(prod - mean, tf.sqrt(var + 1e-3))
                prod = v * prod + b
        else:
            prod = prod + b

        if activation_fn is not None:
            return activation_fn(prod)
        return prod
Code example #39
File: base.py Project: victorlazio109/ncgenes7
    def build_residual_block_connection(self, block_input: tf.Tensor,
                                        block_output: tf.Tensor) -> tf.Tensor:
        """
        Create residual connection by applying convolution on block_input
        and add or concatenate it with block_output

        Applies convolution with kernel and stride from sampling_params

        Parameters
        ----------
        block_input
            feature maps before convolution block
        block_output
            feature maps after convolution block

        Returns
        -------
        block_res
            feature maps after residual connection
        """
        filters = block_output.get_shape().as_list()[-1]
        layer_name = self.get_current_layer_full_name("residual")
        if self.sampling_type == 'encoder':
            residual_layer = self.add_keras_layer(
                tf.keras.layers.Conv2D(
                    filters=filters, activation=self.activation,
                    bias_initializer=self.initializer,
                    kernel_initializer=self.initializer,
                    name=layer_name, **self.sampling_params))
        else:
            residual_layer = self.add_keras_layer(
                tf.keras.layers.Conv2DTranspose(
                    filters=filters, activation=self.activation,
                    bias_initializer=self.initializer,
                    kernel_initializer=self.initializer,
                    name=layer_name, **self.sampling_params))

        res_connection = residual_layer(block_input)
        if self.block_residual_connection_type == 'sum':
            out = tf.add_n([block_output, res_connection])
        else:
            out = tf.concat([block_output, res_connection], -1)
        return out
Code example #40
File: feed_forward.py Project: ufal/neuralmonkey
    def attention(self,
                  query: tf.Tensor,
                  decoder_prev_state: tf.Tensor,
                  decoder_input: tf.Tensor,
                  loop_state: AttentionLoopState) -> Tuple[
                      tf.Tensor, AttentionLoopState]:
        self.query_state_size = query.get_shape()[-1].value

        y = tf.matmul(query, self.query_projection_matrix)
        y = y + self.projection_bias_vector
        y = tf.reshape(y, [-1, 1, 1, self.state_size])

        energies = self.get_energies(y, loop_state.weights)

        if self.attention_mask is None:
            weights = tf.nn.softmax(energies)
        else:
            weights_all = tf.nn.softmax(energies) * self.attention_mask
            norm = tf.reduce_sum(weights_all, 1, keepdims=True) + 1e-8
            weights = weights_all / norm

            # condition = tf.equal(self.attention_mask, 1)
            # masked_logits = tf.where(
            #     tf.tile(condition, [tf.shape(energies)[0], 1]),
            #     energies, -np.inf * tf.ones_like(energies))
            # weights = tf.nn.softmax(masked_logits)

        # Now calculate the attention-weighted vector d.
        context = tf.reduce_sum(
            tf.expand_dims(tf.expand_dims(weights, -1), -1)
            * self._att_states_reshaped, [1, 2])
        context = tf.reshape(context, [-1, self.context_vector_size])

        next_contexts = tf.concat(
            [loop_state.contexts, tf.expand_dims(context, 0)], 0)
        next_weights = tf.concat(
            [loop_state.weights, tf.expand_dims(weights, 0)], 0)
        next_loop_state = AttentionLoopState(
            contexts=next_contexts,
            weights=next_weights)

        return context, next_loop_state
Code example #41
def label_smoothing(inputs: tf.Tensor, epsilon: float = 0.1):
    """Smooth one-hot labels toward the uniform distribution over the last axis."""
    last_dim = inputs.get_shape().as_list()[-1]
    return ((1 - epsilon) * inputs) + (epsilon / last_dim)
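A quick check of the effect with the default epsilon=0.1 over 4 classes (a sketch):

labels = tf.one_hot([1, 3], depth=4)
smoothed = label_smoothing(labels)  # ones become 0.925, zeros become 0.025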