Example 1
def convCaps(
        inputs, 
        filters, 
        dims, 
        kernel_size, 
        strides, 
        padding='VALID',
        iterations=2, 
        name=None):
    with tf.variable_scope(name, default_name='convCaps'):

        tf.assert_rank(
            inputs,
            rank=5,
            message='''`inputs` must be a tensor of capsule filters, i.e. of 
                    shape (batch_size, height, width, filters, dims)'''
        )

        outputs = _caps_conv2d(
            inputs, 
            filters, 
            dims, 
            (kernel_size, kernel_size),
            (strides, strides),
            padding,
            iterations
        )

    return outputs
Example 2
def primaryCaps(inputs, filters, dims, kernel_size, strides=1, name=None):
    with tf.variable_scope(name, default_name='primaryCaps'):

        # Assert that input is a tensor of feature maps
        tf.assert_rank(
            inputs,
            rank=4,
            message='''`inputs` must be a tensor of feature maps (i.e. of shape
                    (batch_size, height, width, filters))'''
        )

        conv = tf.layers.conv2d(
            inputs,
            filters=filters*dims,
            kernel_size=kernel_size,
            strides=strides,
            padding='VALID',
            activation=tf.nn.relu,
            name='conv'
        )

        # Convert to capsules by reshaping and applying the squash nonlinearity
        _, height, width, _ = conv.shape

        capsules = tf.reshape(
            conv, 
            [-1, height, width, filters, dims], 
            name='capsules'
        )

        outputs = squash(capsules, name='outputs')

    return outputs
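The primaryCaps layer above relies on a `squash` helper defined elsewhere in the source repository. A minimal sketch of the standard capsule squashing nonlinearity (a hedged assumption about what `squash` does; the `axis` and `epsilon` defaults are illustrative), written for TF 1.x, could be:

import tensorflow as tf

def squash(s, axis=-1, epsilon=1e-7, name=None):
    # Scale each capsule vector so that short vectors shrink toward zero
    # and long vectors approach (but never exceed) unit length.
    with tf.name_scope(name, default_name='squash'):
        squared_norm = tf.reduce_sum(tf.square(s), axis=axis, keepdims=True)
        safe_norm = tf.sqrt(squared_norm + epsilon)
        return (squared_norm / (1.0 + squared_norm)) * (s / safe_norm)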
Example 3
def mlpg_univariate(means, stds, weights):
    """Generate a trajectory out of a time sequence of gaussian parameters.

    The algorithm used is taken from Tokuda, K. et al. (2000). Speech
    Parameter Generation Algorithms for HMM-based speech synthesis. It
    aims at generating the most likely trajectory sequence based on
    gaussian parameters fitted to an input sequence of some kind.

    means   : time sequence of means (1-D tensor)
    stds    : time sequence of standard deviations (1-D tensor)
    weights : matrix of weights to derive successive orders
              of dynamic features out of static ones (2-D tensor)

    The means and standard deviations should consist of the time
    sequence of parameters for static features first, followed by the
    time sequence of delta features parameters and finally by that of
    delta delta features parameters.
    """
    # Test arguments' rank validity; the control_dependencies context must be
    # entered for the assertions to actually be attached to the graph.
    with tf.control_dependencies([
        tf.assert_rank(means, 1),
        tf.assert_rank(stds, 1),
        tf.assert_rank(weights, 2)
    ]):
        # Compute the terms of the parameters generation system.
        inv_stds = tf.matrix_diag(1 / (tf.square(stds) + 1e-30))
        timed_variance = tf.matmul(tf.matrix_transpose(weights), inv_stds)
        left_term = tf.matmul(timed_variance, weights)
        right_term = tf.matmul(timed_variance, tf.expand_dims(means, 1))
        # Solve the system using Cholesky decomposition.
        static_features = tf.cholesky_solve(tf.cholesky(left_term), right_term)
        # Add dynamic features to the predicted static ones and return them.
        return tf.matmul(weights, static_features)
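A hedged usage sketch for `mlpg_univariate`, assuming TF 1.x graph mode; the centred-difference delta window, the two-stream (static + delta) setup, and the trajectory length are illustrative choices, not taken from the source:

import numpy as np
import tensorflow as tf

def build_delta_weights(num_frames):
    # Stack an identity block (static features) over a centred first-difference
    # block (delta features); result has shape (2 * num_frames, num_frames).
    identity = np.eye(num_frames)
    delta = (np.eye(num_frames, k=1) - np.eye(num_frames, k=-1)) / 2.0
    return np.concatenate([identity, delta], axis=0).astype(np.float32)

num_frames = 50
weights = tf.constant(build_delta_weights(num_frames))        # (2T, T)
means = tf.placeholder(tf.float32, shape=[2 * num_frames])    # static then delta means
stds = tf.placeholder(tf.float32, shape=[2 * num_frames])     # static then delta stds
trajectory = mlpg_univariate(means, stds, weights)            # (2T, 1)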
Example 4
def ctc_beam_search_decode(logits,
                           sequence_length,
                           beam_width=1,
                           top_paths=1,
                           blank_id=None):
    '''
    ctc beam search decode function
    param: logits, (B, T, C), output of ctc asr model
    param: sequence_length, (B, 1), sequence lengths
    param: beam_width, int, beam search beam width
    param: top_paths, int, controls output size
    return:
       decode_result, (B, T), decode result
       probs: A float matrix [batch_size, top_paths] containing sequence log-probabilities.
    '''

    logits, sequence_len = ctc_decode_data_prepare(logits, sequence_length,
                                                   blank_id)

    deps = [tf.assert_rank(logits, 3), tf.assert_rank(sequence_len, 1)]

    with tf.control_dependencies(deps):
        decode_result, probs = tf.nn.ctc_beam_search_decoder_v2(
            logits, sequence_len, beam_width=beam_width, top_paths=top_paths)
        decode_result = [
            tf.sparse_tensor_to_dense(result) for result in decode_result
        ]
    return decode_result, probs
Example 5
def outer_product_2D(inputs):
    """
    inputs: list of two tensors [x,y]
      x: (?,dx1,dx2,dx3)
      y: (?,dy1)
    output: 
      z: (?,dx1,dx2,dx3*dy1)
      z[:, :, :, dx3*n:dx3*(n+1)] = x * y[:, n]
    """

    x, y = inputs
    x_shape = K.shape(x)
    y_shape = K.shape(y)
    tf.assert_equal(tf.size(x_shape), 4)
    tf.assert_equal(tf.size(y_shape), 2)
    tf.assert_equal(x_shape[0], y_shape[0])
    batchSize = x_shape[0]
    y_static_size = y.shape.as_list()[1]
    output_shape = [-1] + x.shape.as_list()[1:]
    output_shape[3] *= y_static_size

    x = K.expand_dims(x, -2)
    y = K.reshape(y, (-1, 1, 1, y_static_size, 1))

    outer_product = x * y
    outer_product = K.reshape(outer_product, output_shape)
    tf.assert_rank(outer_product, 4)
    # returns a flattened batch-wise set of tensors
    return outer_product
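A hedged usage sketch wrapping `outer_product_2D` in a Keras Lambda layer (the input shapes and wiring are illustrative assumptions; `K` above is assumed to be the Keras backend):

import tensorflow as tf
from tensorflow import keras

x_in = keras.layers.Input(shape=(8, 8, 16))                 # (dx1, dx2, dx3)
y_in = keras.layers.Input(shape=(4,))                       # (dy1,)
z = keras.layers.Lambda(outer_product_2D)([x_in, y_in])     # (8, 8, 16 * 4)
model = keras.Model(inputs=[x_in, y_in], outputs=z)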
Example 6
def ctc_greedy_decode(logits,
                      sequence_length,
                      merge_repeated=True,
                      blank_id=None):
    '''
    ctc greedy decode function
    param: logits, (B, T, C), output of ctc asr model
    param: sequence_length, (B, 1), sequence lengths
    param: merge_repeated, boolean, whether to merge consecutive repeated classes in the output
    returns:
        decode_result, (B, T), decode result
        probs, (B), A float matrix containing, for the sequence found,
            the negative of the sum of the greatest logit at each timeframe.
    '''

    logits, sequence_len = ctc_decode_data_prepare(logits, sequence_length,
                                                   blank_id)

    deps = [
        tf.assert_rank(logits, 3),
        tf.assert_rank(sequence_len, 1),
    ]

    with tf.control_dependencies(deps):
        decode_result, probs = tf.nn.ctc_greedy_decoder(
            logits, sequence_len, merge_repeated=merge_repeated)
        decode_result = tf.sparse_tensor_to_dense(decode_result[0],
                                                  name="outputs")
    return decode_result, probs
Example 7
def test_model_inputs(model_inputs):
    with tf.Graph().as_default():
        input_data, targets, lr, keep_prob = model_inputs()

        # Check type
        assert input_data.op.type == 'Placeholder',\
            'Input is not a Placeholder.'
        assert targets.op.type == 'Placeholder',\
            'Targets is not a Placeholder.'
        assert lr.op.type == 'Placeholder',\
            'Learning Rate is not a Placeholder.'
        assert keep_prob.op.type == 'Placeholder', \
            'Keep Probability is not a Placeholder.'

        # Check name
        assert input_data.name == 'input:0',\
            'Input has bad name.  Found name {}'.format(input_data.name)
        assert keep_prob.name == 'keep_prob:0', \
            'Keep Probability has bad name.  Found name {}'.format(keep_prob.name)

        assert tf.assert_rank(input_data, 2, message='Input data has wrong rank')
        assert tf.assert_rank(targets, 2, message='Targets has wrong rank')
        assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank')
        assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank')

    _print_success_message()
Example 8
def tokens_to_bytes(tokens):
    """Given a sequence of strings, map to sequence of bytes.

    Args:
      tokens: A tf.string tensor

    Returns:
      A tensor of shape words.shape + [bytes_per_word] containing byte versions
      of each word.
    """
    bytes_per_word = DEFAULT_CHAR_MAXLEN
    with tf.device("/cpu:0"):
        tf.assert_rank(tokens, 1)
        shape = tf.shape(tokens)
        tf.logging.info(tokens)
        tokens_flat = tf.reshape(tokens, [-1])
        as_bytes_flat = tf.map_fn(
            fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
            elems=tokens_flat,
            dtype=tf.int32,
            back_prop=False,
        )
        tf.logging.info(as_bytes_flat)
        as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
    return as_bytes
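`tokens_to_bytes` relies on a `_string_to_bytes` helper and a `DEFAULT_CHAR_MAXLEN` constant defined elsewhere. A hypothetical sketch of such a helper (not the repository's actual implementation, which may reserve ids for special tokens) could be:

import tensorflow as tf

def _string_to_bytes(x, max_length):
    # Decode a scalar string tensor to its raw byte values, then truncate
    # and zero-pad to a fixed length (hypothetical behaviour).
    byte_ids = tf.cast(tf.decode_raw(x, tf.uint8), tf.int32)
    byte_ids = byte_ids[:max_length]
    padding = tf.zeros([max_length - tf.shape(byte_ids)[0]], dtype=tf.int32)
    return tf.concat([byte_ids, padding], axis=0)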
Example 9
    def _trainBatch(states, actions, rewards, nextStates,
                    nextStateScoreMultiplier, nextQScore):
        allW = tf.ones((tf.shape(states)[0], len(submodels)))
        nextAction = tf.argmax(model([nextStates, allW], training=False)[0],
                               axis=-1)
        futureScores = tf.gather(nextQScore, nextAction, batch_dims=1)
        nextRewards = rewards + futureScores * nextStateScoreMultiplier
        tf.assert_rank(nextRewards, 1)  # [None, ]

        with tf.GradientTape(persistent=True) as tape:
            predictions = model([states, allW])
            ########
            targets = predictions[0]
            targets = (nextRewards[:, None] * actions) + (targets *
                                                          (1.0 - actions))
            ########
            tf.assert_equal(tf.shape(targets), tf.shape(predictions[-1]))
            losses = [
                tf.reduce_mean(tf.keras.losses.huber(targets, x))
                for x in predictions[1:]
            ]

        for submodel in submodels:
            grads = tape.gradient(losses, submodel.trainable_weights)
            submodel.optimizer.apply_gradients(
                zip(grads, submodel.trainable_weights))

        return tf.reduce_mean(losses), nextRewards
Example 10
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
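A hedged usage sketch for `calculate_reshape`, assuming TF 1.x graph mode (the placeholder shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 12])
new_shape = tf.placeholder(tf.int32, shape=[3])
expanded_shape, static_shape, validations = calculate_reshape(
    tf.shape(x), new_shape, validate=True)
# Attach the validation assertions before actually reshaping.
with tf.control_dependencies(validations):
    y = tf.reshape(x, expanded_shape)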
Example 11
def test_model_inputs(model_inputs):
    with tf.Graph().as_default():
        input_data, targets, lr, keep_prob = model_inputs()

        # Check type
        assert input_data.op.type == 'Placeholder',\
            'Input is not a Placeholder.'
        assert targets.op.type == 'Placeholder',\
            'Targets is not a Placeholder.'
        assert lr.op.type == 'Placeholder',\
            'Learning Rate is not a Placeholder.'
        assert keep_prob.op.type == 'Placeholder', \
            'Keep Probability is not a Placeholder.'

        # Check name
        assert input_data.name == 'input:0',\
            'Input has bad name.  Found name {}'.format(input_data.name)
        assert keep_prob.name == 'keep_prob:0', \
            'Keep Probability has bad name.  Found name {}'.format(keep_prob.name)

        assert tf.assert_rank(input_data, 2, message='Input data has wrong rank')
        assert tf.assert_rank(targets, 2, message='Targets has wrong rank')
        assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank')
        assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank')

    _print_success_message()
Example 12
 def supervised_loss(self,
                     labels: tf.Tensor,
                     qZ_X: Distribution,
                     mean: bool = False,
                     mask: Optional[tf.Tensor] = None,
                     training: Optional[bool] = None) -> tf.Tensor:
     labels = tf.nest.flatten(labels)
     z = self._to_samples(qZ_X, mean=mean, stop_grad=True)
     distributions = tf.nest.flatten(self(z, training=training))
     ## applying the mask (1-labelled, 0-unlabelled)
     if mask is not None:
         mask = tf.reshape(mask, (-1, ))
         # labels = [tf.boolean_mask(y, mask, axis=0) for y in labels]
         # z_logits = tf.boolean_mask(z_logits, mask, axis=0)
     ## calculate the loss
     loss = 0.
     for dist, y_true in zip(distributions, labels):
         tf.assert_rank(y_true, 2)
         llk = dist.log_prob(y_true)
         # be careful with the mask here:
         # if there is no labelled data, just return 0
         if mask is not None:
             llk = tf.cond(tf.reduce_all(tf.logical_not(mask)), lambda: 0.,
                           lambda: tf.boolean_mask(llk, mask, axis=0))
         # negative log-likelihood here
         loss += -llk
     # check non-zero; if zero, the gradient must be stopped or a NaN gradient occurs
     loss = tf.reduce_mean(loss)
     loss = tf.cond(
         tf.abs(loss) < 1e-8, lambda: tf.stop_gradient(loss), lambda: loss)
     return loss
Example 13
def conv_decoder(inputs, num_filters, output_shape, scope=None):
    net = inputs
    with tf.variable_scope(scope, 'decoder', [inputs]):
        tf.assert_rank(inputs, 4)
        for layer_id, units in enumerate(num_filters):
            with tf.variable_scope('block_{}'.format(layer_id),
                                   values=(net,)):
                net = tf.contrib.layers.conv2d_transpose(net, units, stride=2)
                add_hidden_layer_summary(net)

        with tf.variable_scope('linear', values=(net,)):
            net = tf.contrib.layers.conv2d_transpose(
                net, 1, activation_fn=None)
            tf.summary.histogram('activation', net)

        with tf.name_scope('crop', values=[net]):
            shape = net.get_shape().as_list()
            assert len(shape) == len(output_shape), 'shape mismatch'
            slice_beg = [0]
            slice_size = [-1]
            for sin, sout in zip(shape[1:], output_shape[1:]):
                if sin == sout:
                    slice_beg.append(0)
                    slice_size.append(-1)
                else:
                    assert sin > sout, "{} <= {}".format(sin, sout)
                    beg = (sin - sout) // 2
                    slice_beg.append(beg)
                    slice_size.append(sout)

            net = tf.slice(net, slice_beg, slice_size)
        net = tf.identity(net, name='output')

    return net
Example 14
    def measureBias(self, cps):
        """
        This function can be used to obtain the bias value given cps, an array of cp values.
        Useful for finding the right scales where the linearity is produced.
        :param cps:
        :return:
        """
        cps = tf.expand_dims(cps,-1)
        tf.assert_rank(cps,3)

        if self.usingLog:
            QgijTemplate_eq = self.TA0*self.k2g*self.k1Mg*tf.exp(self.Xglobal)/(1+self.k1Mg*self.E0*tf.exp(self.Xglobal)/tf.squeeze(cps,axis=-1))
            QdgijTemplate_eq = self.TI0*self.k4g*self.k3Mg*tf.exp(self.Xglobal)/(1+self.k3Mg*self.E0*tf.exp(self.Xglobal)/tf.squeeze(cps,axis=-1))
        else:
            QgijTemplate_eq = self.TA0*self.k2g*self.k1Mg*self.Xglobal/(1+self.k1Mg*self.E0*self.Xglobal/tf.squeeze(cps,axis=-1))
            QdgijTemplate_eq = self.TI0*self.k4g*self.k3Mg*self.Xglobal/(1+self.k3Mg*self.E0*self.Xglobal/tf.squeeze(cps,axis=-1))

        activBias = tf.keras.backend.sum(tf.where(self.mask > 0, tf.math.log(self.k2 * self.k1M * self.E0/(self.kdT*cps)), 0), axis=1) + \
                    tf.math.log(QgijTemplate_eq * self.E0 /tf.squeeze(cps,axis=-1))

        pT_eq = tf.keras.backend.sum(tf.where(self.mask<0,tf.math.log(self.k4 * self.E0 *self.k3M/(cps*self.kdT2)),0),axis=1) + \
                tf.math.log(QdgijTemplate_eq*self.E0/tf.squeeze(cps,axis=-1))
        inhib_bias = self.k6*self.k5M*self.E0*tf.exp(pT_eq)/(self.kdpT*tf.squeeze(cps,axis=-1))

        return activBias,inhib_bias
Example 15
    def __init__(self, log_diag_precision, **kwargs):
        tf.assert_rank(log_diag_precision,
                       2,
                       message="Data must be [batch dim, num features]")
        with tf.name_scope("PrecisionDiag"):
            log_diag_covariance = tf.negative(log_diag_precision,
                                              name="log_diag_covariance")

        super(PrecisionDiag,
              self).__init__(log_diag_precision=log_diag_precision,
                             log_diag_covariance=log_diag_covariance,
                             **kwargs)
        self._build_with_covariance = False

        with tf.name_scope("PrecisionDiag"):
            self._diag_shape, self._matrix_shape = self._build_shapes(
                self._log_diag_precision)
            self._diag_precision = tf.exp(self._log_diag_precision,
                                          name="diag_precision")
            self._diag_covariance = tf.exp(-self._log_diag_precision,
                                           name="diag_covariance")
            self._chol_diag_covariance = tf.exp(self._log_diag_precision *
                                                -0.5,
                                                name="chol_diag_covariance")
            self._chol_diag_precision = tf.exp(self._log_diag_precision * 0.5,
                                               name="chol_diag_precision")
Example 16
    def __init__(self, lower: Union[List[float], TensorType],
                 upper: Union[List[float], TensorType]):
        r"""
        If ``lower`` and ``upper`` are `list`\ s, they will be converted to tensors of dtype
        `tf.float64`.

        :param lower: The lower (inclusive) bounds of the box. Must have shape [D] for positive D,
            and if a tensor, must have float type.
        :param upper: The upper (inclusive) bounds of the box. Must have shape [D] for positive D,
            and if a tensor, must have float type.
        :raise ValueError (or InvalidArgumentError): If any of the following are true:

            - ``lower`` and ``upper`` have invalid shapes.
            - ``lower`` and ``upper`` do not have the same floating point type.
            - ``upper`` is not greater than ``lower`` across all dimensions.
        """
        tf.debugging.assert_shapes([(lower, ["D"]), (upper, ["D"])])
        tf.assert_rank(lower, 1)
        tf.assert_rank(upper, 1)

        if len(lower) == 0:
            raise ValueError(
                f"Bounds must have shape [D] for positive D, got {tf.shape(lower)}."
            )

        if isinstance(lower, list):
            self._lower = tf.cast(lower, dtype=tf.float64)
            self._upper = tf.cast(upper, dtype=tf.float64)
        else:
            self._lower = tf.convert_to_tensor(lower)
            self._upper = tf.convert_to_tensor(upper)

            tf.debugging.assert_same_float_dtype([self._lower, self._upper])

        tf.debugging.assert_less(self._lower, self._upper)
Example 17
def ctc_lambda_loss(logits, labels, input_length, label_length, smoothing=0.0):
    '''
  ctc loss function
  param: logits, (B, T, D)
  param: input_length, (B, 1), input length of encoder
  param: labels, (B, T)
  param: label_length, (B, 1), label length used to convert dense labels to sparse
  returns: loss, scalar
  '''
    del smoothing

    ilen = tf.cond(
        pred=tf.equal(tf.rank(input_length), 1),
        true_fn=lambda: tf.expand_dims(input_length, axis=-1),
        false_fn=lambda: input_length,
    )
    olen = tf.cond(
        pred=tf.equal(tf.rank(label_length), 1),
        true_fn=lambda: tf.expand_dims(label_length, axis=-1),
        false_fn=lambda: label_length,
    )
    deps = [
        tf.assert_rank(labels, 2),
        tf.assert_rank(logits, 3),
        tf.assert_rank(ilen, 2),  # input_length
        tf.assert_rank(olen, 2),  # output_length
    ]
    with tf.control_dependencies(deps):
        # (B, 1)
        batch_loss = K.ctc_batch_cost(labels, logits, ilen, olen)
        loss = tf.reduce_mean(batch_loss)

    return loss
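A hedged usage sketch for the loss above (batch, time, label, and vocabulary sizes are illustrative; `K` is assumed to be the Keras backend):

import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=[None, 100, 30])   # (B, T, D)
labels = tf.placeholder(tf.int32, shape=[None, 20])          # (B, max label length)
input_length = tf.placeholder(tf.int32, shape=[None, 1])     # (B, 1)
label_length = tf.placeholder(tf.int32, shape=[None, 1])     # (B, 1)
loss = ctc_lambda_loss(logits, labels, input_length, label_length)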
Example 18
    def __init__(self,
                 log_eig_val_covar,
                 log_eig_val_precision,
                 eig_vec,
                 inversion_method=None,
                 **kwargs):
        if inversion_method is None:
            inversion_method = DecompMethod.EIGEN
        super(_CovarianceEigCommon,
              self).__init__(inversion_method=inversion_method, **kwargs)
        tf.assert_rank(
            eig_vec,
            3,
            message="Data must be [batch dim, num features, num features]")
        self._eig_vec = eig_vec
        self._log_eig_val_covar = log_eig_val_covar
        self._log_eig_val_precision = log_eig_val_precision

        self.dtype = self._eig_vec.dtype
        if self._eig_vec.shape.is_fully_defined():
            # Eig vec shape is [batch size, num features, num eig vectors]
            eig_vec_shape = self._eig_vec.shape
            self._matrix_shape = tf.TensorShape(
                [eig_vec_shape[0], eig_vec_shape[1], eig_vec_shape[1]])
        else:
            eig_vec_shape = tf.shape(self._eig_vec)
            self._matrix_shape = tf.stack(
                [eig_vec_shape[0], eig_vec_shape[1], eig_vec_shape[1]], axis=0)
Example 19
def ctc_lambda_loss(logits, labels, input_length, label_length, blank_index=0):
    '''
  ctc loss function
  param: logits, (B, T, D)
  param: input_length, (B, 1), input length of encoder
  param: labels, (B, T)
  param: label_length, (B, 1), label length used to convert dense labels to sparse
  returns: loss, scalar
  '''
    ilen = tf.cond(
        pred=tf.equal(tf.rank(input_length), 1),
        true_fn=lambda: input_length,
        false_fn=lambda: tf.squeeze(input_length),
    )
    olen = tf.cond(pred=tf.equal(tf.rank(label_length), 1),
                   true_fn=lambda: label_length,
                   false_fn=lambda: tf.squeeze(label_length))
    deps = [
        tf.assert_rank(labels, 2),
        tf.assert_rank(logits, 3),
        tf.assert_rank(ilen, 1),  # input_length
        tf.assert_rank(olen, 1),  # output_length
    ]

    with tf.control_dependencies(deps):
        # (B, 1)
        # blank index is consistent with Espnet, zero
        batch_loss = tf.nn.ctc_loss_v2(labels=labels,
                                       logits=logits,
                                       label_length=olen,
                                       logit_length=ilen,
                                       logits_time_major=False,
                                       blank_index=blank_index)
        batch_loss.set_shape([None])
    return batch_loss
Example 20
 def path2label(path):
     """Given proper path to a file from the database returns (path, label)."""
     sparse = tf.string_split([path], delimiter='/')
     label = sparse.values[-2]
     numeric_label = tf.where(tf.equal(label, list(LABELS)), name='XDXD')
     numeric_label = tf.squeeze(numeric_label)
     tf.assert_rank(numeric_label, 0)  # should be a scalar
     return path, numeric_label
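A hedged usage sketch mapping `path2label` over a dataset of file paths (the `LABELS` tuple and the glob pattern are assumptions):

import tensorflow as tf

LABELS = ('cat', 'dog', 'bird')
paths = tf.data.Dataset.list_files('dataset/*/*.jpg')
examples = paths.map(path2label)   # yields (path, numeric_label) pairs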
Example 21
 def logit_fn(self, features):
     with tf.variable_scope("input_from_feature_columns"):
         self.user = tf.feature_column.input_layer(features=features, feature_columns=self.user_emb)
         self.item = tf.feature_column.input_layer(features=features, feature_columns=self.item_emb)
         logits = tf.reduce_sum(tf.multiply(self.user, self.item), axis=-1)
         logits = tf.expand_dims(logits, axis=-1)
         tf.assert_rank(logits, 2)
         return logits
Example 22
def multi_ch_conv(data, kernel, bidir=True):
    #Verify input tensor ranks
    tf.assert_rank(kernel, 1)
    tf.assert_rank(data, 2)

    num_channels = tf.shape(data, name="NumChan")[0]
    num_samples = tf.shape(data, name="NumSamples")[1]
    kernel_len = tf.shape(kernel, name="KernelLen")[0]
    '''
    NOTE
    The above assert_rank calls are evaluated when the graph is being compiled.
    The below assert_less_equal is evaluated when the graph is run, so it
    must actually be *run*, which is why we have to use the control_dependencies
    call below.
    '''

    asserts = [
        tf.assert_less_equal(
            kernel_len,
            num_samples,
            message=
            "JCR: Lenth of kernel must be shorter than the length of the input."
        )
    ]

    with tf.control_dependencies(asserts):

        #Pad the beginning / end of each channel so we can do a single 1D convolve
        with tf.name_scope("PadInputTensor"):
            p = tf.cast(tf.ceil(
                tf.realdiv(tf.cast(kernel_len, tf.float32),
                           tf.constant(2.0, dtype=tf.float32))),
                        dtype=tf.int32)
            data_pad = tf.pad(data, [[0, 0], [p, p]])

        with tf.name_scope("Conv1D"):
            #Reshape to use with conv1d
            #[batch,width,chan]
            data_1d = tf.reshape(data_pad, [1, -1, 1])

            #Reshape kernel to use with conv1d
            #[width,chan_in,chan_out]
            kernel_1d = tf.reshape(kernel, [-1, 1, 1])

            conv_raw = tf.nn.conv1d(data_1d, kernel_1d, 1, 'SAME')

            if bidir:
                conv_raw_rev = tf.nn.conv1d(tf.reverse(conv_raw, [1]),
                                            kernel_1d, 1, 'SAME')
                conv_raw = tf.reverse(conv_raw_rev, [1])

        with tf.name_scope("Reconstruct"):
            conv_raw_rs = tf.reshape(conv_raw, [num_channels, -1])

            conv_raw_sliced = tf.slice(conv_raw_rs, [0, p], [-1, num_samples])

        return conv_raw_sliced
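A hedged usage sketch: smoothing a multi-channel signal with a short box kernel (the shapes and kernel values are illustrative):

import tensorflow as tf

data = tf.placeholder(tf.float32, shape=[3, None])         # [channels, samples]
kernel = tf.constant([0.25, 0.5, 0.25], dtype=tf.float32)
smoothed = multi_ch_conv(data, kernel)                     # [channels, samples]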
Example 23
def make_loss_ops(a_logits, graph_v, entropy_bonus, value_loss_coef, debug):
    actions = tf.placeholder(tf.int64, [None])
    returns = tf.placeholder(tf.float32, [None])

    # For the policy loss, we want to calculate log π(action_t | state_t).
    # That means we want log(action_prob_0 | state_t) if action_t = 0,
    #                    log(action_prob_1 | state_t) if action_t = 1, etc.
    # It turns out that's exactly what a cross-entropy loss gives us!
    # The cross-entropy of a distribution p wrt a distribution q is:
    #   - sum over x: p(x) * log2(q(x))
    # Note that for a categorical distribution, considering the
    # cross-entropy of the ground truth distribution wrt the
    # distribution of predicted class probabilities, p(x) is 1 if the
    # ground truth label is x and 0 otherwise. We therefore have:
    #   - log2(q(0)) if ground truth label = 0,
    #   - log2(q(1)) if ground truth label = 1, etc.
    # So here, by taking the cross-entropy of the distribution of
    # action 'labels' wrt the produced action probabilities, we can get
    # exactly what we want :)
    _neglogprob = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=a_logits, labels=actions)
    with tf.control_dependencies([tf.assert_rank(_neglogprob, 1)]):
        neglogprob = _neglogprob

    if debug:
        neglogprob = tf.Print(neglogprob, [actions],
                              message='\ndebug actions:',
                              summarize=2147483647)

    _advantage = returns - graph_v
    with tf.control_dependencies([tf.assert_rank(_advantage, 1)]):
        advantage = _advantage

    if debug:
        advantage = tf.Print(advantage, [returns],
                             message='\ndebug returns:',
                             summarize=2147483647)

    policy_entropy = tf.reduce_mean(logit_entropy(a_logits))

    # Note that the advantage is treated as a constant for the
    # policy network update step.
    # Note also that we're calculating advantages on-the-fly using
    # the value approximator. This might make us worry: what if we're
    # using the loss for training, and the advantages are calculated
    # /after/ training has changed the network? But for A3C, we don't
    # need to worry, because we compute the gradients separately from
    # applying them.
    # We want to maximise entropy, which is the same as
    # minimising negative entropy.
    policy_loss = neglogprob * tf.stop_gradient(advantage)
    policy_loss = tf.reduce_mean(policy_loss) - entropy_bonus * policy_entropy
    value_loss = value_loss_coef * tf.reduce_mean(0.5 * advantage ** 2)
    loss = policy_loss + value_loss

    return actions, returns, advantage, policy_entropy, \
           policy_loss, value_loss, loss
Example 24
    def log_likelihood_sym(self, x_var, dist_info_vars):
        probs = dist_info_vars["prob"]
        log_probs = tf.log(probs + TINY)

        # assert ranks
        tf.assert_rank(probs, 2)
        tf.assert_rank(x_var, 2)

        return tf.batch_gather(log_probs, x_var)
Example 25
 def call(self, inputs, **kwargs):
     tf.assert_rank(inputs, 4) # [B, Y, X, C]
     channel_images = tf.unstack(inputs, axis=-1)
     channel_activations = [channel_conv(channel_image[..., tf.newaxis], **kwargs)
                            for channel_conv, channel_image
                            in zip(self.channel_convs, channel_images)]
     channel_activations = tf.concat(channel_activations, axis=-1)
     final_activations = self.final_conv(channel_activations, **kwargs)
     return final_activations
Example 26
 def call(self, inputs):
     H, h_c = inputs
     # B, 1, D
     Q = tf.matmul(h_c, self.wq)
     K = tf.matmul(H, self.wk)
     V = tf.matmul(H, self.wv)
     tf.assert_rank(Q, 3)
     # B, 1, D
     return attention(Q, K, V)
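The layer above delegates to an `attention` helper defined elsewhere. A hypothetical sketch of standard scaled dot-product attention (not necessarily the repository's exact definition) could be:

import tensorflow as tf

def attention(Q, K, V):
    # softmax(Q K^T / sqrt(d_k)) V, computed over the time axis of K and V.
    d_k = tf.cast(tf.shape(K)[-1], tf.float32)
    scores = tf.matmul(Q, K, transpose_b=True) / tf.sqrt(d_k)   # (B, 1, T)
    weights = tf.nn.softmax(scores, axis=-1)
    return tf.matmul(weights, V)                                # (B, 1, D)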
Example 27
def infer(encoder_cell, decoder_cell, sentences):
    tf.assert_rank(sentences, 3)
    assert sentences.get_shape()[0].value == 1  # batch size
    assert sentences.get_shape()[2].value == FEATURE_SIZE

    # stop generating output once the length reaches double the source length
    output_len_threshold = sentences.get_shape()[1].value * 2

    final_state_tuple = encode(sentences, encoder_cell, reuse=True)
    context = bridge(final_state_tuple.c, decoder_cell.output_size, reuse=True)

    with tf.variable_scope('decoder', reuse=True):
        def cond(loop_cnt, prev_out, _, __):
            less = tf.less(loop_cnt, output_len_threshold)
            is_regular_word = tf.reduce_any(
                tf.not_equal(
                    prev_out,
                    tf.one_hot([0], FEATURE_SIZE)  # <eos>
                )
            )

            return tf.logical_and(less, is_regular_word)

        def body(loop_cnt, prev_out, prev_state, result):
            cell_output, state = decoder_cell(prev_out, prev_state)
            num_outputs = decoder_cell.output_size
            output = decoder_projection(
                cell_output,
                num_outputs=num_outputs,
                reuse=True
            )
            arg_max = tf.arg_max(output, dimension=1)
            one_hot_output = tf.one_hot(
                indices=arg_max,
                depth=num_outputs
            )

            return (
                tf.add(loop_cnt, 1),
                one_hot_output,
                state,
                result.write(result.size(), tf.cast(one_hot_output, dtype=tf.int8))
            )

        _, __, ___, inferred = tf.while_loop(
            cond,
            body,
            loop_vars=(
                tf.constant(0),
                context,
                decoder_cell.zero_state(batch_size=1, dtype=tf.float32),
                tf.TensorArray(tf.int8, size=0, dynamic_size=True)
            )
        )

        return inferred.stack()
Example 28
def decode_for_training(cell, final_enc_state, labels):
    # [actual batch size, max seq len, decoder cell size]
    tf.assert_rank(labels, 3)

    cell_size = cell.output_size
    context = bridge(final_enc_state, cell_size)

    # [actual batch size, decoder cell size]
    assert context.get_shape().as_list() == [None, cell_size]

    # tf.shape(labels): tuple of 1 element
    batch_size = tf.shape(labels)[0]  # type: tf.Tensor of rank 0
    max_time_step = labels.get_shape()[1].value

    with tf.variable_scope('decoder'):
        def cond(loop_cnt, _, __, ___):
            return tf.less(loop_cnt, max_time_step)

        def body(loop_cnt, prev_label, prev_state, losses):
            cell_output, state = cell(prev_label, prev_state)
            output = decoder_projection(cell_output, cell_size)

            # cut out the `loop_cnt`-th label
            label = tf.reshape(
                tf.slice(labels, begin=[0, loop_cnt, 0], size=[batch_size, 1, cell_size]),
                shape=[batch_size, cell_size]
            )

            # loss for output past the last time step is calculated to be 0
            loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=output,
                labels=label
            )

            return (
                tf.add(loop_cnt, 1),
                # pass the label as the output of the current step
                label,
                state,
                losses.write(loop_cnt, loss)
            )

        _, _, _, result_loss = tf.while_loop(
            cond,
            body,
            loop_vars=(
                tf.constant(0),
                context,
                cell.zero_state(batch_size=batch_size, dtype=tf.float32),
                tf.TensorArray(tf.float32, size=0, dynamic_size=True)
            ),
        )

        losses = tf.reduce_sum(result_loss.stack(), axis=0)
        time_steps = tf.reduce_sum(tf.reduce_sum(labels, axis=2), axis=1)
        return tf.div(losses, time_steps)
Example 29
    def __init__(self, chol_covariance, **kwargs):
        super(CovarianceCholesky, self).__init__(**kwargs)
        tf.assert_rank(chol_covariance, 3, message="Size must be [batch dim, feature dim, feature dim]")
        self._chol_covariance = chol_covariance

        self.dtype = self._chol_covariance.dtype
        if self._chol_covariance.shape.is_fully_defined():
            self._matrix_shape = self._chol_covariance.shape
        else:
            self._matrix_shape = tf.shape(self._chol_covariance)
Example 30
def conv_encoder(inputs, num_filters, scope=None):
    net = inputs
    with tf.variable_scope(scope, 'encoder', [inputs]):
        tf.assert_rank(inputs, 4)
        for layer_id, num_outputs in enumerate(num_filters):
            with tf.variable_scope('block{}'.format(layer_id)):
                net = slim.repeat(net, 2, conv2d_fixed_padding, num_outputs=num_outputs)
                net = tf.contrib.layers.max_pool2d(net)

        net = tf.identity(net, name='output')
    return net
Example 31
    def __init__(self, chol_precision, **kwargs):
        super(PrecisionCholesky, self).__init__(**kwargs)
        tf.assert_rank(chol_precision, 3, message="Size must be [batch dim, feature dim, feature dim]")
        self._chol_precision = chol_precision
        self._build_with_covariance = False

        self.dtype = self._chol_precision.dtype
        if self._chol_precision.shape.is_fully_defined():
            self._matrix_shape = self._chol_precision.shape
        else:
            self._matrix_shape = tf.shape(self._chol_precision)
Example 32
def bridge(final_enc_state, decoder_cell_size, reuse=False):
    tf.assert_rank(final_enc_state, 2)

    with tf.variable_scope('bridge', reuse=reuse):
        context = tf.contrib.layers.fully_connected(
            inputs=final_enc_state,
            num_outputs=decoder_cell_size,
            activation_fn=tf.tanh
        )

        return context
Example 33
def separable_conv(x, filters, kernel_size, activation):
    """Apply a depthwise separable 1d convolution."""
    tf.assert_rank(x, 3)
    net = tf.expand_dims(x, 2)
    net = tf.layers.separable_conv2d(net,
                                     filters=filters,
                                     kernel_size=(kernel_size, 1),
                                     padding='same',
                                     activation=activation)
    net = tf.squeeze(net, axis=2)
    return net
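A hedged usage sketch, assuming a (batch, time, channels) input (the sizes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 128, 64])   # (batch, time, channels)
y = separable_conv(x, filters=32, kernel_size=5, activation=tf.nn.relu)
# y has shape (batch, time, 32)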
Example 34
    def policy_fn(noptions=64,
                  nbatch=None,
                  nsteps=None,
                  sess=None,
                  observ_placeholder=None,
                  action_placeholder=None,
                  option_z_placeholder=None,
                  dones_placeholder=None):
        ob_space = env.observation_space
        ac_n = get_action_dim(env)

        X = observ_placeholder if observ_placeholder is not None else \
            observation_placeholder(ob_space, batch_size=nbatch, name='ob')
        X_next = observation_placeholder(ob_space,
                                         batch_size=nbatch,
                                         name='ob_next')
        ac = action_placeholder if action_placeholder is not None else \
            tf.placeholder(tf.float32, shape=(nbatch, ac_n), name='ac')
        op = option_z_placeholder if option_z_placeholder is not None else \
            tf.placeholder(tf.float32, shape=(nbatch, noptions), name='op_z')
        dones = dones_placeholder if dones_placeholder is not None else \
            tf.placeholder(tf.float32, shape=(nbatch), name='dones')

        extra_tensors = {}

        cnn_fm, policy_latent, vf = pi_vf_fn(X, extra_tensors, nbatch, nsteps)
        next_cnn_fm, _, next_vf = pi_vf_fn(X_next,
                                           extra_tensors,
                                           nbatch,
                                           nsteps,
                                           recurrent_subname='next')
        assert noptions == cnn_fm.get_shape().as_list()[-1], \
            'number of options for VFO should equal the number of channels of the last conv layer'

        tf.assert_rank(policy_latent, 2)
        option_latent = tf.concat([policy_latent, op], 1)
        q_latent = tf.concat([policy_latent, op, ac], 1)

        policy = PolicyWithValue(env=env,
                                 observations=X,
                                 next_observations=X_next,
                                 actions=ac,
                                 option_z=op,
                                 dones=dones,
                                 feature_map=cnn_fm,
                                 next_feature_map=next_cnn_fm,
                                 latent=policy_latent,
                                 option_latent=tf.stop_gradient(option_latent),
                                 q_latent=tf.stop_gradient(q_latent),
                                 vf=vf,
                                 next_vf=next_vf,
                                 sess=sess,
                                 **extra_tensors)
        return policy
Example 35
 def test_raises_if_rank_is_not_scalar_dynamic(self):
   with self.test_session():
     tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
     rank_tensor = tf.placeholder(tf.int32, name="rank_tensor")
     with self.assertRaisesOpError("Rank must be a scalar"):
       with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
         tf.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
Example 36
 def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
   with self.test_session():
     tensor = tf.placeholder(tf.float32, name="my_tensor")
     desired_rank = 2
     with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
       with self.assertRaisesOpError("my_tensor.*rank"):
         tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
Example 37
 def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
   with self.test_session():
     tensor = tf.constant([1, 2], name="my_tensor")
     desired_rank = 2
     with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
       with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
         tf.identity(tensor).eval()
Example 38
def _maybe_validate_rightmost_transposed_ndims(
    rightmost_transposed_ndims, validate_args, name=None):
  """Checks that `rightmost_transposed_ndims` is valid."""
  with tf.name_scope(name, 'maybe_validate_rightmost_transposed_ndims',
                     [rightmost_transposed_ndims]):
    assertions = []
    if not rightmost_transposed_ndims.dtype.is_integer:
      raise TypeError('`rightmost_transposed_ndims` must be integer type.')

    if rightmost_transposed_ndims.shape.ndims is not None:
      if rightmost_transposed_ndims.shape.ndims != 0:
        raise ValueError('`rightmost_transposed_ndims` must be a scalar, '
                         'saw rank: {}.'.format(
                             rightmost_transposed_ndims.shape.ndims))
    elif validate_args:
      assertions += [tf.assert_rank(rightmost_transposed_ndims, 0)]

    rightmost_transposed_ndims_ = tf.contrib.util.constant_value(
        rightmost_transposed_ndims)
    msg = '`rightmost_transposed_ndims` must be non-negative.'
    if rightmost_transposed_ndims_ is not None:
      if rightmost_transposed_ndims_ < 0:
        raise ValueError(msg[:-1] + ', saw: {}.'.format(
            rightmost_transposed_ndims_))
    elif validate_args:
      assertions += [tf.assert_non_negative(
          rightmost_transposed_ndims, message=msg)]

    return assertions
Example 39
def _maybe_validate_perm(perm, validate_args, name=None):
  """Checks that `perm` is valid."""
  with tf.name_scope(name, 'maybe_validate_perm', [perm]):
    assertions = []
    if not perm.dtype.is_integer:
      raise TypeError('`perm` must be integer type')

    msg = '`perm` must be a vector.'
    if perm.shape.ndims is not None:
      if perm.shape.ndims != 1:
        raise ValueError(
            msg[:-1] + ', saw rank: {}.'.format(perm.shape.ndims))
    elif validate_args:
      assertions += [tf.assert_rank(perm, 1, message=msg)]

    perm_ = tf.contrib.util.constant_value(perm)
    msg = '`perm` must be a valid permutation vector.'
    if perm_ is not None:
      if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
        raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
    elif validate_args:
      assertions += [tf.assert_equal(
          tf.contrib.framework.sort(perm),
          tf.range(tf.size(perm)),
          message=msg)]

    return assertions
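A hedged usage sketch, applying the returned assertions before a transpose (the placeholder shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, None, None])
perm = tf.placeholder(tf.int32, shape=[3])
assertions = _maybe_validate_perm(perm, validate_args=True)
with tf.control_dependencies(assertions):
    x_transposed = tf.transpose(x, perm)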
Example 40
  def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
    """Check whether event_ndims is atleast min_event_ndims."""
    event_ndims = tf.convert_to_tensor(event_ndims, name="event_ndims")
    event_ndims_ = tf.contrib.util.constant_value(event_ndims)
    assertions = []

    if not event_ndims.dtype.is_integer:
      raise ValueError("Expected integer dtype, got dtype {}".format(
          event_ndims.dtype))

    if event_ndims_ is not None:
      if event_ndims.shape.ndims != 0:
        raise ValueError("Expected scalar event_ndims, got shape {}".format(
            event_ndims.shape))
      if min_event_ndims > event_ndims_:
        raise ValueError("event_ndims ({}) must be larger than "
                         "min_event_ndims ({})".format(event_ndims_,
                                                       min_event_ndims))
    elif self.validate_args:
      assertions += [tf.assert_greater_equal(event_ndims, min_event_ndims)]

    if event_ndims.shape.is_fully_defined():
      if event_ndims.shape.ndims != 0:
        raise ValueError("Expected scalar shape, got ndims {}".format(
            event_ndims.shape.ndims))

    elif self.validate_args:
      assertions += [tf.assert_rank(event_ndims, 0, message="Expected scalar.")]
    return assertions
Example 41
 def test_raises_if_rank_is_not_integer_dynamic(self):
     with self.test_session():
         tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
         rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
         with self.assertRaisesRegexp(TypeError, "must be of type <dtype: 'int32'>"):
             with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
                 tf.identity(tensor).eval(feed_dict={rank_tensor: 0.5})
Example 42
 def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
     with self.test_session():
         tensor = tf.constant(1, name="my_tensor")
         desired_rank = 1
         with self.assertRaisesRegexp(ValueError, "fail.*my_tensor.*must have rank 1"):
             with tf.control_dependencies([tf.assert_rank(tensor, desired_rank, message="fail")]):
                 tf.identity(tensor).eval()
Example 43
def _assert_tensor_shape(tensor, shape, display_name):
    assert tf.assert_rank(tensor, len(shape), message='{} has wrong rank'.format(display_name))

    tensor_shape = tensor.get_shape().as_list() if len(shape) else []

    wrong_dimension = [ten_dim for ten_dim, cor_dim in zip(tensor_shape, shape)
                       if cor_dim is not None and ten_dim != cor_dim]
    assert not wrong_dimension, \
        '{} has wrong shape.  Found {}'.format(display_name, tensor_shape)
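A hedged usage sketch for the helper above (the placeholder name and shape are illustrative):

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 28, 28, 3], name='images')
_assert_tensor_shape(images, [None, 28, 28, 3], 'Input Images')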
Example 44
def encode(inputs, cell, reuse=False):
    tf.assert_rank(inputs, 3)

    time_steps = tf.reduce_sum(tf.reduce_sum(inputs, axis=2), axis=1)

    with tf.variable_scope('encoder', reuse=reuse):
        embedded = tf.contrib.layers.fully_connected(
            inputs=inputs,
            num_outputs=cell.output_size,
            activation_fn=tf.sigmoid
        )

        tf.assert_rank(embedded, 3)

        _, final_state_tuple = tf.nn.dynamic_rnn(
            cell,
            embedded,
            sequence_length=time_steps,
            dtype=tf.float32,
        )

        return final_state_tuple
Example 45
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
  """Create an image summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    images: A `Tensor` representing pixel data with shape `[k, w, h, c]`,
      where `k` is the number of images, `w` and `h` are the width and
      height of the images, and `c` is the number of channels, which
      should be 1, 3, or 4. Any of the dimensions may be statically
      unknown (i.e., `None`).
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs` many
      images will be used and the rest silently discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  with tf.name_scope(name), \
       tf.control_dependencies([tf.assert_rank(images, 4),
                                tf.assert_type(images, tf.uint8),
                                tf.assert_non_negative(max_outputs)]):
    limited_images = images[:max_outputs]
    encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                               dtype=tf.string,
                               name='encode_each_image')
    image_shape = tf.shape(images)
    dimensions = tf.stack([tf.as_string(image_shape[1], name='width'),
                           tf.as_string(image_shape[2], name='height')],
                          name='dimensions')
    tensor = tf.concat([dimensions, encoded_images], axis=0)
    return tf.summary.tensor_summary(name='image_summary',
                                     tensor=tensor,
                                     collections=collections,
                                     summary_metadata=summary_metadata)
Example 46
  def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
                                     validate_args, name):
    """Helper to __init__ which ensures override batch/event_shape are valid."""
    if override_shape is None:
      override_shape = []

    override_shape = tf.convert_to_tensor(override_shape, dtype=tf.int32,
                                          name=name)

    if not override_shape.dtype.is_integer:
      raise TypeError("shape override must be an integer")

    override_is_scalar = _is_scalar_from_shape(override_shape)
    if tf.contrib.util.constant_value(override_is_scalar):
      return self._empty

    dynamic_assertions = []

    if override_shape.shape.ndims is not None:
      if override_shape.shape.ndims != 1:
        raise ValueError("shape override must be a vector")
    elif validate_args:
      dynamic_assertions += [tf.assert_rank(
          override_shape, 1,
          message="shape override must be a vector")]

    if tf.contrib.util.constant_value(override_shape) is not None:
      if any(s <= 0 for s in tf.contrib.util.constant_value(override_shape)):
        raise ValueError("shape override must have positive elements")
    elif validate_args:
      dynamic_assertions += [tf.assert_positive(
          override_shape,
          message="shape override must have positive elements")]

    is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),
                                     _logical_not(override_is_scalar))
    if tf.contrib.util.constant_value(is_both_nonscalar) is not None:
      if tf.contrib.util.constant_value(is_both_nonscalar):
        raise ValueError("base distribution not scalar")
    elif validate_args:
      dynamic_assertions += [tf.assert_equal(
          is_both_nonscalar, False,
          message="base distribution not scalar")]

    if not dynamic_assertions:
      return override_shape
    return control_flow_ops.with_dependencies(
        dynamic_assertions, override_shape)
Example 47
 def _assert_non_negative_int32_scalar(self, x):
   """Helper which ensures that input is a non-negative, int32, scalar."""
   x = tf.convert_to_tensor(x, name="x")
   if x.dtype.base_dtype != tf.int32.base_dtype:
     raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, tf.int32))
   x_value_static = tensor_util.constant_value(x)
   if x.get_shape().ndims is not None and x_value_static is not None:
     if x.get_shape().ndims != 0:
       raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                        (x.name, x.get_shape().ndims))
     if x_value_static < 0:
       raise ValueError("%s.value=%d cannot be negative" %
                        (x.name, x_value_static))
     return x
   if self.validate_args:
     x = control_flow_ops.with_dependencies(
         [tf.assert_rank(x, 0),
          tf.assert_non_negative(x)], x)
   return x
Example 48
def test_model_inputs(model_inputs):
    with tf.Graph().as_default():
        input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()

        # Check type
        assert input_data.op.type == 'Placeholder',\
            'Input is not a Placeholder.'
        assert targets.op.type == 'Placeholder',\
            'Targets is not a Placeholder.'
        assert lr.op.type == 'Placeholder',\
            'Learning Rate is not a Placeholder.'
        assert keep_prob.op.type == 'Placeholder', \
            'Keep Probability is not a Placeholder.'
        assert target_sequence_length.op.type == 'Placeholder', \
            'Target Sequence Length is not a Placeholder.'
        assert max_target_sequence_length.op.type == 'Max', \
            'Max Target Sequence Length is not a Max operation.'
        assert source_sequence_length.op.type == 'Placeholder', \
            'Source Sequence Length is not a Placeholder.'

        # Check name
        assert input_data.name == 'input:0',\
            'Input has bad name.  Found name {}'.format(input_data.name)
        assert target_sequence_length.name == 'target_sequence_length:0',\
            'Target Sequence Length has bad name.  Found name {}'.format(target_sequence_length.name)
        assert source_sequence_length.name == 'source_sequence_length:0',\
            'Source Sequence Length has bad name.  Found name {}'.format(source_sequence_length.name)
        assert keep_prob.name == 'keep_prob:0', \
            'Keep Probability has bad name.  Found name {}'.format(keep_prob.name)

        assert tf.assert_rank(input_data, 2, message='Input data has wrong rank')
        assert tf.assert_rank(targets, 2, message='Targets has wrong rank')
        assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank')
        assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank')
        assert tf.assert_rank(target_sequence_length, 1, message='Target Sequence Length has wrong rank')
        assert tf.assert_rank(max_target_sequence_length, 0, message='Max Target Sequence Length has wrong rank')
        assert tf.assert_rank(source_sequence_length, 1, message='Source Sequence Length has wrong rank')

    _print_success_message()
Example 49
def tf_fast_walsh_hadamard(in_x, axis, normalize=True, method='two'):
    '''Compute Fast Walsh-Hadamard transform in tensorflow.

    Args:
        x: tensor of shape (a0, a1, ... aN, L, b0, b1, ..., bN).
            L must be a power of two.

        axis: the "L" axis above, aka the axis over which to do the
            Hadamard transform. All other dimensions are left alone;
            data on those dimension do not interact.

        normalize: Whether to normalize the results such that applying
            the transform twice returns to the original input
            value.

        method:
            'one': Original reshape to [2]*ll version
            'two': Deal with TF "UnimplementedError: SliceOp : Unhandled input dimensions" error...
            'c':   Use C++ FWH Op.

    Returns:
        ret: transformed tensor with same shape as x. Returned
            tensor is always float even if input was int.


    Tests:

    >>> in_x = tf.placeholder('float32')
    >>> in_x
    <tf.Tensor 'Placeholder:0' shape=<unknown> dtype=float32>
    >>> sess = tf.InteractiveSession()


    Wikipedia case:

    >>> x = np.array([1,0,1,0,0,1,1,0])

    >>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False), feed_dict={in_x: x})
    array([ 4.,  2.,  0., -2.,  0.,  2.,  0.,  2.], dtype=float32)

    >>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False, method='two'), feed_dict={in_x: x})
    array([ 4.,  2.,  0., -2.,  0.,  2.,  0.,  2.], dtype=float32)

    >>> sess.run(tf_fast_walsh_hadamard(tf_fast_walsh_hadamard(in_x, 0), 0), feed_dict={in_x: x})
    array([ 1.,  0.,  1.,  0.,  0.,  1.,  1.,  0.], dtype=float32)


    Verify equivalence with numpy approach:

    >>> np.random.seed(123)
    >>> x = np.random.uniform(0, 1, (3, 64, 5))

    >>> h_np = np_fast_walsh_hadamard(x, 1)
    >>> h_tf_ = tf_fast_walsh_hadamard(in_x, 1)
    >>> h_tf2_ = tf_fast_walsh_hadamard(in_x, 1, method='two')
    >>> h_tf = sess.run(h_tf_, feed_dict={in_x: x})
    >>> h_tf2 = sess.run(h_tf2_, feed_dict={in_x: x})

    >>> x.shape
    (3, 64, 5)
    >>> h_np.shape
    (3, 64, 5)
    >>> h_tf.shape
    (3, 64, 5)
    >>> h_tf2.shape
    (3, 64, 5)

    >>> abs(h_np - h_tf).max() < 1e-6
    True
    >>> abs(h_np - h_tf2).max() < 1e-6
    True

    Try a few other shapes / axes

    >>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: x[0]}).shape == x[0].shape
    True
    >>> sess.run(tf_fast_walsh_hadamard(in_x, 1), feed_dict={in_x: x[:, :, 0]}).shape == x[:, :, 0].shape
    True
    >>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: x[0, :, 0]}).shape == x[0, :, 0].shape
    True
    '''

    orig_shape = tf.shape(in_x)
    h_dim = orig_shape[axis]
    h_dim_exp = tf.cast(tf.round(tf.log(tf.to_float(h_dim)) / np.log(2)), 'int32')

    assert_pow2 = tf.assert_equal(h_dim, tf.pow(2, h_dim_exp),
                                  message='hadamard can only be computed over axis with size that is a power of two')

    with tf.control_dependencies([assert_pow2]):
        working_shape_pre = tf.expand_dims(tf.reduce_prod(orig_shape[:axis]), axis=0)        # reduce_prod of empty array is 1
        working_shape_post = tf.expand_dims(tf.reduce_prod(orig_shape[axis + 1:]), axis=0)   # reduce_prod of empty array is 1

    ii = tf.constant(0)
    assert method in ('one', 'two', 'c')
    if method == 'one':
        # expand to working dims [pre, 2, 2, 2, ..., 2, 2, post]
        working_shape_mid = tf.tile([2], [h_dim_exp])

        working_shape = tf.concat((working_shape_pre, working_shape_mid, working_shape_post),
                                  axis=0)

        ret_0 = tf.reshape(in_x, working_shape)

        cond = lambda i, x: tf.less(i, h_dim_exp)
        body = lambda i, x: (tf.add(i, 1), _fast_walsh_hadamard_one_step(x, i + 1))

        ii_final, ret = tf.while_loop(
            cond,
            body,
            [ii, ret_0],
            parallel_iterations=1     # check on this?
        )
    elif method == 'two':
        # Never expand to high rank. Roll dimensions instead. This is
        # needed because backprop through the slice operator only
        # supports up to rank 7 tensors in TF 1.3
        # [pre, 1, 2, h_dim/2, post] ->
        # [pre, 2, 2, h_dim/4, post] -> ...
        # [pre, h_dim/2, 2, 1, post]

        d1 = tf.expand_dims(tf.constant(1), axis=0)
        d2 = tf.expand_dims(tf.constant(2), axis=0)   # always 2
        d3 = tf.expand_dims(h_dim // 2, axis=0)   # integer division keeps the shape dtype int32

        working_shape_0 = tf.concat((working_shape_pre, d1, d2, d3, working_shape_post), axis=0)
        ret_0 = tf.reshape(in_x, working_shape_0)

        cond = lambda i, d1, d3, x: tf.less(i, h_dim_exp)
        body = lambda i, d1, d3, x: (tf.add(i, 1),
                                     d1 * 2,
                                     d3 // 2,   # integer division, as above
                                     _fast_walsh_hadamard_one_step_method2(x, working_shape_pre, d1, d2, d3, working_shape_post))

        ii_final, d1_final, d3_final, ret = tf.while_loop(
            cond,
            body,
            [ii, d1, d3, ret_0],
            parallel_iterations=1     # check on this?
        )
    else:
        # 'c' version
        # Only works for rank-1 (vector) input

        assert False, 'c version disabled for now'

        assert axis == 0, 'axis must be 0 for the c version of tf_fast_walsh_hadamard'
        assert normalize, 'for c version normalize must be True'
        assert_rank1 = tf.assert_rank(in_x, 1)

        with tf.control_dependencies([assert_rank1, assert_pow2]):
            ret = c_fast_walsh_hadamard(in_x)

    if normalize and method != 'c':
        ret = ret / tf.sqrt(tf.to_float(h_dim))

    ret = tf.reshape(ret, orig_shape)

    return ret
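
The doctests above compare against `np_fast_walsh_hadamard`, which is not included in this snippet. A minimal NumPy sketch of such a reference transform (an assumed stand-in written here for comparison, not the original helper) could look like:

import numpy as np

def np_fast_walsh_hadamard(x, axis, normalize=True):
    # Assumed NumPy counterpart of the TF op above: Fast Walsh-Hadamard
    # transform along `axis`, whose length must be a power of two.
    x = np.asarray(x, dtype=float)
    h_dim = x.shape[axis]
    assert h_dim & (h_dim - 1) == 0, 'axis length must be a power of two'

    # Flatten the dimensions before and after the transform axis.
    pre = int(np.prod(x.shape[:axis], dtype=int))
    post = int(np.prod(x.shape[axis + 1:], dtype=int))
    out = x.reshape(pre, h_dim, post)

    # log2(h_dim) butterfly passes of pairwise sums and differences.
    h = 1
    while h < h_dim:
        out = out.reshape(pre, h_dim // (2 * h), 2, h, post)
        a, b = out[:, :, 0], out[:, :, 1]
        out = np.concatenate([a + b, a - b], axis=2).reshape(pre, h_dim, post)
        h *= 2

    if normalize:
        out = out / np.sqrt(h_dim)
    return out.reshape(x.shape)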
Esempio n. 50
0
  def __init__(self,
               mixture_distribution,
               components_distribution,
               validate_args=False,
               allow_nan_stats=True,
               name="MixtureSameFamily"):
    """Construct a `MixtureSameFamily` distribution.

    Args:
      mixture_distribution: `tfp.distributions.Categorical`-like instance.
        Manages the probability of selecting components. The number of
        categories must match the rightmost batch dimension of the
        `components_distribution`. Must have either scalar `batch_shape` or
        `batch_shape` matching `components_distribution.batch_shape[:-1]`.
      components_distribution: `tfp.distributions.Distribution`-like instance.
        Right-most batch dimension indexes components.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: `if not mixture_distribution.dtype.is_integer`.
      ValueError: if mixture_distribution does not have scalar `event_shape`.
      ValueError: if `mixture_distribution.batch_shape` and
        `components_distribution.batch_shape[:-1]` are both fully defined and
        the former is neither scalar nor equal to the latter.
      ValueError: if the number of `mixture_distribution` categories does not
        equal the rightmost batch dimension of `components_distribution`.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      self._mixture_distribution = mixture_distribution
      self._components_distribution = components_distribution
      self._runtime_assertions = []

      s = components_distribution.event_shape_tensor()
      self._event_ndims = (
          s.shape[0].value if s.shape.with_rank_at_least(1)[0].value is not None
          else tf.shape(s)[0])

      if not mixture_distribution.dtype.is_integer:
        raise ValueError(
            "`mixture_distribution.dtype` ({}) is not over integers".format(
                mixture_distribution.dtype.name))

      if (mixture_distribution.event_shape.ndims is not None
          and mixture_distribution.event_shape.ndims != 0):
        raise ValueError("`mixture_distribution` must have scalar `event_dim`s")
      elif validate_args:
        self._runtime_assertions += [
            tf.assert_rank(
                mixture_distribution.event_shape_tensor(), 0,
                message="`mixture_distribution` must have scalar `event_dim`s"),
        ]

      mdbs = mixture_distribution.batch_shape
      cdbs = components_distribution.batch_shape.with_rank_at_least(1)[:-1]
      if mdbs.is_fully_defined() and cdbs.is_fully_defined():
        if mdbs.ndims != 0 and mdbs != cdbs:
          raise ValueError(
              "`mixture_distribution.batch_shape` (`{}`) is not "
              "compatible with `components_distribution.batch_shape` "
              "(`{}`)".format(mdbs.as_list(), cdbs.as_list()))
      elif validate_args:
        mdbs = mixture_distribution.batch_shape_tensor()
        cdbs = components_distribution.batch_shape_tensor()[:-1]
        self._runtime_assertions += [
            tf.assert_equal(
                distribution_utils.pick_vector(
                    mixture_distribution.is_scalar_batch(), cdbs, mdbs),
                cdbs,
                message=(
                    "`mixture_distribution.batch_shape` is not "
                    "compatible with `components_distribution.batch_shape`"))]

      km = mixture_distribution.logits.shape.with_rank_at_least(1)[-1].value
      kc = components_distribution.batch_shape.with_rank_at_least(1)[-1].value
      if km is not None and kc is not None and km != kc:
        raise ValueError("`mixture_distribution components` ({}) does not "
                         "equal `components_distribution.batch_shape[-1]` "
                         "({})".format(km, kc))
      elif validate_args:
        km = tf.shape(mixture_distribution.logits)[-1]
        kc = components_distribution.batch_shape_tensor()[-1]
        self._runtime_assertions += [
            tf.assert_equal(
                km, kc,
                message=("`mixture_distribution components` does not equal "
                         "`components_distribution.batch_shape[-1:]`")),
        ]
      elif km is None:
        km = tf.shape(mixture_distribution.logits)[-1]

      self._num_components = km

      super(MixtureSameFamily, self).__init__(
          dtype=self._components_distribution.dtype,
          reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=(
              self._mixture_distribution._graph_parents  # pylint: disable=protected-access
              + self._components_distribution._graph_parents),  # pylint: disable=protected-access
          name=name)
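
For context, a minimal construction that satisfies the batch-shape requirements described above might look like the following sketch (assuming TensorFlow Probability is importable; not part of the original source):

import tensorflow_probability as tfp
tfd = tfp.distributions

# Three mixture categories matching the rightmost batch dimension (3) of the
# component Normals, as required by the constructor checks above.
gm = tfd.MixtureSameFamily(
    mixture_distribution=tfd.Categorical(probs=[0.3, 0.4, 0.3]),
    components_distribution=tfd.Normal(loc=[-1.0, 0.0, 1.0],
                                       scale=[0.1, 0.5, 0.1]))

samples = gm.sample(5)          # Tensor of shape [5]
log_prob = gm.log_prob(0.25)    # scalar Tensor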
Esempio n. 51
0
def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               name=None):
  """Compute the `q`-th percentile of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
  way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.


  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  percentile(x, q=30.)
  ==> 2.0

  # Get 30th percentile with 'lower' interpolation
  x = [1., 2., 3., 4.]
  percentile(x, q=30., interpolation='lower')
  ==> 1.0

  # Get 100th percentile (maximum).  By default, this is computed over every dim
  x = [[1., 2.],
       [3., 4.]]
  percentile(x, q=100.)
  ==> 4.0

  # Treat the leading dim as indexing samples, and find the 100th quantile (max)
  # over all such samples.
  x = [[1., 2.],
       [3., 4.]]
  percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  Compare to `numpy.percentile`.

  Args:
    x:  Floating point `N-D` `Tensor` with `N > 0`.  If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q:  Scalar `Tensor` in `[0, 100]`. The percentile.
    axis:  Optional `0-D` or `1-D` integer `Tensor` with constant values.
      The axis that hold independent samples over which to return the desired
      percentile.  If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation : {"lower", "higher", "nearest"}.  Default: "nearest"
      This optional parameter specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
    keep_dims:  Python `bool`. If `True`, the last dimension is kept with size 1.
      If `False`, the last dimension is removed from the output shape.
    validate_args:  Whether to add runtime checks of argument validity.
      If False, and arguments are incorrect, correct behavior is not guaranteed.
    name:  A Python string name to give this `Op`.  Default is "percentile"

  Returns:
    A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
      `axis` is `None`, a scalar.

  Raises:
    ValueError:  If argument 'interpolation' is not an allowed type.
  """
  name = name or "percentile"
  allowed_interpolations = {"lower", "higher", "nearest"}

  if interpolation is None:
    interpolation = "nearest"
  else:
    if interpolation not in allowed_interpolations:
      raise ValueError("Argument 'interpolation' must be in %s.  Found %s" %
                       (allowed_interpolations, interpolation))

  with tf.name_scope(name, [x, q]):
    x = tf.convert_to_tensor(x, name="x")
    # Double is needed here and below, else we get the wrong index if the array
    # is huge along axis.
    q = tf.to_double(q, name="q")
    _get_static_ndims(q, expect_ndims=0)

    if validate_args:
      q = control_flow_ops.with_dependencies([
          tf.assert_rank(q, 0),
          tf.assert_greater_equal(q, tf.to_double(0.)),
          tf.assert_less_equal(q, tf.to_double(100.))
      ], q)

    if axis is None:
      y = tf.reshape(x, [-1])
    else:
      axis = tf.convert_to_tensor(axis, name="axis")
      tf.assert_integer(axis)
      axis_ndims = _get_static_ndims(
          axis, expect_static=True, expect_ndims_no_more_than=1)
      axis_const = tensor_util.constant_value(axis)
      if axis_const is None:
        raise ValueError(
            "Expected argument 'axis' to be statically available.  Found: %s" %
            axis)
      axis = axis_const
      if axis_ndims == 0:
        axis = [axis]
      axis = [int(a) for a in axis]
      x_ndims = _get_static_ndims(
          x, expect_static=True, expect_ndims_at_least=1)
      axis = _make_static_axis_non_negative(axis, x_ndims)
      y = _move_dims_to_flat_end(x, axis, x_ndims)

    frac_at_q_or_above = 1. - q / 100.
    d = tf.to_double(tf.shape(y)[-1])

    # Note: `_sort_tensor` (not shown here) is assumed to sort in descending
    # order, so the index counts down from the top; hence 'lower' uses ceil
    # and 'higher' uses floor.
    if interpolation == "lower":
      index = tf.ceil((d - 1) * frac_at_q_or_above)
    elif interpolation == "higher":
      index = tf.floor((d - 1) * frac_at_q_or_above)
    elif interpolation == "nearest":
      index = tf.round((d - 1) * frac_at_q_or_above)

    # If d is gigantic, then we would have d == d - 1, even in double... So
    # let's use max/min to avoid out of bounds errors.
    d = tf.shape(y)[-1]
    # d - 1 will be distinct from d in int32.
    index = tf.clip_by_value(tf.to_int32(index), 0, d - 1)

    # Sort everything, not just the top 'k' entries, which allows multiple calls
    # to sort only once (under the hood) and use CSE.
    sorted_y = _sort_tensor(y)

    # result.shape = B
    result = sorted_y[..., index]
    result.set_shape(y.get_shape()[:-1])

    if keep_dims:
      if axis is None:
        # ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
        ones_vec = tf.ones(shape=[_get_best_effort_ndims(x)], dtype=tf.int32)
        result *= tf.ones(ones_vec, dtype=x.dtype)
      else:
        result = _insert_back_keep_dims(result, axis)

    return result
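
As a rough sanity check of the "Compare to `numpy.percentile`" note above, the following sketch (an illustration assuming a TF 1.x session and that `percentile` is in scope) contrasts the two:

import numpy as np
import tensorflow as tf

x_np = np.random.uniform(size=100).astype(np.float64)

with tf.Session() as sess:
    for interp in ('lower', 'higher', 'nearest'):
        tf_val = sess.run(percentile(x_np, q=30., interpolation=interp))
        np_val = np.percentile(x_np, 30., interpolation=interp)
        # 'lower' and 'higher' should match numpy exactly; 'nearest' may
        # differ only when the fractional index lands exactly on a .5 tie.
        print(interp, tf_val, np_val)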
Esempio n. 52
0
 def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
   with self.test_session():
     tensor = tf.constant([1, 2], name="my_tensor")
     desired_rank = 1
     with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
       tf.identity(tensor).eval()
Esempio n. 53
0
 def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
   with self.test_session():
     tensor = tf.placeholder(tf.float32, name="my_tensor")
     desired_rank = 1
     with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
       tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
Esempio n. 54
0
 def test_raises_if_rank_is_not_scalar_static(self):
   with self.test_session():
     tensor = tf.constant([1, 2], name="my_tensor")
     with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
       tf.assert_rank(tensor, np.array([], dtype=np.int32))
Esempio n. 55
0
 def test_raises_if_rank_is_not_integer_static(self):
   with self.test_session():
     tensor = tf.constant([1, 2], name="my_tensor")
     with self.assertRaisesRegexp(ValueError,
                                  "must be of type <dtype: 'int32'>"):
       tf.assert_rank(tensor, .5)
Esempio n. 56
0
def op(name,
       audio,
       sample_rate,
       labels=None,
       max_outputs=3,
       encoding=None,
       display_name=None,
       description=None,
       collections=None):
  """Create an audio summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    audio: A `Tensor` representing audio data with shape `[k, t, c]`,
      where `k` is the number of audio clips, `t` is the number of
      frames, and `c` is the number of channels. Elements should be
      floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
      be statically unknown (i.e., `None`).
    sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
      sample rate, in Hz. Must be positive.
    labels: Optional `string` `Tensor`, a vector whose length is the
      first dimension of `audio`, where `labels[i]` contains arbitrary
      textual information about `audio[i]`. (For instance, this could be
      some text that a TTS system was supposed to produce.) Markdown is
      supported. Contents should be UTF-8.
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many audio clips will be emitted at each step. When more than
      `max_outputs` many clips are provided, the first `max_outputs`
      many clips will be used and the rest silently discarded.
    encoding: A constant `str` (not string tensor) indicating the
      desired encoding. You can choose any format you like, as long as
      it's "wav". Please see the "API compatibility note" below.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.

  API compatibility note: The default value of the `encoding`
  argument is _not_ guaranteed to remain unchanged across TensorBoard
  versions. In the future, we will by default encode as FLAC instead of
  as WAV. If the specific format is important to you, please provide a
  file format explicitly.
  """

  if display_name is None:
    display_name = name
  if encoding is None:
    encoding = 'wav'

  if encoding == 'wav':
    encoding = metadata.Encoding.Value('WAV')
    encoder = functools.partial(tf.contrib.ffmpeg.encode_audio,
                                samples_per_second=sample_rate,
                                file_format='wav')
  else:
    raise ValueError('Unknown encoding: %r' % encoding)

  with tf.name_scope(name), \
       tf.control_dependencies([tf.assert_rank(audio, 3)]):
    limited_audio = audio[:max_outputs]
    encoded_audio = tf.map_fn(encoder, limited_audio,
                              dtype=tf.string,
                              name='encode_each_audio')
    if labels is None:
      limited_labels = tf.tile([''], tf.shape(limited_audio)[:1])
    else:
      limited_labels = labels[:max_outputs]
    tensor = tf.transpose(tf.stack([encoded_audio, limited_labels]))
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name,
        description=description,
        encoding=encoding)
    return tf.summary.tensor_summary(name='audio_summary',
                                     tensor=tensor,
                                     collections=collections,
                                     summary_metadata=summary_metadata)
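
A minimal usage sketch for the summary op above (an illustration assuming TF 1.x with `tf.contrib.ffmpeg` available at runtime, and that this module's `op` is in scope):

import tensorflow as tf

# Two one-second mono clips at 16 kHz, values in [-1.0, 1.0].
audio = tf.random_uniform([2, 16000, 1], minval=-1.0, maxval=1.0)
summary_op = op('speech_samples',
                audio,
                sample_rate=16000,
                labels=tf.constant(['clip one', 'clip two']),
                description='Randomly generated audio clips.')

with tf.Session() as sess:
    serialized = sess.run(summary_op)  # serialized Summary protobuf bytes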