Example #1
    def __init__(self, data, Y_var):
        super().__init__(active_dims=[0])
        self.Y_var = Y_var
        self.num_genes = data.m_obs.shape[1]
        #         l_affine = tfb.AffineScalar(shift=tf.cast(1., tf.float64),
        #                             scale=tf.cast(4-1., tf.float64))
        #         l_sigmoid = tfb.Sigmoid()
        #         l_logistic = tfb.Chain([l_affine, l_sigmoid])

        self.lengthscale = gpflow.Parameter(1.414, transform=positive())

        D_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(1.5 - 0.1, tf.float64))
        D_sigmoid = tfb.Sigmoid()
        D_logistic = tfb.Chain([D_affine, D_sigmoid])
        S_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(4. - 0.1, tf.float64))
        S_sigmoid = tfb.Sigmoid()
        S_logistic = tfb.Chain([S_affine, S_sigmoid])

        self.D = gpflow.Parameter(np.random.uniform(0.9, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.D[3].trainable = False
        #         self.D[3].assign(0.8)
        self.S = gpflow.Parameter(np.random.uniform(1, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.S[3].trainable = False
        #         self.S[3].assign(1)
        self.kervar = gpflow.Parameter(np.float64(1), transform=positive())
        self.noise_term = gpflow.Parameter(
            0.1353 * tf.ones(self.num_genes, dtype='float64'),
            transform=positive())
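
The commented-out l_logistic lines above and the D_logistic/S_logistic chains all follow one pattern: tfb.Chain applies the Sigmoid first and the affine map second, so the forward transform squashes the real line into (shift, shift + scale). A minimal, hedged sketch of attaching such a transform to a gpflow.Parameter (the bounds and shapes below are illustrative, not taken from the example; Shift/Scale are used because tfb.AffineScalar is deprecated in newer TFP releases):

import numpy as np
import tensorflow as tf
import gpflow
from tensorflow_probability import bijectors as tfb

# Bound a parameter to (0.1, 1.5): forward(x) = 0.1 + (1.5 - 0.1) * sigmoid(x).
affine = tfb.Shift(tf.cast(0.1, tf.float64))(tfb.Scale(tf.cast(1.5 - 0.1, tf.float64)))
logistic = tfb.Chain([affine, tfb.Sigmoid()])

D = gpflow.Parameter(np.random.uniform(0.9, 1.0, 5), transform=logistic, dtype=tf.float64)
print(D.numpy())                          # constrained values, all inside (0.1, 1.5)
print(D.unconstrained_variable.numpy())   # free values on the real line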
Example #2
    def __init__(self,
                 active_dims=[0],
                 gap_decay=0.1,
                 match_decay=0.9,
                 max_subsequence_length=3,
                 max_occurence_length=10,
                 alphabet=[],
                 maxlen=0,
                 normalize=True,
                 batch_size=1000):
        super().__init__(active_dims=active_dims)
        # constrain kernel params to between 0 and 1
        self.logistic_gap = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.logistic_match = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.gap_decay_param = Parameter(gap_decay,
                                         transform=self.logistic_gap,
                                         name="gap_decay")
        self.match_decay_param = Parameter(match_decay,
                                           transform=self.logistic_match,
                                           name="match_decay")
        self.max_subsequence_length = max_subsequence_length
        self.max_occurence_length = max_occurence_length
        self.alphabet = alphabet
        self.maxlen = maxlen
        self.normalize = normalize
        self.batch_size = batch_size
        self.symmetric = False

        # We use copies of the kernel params to avoid building an expensive computation graph;
        # instead we calculate gradients efficiently using dynamic programming.
        # These copies are refreshed at every call to K and K_diag (to check whether the parameters have been updated).
        self.match_decay = self.match_decay_param.numpy()
        self.gap_decay = self.gap_decay_param.numpy()
        self.match_decay_unconstrained = self.match_decay_param.unconstrained_variable.numpy()
        self.gap_decay_unconstrained = self.gap_decay_param.unconstrained_variable.numpy()

        # initialize helpful construction matrices, to be lazily computed once needed
        self.D = None
        self.dD_dgap = None

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0,
                                         len(alphabet) + 1)),
            ),
            default_value=0)
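
The StaticHashTable at the end maps each symbol of the alphabet to a positive integer id, with 0 reserved for the "PAD" token (and used as the default for unknown symbols). A small standalone sketch of that encoding step, using an illustrative alphabet:

import tensorflow as tf

alphabet = ["A", "C", "G", "T"]  # illustrative alphabet
table = tf.lookup.StaticHashTable(
    initializer=tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["PAD"] + alphabet),
        values=tf.constant(list(range(0, len(alphabet) + 1))),
    ),
    default_value=0)

# Encode a padded batch of split strings; anything not in the table maps to 0 ("PAD").
strings = tf.constant([["A", "C", "G", "PAD"], ["T", "T", "C", "A"]])
print(table.lookup(strings).numpy())  # integer ids, e.g. [[1 2 3 0] [4 4 2 1]]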
Example #3
    def __init__(self,
                 m=1,
                 active_dims=[0],
                 gap_decay=0.1,
                 match_decay=0.9,
                 max_subsequence_length=3,
                 alphabet=[],
                 maxlen=0):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        logistic_gap = tfb.Chain([
            tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1,
                                                                tf.float64))),
            tfb.Sigmoid()
        ])
        logistic_match = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.gap_decay = Parameter(gap_decay,
                                   transform=logistic_gap,
                                   name="gap_decay")
        self.match_decay = Parameter(match_decay,
                                     transform=logistic_match,
                                     name="match_decay")

        # prepare order coefs params
        order_coefs = tf.ones(max_subsequence_length)
        self.order_coefs = Parameter(order_coefs,
                                     transform=positive(),
                                     name="order_coefs")

        # get split weights
        self.m = m
        split_weights = tf.ones(2 * self.m - 1)
        self.split_weights = Parameter(split_weights,
                                       transform=positive(),
                                       name="order_coefs")

        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.cast(tf.math.ceil(maxlen / self.m), dtype=tf.int32)
        self.full_maxlen = tf.constant(maxlen)
        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0,
                                         len(alphabet) + 1)),
            ),
            default_value=0)
Example #4
    def _init_distribution(conditions):
        loc, scale = conditions["loc"], conditions["scale"]
        return tfd.TransformedDistribution(
            distribution=tfd.Normal(loc=loc, scale=scale),
            bijector=bij.Sigmoid(),
            name="LogitNormal",
        )
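
Pushing a Normal through tfb.Sigmoid as above gives a logit-normal distribution on (0, 1). A minimal usage sketch with illustrative loc/scale values:

import tensorflow_probability as tfp
tfd, tfb = tfp.distributions, tfp.bijectors

logit_normal = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    bijector=tfb.Sigmoid(),
    name="LogitNormal")

x = logit_normal.sample(5)               # samples lie strictly inside (0, 1)
print(x.numpy())
print(logit_normal.log_prob(x).numpy())  # includes the log-det-Jacobian of the sigmoid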
Example #5
    def _build(self, inputs):
        mean, covariance, scale = self.create_mean_n_cov_layers(inputs)

        # TODO: is this the kind of regularization we want? I think it makes sense.
        self.set_contractive_regularizer(mean, covariance,
                                        self._contractive_regularizer_inputs,
                                        self._contractive_regularizer_tuple,
                                        self._contractive_collection_network_str)
        
        gaussian = tfd.Normal(loc=mean, scale=scale)
        
        sigmoid_bijector = tfb.Sigmoid()
        logitnormal = tfd.TransformedDistribution(distribution=gaussian, bijector=sigmoid_bijector)
        
        # add a reconstruction_node method (needed to provide some sort of mean or median, to get reconstructions without sampling)
        def reconstruction_node(self):
            # there is no closed-form mean for the LogitNormalDiagonal distribution, so use the sigmoid of the underlying Gaussian mean
            return sigmoid_bijector.forward(gaussian.mean())
        
        logitnormal.reconstruction_node = types.MethodType(reconstruction_node, logitnormal)

        clip_value = self._clip_value
         
        # make sure to rescale (clip) the input for log_prob
        def log_prob(self, x, name='log_prob', **kwargs):
            # kind of dirty, I know; it is used to avoid recursion (Luigi)
            return self._call_log_prob(tf_clip(x, low=-1.0 + clip_value, high=1.0 - clip_value), name=name, **kwargs)
        
        logitnormal.log_prob = types.MethodType(log_prob, logitnormal)

        
        return logitnormal
Example #6
def bounded_parameter(low, high, param):
    """Make a gpflow Parameter with optimization bounds (low, high)."""
    affine = tfb.AffineScalar(shift=tf.cast(low, tf.float64),
                              scale=tf.cast(high - low, tf.float64))
    sigmoid = tfb.Sigmoid()
    logistic = tfb.Chain([affine, sigmoid])
    parameter = gpf.Parameter(param, transform=logistic, dtype=tf.float64)
    return parameter
Example #7
def bounded_parameter(low, high, param):
    """Make parameter tfp Parameter with optimization bounds."""

    sigmoid = tfb.Sigmoid(low=tf.cast(low, tf.float64),
                          high=tf.cast(high, tf.float64),
                          name='sigmoid')
    parameter = gpf.Parameter(param, transform=sigmoid, dtype=tf.float64)
    return parameter
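
Examples #6 and #7 express the same bound in two ways: an explicit Chain([AffineScalar, Sigmoid]) versus the low/high arguments that tfb.Sigmoid accepts in newer TFP releases. A hedged usage sketch of the helper from example #7 (the bounds and initial value below are illustrative):

import tensorflow as tf
import gpflow as gpf
from tensorflow_probability import bijectors as tfb

def bounded_parameter(low, high, param):
    """Make a gpflow Parameter constrained to (low, high)."""
    sigmoid = tfb.Sigmoid(low=tf.cast(low, tf.float64),
                          high=tf.cast(high, tf.float64),
                          name='sigmoid')
    return gpf.Parameter(param, transform=sigmoid, dtype=tf.float64)

lengthscale = bounded_parameter(0.1, 10.0, 1.0)
print(lengthscale.numpy())                          # 1.0, the constrained value
print(lengthscale.unconstrained_variable.numpy())   # its free representation on the real line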
Example #8
    def __init__(self, *args, **kwargs):
        """Initialize UnitContinuousRV.

        Developer Note
        --------------
            The inverse of the sigmoid bijector is the logodds bijector.
        """
        super().__init__(*args, **kwargs)
        self._transformed_distribution = tfd.TransformedDistribution(
            distribution=self._distribution,
            bijector=bijectors.Invert(bijectors.Sigmoid()))
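
The Developer Note above can be verified directly: inverting the Sigmoid bijector yields the log-odds (logit) transform. A small check with illustrative probabilities:

import numpy as np
import tensorflow as tf
from tensorflow_probability import bijectors as tfb

logodds = tfb.Invert(tfb.Sigmoid())
p = tf.constant([0.1, 0.5, 0.9], dtype=tf.float64)

print(logodds.forward(p).numpy())             # log(p / (1 - p))
print(np.log(p.numpy() / (1. - p.numpy())))   # same values, up to floating point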
Example #9
    def test_additional_event_ndims(self):
        bij = tfb.Sigmoid(low=tf.zeros([2]), high=tf.ones([3, 2]))
        self.assertAllEqual(batch_shape_lib.inferred_batch_shape(bij), [3, 2])
        self.assertAllEqual(batch_shape_lib.inferred_batch_shape_tensor(bij),
                            [3, 2])
        self.assertAllEqual(
            batch_shape_lib.inferred_batch_shape(bij,
                                                 additional_event_ndims=1),
            [3])
        self.assertAllEqual(
            batch_shape_lib.inferred_batch_shape_tensor(
                bij, additional_event_ndims=1), [3])
Example #10
    def test_bijector_event_ndims(self):
        bij = tfb.Sigmoid(low=tf.zeros([2]), high=tf.ones([3, 2]))
        self.assertAllEqual(batch_shape_lib.inferred_batch_shape(bij), [3, 2])
        self.assertAllEqual(batch_shape_lib.inferred_batch_shape_tensor(bij),
                            [3, 2])
        self.assertAllEqual(
            batch_shape_lib.inferred_batch_shape(bij,
                                                 bijector_x_event_ndims=1),
            [3])
        self.assertAllEqual(
            batch_shape_lib.inferred_batch_shape_tensor(
                bij, bijector_x_event_ndims=1), [3])

        # Verify that we don't pass Nones through to component
        # `experimental_batch_shape(x_event_ndims=None)` calls, where they'd be
        # incorrectly interpreted as `x_event_ndims=forward_min_event_ndims`.
        joint_bij = tfb.JointMap([bij, bij])
        self.assertAllEqual(
            batch_shape_lib.inferred_batch_shape(
                joint_bij, bijector_x_event_ndims=[None, None]),
            tf.TensorShape(None))
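
The broadcasting behind these assertions can also be seen through the bijector's public experimental_batch_shape method (the one referenced in the comment above): low with shape [2] broadcasts against high with shape [3, 2]. A short illustrative check:

import tensorflow as tf
from tensorflow_probability import bijectors as tfb

bij = tfb.Sigmoid(low=tf.zeros([2]), high=tf.ones([3, 2]))
print(bij.experimental_batch_shape())                 # [3, 2]: parameter shapes broadcast together
print(bij.experimental_batch_shape(x_event_ndims=1))  # [3]: one trailing dim treated as the event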
Example #11
    def __init__(self):
        transform = tfb.Sigmoid()
        super().__init__(transform)
Example #12
    def __init__(self,active_dims=[0],decay=0.1,max_subsequence_length=3,
                 alphabet = [], maxlen=0, batch_size=100):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        self.logistic = tfb.Chain([tfb.Shift(tf.cast(0,tf.float64))(tfb.Scale(tf.cast(1,tf.float64))),tfb.Sigmoid()])
        self.decay_param = Parameter(decay, transform=self.logistic, name="decay")

        # We use copies of the kernel params to avoid building an expensive computation graph;
        # instead we calculate gradients efficiently using dynamic programming.
        # These copies are refreshed at every call to K and K_diag (to check whether the parameters have been updated).
        self.decay = self.decay_param.numpy()

        self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()

        self.order_coefs=tf.ones(max_subsequence_length,dtype=tf.float64)
        
        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet =  tf.constant(alphabet)
        self.alphabet_size=tf.shape(self.alphabet)[0]
        self.maxlen =  tf.constant(maxlen)
        self.batch_size = tf.constant(batch_size)

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"]+alphabet),
                values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)

        # initialize helpful construction matrices, to be lazily computed once needed
        self.D = None
        self.dD_dgap = None
Example #13
        tf.ones([], dtype, name='alpha') * 0.5,
        tf.fill(ncomponents - 1, value=np.float64(0.5), name='v')
    ]


# Create bijectors to transform between unconstrained and constrained parameter space.
# For example, if X ~ Exponential(theta), then X is constrained to be positive. A transformation
# that puts X onto an unconstrained space is Y = log(X). In that case, the bijector used
# should be the **inverse-transform**, which is exp(.) (i.e. so that X = exp(Y)).
#
# NOTE: Define the inverse-transforms for each parameter in sequence.
bijectors = [
    tfb.Identity(),  # mu
    tfb.Exp(),  # sigma
    tfb.Exp(),  # alpha
    tfb.Sigmoid()  # v
]
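
A one-line check of the convention described in the comment above: each listed bijector maps an unconstrained value back to its constrained space, e.g. tfb.Exp() for sigma and tfb.Sigmoid() for v (values are illustrative):

import tensorflow as tf
from tensorflow_probability import bijectors as tfb

y = tf.constant(-1.2, dtype=tf.float64)    # an unconstrained value, e.g. one HMC state component
print(tfb.Exp().forward(y).numpy())        # positive, suitable for sigma
print(tfb.Sigmoid().forward(y).numpy())    # in (0, 1), suitable for v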

# ## HMC

# In[16]:

# Define HMC sampler.


@tf.function(autograph=False, experimental_compile=True)
def hmc_sample(num_results,
               num_burnin_steps,
               current_state,
               step_size=0.01,
               num_leapfrog_steps=100):
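
Example #13 stops at the sampler's signature. Below is a hedged, self-contained sketch of one common wiring, where a TransformedTransitionKernel applies bijectors like those above so that HMC runs in unconstrained space; the toy target and all values here are illustrative, not taken from the original code:

import tensorflow as tf
import tensorflow_probability as tfp
tfd, tfb = tfp.distributions, tfp.bijectors

def target_log_prob_fn(sigma):
    # Toy target with a single positive parameter.
    return tfd.HalfNormal(scale=1.).log_prob(sigma)

hmc = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target_log_prob_fn,
    step_size=0.01,
    num_leapfrog_steps=100)
kernel = tfp.mcmc.TransformedTransitionKernel(inner_kernel=hmc, bijector=[tfb.Exp()])

samples = tfp.mcmc.sample_chain(
    num_results=200,
    num_burnin_steps=100,
    current_state=[tf.constant(1.)],
    kernel=kernel,
    trace_fn=None)
print(samples[0].shape)  # (200,) positive draws for sigma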
Example #14
    def __init__(self,rank=1,active_dims=[0],gap_decay=0.1, match_decay=0.9,max_subsequence_length=3,
                 alphabet = [], maxlen=0):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        logistic_gap = tfb.Chain([tfb.Shift(tf.cast(0,tf.float64))(tfb.Scale(tf.cast(1,tf.float64))),tfb.Sigmoid()])
        logistic_match = tfb.Chain([tfb.AffineScalar(shift=tf.cast(0,tf.float64),scale=tf.cast(1,tf.float64)),tfb.Sigmoid()])
        self.gap_decay = Parameter(gap_decay, transform=logistic_gap, name="gap_decay")
        self.match_decay = Parameter(match_decay, transform=logistic_match, name="match_decay")

        # prepare similarity matrix parameters
        self.rank=rank
        W = 0.1 * tf.ones((len(alphabet), self.rank))
        kappa = tf.ones(len(alphabet))

        self.W = Parameter(W,name="W")
        self.kappa = Parameter(kappa, transform=positive(),name="kappa")
  
        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet =  tf.constant(alphabet)
        self.alphabet_size=tf.shape(self.alphabet)[0]
        self.maxlen =  tf.constant(maxlen)

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"]+alphabet),
                values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)
Example #15
class UnitContinuousRV(RandomVariable):
    _bijector = bijectors.Sigmoid()
Example #16
    def __init__(self, lower_limit, upper_limit):
        transform = tfb.Sigmoid(low=lower_limit, high=upper_limit)
        super().__init__(transform)
Example #17
                 mix_T(gamma_C, gamma_T, eta_C, eta_T, p, loc, tf.sqrt(
                     sigma_sq), dtype(neg_inf)), n_T)))


# TEST
n_C = 4
n_T = 6
K = 3
model = create_model(n_C=n_C, n_T=n_T, K=K)
s = model.sample()
s
model.log_prob(s)
# model.sample(2)  # FIXME

bijectors = [
    tfb.Sigmoid(),  # p
    tfb.Sigmoid(),  # gamma_C
    tfb.Sigmoid(),  # gamma_T
    tfb.SoftmaxCentered(),  # eta_C
    tfb.SoftmaxCentered(),  # eta_T
    tfb.Identity(),  # loc
    tfb.Exp()  # sigma_sq
]

d1 = util.read_data('../../data/TGFBR2/cytof-data/donor1.csv', 'CD16', 2000, 2)
model = create_model(n_C=d1['y_C'].shape[0], n_T=d1['y_T'].shape[0], K=5)
_ = model.sample()


def target_log_prob_fn(p, gamma_C, gamma_T, eta_C, eta_T, loc, sigma_sq):
    return model.log_prob(p=p,
Example #18
    def __init__(self):
        self._transform = tfb.Sigmoid()