Example #1
    def __init__(self, rank=1, active_dims=[0], gap_decay=0.1, match_decay=0.9, max_subsequence_length=3,
                 alphabet=[], maxlen=0):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        logistic_gap = tfb.Chain([tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1, tf.float64))), tfb.Sigmoid()])
        logistic_match = tfb.Chain([tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1, tf.float64))), tfb.Sigmoid()])
        self.gap_decay = Parameter(gap_decay, transform=logistic_gap, name="gap_decay")
        self.match_decay = Parameter(match_decay, transform=logistic_match, name="match_decay")

        # prepare similarity matrix parameters
        self.rank = rank
        W = 0.1 * tf.ones((len(alphabet), self.rank))
        kappa = tf.ones(len(alphabet))

        self.W = Parameter(W, name="W")
        self.kappa = Parameter(kappa, transform=positive(), name="kappa")
  
        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.constant(maxlen)

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"]+alphabet),
                values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)
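A minimal sketch of what the sigmoid constraint above does (my addition; it assumes gpflow's Parameter and TFP, which the snippet appears to use):

import tensorflow as tf
import tensorflow_probability as tfp
from gpflow import Parameter
tfb = tfp.bijectors

# Shift(0) composed with Scale(1) is an identity here, so the transform reduces to a sigmoid
logistic = tfb.Chain([tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1, tf.float64))), tfb.Sigmoid()])
p = Parameter(0.9, transform=logistic, name="match_decay")
print(p.numpy())                         # 0.9: the constrained value, always in (0, 1)
print(p.unconstrained_variable.numpy())  # ~2.197 (= logit(0.9)): what the optimizer actually updates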
Example #2
    def __init__(self, data, Y_var):
        super().__init__(active_dims=[0])
        self.Y_var = Y_var
        self.num_genes = data.m_obs.shape[1]
        #         l_affine = tfb.AffineScalar(shift=tf.cast(1., tf.float64),
        #                             scale=tf.cast(4-1., tf.float64))
        #         l_sigmoid = tfb.Sigmoid()
        #         l_logistic = tfb.Chain([l_affine, l_sigmoid])

        self.lengthscale = gpflow.Parameter(1.414, transform=positive())

        D_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(1.5 - 0.1, tf.float64))
        D_sigmoid = tfb.Sigmoid()
        D_logistic = tfb.Chain([D_affine, D_sigmoid])
        S_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(4. - 0.1, tf.float64))
        S_sigmoid = tfb.Sigmoid()
        S_logistic = tfb.Chain([S_affine, S_sigmoid])

        self.D = gpflow.Parameter(np.random.uniform(0.9, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.D[3].trainable = False
        #         self.D[3].assign(0.8)
        self.S = gpflow.Parameter(np.random.uniform(1, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.S[3].trainable = False
        #         self.S[3].assign(1)
        self.kervar = gpflow.Parameter(np.float64(1), transform=positive())
        self.noise_term = gpflow.Parameter(
            0.1353 * tf.ones(self.num_genes, dtype='float64'),
            transform=positive())
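The commented-out transform above is the standard recipe for bounding a parameter to an interval (low, high): squash to (0, 1) with a sigmoid, then stretch and shift. A minimal sketch (my addition; AffineScalar is deprecated in recent TFP, so this uses the equivalent Shift/Scale composition, with bounds mirroring the commented-out code):

import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors

low, high = 1.0, 4.0
bounded = tfb.Chain([
    tfb.Shift(tf.cast(low, tf.float64))(tfb.Scale(tf.cast(high - low, tf.float64))),
    tfb.Sigmoid(),
])
print(bounded.forward(tf.cast(0.0, tf.float64)))  # sigmoid(0) = 0.5 -> 1 + 3 * 0.5 = 2.5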
Example #3
    def __init__(self,
                 active_dims=[0],
                 gap_decay=0.1,
                 match_decay=0.9,
                 max_subsequence_length=3,
                 max_occurence_length=10,
                 alphabet=[],
                 maxlen=0,
                 normalize=True,
                 batch_size=1000):
        super().__init__(active_dims=active_dims)
        # constrain kernel params to between 0 and 1
        self.logistic_gap = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.logistic_match = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.gap_decay_param = Parameter(gap_decay,
                                         transform=self.logistic_gap,
                                         name="gap_decay")
        self.match_decay_param = Parameter(match_decay,
                                           transform=self.logistic_match,
                                           name="match_decay")
        self.max_subsequence_length = max_subsequence_length
        self.max_occurence_length = max_occurence_length
        self.alphabet = alphabet
        self.maxlen = maxlen
        self.normalize = normalize
        self.batch_size = batch_size
        self.symmetric = False

        # we use copies of the kernel params to avoid building an expensive computation graph;
        # instead we calculate gradients efficiently using dynamic programming.
        # These copies are refreshed at every call to K and K_diag (to check if parameters have been updated)
        self.match_decay = self.match_decay_param.numpy()
        self.gap_decay = self.gap_decay_param.numpy()
        self.match_decay_unconstrained = self.match_decay_param.unconstrained_variable.numpy()
        self.gap_decay_unconstrained = self.gap_decay_param.unconstrained_variable.numpy()

        # initialize helpful construction matrices to be lazily computed once needed
        self.D = None
        self.dD_dgap = None

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0,
                                         len(alphabet) + 1)),
            ),
            default_value=0)
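For context, the hash table above encodes characters as integer ids, with 0 reserved for "PAD". A small sketch of how such a table behaves (my addition; the alphabet is made up):

import tensorflow as tf

alphabet = ["A", "C", "G", "T"]
table = tf.lookup.StaticHashTable(
    initializer=tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["PAD"] + alphabet),
        values=tf.constant(range(0, len(alphabet) + 1)),
    ),
    default_value=0)
chars = tf.strings.bytes_split(tf.constant("GATTA"))
print(table.lookup(chars))  # [3 1 4 4 1]; unknown characters fall back to 0 ("PAD")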
Example #4
    def __init__(self,
                 m=1,
                 active_dims=[0],
                 gap_decay=0.1,
                 match_decay=0.9,
                 max_subsequence_length=3,
                 alphabet=[],
                 maxlen=0):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        logistic_gap = tfb.Chain([
            tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1,
                                                                tf.float64))),
            tfb.Sigmoid()
        ])
        logistic_match = tfb.Chain([
            tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1,
                                                                tf.float64))),
            tfb.Sigmoid()
        ])
        self.gap_decay = Parameter(gap_decay,
                                   transform=logistic_gap,
                                   name="gap_decay")
        self.match_decay = Parameter(match_decay,
                                     transform=logistic_match,
                                     name="match_decay")

        # prepare order coefs params
        order_coefs = tf.ones(max_subsequence_length)
        self.order_coefs = Parameter(order_coefs,
                                     transform=positive(),
                                     name="order_coefs")

        # get split weights
        self.m = m
        split_weights = tf.ones(2 * self.m - 1)
        self.split_weights = Parameter(split_weights,
                                       transform=positive(),
                                       name="split_weights")

        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.cast(tf.math.ceil(maxlen / self.m), dtype=tf.int32)
        self.full_maxlen = tf.constant(maxlen)
        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0,
                                         len(alphabet) + 1)),
            ),
            default_value=0)
Example #5
    def __init__(self,
                 a,
                 theta,
                 alpha,
                 beta,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='Amoroso'):

        parameters = dict(locals())
        with tf.name_scope(name) as name:
            self._a = tensor_util.convert_nonref_to_tensor(a)
            self._theta = tensor_util.convert_nonref_to_tensor(theta)
            self._alpha = tensor_util.convert_nonref_to_tensor(alpha)
            self._beta = tensor_util.convert_nonref_to_tensor(beta)
            gamma = tfd.Gamma(alpha, 1.)

            chain = tfb.Invert(
                tfb.Chain([
                    tfb.Exp(),
                    tfb.Scale(beta),
                    tfb.Shift(-tf.math.log(theta)),
                    tfb.Log(),
                    tfb.Shift(-a),
                ]))

            super().__init__(distribution=gamma,
                             bijector=chain,
                             validate_args=validate_args,
                             parameters=parameters,
                             name=name)
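A numeric sanity check of the chain above (my addition, with made-up parameter values): Chain applies right-to-left, so the forward chain maps x to ((x - a) / theta)**beta, and the inverted chain should therefore map a Gamma sample g to a + theta * g**(1 / beta).

import tensorflow as tf
import tensorflow_probability as tfp
tfb, tfd = tfp.bijectors, tfp.distributions

a, theta, alpha, beta = 0., 2., 3., 1.5
chain = tfb.Invert(
    tfb.Chain([
        tfb.Exp(),
        tfb.Scale(beta),
        tfb.Shift(-tf.math.log(theta)),
        tfb.Log(),
        tfb.Shift(-a),
    ]))
g = tfd.Gamma(alpha, 1.).sample(seed=42)
tf.debugging.assert_near(chain.forward(g), a + theta * g**(1. / beta))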
Example #6
    def init_bijectors(self,
                       a1: tf.Tensor,
                       b1: tf.Tensor,
                       theta: tf.Tensor,
                       a2: tf.Tensor,
                       b2: tf.Tensor,
                       name: str = 'bernstein_flow') -> tfb.Bijector:
        """
        Builds a normalizing flow using a Bernstein polynomial as Bijector.

        :param      a1:     The scale of f1.
        :type       a1:     Tensor
        :param      b1:     The shift of f1.
        :type       b1:     Tensor
        :param      theta:  The Bernstein coefficients.
        :type       theta:  Tensor
        :param      a2:     The scale of f3.
        :type       a2:     Tensor
        :param      b2:     The shift of f3.
        :type       b2:     Tensor
        :param      name:   The name to give Ops created by the initializer.
        :type       name:   string

        :returns:   The Bernstein flow.
        :rtype:     Bijector
        """
        bijectors = []

        # f1: ŷ = sigma(a1(x)*y - b1(x))
        f1_scale = tfb.Scale(a1, name='f1_scale')
        bijectors.append(f1_scale)
        f1_shift = tfb.Shift(b1, name='f1_shift')
        bijectors.append(f1_shift)

        # clip to range [0, 1]
        bijectors.append(tfb.SoftClip(low=0, high=1, hinge_softness=1.5))

        # f2: ẑ = Bernstein Polynomial
        f2 = BernsteinBijector(theta=theta, name='f2')
        bijectors.append(f2)

        # clip to range [min(theta), max(theta)]
        # bijectors.append(
        #     tfb.Invert(
        #         tfb.SoftClip(
        #             high=tf.math.reduce_max(theta, axis=-1),
        #             low=tf.math.reduce_min(theta, axis=-1),
        #             hinge_softness=0.5
        #         )
        #     )
        # )
        # f3: z = a2(x)*ẑ - b2(x)
        f3_scale = tfb.Scale(a2, name='f3_scale')
        bijectors.append(f3_scale)
        f3_shift = tfb.Shift(b2, name='f3_shift')
        bijectors.append(f3_shift)

        bijectors = list(reversed(bijectors))

        return tfb.Invert(tfb.Chain(bijectors))
Example #7
    def _build(self, input_shape):
        input_depth = tf.compat.dimension_value(
            tensorshape_util.with_rank_at_least(input_shape, 1)[-1])

        self._input_depth = input_depth

        flow_parts = []
        for i in range(self._num_coupling_layers):
            if self._use_batch_normalization:
                # TODO(hartikainen): Allow other normalizations, e.g.
                # weight normalization?
                batch_normalization_bijector = bijectors.BatchNormalization()
                flow_parts += [batch_normalization_bijector]

            real_nvp_bijector = bijectors.RealNVP(
                # alternate the masked half between layers: +0.5 on even, -0.5 on odd
                fraction_masked=0.5 if i % 2 == 0 else -0.5,
                bijector_fn=FeedforwardBijectorFunction(
                    hidden_layer_sizes=self._hidden_layer_sizes,
                    activation=self._activation),
                name=f'real_nvp_{i}')
            flow_parts += [real_nvp_bijector]

        # bijectors.Chain applies the list of bijectors in the
        # _reverse_ order of what they are inputted, thus [::-1].
        self.flow = bijectors.Chain(flow_parts[::-1])
        self._built = True
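The reversal comment is easy to check in isolation. A quick sketch (my addition):

import tensorflow_probability as tfp
tfb = tfp.bijectors

shift, scale = tfb.Shift(1.), tfb.Scale(2.)
print(tfb.Chain([scale, shift]).forward(0.))  # 2.0: Chain applies right-to-left, shift first
print(tfb.Chain([shift, scale]).forward(0.))  # 1.0: scale first, then shift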
Example #8
    def from_loc_and_scale(cls,
                           loc,
                           scale,
                           low=0.,
                           high=1e10,
                           scale_shift=1e-7):
        """
        Instantiate a learnable distribution with good default bijectors.

        loc : array
            The initial location of the distribution
        scale : array
            The initial scale parameter of the distribution
        low : float or array (optional)
            The lower limit of the support for the distribution.
        high : float or array (optional)
            The upper limit of the support for the distribution.
        scale_shift : float (optional)
            A small constant added to the scale to increase numerical stability.
        """
        loc = tfp.util.TransformedVariable(
            loc,
            tfb.Softplus(),
        )
        scale = tfp.util.TransformedVariable(
            scale,
            # Chain applies right-to-left, so Softplus runs first and the shift
            # then floors the constrained scale at scale_shift, matching the
            # docstring's "small constant added to the scale".
            tfb.Chain([
                tfb.Shift(scale_shift),
                tfb.Softplus(),
            ]),
        )
        return cls(loc, scale, low, high)
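The TransformedVariable pattern above keeps the raw, trainable storage in unconstrained space while every read goes through the bijector. A minimal sketch (my addition):

import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors

scale = tfp.util.TransformedVariable(1.0, tfb.Softplus())
print(tf.convert_to_tensor(scale))  # 1.0: the constrained value
scale.assign(0.25)                  # assignments are made in constrained space
print(tf.convert_to_tensor(scale))  # 0.25; the raw variable stores softplus_inverse(0.25)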
Example #9
 def __init__(self, scale=None, **kwargs):
     if scale is None:
         scaling = tfb.Identity()
     else:
         scaling = tfb.Scale(scale)
     transform = tfb.Chain([scaling, tfb.Log()])
     super().__init__(transform, **kwargs)
Example #10
File: flow.py Project: gumpfly/PSVO
    def init_bijectors(self, n_layers, hidden_layers):
        with tf.variable_scope(self.name):
            bijectors = []
            for i in range(n_layers):
                if self.flow_type == "MAF":
                    bijectors.append(
                        tfb.MaskedAutoregressiveFlow(
                            shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
                                hidden_layers=hidden_layers,
                                activation=tf.nn.relu,
                                log_scale_min_clip=self.log_scale_min_clip,
                                log_scale_max_clip=self.log_scale_max_clip,
                                shift_only=self.shift_only,
                                log_scale_clip_gradient=self.log_scale_clip_gradient,
                                name="MAF_template_{}".format(i)),
                            name="MAF_{}".format(i)))
                elif self.flow_type == "IAF":
                    bijectors.append(
                        tfb.Invert(
                            tfb.MaskedAutoregressiveFlow(
                                shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
                                    hidden_layers=hidden_layers,
                                    activation=tf.nn.relu,
                                    log_scale_min_clip=self.log_scale_min_clip,
                                    log_scale_max_clip=self.log_scale_max_clip,
                                    shift_only=self.shift_only,
                                    log_scale_clip_gradient=self.log_scale_clip_gradient,
                                    name="MAF_template_{}".format(i))),
                            name="IAF_{}".format(i)))
                elif self.flow_type == "RealNVP":
                    bijectors.append(
                        tfb.RealNVP(
                            num_masked=self.event_size - 1,
                            shift_and_log_scale_fn=tfb.real_nvp_default_template(
                                hidden_layers=hidden_layers,
                                activation=tf.nn.relu,
                                shift_only=self.shift_only,
                                name="RealNVP_template_{}".format(i)),
                            name="RealNVP_{}".format(i)))
                else:
                    raise ValueError("Unknown flow type {}".format(self.flow_type))
                bijectors.append(
                    tfb.Permute(permutation=list(range(1, self.event_size)) + [0]))
                # bijectors.append(
                #     tfb.Permute(
                #         self.init_once(np.random.permutation(self.event_size).astype("int32"),
                #                        name="permutation_{}".format(i))
                #     )
                # )

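            # bijectors[:-1] drops the Permute appended on the last iteration,
            # so the chain ends on a flow layer rather than a permutation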
            flow_bijector = tfb.Chain(list(reversed(bijectors[:-1])),
                                      validate_args=True,
                                      name="NF_chain")

            return flow_bijector
Example #11
def affine_flow_actor_critic(a, k):
    d = r = act_dim = a.shape.as_list()[-1]
    DTYPE = tf.float32
    bijectors = []
    initializer = tf.initializers.truncated_normal(0, 0.1)
    for i in range(k):
        with tf.variable_scope('bijector_%d' % i):
            V = tf.get_variable('V', [d, r],
                                dtype=DTYPE,
                                initializer=initializer)
            shift = tf.get_variable('shift', [d],
                                    dtype=DTYPE,
                                    initializer=initializer)
            L = tf.get_variable('L', [d * (d + 1) // 2],
                                dtype=DTYPE,
                                initializer=initializer)
            bijectors.append(
                tfpb.Affine(
                    scale_tril=tfpd.fill_triangular(L),
                    scale_perturb_factor=V,
                    shift=shift,
                ))
            alpha = tf.abs(tf.get_variable('alpha', [], dtype=DTYPE)) + .01
            bijectors.append(PReLU(alpha=alpha))
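    # bijectors[:-1] drops the final PReLU so the stack ends on an affine layer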
    mlp_bijector = tfpb.Chain(list(reversed(bijectors[:-1])),
                              name='mlp_bijector')
    dist = tfpd.TransformedDistribution(
        distribution=tfpd.MultivariateNormalDiag(
            loc=tf.zeros(act_dim), scale_diag=0.1 * tf.ones(act_dim)),
        bijector=mlp_bijector)
    pi = dist.sample(1)
    logp_pi = tf.squeeze(dist.log_prob(pi))
    logp = dist.log_prob(a)
    return pi, logp, logp_pi
Example #12
    def __init__(self, active_dims=[0], decay=0.1, max_subsequence_length=3,
                 alphabet=[], maxlen=0, batch_size=100):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        self.logistic = tfb.Chain([tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1, tf.float64))), tfb.Sigmoid()])
        self.decay_param = Parameter(decay, transform=self.logistic, name="decay")

        # we use copies of the kernel params to avoid building an expensive computation graph;
        # instead we calculate gradients efficiently using dynamic programming.
        # These copies are refreshed at every call to K and K_diag (to check if parameters have been updated)
        self.decay = self.decay_param.numpy()

        self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()

        self.order_coefs = tf.ones(max_subsequence_length, dtype=tf.float64)
        
        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.constant(maxlen)
        self.batch_size = tf.constant(batch_size)

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"]+alphabet),
                values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)

        # initialize helpful construction matrices to be lazily computed once needed
        self.D = None
        self.dD_dgap = None
Example #13
    def _build(self, input_shape):
        input_depth = tf.compat.dimension_value(
            tensorshape_util.with_rank_at_least(input_shape, 1)[-1])

        self._input_depth = input_depth

        flow_parts = []
        for i in range(self._num_coupling_layers):
            if self._use_batch_normalization:
                batch_normalization_bijector = bijectors.BatchNormalization()
                flow_parts += [batch_normalization_bijector]

            real_nvp_bijector = bijectors.RealNVP(
                num_masked=input_depth // 2,
                shift_and_log_scale_fn=feedforward_scale_and_log_diag_fn(
                    hidden_layer_sizes=self._hidden_layer_sizes,
                    activation=tf.nn.relu),
                name='real_nvp_{}'.format(i))
            flow_parts += [real_nvp_bijector]

            if i < self._num_coupling_layers - 1:
                permute_bijector = bijectors.Permute(
                    permutation=list(reversed(range(input_depth))),
                    name='permute_{}'.format(i))
                flow_parts += [permute_bijector]

        # bijectors.Chain applies the list of bijectors in the
        # _reverse_ order of what they are inputted, thus [::-1].
        self.flow = bijectors.Chain(flow_parts[::-1])
        self._built = True
Example #14
File: flow.py Project: gumpfly/PSVO
	def init_bijectors(self, n_layers, hidden_layers):
		with tf.variable_scope(self.name):
			bijectors = []
			for i in range(n_layers):
				if self.flow_type == "MAF":
					bijectors.append(tfb.MaskedAutoregressiveFlow(
						shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
							hidden_layers=hidden_layers,
							name = "MAF_template_{}".format(i)),
						name = "MAF_{}".format(i)))
				elif self.flow_type == "IAF":
					bijectors.append(
						tfb.Invert(
							tfb.MaskedAutoregressiveFlow(
								shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
									hidden_layers=hidden_layers,
									name = "MAF_template_{}".format(i))
								),
							name = "IAF_{}".format(i)
						)
					)
				bijectors.append(tfb.Permute(permutation=self.init_once(
					np.random.permutation(self.event_size).astype("int32"),
					name="permutation_{}".format(i))))

			flow_bijector = tfb.Chain(list(reversed(bijectors[:-1])))

			return flow_bijector
Example #15
 def bounded_parameter(low, high, param):
     """Make parameter tfp Parameter with optimization bounds."""
     affine = tfb.AffineScalar(shift=tf.cast(low, tf.float64),
                               scale=tf.cast(high - low, tf.float64))
     sigmoid = tfb.Sigmoid()
     logistic = tfb.Chain([affine, sigmoid])
     parameter = gpf.Parameter(param, transform=logistic, dtype=tf.float64)
     return parameter
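Hypothetical usage (my addition; bounds and initial value are made up):

lengthscale = bounded_parameter(0.1, 10.0, 1.0)  # constrained to (0.1, 10.0)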
Example #16
 def __init__(self, shift=None, scale=None, **kwargs):
     if scale is None:
         scaling = tfb.Identity()
     else:
         scaling = tfb.Scale(scale)
     if shift is None:
         shifting = tfb.Identity()
     else:
         shifting = tfb.Shift(shift)
     transform = tfb.Chain([scaling, shifting])
     super().__init__(transform, **kwargs)
Example #17
 def __init__(self, scale=None, skewness=None, tailweight=None, **kwargs):
     if scale is None:
         scaling = tfb.Identity()
     else:
         scaling = tfb.Scale(scale)
     transform = tfb.Chain([
         scaling,
         tfb.Softplus(),
         tfb.SinhArcsinh(
             skewness,
             tailweight,
         ),
     ])
     super().__init__(transform, **kwargs)
Example #18
    def test_noiseless_is_consistent_with_cumsum_bijector(self):
        num_timesteps = 10
        ssm = AutoregressiveMovingAverageStateSpaceModel(
            num_timesteps=num_timesteps,
            ar_coefficients=[0.7, -0.2, 0.1],
            ma_coefficients=[0.6],
            level_scale=0.6,
            level_drift=-0.3,
            observation_noise_scale=0.,
            initial_state_prior=tfd.MultivariateNormalDiag(
                loc=tf.zeros([3]), scale_diag=tf.ones([3])))
        cumsum_ssm = IntegratedStateSpaceModel(ssm)
        x, lp = cumsum_ssm.experimental_sample_and_log_prob(
            [2], seed=test_util.test_seed())

        flatten_event = tfb.Reshape([num_timesteps],
                                    event_shape_in=[num_timesteps, 1])
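        # Chain applies right-to-left: flatten [T, 1] -> [T], cumulative sum,
        # then restore the [T, 1] event shape.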
        cumsum_dist = tfb.Chain(
            [tfb.Invert(flatten_event),
             tfb.Cumsum(), flatten_event])(ssm)
        self.assertAllClose(lp, cumsum_dist.log_prob(x), atol=1e-5)
Example #19
 def __init__(self, upper_limit):
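     # Chain applies right-to-left: Exp maps to (0, inf), Scale(-1) flips to
     # (-inf, 0), and Shift moves the range to (-inf, upper_limit).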
     transform = tfb.Chain(
         [tfb.Shift(upper_limit),
          tfb.Scale(-1), tfb.Exp()])
     super().__init__(transform)
Example #20
 def __init__(self, lower_limit):
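     # Chain applies right-to-left: Exp maps to (0, inf), then Shift moves
     # the range to (lower_limit, inf).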
     transform = tfb.Chain([tfb.Shift(lower_limit), tfb.Exp()])
     super().__init__(transform)