Example #1
    def __init__(self, shape, loc, scale, beta=0.5):
        scale_prior = PositiveImproper(shape=[], loc=scale, scale=1.)
        beta_prior = PositiveImproper(shape=[], loc=beta, scale=1.)
        with torch.no_grad():
            scale_prior.p.data = inv_softplus(torch.tensor(scale))
            beta_prior.p.data = inv_softplus(torch.tensor(beta))
        super().__init__(shape, loc, scale=scale_prior, beta=beta_prior)
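
All of the snippets on this page store positive quantities as unconstrained parameters and use inv_softplus to pick the raw value whose softplus equals the desired one. The helper itself is never shown; a minimal sketch of what it presumably computes (the inverse of softplus, sometimes called softminus):

import torch

def inv_softplus(x: torch.Tensor) -> torch.Tensor:
    # Inverse of softplus(y) = log(1 + exp(y)), defined for x > 0.
    # log(exp(x) - 1) is rewritten as x + log(1 - exp(-x)), which stays
    # stable when x is large and exp(x) would overflow.
    return x + torch.log(-torch.expm1(-x))

With this definition, torch.nn.functional.softplus(inv_softplus(torch.tensor(0.5))) recovers 0.5, which is exactly the round trip the with torch.no_grad() blocks rely on.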
Example #2
    def __init__(self, shape, loc, scale, df=2.):
        scale_prior = PositiveImproper(shape=[], loc=scale, scale=1.)
        df_prior = PositiveImproper(shape=[], loc=df, scale=1.)
        with torch.no_grad():
            scale_prior.p.data = inv_softplus(torch.tensor(scale))
            df_prior.p.data = inv_softplus(torch.tensor(df))
        super().__init__(shape, loc, scale=scale_prior, df=df_prior)
Example #3
def StepTanhL(num_blocks, num_steps, **kwargs):
    set_res, addf0, init_random, constraint = common_config(kwargs)
    if 'set_res' in kwargs:
        assert kwargs['set_res'] == True, \
            'In the step tanh flow, set_res has to be True for num_steps > 1'
    set_res = True

    input_dependent, input_dim, input_dependent_config = set_input_dependent_config(
        kwargs)

    block_array = []
    for nb in range(num_blocks):

        step_flow_arr = []
        # Each step in the linear combination should be initialized to
        # different values; otherwise the gradients will always point in the
        # same direction and the parameters will be equal after each gradient
        # update.
        for st in range(num_steps):
            # w = numpy.log(numpy.exp(1./float(num_steps))-1)  # softminus
            e1, e2, e3, e4 = numpy.multiply(numpy.random.randn(4, ),
                                            numpy.array([1.0, 1.0, 1.0, 1.0]))
            if not init_random:
                e2 = inv_softplus(
                    torch.abs(torch.tensor(
                        (e2 + 1.0) / float(num_steps)))).item()
                e4 = inv_softplus(
                    torch.abs(torch.tensor(
                        (e4 + 1.0) / float(num_steps)))).item()

            init_tanh = {
                'init_a': e1,
                'init_b': e2,
                'init_c': e3,
                'init_d': e4,
                'add_init_f0': False,
                'set_restrictions': set_res,
                'input_dependent': input_dependent,
                'input_dim': input_dim,
                'input_dependent_config': input_dependent_config
            }

            step_flow_arr.append(('tanh', init_tanh))

        if init_random:
            a_aff, b_aff = numpy.random.randn(2)
        else:
            a_aff, b_aff = 1.0, 0.0

        init_affine = {
            'init_a': a_aff,
            'init_b': b_aff,
            'set_restrictions': False
        }
        init_step_tanh = {'flow_arr': step_flow_arr, 'add_init_f0': addf0}

        block = [('step_flow', init_step_tanh), ('affine', init_affine)]
        block_array.extend(block)

    return block_array
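
The comment inside the step loop explains the initialization: identically initialized terms of a linear combination receive identical gradients and therefore remain equal after every update. A standalone illustration of that symmetry (the variables here are illustrative, not part of the flow code):

import torch

w = torch.ones(2, requires_grad=True)            # two steps, identical init
x = torch.tensor(0.7)
y = torch.tanh(w[0] * x) + torch.tanh(w[1] * x)  # linear combination of steps
y.backward()
print(w.grad)  # both entries are equal, so w[0] == w[1] after any update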
Example #4
    def __init__(self, shape, loc, scale, lengthscale=1.0):
        lengthscale_prior = PositiveImproper(shape=[], loc=lengthscale,
                                             scale=1.)
        scale_prior = PositiveImproper(shape=[], loc=scale, scale=1.)
        with torch.no_grad():
            lengthscale_prior.p.data = inv_softplus(torch.tensor(lengthscale))
            scale_prior.p.data = inv_softplus(torch.tensor(scale))
        super().__init__(shape, loc, scale=scale_prior,
                         lengthscale=lengthscale_prior)
Example #5
    def __init__(self, shape, loc, scale, lengthscale=1., rate=1.):
        lengthscale_prior = Gamma(shape=[], concentration=lengthscale,
                                  rate=rate)
        scale_prior = Gamma(shape=[], concentration=scale, rate=rate)
        with torch.no_grad():
            lengthscale_prior.p.data = inv_softplus(torch.tensor(lengthscale))
            scale_prior.p.data = inv_softplus(torch.tensor(scale))
        super().__init__(shape, loc, scale=scale_prior,
                         lengthscale=lengthscale_prior)
Example #6
    def __init__(self, shape, loc, scale, base_dist="gaussian", scales=None):
        if scales is None:
            self.scales = [scale / 9, scale / 3, scale, scale * 3, scale * 9]
        else:
            self.scales = scales
        super().__init__(shape, loc, scale)
        self.mixture_weights = torch.nn.Parameter(
            torch.zeros(len(self.scales)))
        scale_priors = [
            PositiveImproper(shape=[], loc=scl, scale=1.)
            for scl in self.scales
        ]
        for scale_prior, scl in zip(scale_priors, self.scales):
            with torch.no_grad():
                scale_prior.p.data = inv_softplus(torch.tensor(scl))
        self.components = [
            get_prior(base_dist)(shape, loc, scl) for scl in scale_priors
        ]
        for comp in self.components:
            comp.p = self.p
            comp._old_log_prob = comp.log_prob
            # Prevent the sum over priors from double-counting this one
            comp.log_prob = (lambda: 0.)

        for i, comp in enumerate(self.components):
            self.add_module(f"component_{i}", comp)

        # Now that all parameters are initialized, sample properly
        self.sample()
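
For context: the mixture_weights parameter and the saved _old_log_prob handles suggest that the parent class scores the shared parameter as a softmax-weighted mixture over the per-scale components. A hypothetical sketch of that combination (the function name and the no-argument call convention are inferred from the snippet, not taken from the library):

import torch

def mixture_log_prob(components, mixture_weights):
    # log p = logsumexp_i(log softmax(w)_i + log p_i), where each component
    # evaluates its log-density at the shared parameter via _old_log_prob.
    log_w = torch.log_softmax(mixture_weights, dim=0)
    log_ps = torch.stack([c._old_log_prob() for c in components])
    return torch.logsumexp(log_w + log_ps, dim=0)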
Example #7
    def __init__(self, is_trainable: bool, n_steps: int):
        super(switch_off, self).__init__()
        self.is_trainable = is_trainable
        if self.is_trainable:
            # The weight a is initialized to 1/n_steps so that a linear
            # combination of many flows does not saturate easily.
            a = torch.tensor(1.0 / float(n_steps), dtype=cg.dtype)
            # Since a softplus is applied later, compute the softminus first
            # so that at initialization the scale parameter a = 1/n_steps.
            a = inv_softplus(a)
            self.a = nn.Parameter(a)
            self.b = nn.Parameter(torch.tensor(0.0, dtype=cg.dtype))
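
A quick check of the round trip described in the comments above, assuming inv_softplus is the softplus inverse as sketched under Example #1:

import torch
import torch.nn.functional as F

def inv_softplus(x):  # softplus inverse, as assumed throughout this page
    return x + torch.log(-torch.expm1(-x))

n_steps = 4
a_raw = inv_softplus(torch.tensor(1.0 / n_steps))  # stored raw parameter
print(F.softplus(a_raw))  # tensor(0.2500): effective scale at initialization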
Example #8
def StepSAL(num_blocks, num_steps, **kwargs):

    set_res, addf0, init_random, constraint = common_config(kwargs)
    if 'set_res' in kwargs:
        assert kwargs['set_res'] == True, \
            'In the step SAL flow, set_res has to be True for num_steps > 1'
    set_res = True

    block_array = []
    for nb in range(num_blocks):

        step_flow_arr = []
        # Each step in the linear combination should be initialized to
        # different values; otherwise the gradients will always point in the
        # same direction and the parameters will be equal after each gradient
        # update.
        for st in range(num_steps):
            a_sal, b_sal = numpy.random.randn(2)
            if not init_random:
                b_sal += 1.0
                b_sal = inv_softplus(torch.abs(torch.tensor(b_sal))).item()

            init_sinh_arcsinh = {
                'init_a': a_sal,
                'init_b': b_sal,
                'add_init_f0': False,
                'set_restrictions': set_res
            }
            step_flow_arr.append(('sinh_arcsinh', init_sinh_arcsinh))

        if init_random:
            a_aff, b_aff = numpy.random.randn(2)
        else:
            a_aff, b_aff = 1.0, 0.0

        init_affine = {
            'init_a': a_aff,
            'init_b': b_aff,
            'set_restrictions': False
        }
        init_step_sa = {'flow_arr': step_flow_arr, 'add_init_f0': addf0}

        block = [('step_flow', init_step_sa), ('affine', init_affine)]
        block_array.extend(block)

    return block_array
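
Background on the 'sinh_arcsinh' step: sinh-arcsinh transformations (Jones & Pewsey, 2009) add skew and tail-weight control, and the inv_softplus initialization of b_sal above suggests the flow passes its scale-like parameter through a softplus. A sketch of one common parameterization (sign and naming conventions vary, so this is not necessarily the library's exact form):

import torch

def sinh_arcsinh(x, a, b):
    # a controls skewness; b > 0 controls tail weight.
    return torch.sinh(b * torch.asinh(x) + a)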
Example #9
    def __init__(self, shape, loc, scale, hyperscale=1., gradient_clip=1.):
        scale_prior = HalfCauchy(shape=[], scale=hyperscale, multiplier=scale)
        with torch.no_grad():
            scale_prior.p.data = inv_softplus(torch.tensor(1.))
        super().__init__(shape, loc, scale_prior)
Example #10
    def __init__(self, shape, loc, scale, rate=1., df=2, gradient_clip=1.):
        scale_prior = Gamma(shape=[], concentration=scale, rate=rate)
        with torch.no_grad():
            scale_prior.p.data = inv_softplus(torch.tensor(scale))
        super().__init__(shape, loc, scale_prior, df=df)
Example #11
    def _sample_value(self, shape: torch.Size):
        x = super()._sample_value(shape)
        return inv_softplus(x)
Example #12
def StepAllL(num_blocks, **kwargs):
    set_res, addf0, init_random, constraint = common_config(kwargs)
    if 'set_res' in kwargs:
        assert kwargs['set_res'] == True, \
            'In the step all flow, set_res has to be True for num_steps > 1'
    set_res = True

    num_steps = 5
    block_array = []
    for nb in range(num_blocks):

        step_flow_arr = []

        ## Inverse Box Cox
        init_lam = numpy.random.randn(1, )
        if not init_random:
            init_lam += 5.0

        init_bc = {
            'init_lam': init_lam,
            'add_init_f0': addf0,
            'constraint': constraint
        }
        step_flow_arr.append(('inverseboxcox', init_bc))

        ## Box Cox Flow
        init_lam = numpy.random.randn(1, )
        if not init_random:
            init_lam += 5.0

        init_bc = {
            'init_lam': init_lam,
            'add_init_f0': addf0,
            'constraint': constraint
        }
        step_flow_arr.append(('boxcox', init_bc))

        ## Arcsinh Flow
        e1, e2, e3, e4 = numpy.multiply(numpy.random.randn(4, ),
                                        numpy.array([1.0, 1.0, 1.0, 1.0]))
        if not init_random:
            e2 = inv_softplus(
                torch.abs(torch.tensor((e2 + 1.0) / float(num_steps)))).item()
            e4 = inv_softplus(
                torch.abs(torch.tensor((e4 + 1.0) / float(num_steps)))).item()

        init_arcsinh = {
            'init_a': e1,
            'init_b': e2,
            'init_c': e3,
            'init_d': e4,
            'add_init_f0': False,
            'set_restrictions': set_res
        }
        step_flow_arr.append(('arcsinh', init_arcsinh))

        ## SAL Flow
        a_sal, b_sal = numpy.random.randn(2)
        if not init_random:
            b_sal += 1.0
            b_sal = inv_softplus(torch.abs(torch.tensor(b_sal))).item()

        init_sinh_arcsinh = {
            'init_a': a_sal,
            'init_b': b_sal,
            'add_init_f0': False,
            'set_restrictions': set_res
        }
        step_flow_arr.append(('sinh_arcsinh', init_sinh_arcsinh))

        ## Tanh Flow
        e1, e2, e3, e4 = numpy.multiply(numpy.random.randn(4, ),
                                        numpy.array([1.0, 1.0, 1.0, 1.0]))
        if not init_random:
            e2 = inv_softplus(
                torch.abs(torch.tensor((e2 + 1.0) / float(num_steps)))).item()
            e4 = inv_softplus(
                torch.abs(torch.tensor((e4 + 1.0) / float(num_steps)))).item()

        init_tanh = {
            'init_a': e1,
            'init_b': e2,
            'init_c': e3,
            'init_d': e4,
            'add_init_f0': False,
            'set_restrictions': set_res
        }
        step_flow_arr.append(('tanh', init_tanh))

        if init_random:
            a_aff, b_aff = numpy.random.randn(2)
        else:
            a_aff, b_aff = 1.0, 0.0

        init_affine = {
            'init_a': a_aff,
            'init_b': b_aff,
            'set_restrictions': False
        }
        init_step = {'flow_arr': step_flow_arr, 'add_init_f0': addf0}

        block = [('step_flow', init_step), ('affine', init_affine)]
        block_array.extend(block)

    return block_array