def initial_weights(d,
                    D,
                    K,
                    bp=True,
                    std1=0.05,
                    std2=0.05,
                    stationary=False,
                    device='cuda:0',
                    dtype=torch.float):
    # Phase offsets b are drawn uniformly from [0, 2*pi).
    m = Uniform(torch.tensor([0.0], device=device),
                torch.tensor([2 * math.pi], device=device))
    if stationary:
        b1 = m.sample((1, D)).view(-1, D).to(device)
        Omega1 = torch.empty(d, D, device=device, requires_grad=bp)
        torch.nn.init.normal_(Omega1, 0, std1)
        W = torch.randn(D, K, device=device, dtype=dtype, requires_grad=True)
        return [Omega1, b1], W
    else:
        Omega1 = torch.empty(d, D, device=device, requires_grad=bp)
        Omega2 = torch.empty(d, D, device=device, requires_grad=bp)
        b1 = m.sample((1, D)).view(-1, D).to(device)
        b2 = m.sample((1, D)).view(-1, D).to(device)
        torch.nn.init.normal_(Omega1, 0, std1)
        torch.nn.init.normal_(Omega2, 0, std2)
        W = torch.randn(D, K, device=device, dtype=dtype, requires_grad=True)
        return [Omega1, Omega2, b1, b2], W
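Weights like these typically feed a random Fourier feature map; a minimal usage sketch under that assumption (the feature map itself is not part of the snippet, and the hyperparameters are illustrative):

    # Hypothetical usage: phi(x) = sqrt(2/D) * cos(x @ Omega1 + b1)
    import math
    import torch

    [Omega1, b1], W = initial_weights(d=3, D=64, K=2, stationary=True, device='cpu')
    x = torch.randn(10, 3)                                   # batch of 10 inputs
    phi = math.sqrt(2.0 / 64) * torch.cos(x @ Omega1 + b1)   # (10, 64) features
    out = phi @ W                                            # (10, 2) outputs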
Code example #2
    def __init__(self,
                 num_pars,
                 num_edges,
                 alpha=0.8,
                 a_uc_init=-1.0,
                 thres=1e-3,
                 kl_scale=1.0):
        super(BBGDC, self).__init__()
        self.num_pars = num_pars
        self.num_samps = num_edges
        self.alpha = alpha
        self.thres = thres
        self.kl_scale = kl_scale

        self.a_uc = nn.Parameter(torch.FloatTensor(self.num_pars),
                                 requires_grad=False)
        self.b_uc = nn.Parameter(torch.FloatTensor(self.num_pars),
                                 requires_grad=False)
        self.a_uc.data.uniform_(1.0, 1.5)
        self.b_uc.data.uniform_(0.49, 0.51)

        self.u_samp = Uniform(0.0, 1.0).rsample(
            torch.Size([self.num_samps, self.num_pars])).cuda()
        self.mask_samp = Uniform(0.0, 1.0).rsample(
            torch.Size([self.num_samps, self.num_pars])).cuda()
        self.u = torch.rand(self.num_pars).clamp(1e-6, 1 - 1e-6).cuda()
Code example #3
 def __init__(self, cont, logits0=None, probs0=None, validate_args=None):
     """
     - with probability p_0 = sigmoid(logits0) this returns 0
     - with probability 1 - p_0 this returns a sample in the open interval (0, 1)
     
     logits0: logits for p_0
     cont: a (properly normalised) distribution over (0, 1)
         e.g. RightTruncatedExponential
     """
     if logits0 is None and probs0 is None:
         raise ValueError("You must specify either logits0 or probs0")
     if logits0 is not None and probs0 is not None:
         raise ValueError("You cannot specify both logits0 and probs0")
     shape = cont.batch_shape
     super(MixtureD0C01, self).__init__(batch_shape=shape,
                                        validate_args=validate_args)
     if logits0 is None:
         self.logits = probs_to_logits(probs0, is_binary=True)
     else:
         self.logits = logits0
     self.cont = cont
     self.p0, self.pc = bernoulli_probs_from_logit(self.logits)
     self.log_p0, self.log_pc = bernoulli_log_probs_from_logit(self.logits)
     self.uniform = Uniform(
         torch.zeros(shape).to(self.logits.device),
         torch.ones(shape).to(self.logits.device))
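Only the constructor is shown; a sketch of the sampling rule the docstring describes (zero with probability p_0, otherwise a draw from cont), assuming the usual shape conventions:

    def sample(self, sample_shape=torch.Size()):
        # Pick the zero component with probability p0, else sample from cont.
        zero = torch.bernoulli(self.p0.expand(sample_shape + self.batch_shape))
        cont = self.cont.sample(sample_shape)
        return torch.where(zero.bool(), torch.zeros_like(cont), cont)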
Code example #4
    def get_sample_wlen(self, bs=1):
        # idx only acts as a counter while generating batches.
        prob = 0.5 * torch.ones([self.input_seq_len, bs, self.seq_width],
                                dtype=torch.float64)
        seq = Binomial(1, prob).sample()
        # Extra input channel for providing priority value
        input_seq = torch.zeros([self.input_seq_len, bs, self.in_dim])
        input_seq[:self.input_seq_len, :, :self.seq_width] = seq

        # torch's Uniform function draws samples from the half-open interval
        # [low, high) but in the paper the priorities are drawn from [-1,1].
        # This minor difference is ignored here, as it supposedly doesn't
        # affect the task.
        priority = Uniform(torch.tensor([-1.0] * bs), torch.tensor([1.0] * bs))
        for i in range(self.input_seq_len):
            input_seq[i, :, self.seq_width] = priority.sample()

        target_seq = []
        for j in range(bs):
            # Sort each batch element by its priority channel.
            _, ind = torch.sort(input_seq[:, j, self.seq_width], 0,
                                descending=True)
            sorted_seq = input_seq[ind, j]
            target_seq.append(
                sorted_seq[:self.target_seq_len, :self.seq_width].unsqueeze(1))
        target_seq = torch.cat(target_seq, 1)
        return {'input': input_seq, 'target': target_seq}
Code example #5
File: bnn.py Project: LMikeH/ocbnn-public
    def _sample_from_hypercube(self, region, nsamples, mode='even'):
        """ Generate `nsamples` points from the hypercube `region`.

			Arguments:
				mode: 'uniform' for uniform sampling, 'even' for evenly-spaced sampling
			Returns:
				Tensor of shape (nsamples, self.Xdim)
		"""
        samples = torch.tensor([])
        for d in range(self.Xdim):
            lower = min(self.X_train[:, d])
            if region[2 * d] > -np.inf:
                lower = region[2 * d]
            upper = max(self.X_train[:, d])
            if region[2 * d + 1] < np.inf:
                upper = region[2 * d + 1]
            if mode == 'uniform':
                unif = Uniform(torch.tensor([lower]),
                               torch.tensor([upper])).sample(
                                   torch.Size([nsamples])).squeeze()
                samples = torch.cat((samples, unif.unsqueeze(0)), 0)
            elif mode == 'even':
                samples = torch.cat(
                    (samples, torch.linspace(lower, upper,
                                             nsamples).unsqueeze(0)), 0)
        return torch.t(samples)
Code example #6
class RandomSample(Sample):
    def __init__(self, upper_bound, lower_bound, action_dim, is_discrete=False):
        """
        Random sample actions.
        """
        self.upper_bound = torch.tensor(upper_bound)
        self.lower_bound = torch.tensor(lower_bound)
        self.action_dim = action_dim
        self.is_discrete = is_discrete
        if is_discrete:
            self.sampler = RandIntSampler(self.lower_bound, self.upper_bound)
        else:
            self.sampler = Uniform(self.lower_bound, self.upper_bound)

    def sample(self, timesteps):
        shape = (timesteps, self.action_dim)
        actions = self.sampler.sample(shape).cpu().numpy()
        return actions

    def sample_n(self, sample_nums, timesteps, **kwargs):
        shape = (
            (sample_nums, timesteps, self.action_dim)
            if self.is_discrete
            else (sample_nums, timesteps)
        )
        actions = self.sampler.sample(shape).cpu().numpy()
        return actions
Code example #7
    def __init__(self,
                 n_components,
                 tolerance=1e-5,
                 max_step=1000,
                 verbose=True):
        '''
        Initialize the object.

        Input
        -----
        n_components: int
                      number of Binomial Distributions in the Mixture.
        tolerance: Float
                   the object fits the data by EM iteration. tolerance is used to define one
                   of the conditions for the iteration to stop. This condition says that
                   the iteration continues until the change of the parameters within the
                   current iteration is greater than tolerance.
        max_step: int
                  the maximum number of iteration steps.
        verbose: Boolean
                 whether to print out the information of the fitting process.
        '''
        self.K = n_components  # int, number of Binomial distributions in the Mixture
        self.tolerance = tolerance
        self.max_step = max_step
        self.verbose = verbose

        # initialize the pi_list
        pi_list = Uniform(low=1e-6,
                          high=1e0 - 1e-6).sample([self.K - 1]).to(device)
        pi_K = torch.tensor([1.0], device=device) - pi_list.sum()
        self.pi_list = torch.cat([pi_list, pi_K], dim=0)

        # initialize the theta_list
        self.theta_list = Uniform(low=1e-6, high=1e0 - 1e-6).sample([self.K])
Code example #8
    def __getitem__(self, idx):
        # idx only acts as a counter while generating batches.
        prob = 0.5 * torch.ones([self.input_seq_len, self.seq_width],
                                dtype=torch.float64)
        seq = Binomial(1, prob).sample()
        # Extra input channel for providing priority value
        input_seq = torch.zeros([self.input_seq_len, self.seq_width + 1])
        input_seq[:self.input_seq_len, :self.seq_width] = seq

        # torch's Uniform function draws samples from the half-open interval
        # [low, high) but in the paper the priorities are drawn from [-1,1].
        # This minor difference is ignored here, as it supposedly doesn't
        # affect the task.
        if not self.uniform:
            alpha = torch.tensor([2.0])
            beta = torch.tensor([5.0])
            if self.random_distr:
                alpha_beta_gen = Uniform(torch.tensor([0.0]),
                                         torch.tensor([100.0]))
                alpha = alpha_beta_gen.sample()
                beta = alpha_beta_gen.sample()
            priority = Beta(alpha, beta)
        else:
            priority = Uniform(torch.tensor([-1.0]), torch.tensor([1.0]))

        for i in range(self.input_seq_len):
            input_seq[i, self.seq_width] = priority.sample()

        sorted_index = torch.sort(input_seq[:, -1], descending=True)[1]
        target_seq = input_seq[sorted_index][:self.target_seq_len,
                                             :self.seq_width]

        return {'input': input_seq, 'target': target_seq}
Code example #9
def gen_noise(x: Tensor, snr_db: float) -> Tensor:
    mean_sq_x = (x**2).mean()
    snr = 10**(snr_db / 10)
    # E[U(0, h)^2] = h^2 / 3, so this choice of `high` makes the noise's
    # mean-square power equal to mean_sq_x / snr.
    high = torch.sqrt(3.0 * mean_sq_x / snr)
    uniform = Uniform(low=0, high=high)
    noise = uniform.sample(x.shape)
    return noise
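A quick empirical check of the SNR relationship (note the noise is non-negative, so it carries a DC offset):

    import torch

    x = torch.randn(100000)
    noise = gen_noise(x, snr_db=10.0)
    snr_est = (x**2).mean() / (noise**2).mean()
    print(10 * torch.log10(snr_est))   # ~10 dB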
Code example #10
File: hmc_sampler.py Project: hao-w/apgs
    def __init__(self, models, AT, S, B, T, K, z_where_dim, z_what_dim,
                 hmc_num_steps, step_size_what, step_size_where,
                 leapfrog_num_steps, CUDA, device):
        (_, self.dec_coor, _, self.dec_digit) = models
        self.AT = AT
        self.S = S
        self.B = B
        self.T = T
        self.K = K
        self.z_where_dim = z_where_dim
        self.z_what_dim = z_what_dim
        self.Sigma = torch.ones(1)
        self.mu = torch.zeros(1)
        self.accept_count = 0.0
        self.smallest_accept_ratio = 0.0
        self.hmc_num_steps = hmc_num_steps
        self.lf_step_size_where = step_size_where
        self.lf_step_size_what = step_size_what
        self.lf_num_steps = leapfrog_num_steps
        if CUDA:
            with torch.cuda.device(device):
                self.Sigma = self.Sigma.cuda()
                self.mu = self.mu.cuda()
                self.uniformer = Uniform(
                    torch.Tensor([0.0]).cuda(),
                    torch.Tensor([1.0]).cuda())
        else:
            self.uniformer = Uniform(torch.Tensor([0.0]), torch.Tensor([1.0]))

        self.gauss_dist = Normal(self.mu, self.Sigma)
Code example #11
    def __init__(self, cont, logits=None, probs=None, validate_args=None):
        """
        cont: a (properly normalised) distribution over (0, 1)
            e.g. RightTruncatedExponential, Uniform(0, 1)
        logits: [..., 3] 
        probs: [..., 3]
        """
        if logits is None and probs is None:
            raise ValueError("You must specify either logits or probs")
        if logits is not None and probs is not None:
            raise ValueError("You cannot specify both logits and probs")
        shape = cont.batch_shape
        super(MixtureD01C01, self).__init__(batch_shape=shape,
                                            validate_args=validate_args)
        if logits is None:
            self.logits = probs_to_logits(probs, is_binary=False)
            self.probs = probs
        else:
            self.logits = logits
            self.probs = logits_to_probs(logits, is_binary=False)

        self.logprobs = F.log_softmax(self.logits, dim=-1)
        self.cont = cont
        self.p0, self.p1, self.pc = [
            t.squeeze(-1) for t in torch.split(self.probs, 1, dim=-1)
        ]
        self.log_p0, self.log_p1, self.log_pc = [
            t.squeeze(-1) for t in torch.split(self.logprobs, 1, dim=-1)
        ]
        self.uniform = Uniform(
            torch.zeros(shape).to(self.logits.device),
            torch.ones(shape).to(self.logits.device))
Code example #12
 def __init__(self, survival_prob):
     """
     A module that implements drop connection
     :param survival_prob: the probability of connection survival
     """
     super(DropConnect, self).__init__()
     self.survival_prob = survival_prob
     self.u = Uniform(0, 1)
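The forward pass is not shown; a hedged sketch of the standard drop-connect / stochastic-depth formulation, which keeps each sample with probability survival_prob and rescales at train time:

    def forward(self, x):
        if not self.training:
            return x
        # One Bernoulli draw per batch element, derived from Uniform(0, 1).
        mask_shape = (x.shape[0],) + (1,) * (x.dim() - 1)
        keep = (self.u.sample(mask_shape) < self.survival_prob).to(x.dtype)
        return x * keep / self.survival_prob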
Code example #13
File: exponential.py Project: probabll/dists.pt
 def __init__(self, rate, upper):
     self.base = Exponential(rate)
     self._batch_shape = self.base.rate.size()
     self._upper = upper
     self.upper = torch.full_like(self.base.rate, upper)
     # normaliser
     self.normaliser = self.base.cdf(self.upper)
     self.uniform = Uniform(torch.zeros_like(self.upper), self.normaliser)
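With the normaliser stored, sampling typically proceeds by inverse-CDF: draw u ~ Uniform(0, cdf(upper)) and map it back through the base distribution's icdf. A sketch of that step (not part of the snippet):

    def sample(self, sample_shape=torch.Size()):
        # u lands in (0, cdf(upper)), so icdf(u) lands in (0, upper).
        u = self.uniform.sample(sample_shape)
        return self.base.icdf(u)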
Code example #14
File: concrete.py Project: eelcovdw/dists.pt
 def rsample_truncated(self, k0, k1, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     probs = torch.distributions.utils.clamp_probs(self.probs.expand(shape))
     uniforms = Uniform(self.cdf(torch.full_like(self.logits, k0)),
                        self.cdf(torch.full_like(self.logits, k1))).rsample(sample_shape)
     x = (uniforms.log() - (-uniforms).log1p()
          + probs.log() - (-probs).log1p()) / self.temperature
     return torch.sigmoid(x)
Code example #15
 def test_uniform_shape_tensor_params(self):
     uniform = Uniform(torch.Tensor([0, 0]), torch.Tensor([1, 1]))
     self.assertEqual(uniform._batch_shape, torch.Size((2, )))
     self.assertEqual(uniform._event_shape, torch.Size(()))
     self.assertEqual(uniform.sample().size(), torch.Size((2, )))
     self.assertEqual(
         uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))
     self.assertEqual(
         uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
     self.assertRaises(ValueError, uniform.log_prob, self.tensor_sample_2)
Code example #16
    def process(self, data: Tensor) -> Tensor:
        if isinstance(self.rolls, tuple):
            uniform = Uniform(*self.rolls)
            roll_scale = uniform.sample().item()
        else:
            roll_scale = self.rolls

        roll_size = round(data.shape[self.dim] * roll_scale)
        data = data.roll(roll_size, dims=self.dim)
        return data
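The same augmentation in standalone form, with illustrative values:

    import torch
    from torch.distributions import Uniform

    data = torch.arange(10.0)
    roll_scale = Uniform(0.1, 0.5).sample().item()   # random fraction of the axis
    data = data.roll(round(data.shape[-1] * roll_scale), dims=-1)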
Code example #17
 def __init__(self, loc, scale, validate_args=None):
     self.loc, self.scale = broadcast_all(loc, scale)
     finfo = _finfo(self.loc)
     if isinstance(loc, Number) and isinstance(scale, Number):
         base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
     else:
         base_dist = Uniform(self.loc.new(self.loc.size()).fill_(finfo.tiny), 1 - finfo.eps)
     transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
                   ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
     super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
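The transform chain amounts to the inverse-CDF form x = loc - scale * log(-log(u)) with u ~ Uniform(0, 1); a quick sanity check against the Gumbel mean loc + 0.5772 * scale:

    import torch

    u = torch.rand(200000).clamp(1e-12, 1 - 1e-7)
    x = 2.0 - 0.5 * torch.log(-torch.log(u))   # loc=2.0, scale=0.5
    print(x.mean())                            # ~2.0 + 0.5772 * 0.5 = 2.289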
Code example #18
 def __init__(self, action_space=None, batch_size=1):
     super().__init__(action_space=action_space, batch_size=batch_size)
     if isinstance(action_space, Discrete):
         self.sample_func = self.sample_discrete
         num_actions = action_space.n
         self.dist = Uniform(
             torch.tensor([0.0 for _ in range(batch_size)]),
             torch.tensor([num_actions for _ in range(batch_size)]))
     elif isinstance(action_space, Box):
         self.sample_func = self.sample_continuous
     else:
         raise NotImplementedError
Code example #19
    def __init__(self, spec: EnvSpec, use_cuda: bool = False):
        """
        Constructor

        :param spec: environment specification
        :param use_cuda: `True` to move the policy to the GPU, `False` (default) to use the CPU
        """
        super().__init__(spec, use_cuda)

        low = to.from_numpy(spec.act_space.bound_lo)
        high = to.from_numpy(spec.act_space.bound_up)
        self._distr = Uniform(low, high)
Code example #20
def sample_truncated_normal(mu, sigma, a, b):
    alpha = (a - mu) / sigma
    beta = (b - mu) / sigma
    uniform = Uniform(low=0.0, high=1.0)
    sampled_uniform = uniform.sample(mu.size())
    sampled_uniform = sampled_uniform.cuda()
    gamma = phi(alpha) + sampled_uniform * (phi(beta) - phi(alpha))

    return torch.clamp(
        phi_inv(torch.clamp(gamma, min=1e-5, max=1.0 - 1e-5)) * sigma + mu,
        min=a,
        max=b)
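phi and phi_inv are not shown; under the natural assumption that they are the standard normal CDF and its inverse, minimal definitions would be:

    import math
    import torch

    def phi(x):
        # Standard normal CDF via the error function.
        return 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

    def phi_inv(p):
        # Inverse standard normal CDF (probit).
        return math.sqrt(2.0) * torch.erfinv(2.0 * p - 1.0)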
Code example #21
    def rsample(self, sample_shape=torch.Size([])):
        val_vec, pdf_vec = zip(*self.probs.items())

        cdf_vector = torch.cumsum(torch.Tensor(pdf_vec), 0)

        unif = Uniform(0, 1)
        u_samples = unif.rsample(sample_shape).reshape(-1)
        n_samples = len(u_samples)

        ids = torch.sum(cdf_vector.repeat(n_samples, 1)
                        < u_samples.repeat(len(cdf_vector), 1).T, dim=1)

        return (torch.Tensor([val_vec[i] for i in ids]).reshape(*sample_shape, -1))
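The same inverse-CDF lookup in standalone form, assuming self.probs maps support values to probabilities:

    import torch
    from torch.distributions import Uniform

    val_vec, pdf_vec = (0.0, 1.0, 2.0), (0.2, 0.5, 0.3)      # hypothetical table
    cdf_vector = torch.cumsum(torch.tensor(pdf_vec), 0)      # [0.2, 0.7, 1.0]
    u = Uniform(0, 1).sample(torch.Size([4]))
    ids = torch.sum(cdf_vector.unsqueeze(0) < u.unsqueeze(1), dim=1)
    print(torch.tensor([val_vec[i] for i in ids]))           # 4 draws from {0, 1, 2}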
Code example #22
    def ll(self, batch):
        h = self.get_hidden_state(batch)

        ts = batch.int_events  # (*)
        params = self.get_params(h)  # (*, num_basis)

        basis_vals = self.kernel(ts.unsqueeze(-1), params)  # (*, num_basis)
        int_vals = self.to_positive(basis_vals.sum(-1))

        int_sum = torch.zeros(len(batch))
        compensator = torch.zeros(len(batch))
        for idx in range(len(batch)):
            cur_m = batch.mask[idx]

            # Intensity sum calculation
            cur_int = int_vals[idx][cur_m]
            cur_int_sum = torch.sum(torch.log(cur_int + 1e-8))

            int_sum[idx] = cur_int_sum

            if self.mc_approx:
                # Compensator calculation
                cur_ts = ts[idx][cur_m]
                mc_ts = cur_ts.repeat_interleave(self.compensator_sims)

                sampler = Uniform(torch.zeros_like(mc_ts), mc_ts)
                mc_points = sampler.sample()

                mc_params = [
                    p[idx][cur_m, :].repeat_interleave(self.compensator_sims,
                                                       dim=0).unsqueeze(0)
                    for p in params
                ]  # (masked * x sims, num_basis)

                mc_basis = self.kernel(
                    mc_points.unsqueeze(-1),
                    mc_params)  # (masked * x sims, num_basis)
                mc_int = self.to_positive(
                    mc_basis.sum(-1))  # (masked * x sims)

                mc_approx = mc_int.view(-1, self.compensator_sims).sum(
                    -1) * cur_ts / self.compensator_sims

                mc_integral = torch.sum(mc_approx)

                compensator[idx] = mc_integral
            else:
                # Trapezoidal rule
                cur_ts = ts[idx][cur_m]
                compensator[idx] = torch.sum(
                    (cur_int[:-1] + cur_int[1:]) * cur_ts[1:] / 2)

        return (int_sum - compensator) / batch.seq_lengths
Code example #23
class MultivariateUniform(BasePrior):
    """Uniformly draw samples from [a, b]."""
    def __init__(self, a: torch.Tensor, b: torch.Tensor):
        super().__init__()
        self.dist = Uniform(a, b)

    def log_prob(self, x: torch.Tensor):
        axes = range(1, len(x.shape))
        return torch.sum(self.dist.log_prob(x), dim=tuple(axes))

    def sample_n(self, batch_size: int):
        return self.dist.sample((batch_size, ))
Code example #24
class MultivariateUniform(torch.nn.Module):
    """Uniformly draw samples from [a, b]."""
    def __init__(self, a, b):
        super().__init__()
        self.dist = Uniform(a, b)

    def log_prob(self, x):
        axes = range(1, len(x.shape))
        return torch.sum(self.dist.log_prob(x), dim=tuple(axes))

    def sample_n(self, batch_size):
        return self.dist.sample((batch_size, ))
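Usage is the same for both variants; for example:

    import torch

    prior = MultivariateUniform(torch.zeros(3), torch.ones(3))
    x = prior.sample_n(5)       # shape (5, 3)
    print(prior.log_prob(x))    # shape (5,); zeros on the unit cube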
Code example #25
    def train(self, dataloader):

        for epoch in range(self.epoches):
            for i, data in enumerate(dataloader):

                # Train the discriminator
                self.dis_model.zero_grad()

                # Train on real samples
                real_img = data[0].to(device)
                B = real_img.shape[0]
                label = Uniform(0.95, 1.0).sample((B, )).to(device)
                #label = torch.full((B,), self.real_label, device=device, dtype=torch.float)
                output = self.dis_model(real_img).view(-1)
                loss_real = self.criterion(output, label)
                loss_real.backward()

                # Train on fake samples
                noise = torch.randn(B, 100, 1, 1, device=device)
                fake_img = self.gen_model(noise)
                label = Uniform(0., 0.05).sample((B, )).to(device)
                #label = torch.full((B,), self.fake_label, device=device, dtype=torch.float)
                output = self.dis_model(fake_img.detach()).view(-1)
                loss_fake = self.criterion(output, label)
                loss_fake.backward()

                # Update the discriminator
                loss_dis = loss_real + loss_fake
                #loss_dis.backward()
                self.optim_D.step()

                # Train the generator
                self.gen_model.zero_grad()
                output = self.dis_model(fake_img).view(-1)
                label = Uniform(0.95, 1.0).sample((B, )).to(device)
                #label = torch.full((B,), self.real_label, device=device, dtype=torch.float)
                loss_gen = self.criterion(output, label)
                loss_gen.backward()
                self.optim_G.step()

                if i % 300 == 0:
                    print('epoch:', epoch, 'loss_dis:', loss_dis.item(),
                          'loss_gen:', loss_gen.item())

            with torch.no_grad():
                fake_img = self.gen_model(self.fixed_noise).detach().cpu()

                fake_img = make_grid(fake_img, padding=2, normalize=True)
                fig = plt.figure(figsize=(10, 10))
                plt.imshow(fake_img.permute(1, 2, 0))
                plt.savefig('./result/' + str(epoch) + '.png')
                plt.close()
Code example #26
 def __init__(self,
              *args,
              noise_dist='gaussian',
              noise_scale=0.1,
              **kwargs):
     super().__init__(*args, **kwargs)
     if noise_dist == 'gaussian':
         self.noise = Normal(loc=0, scale=noise_scale)
     elif noise_dist == 'uniform':
         self.noise = Uniform(low=-noise_scale, high=noise_scale)
     else:
         raise NotImplementedError(
             f"{noise_dist} not recognized as a noise distribution")
Code example #27
 def _sample_volume_alphas(self, n_related):
     if self.uniform_volumes:
         u = Uniform(0.25, 1.25)
         return u.sample().repeat(n_related)
     if isinstance(self.concentration, (float, int)):
         concentration = self.concentration
     else:
         concentration = self.concentration.rvs()
     dirichlet = Dirichlet(
         torch.tensor([concentration for _ in range(n_related)]))
     if self.random_seed is not None:
         torch.manual_seed(self.random_seed)
     return dirichlet.sample() * float(self.n_classes)
Code example #28
 def integrate(self, duration, states):
     # Monte Carlo estimate of the integrated intensity over [0, duration].
     loss = torch.zeros(duration.shape).to(duration.device)
     uniform_sampler = Uniform(torch.zeros(duration.shape).to(duration.device), duration)
     for _ in range(self.nsamples):
         # Draw a time uniformly in [0, duration] and evaluate the intensity there.
         t = uniform_sampler.sample().view(-1, 1)
         weight = duration / self.nsamples
         loss += self.intensity_criterion.getIntensity(states, t) * weight

     loss = torch.sum(loss)
     assert not np.isnan(loss.item())
     return loss
Code example #29
def init_mu(size, prior, Ninput, type = "Linear"):
    # Initializer for the weight means, drawn at the prior's scale.
    if(type == "Linear"):
        stdv = 1. / math.sqrt(Ninput)
        samples_mu = Uniform(-stdv, stdv).sample(size).to(device = device, dtype = dtype)
    elif(type == "LinearSimilarity"):
        if(size[0] == 1): # The bias
            samples_mu = torch.tensor([0.0]).to(device = device, dtype = dtype)
        else:
            std = math.sqrt(6 / (Ninput + 1))
            samples_mu = Uniform(-std, std).sample(size).to(device = device, dtype = dtype)
    else:
        raise ValueError("Unknown layer type: {}".format(type))
    return samples_mu