    def __init__(self,
                 hop_len=None,
                 num_bands=128,
                 fft_len=2048,
                 norm='whiten',
                 stretch_param=[0.4, 0.4]):

        super(MelspectrogramStretch, self).__init__()

        self.prob = stretch_param[0]
        self.dist = Uniform(-stretch_param[1], stretch_param[1])
        self.norm = {
            'whiten': spec_whiten,
            'db': amplitude_to_db
        }.get(norm, None)

        self.stft = STFT(fft_len=fft_len, hop_len=hop_len)
        self.pv = StretchSpecTime(hop_len=self.stft.hop_len,
                                  num_bins=fft_len // 2 + 1)
        self.cn = ComplexNorm(power=2.)

        fb = MelFilterbank(num_bands=num_bands).get_filterbank()
        self.app_fb = ApplyFilterbank(fb)

        self.fft_len = fft_len
        self.hop_len = self.stft.hop_len
        self.num_bands = num_bands
        self.stretch_param = stretch_param

        self.counter = 0
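The prob/dist pair above is the usual recipe for an optional random time stretch: with probability prob, draw an offset from Uniform(-0.4, 0.4) and stretch by 1 + offset (Example #12 below applies the same pattern in its forward pass). A minimal sketch of just that decision, outside the class:

import torch
from torch.distributions import Uniform

prob, max_shift = 0.4, 0.4            # stretch_param
dist = Uniform(-max_shift, max_shift)

rate = 1.0
if torch.rand(1).item() < prob:        # apply the augmentation only part of the time
    rate = 1.0 + dist.sample().item()  # stretch factor in (0.6, 1.4)
# `rate` would then be passed to the time-stretch / phase-vocoder module.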
Example #2
    def __init__(self, prsi_dim=32, trsi_dim=8,
                 pm_dim=8, vol_dim=8, tvol_dim=8,
                 hidden_dim=4):
        super(BitInvestor, self).__init__()

        self.prsi_dim = prsi_dim
        prsi_dim2 = int(prsi_dim / 2)
        self.trsi_dim = trsi_dim
        self.pm_dim = pm_dim
        self.vol_dim = vol_dim
        self.tvol_dim = tvol_dim
        self.hidden_dim = hidden_dim

        tshape = 5 * hidden_dim
        self.spike = Uniform(0., 1.)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

        self.prsi_net1 = nn.Linear(prsi_dim, prsi_dim2)
        self.prsi_net2 = nn.Linear(prsi_dim2, hidden_dim)
        self.trsi_net = nn.Linear(trsi_dim, hidden_dim)
        self.pm_net = nn.Linear(pm_dim, hidden_dim)
        self.vol_net = nn.Linear(vol_dim, hidden_dim)
        self.tvol_net = nn.Linear(tvol_dim, hidden_dim)

        self.wnet = nn.Linear(tshape, 2)
        self.qnet = nn.Linear(tshape, 2)
        self.renet = nn.Linear(tshape, 1)
Example #3
    def __init__(self,
                 a_lms: SO3Vec,
                 sphs: SphericalHarmonics,
                 empty: torch.Tensor = None,
                 validate_args=None,
                 device=None,
                 dtype=torch.float) -> None:
        # SO3Vec: -ell, ..., ell: (batch size, tau's, m's, 2)
        assert all(a_lm.shape[:-3] == a_lms[0].shape[:-3] for a_lm in a_lms)
        super().__init__(batch_shape=a_lms[0].shape[:-3],
                         validate_args=validate_args,
                         device=device,
                         dtype=dtype)

        assert sphs.sh_norm == 'qm'
        self.sphs = sphs

        assert empty is None or empty.shape == self.batch_shape
        self.empty = empty

        self.coefficients = normalize_alms(a_lms)  # (batches, taus, ms, 2)

        self.spherical_uniform = SphericalUniform(batch_shape=self.batch_shape,
                                                  device=device,
                                                  dtype=dtype,
                                                  validate_args=validate_args)
        self.uniform_dist = Uniform(low=0.0,
                                    high=1.0,
                                    validate_args=validate_args)
Example #4
File: m_layer.py  Project: AndreevP/MEML
    def __init__(
        self, input_dim, m_dim, matrix_init='normal', 
        with_bias=False, expm=SecondGreatLimitExpm(6), device='cuda'):
        '''
        :Parameters:
        input_dim: tuple: shape of input tensor
        m_dim: int : matrix dimension
        matrix_init: str or torch.distribution : parameters initializer
        with_bias : bool : whether to use bias (when constructing matrix from input)
        expm : callable : method to compute matrix exponential
        device : str : torch device
        '''
        super().__init__()
        self.input_dim = input_dim
        self.m_dim = m_dim
        self.matrix_init = matrix_init
        self.bias_init = Uniform(0., 1.)
        self.with_bias = with_bias
        self.expm = expm
        self.device = device

        self._rep_to_exp_tensor = Parameter(
            self.matrix_init.sample(
                (np.prod(self.input_dim), self.m_dim, self.m_dim)).to(self.device), 
                requires_grad=True)
        
        if self.with_bias:
            self._matrix_bias = Parameter(
                self.bias_init.sample((1, self.m_dim, self.m_dim)).to(self.device), 
                requires_grad=True)
Example #5
  def sample(self, size):
    if cfg['USE_CUDA']:
      z = Uniform(torch.cuda.FloatTensor([0.]), torch.cuda.FloatTensor([1.])).sample(size)
    else:
      z = Uniform(torch.FloatTensor([0.]), torch.FloatTensor([1.])).sample(size)

    return torch.log(z) - torch.log(1. - z)
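The returned log(z) - log(1 - z) is the logit of a uniform draw, i.e. inverse-CDF sampling from the standard logistic distribution. The same trick without the cfg/CUDA plumbing:

import torch
from torch.distributions import Uniform

def sample_standard_logistic(size):
    # Inverse-CDF sampling: the logit of U(0, 1) is standard-logistic distributed.
    z = Uniform(0.0, 1.0).sample(size)
    return torch.log(z) - torch.log(1.0 - z)

samples = sample_standard_logistic((10000,))
print(samples.mean(), samples.var())  # ~0 and ~pi**2 / 3 (about 3.29)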
Example #6
    def prepare_dec_io(self, z_sample_ids, z_sample_emb, sentences, x_lambd):
        """Prepare the decoder output g based on the inferred z from the CRF.

        Args:
          x_lambd: word dropout ratio. 1 = all dropped

        Returns:
          dec_inputs: size=[batch, max_len, state_size]
          dec_targets_x: size=[batch, max_len]
          dec_targets_z: size=[batch, max_len]
        """
        batch_size = sentences.size(0)
        max_len = sentences.size(1)
        device = sentences.device

        sent_emb = self.embeddings(sentences)
        z_sample_emb[:, 0] *= 0.  # mask out z[0]

        # word dropout ratio = x_lambd. 0 = no dropout, 1 = all drop out
        m = Uniform(0., 1.)
        mask = m.sample([batch_size, max_len]).to(device)
        mask = (mask > x_lambd).float().unsqueeze(2)

        dec_inputs = z_sample_emb + sent_emb * mask
        dec_inputs = dec_inputs[:, :-1]

        dec_targets_x = sentences[:, 1:]
        dec_targets_z = z_sample_ids[:, 1:]
        return dec_inputs, dec_targets_x, dec_targets_z
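The word-dropout mask above is just a Bernoulli(1 - x_lambd) keep-mask built from a uniform sample. In isolation (the names here are illustrative):

import torch
from torch.distributions import Uniform

batch_size, max_len, x_lambd = 4, 16, 0.3
u = Uniform(0.0, 1.0).sample((batch_size, max_len))
keep = (u > x_lambd).float().unsqueeze(2)  # 1 = keep the word embedding, 0 = drop it
print(keep.mean().item())                  # roughly 1 - x_lambd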
Example #7
def sample_action(I, mean=None, cov=None):
    '''TODO: unit test
    each action sequence length: H
    number of action sequences: N
    '''
    action = torch.tensor([0] * 4, dtype=torch.float)
    multiplier = torch.tensor([50, 50, 2 * math.pi, 0.14])
    addition = torch.tensor([0, 0, 0, 0.01])
    thres = 0.9

    if I[0][0][0][0] == 1.:
        if ((mean is None) and (cov is None)):
            action_base = Uniform(low=0.0, high=1.0).sample((4, ))
            action = torch.mul(action_base, multiplier) + addition
        else:
            cov = add_eye(cov)
            action = MultivariateNormal(mean, cov).sample()
        action[0], action[1] = 0, 0
        return action

    while I[0][0][torch.floor(action[0]).type(torch.LongTensor)][torch.floor(
            action[1]).type(torch.LongTensor)] != 1.:
        if ((mean is None) and (cov is None)):
            action_base = Uniform(low=0.0, high=1.0).sample((4, ))
            action = torch.mul(action_base, multiplier) + addition
        else:
            cov = add_eye(cov)
            action = MultivariateNormal(mean, cov).sample()
            while torch.floor(action[0]).type(
                    torch.LongTensor) >= 50 or torch.floor(action[1]).type(
                        torch.LongTensor) >= 50:
                cov = add_eye(cov)
                action = MultivariateNormal(mean, cov).sample()

    return action
Example #8
    def __init__(self,
                 hop_length=None,
                 num_mels=128,
                 fft_length=2048,
                 norm='whiten',
                 stretch_param=[0.4, 0.4]):

        super(MelspectrogramStretch, self).__init__()

        self.prob = stretch_param[0]
        self.dist = Uniform(-stretch_param[1], stretch_param[1])
        self.norm = {
            'whiten': spec_whiten,
            'db': amplitude_to_db
        }.get(norm, None)

        self.stft = STFT(fft_length=fft_length, hop_length=fft_length // 4)
        self.pv = TimeStretch(hop_length=self.stft.hop_length,
                              num_freqs=fft_length // 2 + 1)
        self.cn = ComplexNorm(power=2.)

        fb = MelFilterbank(num_mels=num_mels, max_freq=1.0).get_filterbank()
        self.app_fb = ApplyFilterbank(fb)

        self.fft_length = fft_length
        self.hop_length = self.stft.hop_length
        self.num_mels = num_mels
        self.stretch_param = stretch_param

        self.counter = 0
Example #9
def _random_crop_size_gen(
        size: Tuple[int, int], scale: Tuple[float, float],
        ratio: Tuple[float, float]) -> Tuple[torch.Tensor, torch.Tensor]:
    area = Uniform(scale[0] * size[0] * size[1],
                   scale[1] * size[0] * size[1]).rsample((10, ))
    log_ratio = Uniform(math.log(ratio[0]), math.log(ratio[1])).rsample((10, ))
    aspect_ratio = torch.exp(log_ratio)

    w = torch.sqrt(area * aspect_ratio).int()
    h = torch.sqrt(area / aspect_ratio).int()
    # Element-wise w, h condition
    cond = ((0 < h) * (h < size[1]) * (0 < w) * (w < size[0])).int()
    if torch.sum(cond) > 0:
        return (h[torch.argmax(cond)], w[torch.argmax(cond)])

    # Fallback to center crop
    in_ratio = float(size[0]) / float(size[1])
    if (in_ratio < min(ratio)):
        w = torch.tensor(size[0])
        h = torch.round(w / min(ratio))
    elif (in_ratio > max(ratio)):
        h = torch.tensor(size[1])
        w = torch.round(h * max(ratio))
    else:  # whole image
        w = torch.tensor(size[0])
        h = torch.tensor(size[1])
    return (h, w)
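A quick call of the generator above with torchvision-style RandomResizedCrop defaults (this assumes _random_crop_size_gen is in scope):

# Crop size for a 224x224 input, covering 8%-100% of the area,
# with the aspect ratio drawn log-uniformly from [3/4, 4/3].
h, w = _random_crop_size_gen((224, 224), scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3))
print(int(h), int(w))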
Example #10
    def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
        angle = _range_bound(self.angle,
                             'angle',
                             center=0.0,
                             bounds=(-360, 360)).to(device=device, dtype=dtype)
        direction = _range_bound(self.direction,
                                 'direction',
                                 center=0.0,
                                 bounds=(-1, 1)).to(device=device, dtype=dtype)
        if isinstance(self.kernel_size, int):
            if not (self.kernel_size >= 3 and self.kernel_size % 2 == 1):
                raise AssertionError(
                    f"`kernel_size` must be odd and greater than 3. Got {self.kernel_size}."
                )
            self.ksize_sampler = Uniform(self.kernel_size // 2,
                                         self.kernel_size // 2,
                                         validate_args=False)
        elif isinstance(self.kernel_size, tuple):
            # kernel_size is fixed across the batch
            if len(self.kernel_size) != 2:
                raise AssertionError(
                    f"`kernel_size` must be (2,) if it is a tuple. Got {self.kernel_size}."
                )
            self.ksize_sampler = Uniform(self.kernel_size[0] // 2,
                                         self.kernel_size[1] // 2,
                                         validate_args=False)
        else:
            raise TypeError(f"Unsupported type: {type(self.kernel_size)}")

        self.angle_sampler = Uniform(angle[0], angle[1], validate_args=False)
        self.direction_sampler = Uniform(direction[0],
                                         direction[1],
                                         validate_args=False)
Example #11
    def test_efficiency_property(self):
        seq_features = torch.arange(3)  # [0, 1, 2]
        distribution = Uniform(0, 1)
        characteristic_fn_scores = {
            frozenset(subset): [distribution.sample().item()]
            for subset in powerset(seq_features.numpy())
        }
        characteristic_fn_scores[frozenset({})] = [0.0]

        def characteristic_fn(batch_seq_features):
            results = []
            for seq_features in batch_seq_features.numpy():
                results_key = frozenset(list(seq_features))
                results.append(characteristic_fn_scores[results_key])
            return torch.tensor(results, dtype=torch.float32)

        attributor = CharacteristicFunctionExampleShapleyAttributor(
            seq_features,
            characteristic_fn=characteristic_fn,
            iterations=1,
            subset_sampler=ExhaustiveSubsetSampler(),
            n_classes=1,
        )
        shapley_values, scores = attributor.run()
        assert_array_equal(
            shapley_values.sum(dim=0).numpy(),
            characteristic_fn_scores[frozenset(list(seq_features.numpy()))],
        )
Example #12
    def forward(self, x, train):
        x = random.choice(self.resamples)(x)

        x = self.stft(x)

        if train:
            dist = Uniform(1. - self.max_perc, 1 + self.max_perc)
            x = self.time_stretch(x, dist.sample().item())
            x = self.com_norm(x)
            x = self.fm(x, 0)
            x = self.tm(x, 0)
        else:
            x = self.com_norm(x)

        x = self.mel_specgram.mel_scale(x)
        x = self.AtoDB(x)

        size = torch.tensor(x.size())

        if size[3] > 157:
            x = x[:, :, :, 0:157]
        else:
            x = torch.cat([
                x,
                torch.cuda.FloatTensor(size[0], size[1], size[2],
                                       157 - size[3]).fill_(0)
            ],
                          dim=3)

        return x
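The crop-or-zero-pad to 157 frames at the end can also be written device-agnostically with F.pad instead of the torch.cuda.FloatTensor concat; this is only a sketch of an alternative, not the author's code:

import torch.nn.functional as F

def fix_num_frames(x, target=157):
    # x: (batch, channels, mel_bins, frames)
    frames = x.size(3)
    if frames >= target:
        return x[..., :target]
    return F.pad(x, (0, target - frames))  # zero-pad the last (time) dimension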
Example #13
    def get_data(self, n_exp, n_samp):
        d = Uniform(self.min, self.max)
        thetas = d.sample(torch.Size([n_exp, self.theta_dim]))

        X = self.forward(thetas, n_samp=n_samp)

        return thetas.reshape([-1, self.theta_dim]), X
Example #14
 def _gen_constant(self, data: Tensor) -> Tensor:
     if isinstance(self.fill_value, float):
         fill_value = self.fill_value
     else:
         uniform = Uniform(*self.fill_value)
         fill_value = uniform.sample()
     return torch.full_like(data, fill_value)
Example #15
def mh_step(x, log_energy, kernel_gen):
    """
    Given a vectorofstarting points it perform a step of the metropolis hastings algorithm
    to sample log_energy 
    
    x: tensor of shape (batch_dims, distr_dims) of real numbers representing initial samples
    log_energy : function that given a batch (batch_dims, distr_dims) of points 
                    returns a tensor of shape (batch_dims) ofthe log_energy of each point
    kernel_gen: function that taken a batch of points, returns a kernel distr centered at those points
                in form of a torch.distribution
    
    returns: a tensor of shape (batch_dims, distr_dims) of the new samples
    """

    s = torch.Size((1, ))
    x = x.double()
    ker = kernel_gen(x)
    x1 = ker.sample(s).squeeze(0)
    ker1 = kernel_gen(x1)

    log_p_x1_x = ker.log_prob(x1).double()
    log_p_x_x1 = ker1.log_prob(x).double()
    log_acceptance = log_energy(x1) - log_energy(x) + log_p_x_x1 - log_p_x1_x
    u = Uniform(
        torch.zeros(log_acceptance.shape).double(),
        torch.ones(log_acceptance.shape).double(),
    )

    acceptance_mask = u.sample(s).log().squeeze(0) <= log_acceptance

    x[acceptance_mask] = x1[acceptance_mask]

    return x
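A hypothetical usage sketch for mh_step: sampling a standard 2-D Gaussian with a symmetric Gaussian random-walk proposal. Independent makes log_prob return one value per chain, matching the (batch_dims) shape the docstring asks for:

import torch
from torch.distributions import Normal, Independent

def log_energy(points):                     # (batch, dims) -> (batch,)
    return (-0.5 * points.pow(2)).sum(dim=-1)

def kernel_gen(points):                     # symmetric random-walk proposal
    return Independent(Normal(points, 0.5), 1)

x = torch.zeros(1000, 2)                    # 1000 chains, 2 dimensions
for _ in range(500):
    x = mh_step(x, log_energy, kernel_gen)
print(x.mean(dim=0), x.var(dim=0))          # roughly [0, 0] and [1, 1]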
Example #16
    def _get_batch_random_prob(self, shape, same_on_batch=False):

        dist = Uniform(low=0., high=1.)
        if same_on_batch:
            return dist.rsample((1, )).repeat(shape[0])
        else:
            return dist.rsample((shape[0], ))
Example #17
 def entropy_uniform(self, state_info):
     u = Uniform(-0.999, +0.999)
     uniform_sample = u.sample((*state_info.size()[:-1], self._action_size))
     log_prob = self.log_prob(state_info, uniform_sample)
     log_prob -= torch.log(1 - uniform_sample.pow(2)).sum(-1)
     entropy = -log_prob
     return entropy
Example #18
    def __init__(self, latent_dimensions=32, rbm_block_size=16, smoother=None):
        super(VAE, self).__init__()

        self._encoderNodes = [
            (784, 128),
        ]

        self._reparamNodes = (128, latent_dimensions)

        self._decoderNodes = [
            (latent_dimensions, 128),
        ]

        self._outputNodes = (128, 784)

        self._encoderLayers = nn.ModuleList([])
        self._decoderLayers = nn.ModuleList([])
        self._reparamLayers = nn.ModuleDict({
            'mu':
            nn.Linear(self._reparamNodes[0], self._reparamNodes[1]),
            'var':
            nn.Linear(self._reparamNodes[0], self._reparamNodes[1])
        })
        self._outputLayer = nn.Linear(self._outputNodes[0],
                                      self._outputNodes[1])

        for node in self._encoderNodes:
            self._encoderLayers.append(nn.Linear(node[0], node[1]))
        for node in self._decoderNodes:
            self._decoderLayers.append(nn.Linear(node[0], node[1]))

        # activation functions per layer
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

        #smoothing function used to 'blur' discrete latent variables
        # self.smoother = smoother

        ###################################################################################
        ###################################### PRIOR ######################################
        ###################################################################################

        # The last set of parameters we need to worry about are those of the RBM prior.
        # Let's initialize these to some random values, and update them as we go along
        self.rbm_block_size = rbm_block_size

        self.rbm_weights = nn.Parameter(
            Normal(loc=0, scale=0.01).sample(
                (self.rbm_block_size, self.rbm_block_size)))

        # Should use the proportion of training vectors in which unit i is turned on
        # For now let's just set them randomly
        self.rbm_z1_bias = nn.Parameter(
            Uniform(low=0, high=1).sample((self.rbm_block_size, )))

        # Unless there is some sparsity, initialize these all to 0
        #self._hidden_bias = nn.Parameter(torch.zeros((self.n_hidden, )))
        self.rbm_z2_bias = nn.Parameter(
            Uniform(low=-0.1, high=0.1).sample((self.rbm_block_size, )))
Example #19
 def test_uniform_shape_tensor_params(self):
     uniform = Uniform(torch.Tensor([0, 0]), torch.Tensor([1, 1]))
     self.assertEqual(uniform._batch_shape, torch.Size((2,)))
     self.assertEqual(uniform._event_shape, torch.Size(()))
     self.assertEqual(uniform.sample().size(), torch.Size((2,)))
     self.assertEqual(uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))
     self.assertEqual(uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
     self.assertRaises(ValueError, uniform.log_prob, self.tensor_sample_2)
Example #20
 def _gen_random(self, data: Tensor) -> Tensor:
     if isinstance(self.fill_value, float):
         raise ValueError(
             'Invalid fill_value with random fill_mode. Please use a tuple of 2 floats for fill_value or use '
             'fill_mode="constant".')
     else:
         uniform = Uniform(*self.fill_value)
         return uniform.sample(data.shape)
Example #21
    def _build_pv(self):
        fft_size = self.n_fft//2 + 1
        self.phi_advance = nn.Parameter(torch.linspace(0, 
            math.pi * self.hop, 
            fft_size)[..., None], requires_grad=False)

        self.prob = self.stretch_param[0]
        self.dist = Uniform(-self.stretch_param[1], self.stretch_param[1])
Example #22
class EmbeddingLayer(nn.Module):

    def __init__(self, args, pre_trained_embed):
        super(EmbeddingLayer, self).__init__()
        self.word_dropout = args.word_dropout
        self.word_permute = args.word_permute
        self.uniform = Uniform(torch.tensor([0.0]), torch.tensor([1.0]))

        if pre_trained_embed is not None:
            weights = torch.FloatTensor(pre_trained_embed)
            self.embedding = nn.Embedding.from_pretrained(weights, freeze=False, padding_idx=PAD)
        else:
            self.embedding = nn.Embedding(args.vocab_size, args.embed_dim, padding_idx=PAD)

        if args.embed_dropout > 0:
            self.embed_drop = nn.Dropout(args.embed_dropout)
        else:
            self.embed_drop = None

    def _drop_words(self, inputs, inputs_len):
        mask = torch.zeros_like(inputs)
        if inputs.get_device() >= 0:
            mask = mask.cuda()

        for i, ll in enumerate(inputs_len):
            ll = int(ll.item())
            drop = self.uniform.sample((ll,)) < self.word_dropout
            mask[:ll, i] = torch.squeeze(drop, dim=-1)

        return torch.where(mask > 0, mask, inputs)

    def _rand_perm_with_constraint(self, inputs, inputs_len, k):
        """
        Randomly permutes words ensuring that words are no more than k positions
        away from their original position.
        """
        device = 'cuda' if inputs.get_device() >= 0 else None
        for i, l in enumerate(inputs_len):
            length = int(l.item())
            offset = torch.squeeze(self.uniform.sample((length,)), dim=-1) * (k + 1)
            if inputs.get_device() >= 0:
                offset = offset.cuda()
            new_pos = torch.arange(length, dtype=torch.float, device=device) + offset
            inputs[:length, i] = torch.take(inputs[:length, i], torch.argsort(new_pos))
        return inputs

    def forward(self, inputs, inputs_len):
        if self.word_dropout > 0 and self.training:
            inputs = self._drop_words(inputs, inputs_len)

        if self.word_permute > 0 and self.training:
            inputs = self._rand_perm_with_constraint(inputs, inputs_len, self.word_permute)

        embed = self.embedding(inputs)
        if self.embed_drop:
            embed = self.embed_drop(embed)

        return embed
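_rand_perm_with_constraint adds uniform noise in [0, k + 1) to each position index and re-sorts, which guarantees no token moves more than k positions from where it started. The core idea in isolation:

import torch

k, length = 3, 12
tokens = torch.arange(length)
noisy_pos = torch.arange(length, dtype=torch.float) + torch.rand(length) * (k + 1)
shuffled = tokens[torch.argsort(noisy_pos)]
print((shuffled - torch.arange(length)).abs().max().item())  # always <= k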
Example #23
 def __init__(self, loc, scale, validate_args=None):
     base_dist = Uniform(t.zeros(loc.shape),
                         t.ones(loc.shape),
                         validate_args=validate_args)
     if not base_dist.batch_shape:
         base_dist = base_dist.expand([1])
     super(Logistic, self).__init__(
         base_dist, [SigmoidTransform().inv,
                     AffineTransform(loc, scale)])
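Assuming the snippet above is the __init__ of a Logistic(TransformedDistribution) subclass (as the super() call suggests), usage would look like this: the inverse sigmoid of a uniform draw, followed by the affine map, yields a logistic variable with the given loc and scale.

import torch as t

loc, scale = t.zeros(3), t.ones(3)
dist = Logistic(loc, scale)
x = dist.sample((5,))   # shape (5, 3)
lp = dist.log_prob(x)   # logistic log-density, shape (5, 3)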
Example #24
def _adapted_uniform(shape, low, high):
    r"""The uniform sampling function that accepts 'same_on_batch'.
    """
    low = torch.as_tensor(low, device=low.device, dtype=low.dtype)
    high = torch.as_tensor(high, device=high.device, dtype=high.dtype)

    dist = Uniform(low, high)

    return dist.rsample(shape)
Example #25
 def test_uniform_shape_scalar_params(self):
     uniform = Uniform(0, 1)
     self.assertEqual(uniform._batch_shape, torch.Size())
     self.assertEqual(uniform._event_shape, torch.Size())
     self.assertEqual(uniform.sample().size(), torch.Size((1,)))
     self.assertEqual(uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2)))
     self.assertRaises(ValueError, uniform.log_prob, self.scalar_sample)
     self.assertEqual(uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
     self.assertEqual(uniform.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
Example #26
    def _adapted_uniform(self, shape, low, high):
        """
            The Uniform Dist function, used to choose parameters within a range 
        """
        low = torch.as_tensor(low, device=low.device, dtype=low.dtype)
        high = torch.as_tensor(high, device=high.device, dtype=high.dtype)

        dist = Uniform(low, high)
        return dist.rsample(shape)
Example #27
def _adapted_uniform(shape: Union[Tuple, torch.Size], low, high, same_on_batch=False):
    r""" The uniform function that accepts 'same_on_batch'.
    If same_on_batch is True, all values generated will be exactly same given a batch_size (shape[0]).
    By default, same_on_batch is set to False.
    """
    dist = Uniform(low, high)
    if same_on_batch:
        return dist.rsample((1, *shape[1:])).repeat(shape[0])
    else:
        return dist.rsample(shape)
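A quick check of the same_on_batch branch (assuming the helper above is in scope); note that with a 1-D shape and scalar bounds the single rsample((1,)) draw is simply repeated across the batch:

import torch

vals = _adapted_uniform((8,), torch.tensor(0.0), torch.tensor(1.0), same_on_batch=True)
assert bool((vals == vals[0]).all())  # every batch element gets the identical value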
Example #28
 def __init__(self, sample_rate, n_fft, top_db, max_perc):
     super().__init__()
     self.time_stretch = TimeStretch(hop_length=None, n_freq=n_fft // 2 + 1)
     self.stft = Spectrogram(n_fft=n_fft, power=None)
     self.com_norm = ComplexNorm(power=2.)
     self.mel_specgram = MelSpectrogram(sample_rate,
                                        n_fft=n_fft,
                                        f_max=8000)
     self.AtoDB = AmplitudeToDB(top_db=top_db)
     self.dist = Uniform(1. - max_perc, 1 + max_perc)
Example #29
    def forward(self, recon_x, low, high, x):
        MSE = F.mse_loss(recon_x, x, reduction='elementwise_mean')

        uniform = Uniform(torch.zeros_like(low), torch.ones_like(high))

        KLD = kl_divergence(uniform, Uniform(low, high)).mean() / low.numel()

        self.execute_hooks(kld_loss=KLD, mse_loss=MSE)

        return KLD + MSE
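The KL term uses PyTorch's analytic Uniform-vs-Uniform divergence, which here reduces to log(high - low) per element and is infinite whenever Uniform(low, high) does not cover [0, 1]. A standalone check:

import torch
from torch.distributions import Uniform
from torch.distributions.kl import kl_divergence

p = Uniform(torch.zeros(3), torch.ones(3))                  # U(0, 1)
q = Uniform(torch.full((3,), -1.0), torch.full((3,), 2.0))  # U(-1, 2), which covers [0, 1]
print(kl_divergence(p, q))  # log(3), about 1.0986, per element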
Example #30
File: fade.py  Project: Labbeti/MLU
    def process(self, x: Tensor) -> Tensor:
        if isinstance(self.factor, float):
            factor = self.factor
        else:
            uniform = Uniform(*self.factor)
            factor = uniform.sample()

        min_ = x.min()
        x = min_ + (x - min_) * factor
        return x