Пример #1
0
    def generate(self, z, t=1.):
        """
        Decode latent codes into a categorical distribution over the 155
        DX7 parameter slots.

        z - latent codes, FloatTensor(batch_size, latent_dim)
        t - softmax temperature; approaches deterministic as t -> 0 and
            uniform as t -> inf (requires t > 0)

        Returns a torch.distributions.Categorical with batch shape
        (batch_size, 155).
        """
        # Fully-connected attention mask and an all-ones feature placeholder.
        # new_ones replaces the old `z.new(...) * 0 + 1` idiom, which read
        # uninitialised memory: a NaN there survives `* 0 + 1` (NaN * 0 == NaN).
        A = z.new_ones(z.size(0), 155, 155).bool()
        X = z.new_ones(z.size(0), 155, self.n_features)

        # One conditioning vector per parameter slot, derived from z.
        c = self.z_to_c(z).view(z.shape[0], 155, -1)
        X_dec = self.decoder(X, A, c)
        # mask_parameters restricts logits to each parameter's valid range.
        X_hat = mask_parameters(self.logits(X_dec))
        X_hat = torch.distributions.Categorical(logits=X_hat / t)

        return X_hat
Пример #2
0
    def generate(self, X, X_a, sample=True, t=1.):
        """
        Sample a latent code from the approximate posterior over (X, X_a)
        and decode it to a temperature-scaled categorical distribution.

        NOTE(review): `sample` is currently unused — the posterior is always
        sampled; confirm intent with the caller.
        """
        posterior = self.features(X, X_a)
        z = posterior.sample()

        # Only the gamma half of the conditioning projection feeds the logits;
        # the beta half is unused here.
        c_gamma, _c_beta = self.z_to_c(z).chunk(2, -1)

        scaled_logits = mask_parameters(self.logits(c_gamma)) / t
        return torch.distributions.Categorical(logits=scaled_logits)
Пример #3
0
    def generate(self, X, X_a):
        """
        Encode a partially observed parameter tensor X (observation mask X_a)
        and return a categorical distribution over every parameter slot.
        """
        unobserved = ~X_a

        # Attention mask: presumably unobserved slots attend to observed
        # ones, plus a self-loop on each unobserved slot — TODO confirm the
        # row/column query convention against the encoder.
        A = unobserved.unsqueeze(-1) & X_a.unsqueeze(-2)
        eye = torch.eye(A.shape[-1]).bool().to(self.device)
        A = A | (eye & unobserved.unsqueeze(-2))

        # Zero out the embeddings of unobserved slots before encoding.
        hidden = self.embedder(X) * X_a.unsqueeze(-1).float()
        hidden = self.encoder(hidden, A)

        masked_logits = mask_parameters(self.logits(hidden))
        return torch.distributions.Categorical(logits=masked_logits)
Пример #4
0
    def generate_z(self, X, X_a, z, t=1.):
        """
        Decode parameters conditioned on both the observed values X (mask
        X_a) and an externally supplied latent code z.

        t - softmax temperature applied to the output logits (t > 0).

        Returns a torch.distributions.Categorical over the 155 slots.
        """
        unobserved = ~X_a

        # Attention mask: presumably unobserved slots attend to observed
        # ones, plus a self-loop on each unobserved slot — TODO confirm the
        # convention against the decoder.
        A = unobserved.unsqueeze(-1) & X_a.unsqueeze(-2)
        eye = torch.eye(A.shape[-1]).bool().to(self.device)
        A = A | (eye & unobserved.unsqueeze(-2))

        # Zero out embeddings of unobserved slots.
        embedded = self.embedder(X)
        embedded = embedded * X_a.unsqueeze(-1).float()

        # One conditioning vector per parameter slot, derived from z.
        c = self.z_to_c(z).view(z.shape[0], 155, -1)
        decoded = self.decoder(embedded, A, c)

        scaled_logits = mask_parameters(self.logits(decoded)) / t
        return torch.distributions.Categorical(logits=scaled_logits)
Пример #5
0
    def generate(self, z, t=1.):
        """
        Given a sample from the latent distribution, reprojects it back to
        data space.

        z - the array of dx7 voices, torch.FloatTensor(batch_size, latent_dim)
        t - the temperature of the output distribution; approaches
            deterministic as t -> 0 and uniform as t -> inf, requires t > 0

        Returns a torch.distributions.Categorical over the 155 parameter slots.
        """
        # Fully-connected attention mask and an all-ones feature placeholder.
        # new_ones replaces the old `z.new(...) * 0 + 1` idiom, which read
        # uninitialised memory: a NaN there survives `* 0 + 1` (NaN * 0 == NaN).
        A = z.new_ones(z.size(0), 155, 155).bool()
        X = z.new_ones(z.size(0), 155, self.n_features)

        # One conditioning vector per parameter slot, derived from z.
        c = self.z_to_c(z).view(z.shape[0], 155, -1)
        X_dec = self.decoder(X, A, c)
        # mask_parameters restricts logits to each parameter's valid range.
        X_hat = mask_parameters(self.logits(X_dec))
        X_hat = torch.distributions.Categorical(logits=X_hat / t)

        return X_hat
Пример #6
0
# plt.imshow(logits.log_prob(batch))

# %%

from itertools import count
from neuralDX7.utils import dx7_bulk_pack, mask_parameters
import mido

# NOTE(review): `loader`, `model`, and `torch` are defined earlier in the file
# (outside this chunk), and the loop body may continue past what is visible here.
iter_X = iter(loader)
for n in range(10):
    X = next(iter_X)['x']
    # syx = dx7_bulk_pack(X.numpy().tolist())
    # mido.write_syx_file('/home/nintorac/.local/share/DigitalSuburban/Dexed/Cartridges/neuralDX7/OG.syx', [syx])

    # Uniform categorical over 128 classes for each of the 155 DX7 parameter
    # slots, masked to each parameter's valid range by mask_parameters.
    X_d = torch.distributions.Categorical(
        logits=mask_parameters(torch.zeros(32, 155, 128)))

    # NOTE(review): this random ~30% mask is dead code — it is overwritten
    # unconditionally on the next line.
    X_a = torch.rand_like(X.float()) < 0.3
    # Observe only the last 10 parameter slots; all others will be generated.
    X_a = torch.ones_like(X).bool()
    X_a[:, :-10] = 0

    # Batch of 32 copies of the first voice; fill the unobserved slots with
    # random draws from the uniform prior distribution.
    X = X[[0] * 32]
    X[~X_a] = X_d.sample()[~X_a]

    # Largest number of unobserved slots in any row — unused below, since the
    # iterative per-slot sampling loop it was written for is commented out.
    max_to_sample = max((~X_a).sum(-1))

    # for i in tqdm(range(max_to_sample)):

    # One-shot completion: distribution over all slots, then a single sample.
    logits = model.generate(X, X_a)
    samples = logits.sample()