Example #1
0
    def encode(self, indices, lengths, noise):
        """Embed and encode a padded batch of token indices into one
        unit-norm code vector per sequence.

        Args:
            indices: LongTensor of token ids, batch-first layout
                (batch, max_len) — see the pack_padded_sequence call.
            lengths: true sequence lengths used for packing.
            noise: if truthy (and self.noise_radius > 0), Gaussian noise
                is added to the code — denoising-autoencoder style.

        Returns:
            hidden: (batch, nhidden) L2-normalised final encoder state.
        """
        embeddings = self.embedding(indices)
        packed_embeddings = pack_padded_sequence(input=embeddings,
                                                 lengths=lengths,
                                                 batch_first=True)

        # Encode
        packed_output, state = self.encoder(packed_embeddings)

        hidden, cell = state
        # batch_size x nhidden
        hidden = hidden[-1]  # get hidden state of last layer of encoder

        # normalize to unit ball (l2 norm of 1) - p=2, dim=1
        norms = torch.norm(hidden, 2, 1)
        
        # For older versions of PyTorch use:
        # (norms is 1-D here; on modern PyTorch expand_as on a 1-D tensor
        # fails — use the unsqueeze(1) variant below instead)
        hidden = torch.div(hidden, norms.expand_as(hidden))
        # For newest version of PyTorch (as of 8/25) use this:
        # hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))

        if noise and self.noise_radius > 0:
            # NOTE(review): torch.normal(means=...) is the pre-0.4 keyword
            # spelling; newer releases use mean= — confirm targeted version.
            gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
                                       std=self.noise_radius)
            hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))

        return hidden
Example #2
0
 def sample_n(self, n):
     """Draw n independent samples from N(self.mean, self.std).

     Scalar parameters yield shape (n, 1); tensor parameters of shape S
     yield (n, *S).
     NOTE(review): this is the legacy (pre-0.4) torch.distributions
     sample_n API; modern PyTorch uses sample(sample_shape) instead.
     """
     # cleanly expand float or Tensor or Variable parameters
     def expand(v):
         # scalars become an (n, 1) column; tensors gain a leading n axis
         if isinstance(v, Number):
             return torch.Tensor([v]).expand(n, 1)
         else:
             return v.expand(n, *v.size())
     return torch.normal(expand(self.mean), expand(self.std))
def main():
    """Train a small classifier on two synthetic Gaussian blobs and
    animate its predictions with matplotlib.

    Builds 200 points (100 per class), fits ClafNN with SGD and
    cross-entropy for 100 steps, re-plotting predictions every 10 steps.
    """
    # fake data: class 0 centred at (2, 2), class 1 at (-2, -2)
    n_data = torch.ones(100, 2)
    x0 = torch.normal(2 * n_data, 1)  # class0 x data (tensor), shape=(100, 2)
    y0 = torch.zeros(100)  # class0 labels, shape=(100,)
    x1 = torch.normal(-2 * n_data, 1)  # class1 x data (tensor), shape=(100, 2)
    y1 = torch.ones(100)  # class1 labels, shape=(100,)
    x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # shape (200, 2) FloatTensor = 32-bit floating
    y = torch.cat((y0, y1), ).type(torch.LongTensor)  # shape (200,) LongTensor = 64-bit integer
    x, y = Variable(x), Variable(y)
    print(y.size())

    net = ClafNN(2, 2)

    loss_func = torch.nn.CrossEntropyLoss()

    optm = torch.optim.SGD(net.parameters(), lr=0.01)

    plt.ion()  # interactive mode so the plot refreshes inside the loop

    for i in range(100):

        o = net(x)

        loss = loss_func(o, y)

        optm.zero_grad()
        loss.backward()
        optm.step()

        if i % 10 == 0:
            plt.cla()
            # dim made explicit: calling softmax without dim is deprecated
            prediction = torch.max(F.softmax(o, dim=1), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
            plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
            # derive the denominator from the data instead of hard-coding 200
            accuracy = (pred_y == target_y).sum() / float(len(target_y))
            plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
            plt.pause(0.1)
    plt.ioff()
    plt.show()
def load_data(opt):
    """Load SQuAD tensors, metadata and the pretrained embedding matrix.

    Reads 'SQuAD/meta.msgpack' for the embedding matrix, the msgpack file
    named by the module-level `args.data_file` for featurised examples,
    and the original train/dev CSVs for answer spans.  Mutates `opt` in
    place with vocab/embedding sizes.

    Returns:
        (train, dev, dev_y, embedding, opt): train/dev are lists of
        per-example tuples; dev_y is the parsed list of gold answers.
    """
    import ast  # local import: used for safe answer-list parsing below

    with open('SQuAD/meta.msgpack', 'rb') as f:
        # NOTE(review): `encoding=` was removed in msgpack>=1.0 — this
        # targets msgpack<1 (use raw=False on newer versions).
        meta = msgpack.load(f, encoding='utf8')
    embedding = torch.Tensor(meta['embedding'])
    opt['pretrained_words'] = True
    opt['vocab_size'] = embedding.size(0)
    opt['embedding_dim'] = embedding.size(1)
    if not opt['fix_embeddings']:
        # re-randomise row 1 (presumably <UNK> — confirm) when embeddings train
        # (`means=` is the pre-0.4 torch keyword; newer releases use `mean=`)
        embedding[1] = torch.normal(means=torch.zeros(opt['embedding_dim']), std=1.)
    with open(args.data_file, 'rb') as f:
        data = msgpack.load(f, encoding='utf8')
    train_orig = pd.read_csv('SQuAD/train.csv')
    dev_orig = pd.read_csv('SQuAD/dev.csv')
    train = list(zip(
        data['trn_context_ids'],
        data['trn_context_features'],
        data['trn_context_tags'],
        data['trn_context_ents'],
        data['trn_question_ids'],
        train_orig['answer_start_token'].tolist(),
        train_orig['answer_end_token'].tolist(),
        data['trn_context_text'],
        data['trn_context_spans']
    ))
    dev = list(zip(
        data['dev_context_ids'],
        data['dev_context_features'],
        data['dev_context_tags'],
        data['dev_context_ents'],
        data['dev_question_ids'],
        data['dev_context_text'],
        data['dev_context_spans']
    ))
    dev_y = dev_orig['answers'].tolist()[:len(dev)]
    # Security fix: the answers column holds Python list literals; parse
    # with ast.literal_eval instead of eval so a crafted CSV cannot
    # execute arbitrary code.
    dev_y = [ast.literal_eval(y) for y in dev_y]
    return train, dev, dev_y, embedding, opt
Example #5
0
 def forward(ctx, means, stddevs=None):
     """autograd.Function forward: draw Gaussian samples and mark them
     non-differentiable so autograd never backprops through the draw.

     NOTE(review): when stddevs is None it is forwarded to torch.normal
     unchanged, which only works on API versions where std has a
     default — confirm the targeted PyTorch release.
     """
     samples = torch.normal(means, stddevs)
     ctx.mark_non_differentiable(samples)
     return samples
Example #6
0
def get_action(mu, std):
    """Sample an action from N(mu, std) and return it as a NumPy array."""
    return torch.normal(mu, std).data.numpy()
Example #7
0
For more tutorials, visit my page: https://mofanpy.com/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou

Dependencies:
torch: 0.4
matplotlib
"""
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# make fake data: two Gaussian clusters of 100 points each
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)    # class 0 inputs, centred at (2, 2)
y0 = torch.zeros(100)               # class 0 labels
x1 = torch.normal(-2 * n_data, 1)   # class 1 inputs, centred at (-2, -2)
y1 = torch.ones(100)                # class 1 labels
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # (200, 2), 32-bit floats
y = torch.cat((y0, y1), 0).type(torch.LongTensor)   # (200,), 64-bit int labels

# Since PyTorch 0.4 autograd works on tensors directly, so the old
# Variable wrapping is no longer needed:
# x, y = Variable(x), Variable(y)

# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()
Example #8
0
 def sampling(self, mean, logvar):
     """Reparameterisation trick: return mean + std * eps, eps ~ N(0, 1).

     std is recovered from the log-variance; the noise tensor is moved
     to the GPU when self.use_GPU is set.
     """
     sigma = torch.exp(0.5 * logvar)
     eps = torch.normal(0, 1, size=mean.shape)
     if self.use_GPU:
         eps = eps.cuda()
     return mean + sigma * eps
Example #9
0
 def _test_non_randomness(self, device):
     """Check that noise is shared across a batch: two identical inputs
     must produce identical outputs from self.linear."""
     # Noises should be the same in a batch
     x0 = torch.normal(0, 1, size=(1, 6), dtype=torch.float32, device=device)
     x = x0.repeat(2, 1)  # duplicate the row -> batch of two equal inputs
     y = self.linear(x)
     # NOTE(review): torch.testing.assert_allclose is deprecated in newer
     # releases in favour of torch.testing.assert_close.
     torch.testing.assert_allclose(y[0], y[1], rtol=1e-4, atol=0)
Example #10
0
Dependencies:
torch: 0.1.11
matplotlib
"""
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

torch.manual_seed(1)  # reproducible

N_SAMPLES = 20
N_HIDDEN = 300

# training data: y = x plus Gaussian noise, both shaped (20, 1)
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
x, y = Variable(x), Variable(y)

# test data: same generating process as the training data
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1),
                                     torch.ones(N_SAMPLES, 1))
# volatile=True was the pre-0.4 way to disable autograd for inference;
# it was removed in modern PyTorch (use torch.no_grad() instead).
test_x, test_y = Variable(test_x, volatile=True), Variable(test_y,
                                                           volatile=True)

# show data
plt.scatter(x.data.numpy(),
            y.data.numpy(),
            c='magenta',
            s=50,
            alpha=0.5,
Example #11
0
    # calculate additional VAE loss
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + torch.log(latent_variance) - latent_mu.pow(2) -
                           latent_variance)
    # KLD = -0.5 * torch.sum(1 + latent_logvar - latent_mu.pow(2) - latent_logvar.exp())

    rewards = []
    rewards_raw = []
    log_probs = []
    entropies = []

    for k in range(params["SAMPLES"]):
        # sample K times
        # eps = torch.randn(latent_mu.size()).to(device)
        eps = torch.normal(mean=torch.zeros_like(latent_mu), std=.1).to(device)
        action = torch.sigmoid(latent_mu + latent_variance.sqrt() * eps)
        prob = normal(action, latent_mu, latent_variance)
        log_prob = (prob + 0.0000001).log()
        entropy = -0.5 * (
            (latent_variance + 2 * pi.expand_as(latent_variance)).log() + 1)

        log_probs.append(log_prob)
        entropies.append(entropy)

        # vertex_params = policy.decode(action).detach().view(-1).cpu().numpy()
        vertex_params = action.detach().view(-1).cpu().numpy()

        experiment.log_metric("vertices mean", np.mean(vertex_params))
        experiment.log_metric("vertices min", np.min(vertex_params))
        experiment.log_metric("vertices max", np.max(vertex_params))
Example #12
0
"""
import torch
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# hyper-parameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

# fake dataset: y = x^2 plus Gaussian noise, 1000 points
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
# NOTE(review): single-argument torch.normal (std defaulting to 1) is the
# pre-0.4 API; newer releases require an explicit std.
y = x.pow(2) + 0.1*torch.normal(torch.zeros(*x.size()))

# plot dataset
plt.scatter(x.numpy(), y.numpy())
plt.show()

# put dataset into torch dataset
# NOTE(review): the data_tensor=/target_tensor= keywords were removed in
# torch>=0.4 — TensorDataset now takes positional tensors.
torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2,)

# default network
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)   # hidden layer
Example #13
0
#%%
from d2l import torch as d2l
import torch
import torch.nn as nn

#%%
# Synthetic time series: sin(0.01 * t) plus N(0, 0.2) noise over T steps.
T = 1000
time = torch.arange(1, T + 1, dtype=torch.float32)
x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T, ))
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))
# %%
Example #14
0
    # calculate additional VAE loss
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + torch.log(latent_variance) - latent_mu.pow(2) -
                           latent_variance)
    # KLD = -0.5 * torch.sum(1 + latent_logvar - latent_mu.pow(2) - latent_logvar.exp())

    rewards = []
    rewards_raw = []
    log_probs = []
    entropies = []

    for k in range(params["SAMPLES"]):
        # sample K times
        # eps = torch.randn(latent_mu.size()).to(device)
        eps = torch.normal(mean=torch.zeros_like(latent_mu),
                           std=params["NOISE"]).to(device)
        action = latent_mu + latent_variance.sqrt() * eps
        prob = normal(action, latent_mu, latent_variance)
        log_prob = (prob + 0.0000001).log()
        entropy = -0.5 * (
            (latent_variance + 2 * pi.expand_as(latent_variance)).log() + 1)

        log_probs.append(log_prob)
        entropies.append(entropy)

        # vertex_params = policy.decode(action).detach().view(-1).cpu().numpy()
        vertex_params = action.detach().view(-1).cpu().numpy()

        if LOGGING:
            wandb.log({
                "vertices mean": np.mean(vertex_params),
    def __init__(self, input_size, output_size, structure_graph, cuda):
        """
        :param input_size:
        :type input_size int
        :param output_size:
        :type output_size int
        :param structure_graph: A graph object specifying the structure of your arbitrary designed graph.
        :type structure_graph igraph.Graph
        :param cuda: move per-layer variables to the GPU when True
        :type cuda bool
        """
        super(_SparseTorch, self).__init__()
        self._input_size = input_size
        self._output_size = output_size
        self._structure_graph = structure_graph

        layer_index, vertices_by_layer = build_layer_index(self._structure_graph)
        self._layer_index = layer_index
        self._vertices_by_layer = vertices_by_layer

        # Fully connected layer feeding the source vertices of the graph
        self._fully_input_to_sources = nn.Linear(self._input_size, len(vertices_by_layer[0]))

        # Contains variables for each layer in size of number of its vertices
        self._layers = {}
        # Contains variables for each vertex in size of the number of its incoming connections
        self._weights_per_vertice = {}
        vertices_with_highway_to_output = []
        for layer in vertices_by_layer:
            # Layer 0 is fed by the input layer above and owns no weights.
            # (Fixed: `layer is 0` -> `layer == 0`; identity comparison with
            # an int literal only "works" via CPython small-int interning.)
            if layer == 0:
                continue
            #self._layers[layer] = nn.Parameter(torch.zeros(len(vertices_by_layer[layer])))
            # Per-layer variables, N(0, 0.1)-initialised.
            # NOTE(review): these are Variables rather than nn.Parameters,
            # so they are NOT registered for optimisation — presumably
            # intentional (see commented alternative above); confirm.
            self._layers[layer] = Variable(torch.normal(
                mean=torch.zeros(len(vertices_by_layer[layer])),
                std=torch.ones(len(vertices_by_layer[layer])) * 0.1)).cuda() if cuda else Variable(torch.normal(
                mean=torch.zeros(len(vertices_by_layer[layer])),
                std=torch.ones(len(vertices_by_layer[layer])) * 0.1))
            for vertice in vertices_by_layer[layer]:
                incoming = self._structure_graph.es.select(_target=vertice)
                ordered_sources = sorted(edge.source for edge in incoming)
                incoming_size = len(ordered_sources)
                # One trainable weight per incoming edge, N(0, 0.1)-initialised
                self._weights_per_vertice[vertice] = nn.Parameter(torch.normal(
                    mean=torch.zeros(incoming_size),
                    std=torch.ones(incoming_size) * 0.1
                ))

                # Add to list of vertices with no outgoing edges
                successors = self._structure_graph.vs[vertice].successors()
                # (Fixed: `len(successors) is 0` -> `== 0`, same reason as above.)
                if len(successors) == 0:
                    # We have no outgoing connections, so this vertice should be connected to last FF layer
                    vertices_with_highway_to_output.append(vertice)

        # The output layer sees the last hidden layer plus all dead-end
        # ("highway") vertices collected above.
        last_hidden_layer_index = max(layer_index.values())
        vertices_to_fully_layer = [vertice for vertice in vertices_by_layer[last_hidden_layer_index]]
        vertices_to_fully_layer.extend(vertices_with_highway_to_output)
        self._vertices_to_fully_layer = sorted(vertices_to_fully_layer)
        last_layer_to_output_dim = [self._output_size, len(vertices_to_fully_layer)]
        self._weights_to_output_layer = nn.Parameter(torch.normal(
            mean=torch.zeros(last_layer_to_output_dim),
            std=torch.ones(last_layer_to_output_dim) * 0.1
        ))

        # Register parameters explicitly so optimisers can find them
        self._param_list = nn.ParameterList([param for param in itertools.chain(
            [self._weights_to_output_layer],
            self._weights_per_vertice.values()
        )])
        self._linear_dummy = nn.Linear(len(self._vertices_to_fully_layer), self._output_size)
def train(epoch, models):
    """Run one training epoch of every model in `models` over train_loader.

    Relies on module-level state: total_batches_seen, train_loader, the
    helper module `h`, `vargs`, `args` and `largest_domain`.  NaN
    gradients and NaN parameter values are patched in place with fresh
    Gaussian values instead of aborting the run.
    """
    global total_batches_seen

    for model in models:
        model.train()

    for batch_idx, (data, target) in enumerate(train_loader):
        total_batches_seen += 1
        # fractional epoch position, forwarded to getSpec/aiLoss as `time`
        time = float(total_batches_seen) / len(train_loader)
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for model in models:
            model.global_num += data.size()[0]

            timer = Timer(
                "train a sample from " + model.name + " with " + model.ty.name,
                data.size()[0], False)
            lossy = 0
            with timer:
                for s in model.getSpec(data.to_dtype(), target, time=time):
                    model.optimizer.zero_grad()
                    loss = model.aiLoss(*s, time=time, **vargs).mean(dim=0)
                    lossy += loss.detach().item()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals")
                        if p is not None and p.grad is not None and torch.isnan(
                                p.grad).any():
                            print("Such nan in postmagic")
                            # replace NaN gradient entries with small Gaussian noise
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.grad = torch.where(
                                torch.isnan(p.grad),
                                torch.normal(mean=h.zeros(p.grad.shape),
                                             std=stdv), p.grad)

                    model.optimizer.step()

                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals after grad")
                            # re-randomise NaN parameter entries after the step
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.data = torch.where(
                                torch.isnan(p.data),
                                torch.normal(mean=h.zeros(p.data.shape),
                                             std=stdv), p.data)

                    if args.clip_norm:
                        model.clip_norm()
                    # NaNs surviving the clip are unrecoverable — abort
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            raise Exception("Such nan in vals after clip")

            model.addSpeed(timer.getUnitTime())

            if batch_idx % args.log_interval == 0:
                print((
                    'Train Epoch {:12} {:' + str(largest_domain) +
                    '}: {:3} [{:7}/{} ({:.0f}%)] \tAvg sec/ex {:1.8f}\tLoss: {:.6f}'
                ).format(model.name, model.ty.name, epoch,
                         batch_idx * len(data), len(train_loader.dataset),
                         100. * batch_idx / len(train_loader), model.speed,
                         lossy))
Example #17
0
def normal(*args, **kwargs):
    """torch.normal wrapper that moves the sample to the module-level `device`."""
    sample = torch.normal(*args, **kwargs)
    return sample.to(device)
Example #18
0
 def get_action(self, state):
     """Sample a tanh-squashed action from the policy's Gaussian head."""
     features = self.net_state(state)
     mean = self.net_a_avg(features)  # NOTICE! it is a_avg without .tanh()
     std = self.net_a_std(features).clamp(-20, 2).exp()  # todo
     return torch.normal(mean, std).tanh()  # re-parameterize
Example #19
0
 def generate_action_space_noise(self, action_batch):
     """Gaussian exploration noise shaped like action_batch, clamped to
     [-noise_bound, noise_bound] and moved to self.device."""
     zero_mean = torch.zeros(action_batch.size())
     noise = torch.normal(zero_mean, self.noise_std)
     noise = noise.clamp(-self.noise_bound, self.noise_bound)
     return noise.to(self.device)
Example #20
0
 def sample(self, sample_shape=torch.Size()):
     """Draw a sample of the requested shape from N(self.loc, self.scale)
     (torch.distributions-style sample())."""
     shape = self._extended_shape(sample_shape)
     # no_grad: plain sampling is never differentiated (rsample is for that)
     with torch.no_grad():
         return torch.normal(self.loc.expand(shape), self.scale.expand(shape))
def sample_2d_data(dataset, n_samples):
    """Sample n_samples points from a named synthetic 2-D toy distribution.

    Args:
        dataset: one of '8gaussians', 'sine', 'moons', 'trimodal',
            'trimodal2', 'smile', '2spirals', 'checkerboard', 'rings'.
        n_samples: number of points to draw.

    Returns:
        Tensor of shape (n_samples, 2).

    Raises:
        RuntimeError: if `dataset` is not one of the names above.
    """

    # base standard-normal noise, reused by several mixture-style branches
    z = torch.randn(n_samples, 2)

    if dataset == '8gaussians':
        # mixture of 8 Gaussians placed on a circle of radius `scale`
        scale = 4
        sq2 = 1/math.sqrt(2)
        centers = [(1,0), (-1,0), (0,1), (0,-1), (sq2,sq2), (-sq2,sq2), (sq2,-sq2), (-sq2,-sq2)]
        centers = torch.tensor([(scale * x, scale * y) for x,y in centers])
        return sq2 * (0.5 * z + centers[torch.randint(len(centers), size=(n_samples,))])

    elif dataset == 'sine':
        # points scattered around y = sin(3x) for x in [-2, 2]
        xs = torch.rand((n_samples, 1)) * 4 - 2
        ys = torch.randn(n_samples, 1) * 0.25

        return torch.cat((xs, torch.sin(3 * xs) + ys), dim=1)

    elif dataset == 'moons':
        from sklearn.datasets import make_moons
        data = make_moons(n_samples=n_samples, shuffle=True, noise=0.05)[0]
        data = torch.tensor(data)
        return data

    elif dataset == 'trimodal':
        # three Gaussians: a wide one at the origin, two tight ones off-axis
        centers = torch.tensor([(0, 0), (5, 5), (5, -5)])
        stds = torch.tensor([1., 0.5, 0.5]).unsqueeze(-1)
        seq = torch.randint(len(centers), size=(n_samples,))
        return stds[seq] * z + centers[seq]

    elif dataset == 'trimodal2':
        # like 'trimodal' but all three components equally tight
        centers = torch.tensor([(0, 0), (5, 5), (5, -5)])
        stds = torch.tensor([0.5, 0.5, 0.5]).unsqueeze(-1)
        seq = torch.randint(len(centers), size=(n_samples,))
        return stds[seq] * z + centers[seq]

    elif dataset == 'smile':
        scale = 4
        sq2 = 1 / math.sqrt(2)

        # SMILE

        # first three centers: mouth corners and nose; the rest trace the brow arc
        centers = []
        centers.append((scale * 0.5, -scale * 0.8660254037844387))
        centers.append((-scale * 0.5, -scale * 0.8660254037844387))

        centers.append((scale * 0, -scale * 0))

        centers.append((scale * 0, scale * 1))
        centers.append((scale * sq2, scale * sq2))
        centers.append((scale * -sq2, scale * sq2))
        centers.append((scale * 0.5, scale * math.sqrt(3)/2))
        centers.append((scale * 0.25881904510252074, scale * 0.9659258262890683))
        centers.append((-scale * 0.5, scale * math.sqrt(3)/2))
        centers.append((-scale * 0.25881904510252074, scale * 0.9659258262890683))
        centers = torch.tensor(centers)

        # half the mass on the first three components, half on the arc
        weights = torch.tensor([0.5/3, 0.5/3, 0.5/3, 0.5/7, 0.5/7, 0.5/7, 0.5/7, 0.5/7, 0.5/7, 0.5/7])

        stds = torch.tensor([0.5] * len(centers)).unsqueeze(-1)

        from torch.distributions import Categorical
        seq = Categorical(probs=weights).sample((n_samples,))

        return stds[seq] * z + centers[seq]

    elif dataset == '2spirals':
        # one spiral and its point reflection, jittered by 0.1 * z
        n = torch.sqrt(torch.rand(n_samples // 2)) * 540 * (2 * math.pi) / 360
        d1x = - torch.cos(n) * n + torch.rand(n_samples // 2) * 0.5
        d1y = torch.sin(n) * n + torch.rand(n_samples // 2) * 0.5
        x = torch.cat([torch.stack([ d1x,  d1y], dim=1),
                       torch.stack([-d1x, -d1y], dim=1)], dim=0) / 3
        return x + 0.1*z

    elif dataset == 'checkerboard':
        x1 = torch.rand(n_samples) * 4 - 2
        x2_ = torch.rand(n_samples) - torch.randint(0, 2, (n_samples,), dtype=torch.float) * 2
        x2 = x2_ + x1.floor() % 2
        return torch.stack([x1, x2], dim=1) * 2

    elif dataset == 'rings':
        n_samples4 = n_samples3 = n_samples2 = n_samples // 4
        n_samples1 = n_samples - n_samples4 - n_samples3 - n_samples2

        # so as not to have the first point = last point, set endpoint=False in np; here shifted by one
        linspace4 = torch.linspace(0, 2 * math.pi, n_samples4 + 1)[:-1]
        linspace3 = torch.linspace(0, 2 * math.pi, n_samples3 + 1)[:-1]
        linspace2 = torch.linspace(0, 2 * math.pi, n_samples2 + 1)[:-1]
        linspace1 = torch.linspace(0, 2 * math.pi, n_samples1 + 1)[:-1]

        circ4_x = torch.cos(linspace4)
        circ4_y = torch.sin(linspace4)
        # fixed: circ3_x used linspace4 — harmless only because
        # n_samples3 == n_samples4 makes the two linspaces identical
        circ3_x = torch.cos(linspace3) * 0.75
        circ3_y = torch.sin(linspace3) * 0.75
        circ2_x = torch.cos(linspace2) * 0.5
        circ2_y = torch.sin(linspace2) * 0.5
        circ1_x = torch.cos(linspace1) * 0.25
        circ1_y = torch.sin(linspace1) * 0.25

        x = torch.stack([torch.cat([circ4_x, circ3_x, circ2_x, circ1_x]),
                         torch.cat([circ4_y, circ3_y, circ2_y, circ1_y])], dim=1) * 3.0

        # random sample
        x = x[torch.randint(0, n_samples, size=(n_samples,))]

        # Add noise
        return x + torch.normal(mean=torch.zeros_like(x), std=0.08*torch.ones_like(x))

    else:
        raise RuntimeError('Invalid `dataset` to sample from.')
# coding: utf-8

import matplotlib.pyplot as plt

import torch
import torch.nn.functional as F  # 激励函数都在这
from torch.autograd import Variable

# fake data
n_data = torch.ones(100, 2)  # base shape (100, 2), every value is 1
x0 = torch.normal(2 * n_data, 1)  # class 0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)  # class 0 y data (tensor), shape=(100,)
x1 = torch.normal(-2 * n_data, 1)  # class 1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)  # class 1 y data (tensor), shape=(100,)

# NOTE: x and y must be assembled exactly as below (torch.cat concatenates;
# the default dim 0 appends rows)
x = torch.cat(
    (x0, x1), 0).type(torch.FloatTensor)  # FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor)  # LongTensor = 64-bit integer

# (pre-0.4) torch could only train on Variables, so wrap the tensors
x, y = Variable(x), Variable(y)

# NOTE(review): under Python 3 these print zip objects, not the pairs —
# looks like Python 2 legacy; confirm intent.
print(zip(x0, y0))
print('00', 20)
print(zip(x1, y1))

# # scatter plot
# # s is the marker size
# # c is the colour, useful to distinguish the two groups
# # lw is the marker edge line width (note: the default edge colour is 'face')
Example #23
0
def train_network(args, device):
    """Train a normalizing-flow network on a molecular ensemble.

    Loads an ensemble trajectory and a validation set, trains `net` with
    a weighted example (forward) + energy (inverse) loss via
    training.ParticleFilter, logs metrics, then saves the model, the
    reservoir ensemble, and freshly generated samples.

    NOTE(review): relies on project helpers not visible here
    (setup_writer, load_trajectory, load_network, setup_optimizer,
    setup_scheduler, get_openmm_context, get_energy_evaluator, training).
    """
    writer = setup_writer(args)

    dcd_path = f"ensembles/{args.load}.dcd"
    traj = load_trajectory(args.pdb_path, dcd_path, align=False)
    # drop periodic-box info so saved trajectories carry no unit cell
    traj.unitcell_lengths = None
    traj.unitcell_angles = None

    # flatten coordinates: one row of 3 * n_atoms features per frame
    n_dim = traj.xyz.shape[1] * 3
    ensemble = traj.xyz.reshape(-1, n_dim)
    ensemble = torch.from_numpy(ensemble.astype("float32"))
    print(f"Ensemble has size {ensemble.shape[0]} x {ensemble.shape[1]}.\n")

    validation_dcd_path = f"validation/{args.validation}.dcd"
    valid_traj = load_trajectory(args.pdb_path,
                                 validation_dcd_path,
                                 align=False)
    n_valid_dim = valid_traj.xyz.shape[1] * 3
    validation_data = valid_traj.xyz.reshape(-1, n_valid_dim)
    validation_data = torch.from_numpy(
        validation_data.astype("float32")).to(device)
    validation_indices = np.arange(validation_data.shape[0])
    print(
        f"Validation has size {validation_data.shape[0]} x {validation_data.shape[1]}.\n"
    )

    net = load_network(f"models/{args.load}.pkl", device=device)

    # standard-normal latent prior; the -6 drops six degrees of freedom
    # (presumably rigid-body translation + rotation — confirm)
    mu = torch.zeros(validation_data.shape[-1] - 6, device=device)
    cov = torch.eye(validation_data.shape[-1] - 6, device=device)
    latent_distribution = distributions.MultivariateNormal(
        mu, covariance_matrix=cov).expand((args.batch_size, ))

    optimizer = setup_optimizer(
        net=net,
        init_lr=args.init_lr / args.warmup_factor,
        weight_decay=args.weight_decay,
    )
    scheduler = setup_scheduler(
        optimizer,
        init_lr=args.init_lr,
        final_lr=args.final_lr,
        epochs=args.epochs,
        warmup_epochs=args.warmup_epochs,
        warmup_factor=args.warmup_factor,
    )

    openmm_context = get_openmm_context(args.pdb_path)
    energy_evaluator = get_energy_evaluator(
        openmm_context=openmm_context,
        temperature=args.temperature,
        energy_high=args.energy_high,
        energy_max=args.energy_max,
        device=device,
    )

    trainer = training.ParticleFilter(
        net=net,
        device=device,
        data=ensemble,
        batch_size=args.batch_size,
        energy_evaluator=energy_evaluator,
        step_size=args.step_size,
        mc_target_low=args.mc_target_low,
        mc_target_high=args.mc_target_high,
    )

    with tqdm(range(args.epochs)) as progress:
        for epoch in progress:
            net.train()

            trainer.sample_and_compute_losses()

            # weighted sum of the example (ML) and energy-based terms
            loss = (trainer.forward_loss * args.example_weight +
                    trainer.inverse_loss * args.energy_weight)

            optimizer.zero_grad()
            loss.backward()
            gradient_norm = clip_grad_norm_(net.parameters(),
                                            args.max_gradient)
            optimizer.step()
            scheduler.step(epoch)

            if epoch % args.log_freq == 0:
                net.eval()

                # Compute our validation loss
                with torch.no_grad():
                    sample_inidices = torch.from_numpy(
                        np.random.choice(validation_indices,
                                         args.batch_size,
                                         replace=True))
                    valid_sample = validation_data[sample_inidices, :]
                    z_valid, valid_jac = net.forward(valid_sample)
                    valid_ml = -torch.mean(
                        latent_distribution.log_prob(z_valid))
                    valid_jac = -torch.mean(valid_jac)
                    valid_loss = valid_ml + valid_jac
                    writer.add_scalar("val_example_ml_loss", valid_ml.item(),
                                      epoch)
                    writer.add_scalar("val_example_jac_loss", valid_jac.item(),
                                      epoch)
                    writer.add_scalar("val_example_total_loss",
                                      valid_loss.item(), epoch)

                # Output our training losses
                writer.add_scalar("acceptance_rate",
                                  trainer.acceptance_probs[-1], epoch)
                writer.add_scalar("step_size", trainer.step_size, epoch)
                writer.add_scalar("total_loss", loss.item(), epoch)
                writer.add_scalar("gradient_norm", gradient_norm, epoch)
                writer.add_scalar("example_ml_loss", trainer.forward_ml, epoch)
                writer.add_scalar("example_jac_loss", trainer.forward_jac,
                                  epoch)
                writer.add_scalar("example_total_loss", trainer.forward_loss,
                                  epoch)
                writer.add_scalar(
                    "weighted_example_total_loss",
                    trainer.forward_loss * args.example_weight,
                    epoch,
                )
                writer.add_scalar("energy_kl_loss", trainer.inverse_kl, epoch)
                writer.add_scalar("energy_jac_loss", trainer.inverse_jac,
                                  epoch)
                writer.add_scalar("energy_total_loss", trainer.inverse_loss,
                                  epoch)
                writer.add_scalar(
                    "weighted_energy_total_loss",
                    trainer.inverse_loss * args.energy_weight,
                    epoch,
                )
                writer.add_scalar("minimum_energy", trainer.min_energy.item(),
                                  epoch)
                writer.add_scalar("median_energy",
                                  trainer.median_energy.item(), epoch)
                writer.add_scalar("mean_energy", trainer.mean_energy.item(),
                                  epoch)

                progress.set_postfix(loss=f"{loss.item():8.3f}")

    # Save our final model
    torch.save(net, f"models/{args.save}.pkl")

    # Save our reservoir
    x = trainer.reservoir.cpu().detach().numpy()
    x = x.reshape(trainer.res_size, -1, 3)
    traj.xyz = x
    traj.save(f"ensembles/{args.save}.dcd")

    # Generate examples and write trajectory
    net.eval()
    z = torch.normal(0, 1, size=(args.batch_size, n_dim - 6), device=device)
    x, _ = net.inverse(z)
    x = x.cpu().detach().numpy()
    x = x.reshape(args.batch_size, -1, 3)
    traj.xyz = x
    traj.save(f"gen_samples/{args.save}.dcd")
def select_action(state):
    """Sample an action from the Gaussian policy for one numpy state."""
    obs = torch.from_numpy(state).unsqueeze(0)  # add a batch dimension
    action_mean, _, action_std = policy_net(Variable(obs))
    return torch.normal(action_mean, action_std)
Example #25
0
def train_loop(cfg, model, optimizer, criterion, train_loader, epoch):
    """Run one training epoch of burst denoising and return the mean loss.

    Args:
        cfg: experiment config; fields read here: N, device, blind,
            input_with_middle_frame, input_noise, input_noise_level,
            color_cat, input_N, log_interval, epochs.
        model: denoising network mapping a stacked burst to one image.
        optimizer: optimizer over `model`'s parameters.
        criterion: unused; kept for interface compatibility with callers.
        train_loader: yields (burst_imgs, res_imgs, raw_img) batches.
        epoch: current epoch index (used for logging only).

    Returns:
        float: average MSE loss over all batches of the epoch.
    """
    model.train()
    model = model.to(cfg.device)
    total_loss = 0
    running_loss = 0
    szm = ScaleZeroMean()

    for batch_idx, (burst_imgs, res_imgs, raw_img) in enumerate(train_loader):

        optimizer.zero_grad()
        model.zero_grad()

        # -- choose which frames feed the model --
        # When the middle frame is excluded ("blind-spot" style input) it is
        # dropped from the input order; otherwise all N frames are used.
        input_order = np.arange(cfg.N)
        middle_img_idx = -1
        if not cfg.input_with_middle_frame:
            middle = len(input_order) // 2
            middle_img_idx = input_order[middle]
            input_order = np.r_[input_order[:middle], input_order[middle + 1:]]
        else:
            middle = len(input_order) // 2
            middle_img_idx = input_order[middle]
            input_order = np.arange(cfg.N)

        # -- optionally corrupt the middle frame with extra Gaussian noise --
        # The noise std itself is drawn uniformly in [0, input_noise_level).
        burst_imgs = burst_imgs.cuda(non_blocking=True)
        burst_imgs_noisy = burst_imgs.clone()
        if cfg.input_noise:
            noise = np.random.rand() * cfg.input_noise_level
            burst_imgs_noisy[middle_img_idx] = torch.normal(
                burst_imgs_noisy[middle_img_idx], noise)

        # -- assemble the network input --
        # color_cat: concatenate frames along the channel dim;
        # otherwise stack them along a new dim.
        if cfg.color_cat:
            stacked_burst = torch.cat(
                [burst_imgs_noisy[input_order[x]] for x in range(cfg.input_N)],
                dim=1)
        else:
            stacked_burst = torch.stack(
                [burst_imgs_noisy[input_order[x]] for x in range(cfg.input_N)],
                dim=1)

        # -- extract target image --
        # blind: target is the (noisy) middle frame; otherwise the
        # zero-mean-scaled clean frame.
        if cfg.blind:
            t_img = burst_imgs[middle_img_idx]
        else:
            t_img = szm(raw_img.cuda(non_blocking=True))

        # -- denoise, compute loss, backprop --
        rec_img = model(stacked_burst)
        loss = F.mse_loss(t_img, rec_img)

        running_loss += loss.item()
        total_loss += loss.item()

        loss.backward()
        optimizer.step()

        if (batch_idx % cfg.log_interval) == 0 and batch_idx > 0:
            running_loss /= cfg.log_interval
            print("[%d/%d][%d/%d]: %2.3e " % (epoch, cfg.epochs, batch_idx,
                                              len(train_loader), running_loss))
            running_loss = 0
    total_loss /= len(train_loader)
    return total_loss
Example #26
0
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.autograd import Variable  # NOTE(review): legacy pre-0.4 API; plain tensors suffice on modern torch
import matplotlib.pyplot as plt

# hyper parameters
LR = 0.01          # learning rate
BATCH_SIZE = 20    # mini-batch size for the DataLoader
EPOCH = 20         # number of passes over the dataset

# test data: 1000 evenly spaced points in [-1, 1], shape (1000, 1)
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
# noisy parabola: y = x^2 + 0.1 * standard-normal noise
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))

# plt.scatter(x.numpy(), y.numpy())
# plt.show()

# wrap the tensors in a Dataset/DataLoader for shuffled mini-batching
torch_dataset = torch.utils.data.TensorDataset(x, y)
data_loader = torch.utils.data.DataLoader(dataset=torch_dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = nn.Linear(1, 10)
        self.output = nn.Linear(10, 1)

    def forward(self, x):
def sample_from_gauss_prior(n_samples: int, latent_space_dim: int) -> Tensor:
    """Draw samples from the standard-Gaussian prior over the latent space.

    Returns a tensor of shape (n_samples, latent_space_dim) whose entries
    are sampled i.i.d. from N(0, 1).
    """
    unit_std = torch.ones((n_samples, latent_space_dim))
    return torch.normal(mean=0, std=unit_std)
def get_action(mu, std):
    """Sample an action from Normal(mu, std) and return it as a numpy array.

    Args:
        mu: mean tensor of the action distribution.
        std: standard-deviation tensor broadcastable against `mu`.

    Returns:
        numpy.ndarray with the shape of the sampled action.
    """
    action = torch.normal(mu, std)
    # .detach() (rather than the legacy .data attribute) safely drops any
    # autograd graph before the numpy conversion.
    return action.detach().numpy()
Example #29
0
 def get__noise_action(self, s):
     """Sample a tanh-squashed action from the policy's Gaussian head."""
     feat = self.net__s(s)
     a_avg = self.net__a(feat)  # action mean
     a_log_std = self.net__d(feat).clamp(-16, 2)  # log std, clamped for numerical stability
     return torch.normal(a_avg, a_log_std.exp()).tanh()
Example #30
0
 def sample_action(self, s):
     """Draw a stochastic action ~ Normal(forward(s), exp(log_std))."""
     mean = self.forward(s)
     std = T.exp(self.log_std)
     return T.normal(mean, std)
Example #31
0
    def __init__(self, seq_length, input_dim, hidden_dim, num_classes,
                 batch_size, device):
        """Build a hand-rolled LSTM: per-gate weight/bias parameters plus an
        output projection, all initialized with N(0, 0.0001) noise.
        """
        super(LSTM, self).__init__()

        self.seq_length = seq_length
        self.batch_size = batch_size
        self.device = device

        def _w(shape):
            # small-noise Gaussian initialization, one draw per parameter
            return nn.Parameter(torch.normal(0, 0.0001, size=shape))

        x_shape = (hidden_dim, input_dim)   # input-to-hidden weights
        h_shape = (hidden_dim, hidden_dim)  # hidden-to-hidden weights
        b_shape = (hidden_dim, 1)           # gate biases

        # cell-candidate gate g
        self.gx, self.gh, self.gb = _w(x_shape), _w(h_shape), _w(b_shape)
        # input gate i
        self.ix, self.ih, self.ib = _w(x_shape), _w(h_shape), _w(b_shape)
        # forget gate f
        self.fx, self.fh, self.fb = _w(x_shape), _w(h_shape), _w(b_shape)
        # output gate o
        self.ox, self.oh, self.ob = _w(x_shape), _w(h_shape), _w(b_shape)

        # output projection (stored transposed) and its bias
        self.ph = nn.Parameter(
            torch.normal(0, 0.0001, size=(num_classes, hidden_dim)).T)
        self.pb = _w((num_classes, 1))

        self.emb = nn.Embedding(num_classes, 1)
        self.sig = nn.Sigmoid()
        self.tan = nn.Tanh()
        self.soft = nn.Softmax(dim=1)
Example #32
0
from __future__ import print_function
import torch
from torch.autograd import Variable  # NOTE(review): Variable is a legacy pre-0.4 wrapper

#mean = torch.FloatTensor(0)
#variance_square = torch.FloatTensor(1)

# 1-element mean/std tensors: each call prints one standard-normal sample
for i in range(5):
    print(torch.normal(torch.tensor([0.0]), torch.tensor([1.0])), " ")
print("")

# mean/std wrapped in Variables with requires_grad=True
mu = Variable(torch.Tensor([1]), requires_grad=True)
sigma = Variable(torch.Tensor([1]), requires_grad=True)
for i in range(5):
    print(torch.normal(mu, sigma))
print("")

# element-wise sampling: one draw per element of the (3,) mean/std tensors.
# NOTE(review): torch.normal's second argument is the STANDARD DEVIATION,
# so the name `variance_square` is misleading (it is std, not variance).
mean = torch.zeros(3)
variance_square = torch.ones(3)
print("mean: ",mean)
print("variance_square: ", variance_square)
for i in range(5):
    print(torch.normal(mean, variance_square), " ")

# same element-wise sampling with 3x2 tensors
mean = torch.zeros(3,2)
variance_square = torch.ones(3,2)
print("mean(3*2): ",mean)
print("variance_square(3*2): ", variance_square)
print("normal distribution(3*2): ")
for i in range(5):
    print(torch.normal(mean, variance_square), " ")
Example #33
0
    def _apply_noise(tensor, mu=0, sigma=0.0001):
        """Return `tensor` plus element-wise Gaussian noise N(mu, sigma)."""
        mean = torch.ones_like(tensor) * mu
        std = torch.ones_like(tensor) * sigma
        return tensor + torch.normal(mean=mean, std=std)
Example #34
0
 def sample(self):
     """Draw one sample from Normal(self.mean, self.std)."""
     loc, scale = self.mean, self.std
     return torch.normal(loc, scale)
Example #35
0
Dependencies:
torch: 0.1.11
matplotlib
"""
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

N_SAMPLES = 20   # number of training and test points
N_HIDDEN = 300   # hidden units (deliberately large to invite overfitting)

# training data: y = x plus 0.3 * standard-normal noise, shape (20, 1)
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3*torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
x, y = Variable(x), Variable(y)

# test data: same generating process, independent noise
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3*torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# NOTE(review): volatile=True is the pre-0.4 inference flag; it was removed
# in modern torch (use torch.no_grad() instead)
test_x, test_y = Variable(test_x, volatile=True), Variable(test_y, volatile=True)

# show data
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=50, alpha=0.5, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()

net_overfitting = torch.nn.Sequential(
 def _get_std_parameter(self, action_dim: int):
     """Create the learnable flattened-Cholesky std parameter.

     A lower-triangular Cholesky factor of an action_dim x action_dim
     covariance has action_dim * (action_dim + 1) / 2 free entries; they
     are initialized with small Gaussian noise N(0, 0.01).
     """
     n_free = action_dim * (action_dim + 1) // 2
     init = ch.normal(0, 0.01, (n_free,))
     return nn.Parameter(init)
    def train(steps, base_steps=0):
        """Run the style-transfer training loop for `steps` batches.

        Minimizes content + style + total-variation + noise-stability losses
        over `transformer`, logging aggregates and saving a debug image every
        LOG_INTERVAL batches. `base_steps` only offsets the debug-image
        filenames. Returns when `steps` batches have been processed.
        """
        transformer.train()
        count = 0
        agg_content_loss = 0.
        agg_style_loss = 0.
        agg_reg_loss = 0.
        agg_stable_loss = 0.
        while True:
            for x, _ in train_loader:
                count += 1
                optimizer.zero_grad()
                x = x.to(device)
                y = transformer(x)

                # Sparse (Bernoulli-masked), clamped Gaussian perturbation of
                # the input for the stability term.
                with torch.no_grad():
                    mask = torch.bernoulli(
                        torch.ones_like(x, device=device, dtype=torch.float) *
                        NOISE_P)
                    noise = torch.normal(
                        torch.zeros_like(x),
                        torch.ones_like(x, device=device, dtype=torch.float) *
                        NOISE_STD).clamp(-1, 1)
                y_noise = transformer(x + noise * mask)

                with torch.no_grad():
                    xc = x.detach()
                    features_xc = loss_network(xc)

                features_y = loss_network(y)

                with torch.no_grad():
                    f_xc_c = features_xc[2].detach()

                content_loss = CONTENT_WEIGHT * mse_loss(features_y[2], f_xc_c)

                # Total-variation regularizer on the output image.
                reg_loss = REGULARIZATION * (
                    torch.sum(torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:])) +
                    torch.sum(torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :])))

                style_loss = 0.
                for l, weight in enumerate(STYLE_WEIGHTS):
                    gram_s = gram_style[l]
                    gram_y = gram_matrix(features_y[l])
                    style_loss += float(weight) * mse_loss(
                        gram_y, gram_s.expand_as(gram_y))

                stability_loss = NOISE_WEIGHT * mse_loss(
                    y_noise.view(-1),
                    y.view(-1).detach())

                total_loss = content_loss + style_loss + reg_loss + stability_loss
                total_loss.backward()
                optimizer.step()

                # Aggregate as Python floats so we don't retain every batch's
                # autograd graph between log intervals (memory leak otherwise).
                agg_content_loss += float(content_loss)
                agg_style_loss += float(style_loss)
                agg_reg_loss += float(reg_loss)
                agg_stable_loss += float(stability_loss)

                if count % LOG_INTERVAL == 0:
                    mesg = "{} [{}/{}] content: {:.2f}  style: {:.2f}  reg: {:.2f} stable: {:.2f} total: {:.6f}".format(
                        time.ctime(), count, steps,
                        agg_content_loss / LOG_INTERVAL,
                        agg_style_loss / LOG_INTERVAL,
                        agg_reg_loss / LOG_INTERVAL,
                        agg_stable_loss / LOG_INTERVAL,
                        (agg_content_loss + agg_style_loss + agg_reg_loss +
                         agg_stable_loss) / LOG_INTERVAL)
                    print(mesg)
                    agg_content_loss = 0.
                    agg_style_loss = 0.
                    agg_reg_loss = 0.
                    agg_stable_loss = 0.
                    transformer.eval()
                    y = transformer(x)
                    save_debug_image(
                        x, y.detach(), y_noise.detach(),
                        "./fast_neural_style_transfer/debug/{}.png".format(
                            base_steps + count))
                    transformer.train()
                if count >= steps:
                    return
View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou

Dependencies:
torch: 0.4
matplotlib
"""
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# make fake data: two 2-D Gaussian blobs, one per class
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1)      # class0 x data ~ N(+2, 1), shape=(100, 2)
y0 = torch.zeros(100)               # class0 labels, shape=(100,)
x1 = torch.normal(-2*n_data, 1)     # class1 x data ~ N(-2, 1), shape=(100, 2)
y1 = torch.ones(100)                # class1 labels, shape=(100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # shape (200, 2) FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor)    # shape (200,) LongTensor = 64-bit integer

# The code below is deprecated in Pytorch 0.4. Now, autograd directly supports tensors
# x, y = Variable(x), Variable(y)

# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()


class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
Example #39
0
# Binary classification demo

import torch
from torch.autograd import Variable  # NOTE(review): legacy pre-0.4 wrapper API
import torch.nn.functional as F
import matplotlib.pyplot as plt


# Prepare the training data: two Gaussian clusters in 2-D
n_data = torch.ones(100,2)              # 100x2 matrix of ones (template for the means)
x0 = torch.normal(2*n_data,1)           # class-0 inputs: 100 draws from N(mean=2, std=1)
y0 = torch.zeros(100)                   # class-0 labels are all 0, shape (100,)
x1 = torch.normal(-2*n_data,1)          # class-1 inputs: 100 draws from N(mean=-2, std=1)
y1 = torch.ones((100))                  # class-1 labels are all 1, shape (100,)
x = torch.cat((x0,x1),0).type(torch.FloatTensor)  # merge x0, x1; cast to torch.FloatTensor
y = torch.cat((y0,y1)).type(torch.LongTensor)     # merge y0, y1; cast to torch.LongTensor

x,y = Variable(x),Variable(y)           # wrap x, y for the network to learn from

# Visualize the training data
plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=y.data.numpy(),s=100,lw=0,cmap='RdYlGn')
plt.show()

# 定义神经网络 method1
class Net(torch.nn.Module):
    def __init__(self,n_feature,n_hidden,n_output):
        super(Net,self).__init__()
        self.hidden = torch.nn.Linear(n_feature,n_hidden)       # 定义 hidden层 的数据处理方式
        self.predict = torch.nn.Linear(n_hidden,n_output)       # 定义 predict层 的数据处理方式

    def forward(self,x):
Example #40
0
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data: two 2-D Gaussian blobs, one per class
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)   # class-0 inputs ~ N(+2, 1), shape (100, 2)
y0 = torch.zeros(100)              # class-0 labels, shape (100,)
x1 = torch.normal(-2 * n_data, 1)  # class-1 inputs ~ N(-2, 1), shape (100, 2)
y1 = torch.ones(100)               # class-1 labels, shape (100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # (200, 2) float32 inputs
y = torch.cat((y0, y1), ).type(torch.LongTensor)    # (200,) int64 labels

# scatter plot of the two classes, colored by label
plt.scatter(x.data.numpy()[:, 0],
            x.data.numpy()[:, 1],
            c=y.data.numpy(),
            s=100,
            lw=0,
            cmap='RdYlGn')


#method 1
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
Example #41
0
    def fit(self, epochs=None):
        """Fit the CTGAN Synthesizer models to the training data.

        Args:
            epochs (int or None):
                Number of training epochs. Passing it here is deprecated;
                prefer the constructor argument. Defaults to `self._epochs`.
        """

        if epochs is None:
            epochs = self._epochs
        else:
            warnings.warn(
                ('`epochs` argument in `fit` method has been deprecated and will be removed '
                 'in a future version. Please pass `epochs` to the constructor instead'),
                DeprecationWarning
            )

        # Parameters of the latent prior: standard normal of shape
        # (batch_size, embedding_dim) on the training device.
        mean = torch.zeros(self._batch_size, self._embedding_dim, device=self._device)
        std = mean + 1

        steps_per_epoch = max(len(self.train_data) // self._batch_size, 1)
        for i in range(epochs):
            self.trained_epochs += 1
            for id_ in range(steps_per_epoch):

                # ---- discriminator update(s) ----
                for n in range(self._discriminator_steps):
                    fakez = torch.normal(mean=mean, std=std)  # latent noise z

                    condvec = self._data_sampler.sample_condvec(self._batch_size)
                    if condvec is None:
                        c1, m1, col, opt = None, None, None, None
                        real = self._data_sampler.sample_data(self._batch_size, col, opt)
                    else:
                        c1, m1, col, opt = condvec
                        c1 = torch.from_numpy(c1).to(self._device)
                        m1 = torch.from_numpy(m1).to(self._device)
                        # generator is conditioned by concatenating z with c1
                        fakez = torch.cat([fakez, c1], dim=1)

                        # sample real rows under a permuted condition vector;
                        # c2 is the permutation of c1 matching `real`
                        perm = np.arange(self._batch_size)
                        np.random.shuffle(perm)
                        real = self._data_sampler.sample_data(
                            self._batch_size, col[perm], opt[perm])
                        c2 = c1[perm]

                    fake = self._generator(fakez)
                    fakeact = self._apply_activate(fake)  # per-column output activations

                    real = torch.from_numpy(real.astype('float32')).to(self._device)

                    if c1 is not None:
                        # discriminator sees data concatenated with its condition
                        fake_cat = torch.cat([fakeact, c1], dim=1)
                        real_cat = torch.cat([real, c2], dim=1)
                    else:
                        real_cat = real
                        # NOTE(review): unconditional branch feeds raw `fake`
                        # (pre-activation) rather than `fakeact` — confirm intended
                        fake_cat = fake

                    y_fake = self._discriminator(fake_cat)
                    y_real = self._discriminator(real_cat)

                    # WGAN-GP style objective: gradient penalty + critic gap
                    pen = self._discriminator.calc_gradient_penalty(
                        real_cat, fake_cat, self._device)
                    loss_d = -(torch.mean(y_real) - torch.mean(y_fake))

                    self._optimizerD.zero_grad()
                    # keep the graph alive so loss_d.backward() can reuse it
                    pen.backward(retain_graph=True)
                    loss_d.backward()
                    self._optimizerD.step()

                # ---- generator update ----
                fakez = torch.normal(mean=mean, std=std)
                condvec = self._data_sampler.sample_condvec(self._batch_size)

                if condvec is None:
                    c1, m1, col, opt = None, None, None, None
                else:
                    c1, m1, col, opt = condvec
                    c1 = torch.from_numpy(c1).to(self._device)
                    m1 = torch.from_numpy(m1).to(self._device)
                    fakez = torch.cat([fakez, c1], dim=1)

                fake = self._generator(fakez)
                fakeact = self._apply_activate(fake)

                if c1 is not None:
                    y_fake = self._discriminator(torch.cat([fakeact, c1], dim=1))
                else:
                    y_fake = self._discriminator(fakeact)

                # conditional-generation penalty: generator must reproduce the
                # condition it was given
                if condvec is None:
                    cross_entropy = 0
                else:
                    cross_entropy = self._cond_loss(fake, c1, m1)

                loss_g = -torch.mean(y_fake) + cross_entropy

                self._optimizerG.zero_grad()
                loss_g.backward()
                self._optimizerG.step()

            if self._verbose:
                print(f"Epoch {self.trained_epochs}, Loss G: {loss_g.detach().cpu(): .4f},"
                      f"Loss D: {loss_d.detach().cpu(): .4f}",
                      flush=True)
Example #42
0
 def transform_factory(input_dim):
     """Build a batch-norm transform, prime its running statistics with one
     inverse pass, and switch it to eval mode before returning it.
     """
     bn = T.batchnorm(input_dim)
     warmup = torch.normal(torch.arange(0., input_dim),
                           torch.arange(1., 1. + input_dim) / input_dim)
     bn._inverse(warmup)
     bn.eval()
     return bn
Example #43
0
def g(x, noise=False):
    """Target function cos(x) + sin(2x), optionally plus N(0, 0.1) noise."""
    clean = torch.cos(x) + torch.sin(2 * x)
    if noise:
        return clean + torch.normal(0, 0.1, x.shape)
    return clean