Example #1
def val_loss(model, val_data, val_labels, criterion):
    '''
    :param model: PyTorch neural network model
    :param val_data: loaded numpy data
    :param val_labels: loaded numpy targets
    :param criterion: loss function
    :return: mean value of the loss over `val_data`
    '''
    losses = [
        to_np(
            criterion(model(to_var(val_data[i])[None]),
                      to_var(val_labels[i])[None]))
        for i in range(len(val_data))
    ]
    return np.mean(losses)
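A minimal usage sketch, assuming `to_np` and `to_var` are the converters used throughout these examples (they match `dpipe.torch`) and that `val_data`/`val_labels` are sequences of per-item numpy arrays; the toy model and data below are hypothetical:

import numpy as np
from torch import nn

# hypothetical toy setup: a linear regressor and random validation pairs
model = nn.Linear(8, 1)
criterion = nn.MSELoss()
val_data = [np.random.rand(8).astype('float32') for _ in range(16)]
val_labels = [np.random.rand(1).astype('float32') for _ in range(16)]

print(val_loss(model, val_data, val_labels, criterion))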
Example #2
def denoise_on_test(model,
                    data,
                    shapes,
                    length,
                    path,
                    result_path,
                    names=None):
    '''
    :param model: PyTorch neural network model
    :param data: loaded numpy data
    :param shapes: shapes used to convert the cyclic data back
    :param length: the length used to convert the cyclic data back
    :param path: path to the model's weights
    :param result_path: experiment path where the denoised output is saved
    :param names: if None, each entry is saved as '<index>.npy'; otherwise a name for each entry can be provided
    :return: None
    '''
    # load_best_model
    model = load_model_state(model, path)

    result = [to_np(model(to_var(d, is_on_cuda(model))[None])) for d in data]
    output = postprocessing(result, shapes, length)

    result_path = Path(result_path)
    result_path.mkdir(parents=True, exist_ok=True)

    if names is not None:
        for i, name in enumerate(names):
            # np.save appends '.npy' if the name lacks the extension
            np.save(result_path / name, np.squeeze(output[i]).T)
    else:
        for i, o in enumerate(output):
            np.save(result_path / f'{i}.npy', np.squeeze(o).T)
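A hedged call sketch; `load_model_state`, `to_var`, `to_np` and `is_on_cuda` match the `dpipe.torch` helpers, while `postprocessing` and the data layout are specific to this codebase. The paths and names below are hypothetical:

denoise_on_test(model, data, shapes, length,
                path='experiment/best_model.pth',
                result_path='experiment/denoised',
                names=[f'record_{i}' for i in range(len(data))])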
Example #3
    def val():
        generator.eval()
        # a fixed batch of 144 latent points (state=0) keeps validation images comparable across runs
        t = sample_on_sphere(latent_dim, 144, state=0).astype('float32')
        with torch.no_grad():
            y = to_np(generator(to_var(t, device=generator)))[:, 0]

        return {'generated__image': bytescale(stitch(y, bytescale))}
Example #4
def r1_loss(images, discriminator):
    # R1 gradient penalty: mean squared norm of the discriminator's
    # gradient w.r.t. real images (Mescheder et al., 2018)
    images = to_var(images, device=discriminator, requires_grad=True)
    logits = discriminator(images)
    grads, = torch.autograd.grad(logits,
                                 images,
                                 torch.ones_like(logits),
                                 create_graph=True,
                                 retain_graph=True)
    return (grads**2).sum() / len(logits)
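A minimal sketch of calling it, assuming a discriminator that emits one logit per image; the toy network and data below are hypothetical:

import numpy as np
from torch import nn

# hypothetical discriminator: flattens 1x28x28 images into a single logit
discriminator = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 1))
images = np.random.rand(4, 1, 28, 28).astype('float32')

penalty = r1_loss(images, discriminator)  # scalar tensor, differentiable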
Example #5
def evaluate(model, data, targets):
    '''
    :param model: PyTorch neural network model
    :param data: loaded numpy data to evaluate the model on
    :param targets: numpy categorical targets
    :return: accuracy score of the model
    '''
    model.eval()

    preds = [
        to_np(model(to_var(d, is_on_cuda(model))[None])).argmax(1)
        for d in data
    ]
    return accuracy_score(targets, preds)
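A hedged usage sketch with a toy classifier; `data` is a sequence of single items (the `[None]` inside adds the batch dimension), and all names below are hypothetical:

import numpy as np
from torch import nn

# hypothetical 10-class classifier on 8-dimensional inputs
model = nn.Linear(8, 10)
data = [np.random.rand(8).astype('float32') for _ in range(32)]
targets = np.random.randint(0, 10, size=32)

print(evaluate(model, data, targets))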
Example #6
def train_step(image_groups, *, generator, discriminator, gen_optimizer,
               disc_optimizer, latent_dim, r1_weight, **optimizer_kwargs):
    def latent(reference):
        return to_var(sample_on_sphere(latent_dim,
                                       len(reference))).to(reference)

    assert len(image_groups)
    discriminator.train()
    generator.eval()

    losses = defaultdict(list)
    for i, images in enumerate(image_groups):
        real = to_var(images, device=discriminator)
        fake = generator(latent(real))

        real_logits = discriminator(real)
        fake_logits = discriminator(fake)

        # logistic (non-saturating) GAN loss: softplus(x) = -log(sigmoid(-x))
        dis_fake = functional.softplus(fake_logits).mean()
        dis_real = functional.softplus(-real_logits).mean()
        loss = dis_fake + dis_real

        # apply the R1 gradient penalty only on the first image group
        if i == 0 and r1_weight > 0:
            r1 = r1_loss(images, discriminator)
            loss = loss + r1_weight * r1
            losses['dis_r1'].append(to_np(r1))

        optimizer_step(disc_optimizer, loss, **optimizer_kwargs)
        losses['dis_fake'].append(to_np(dis_fake))
        losses['dis_real'].append(to_np(dis_real))

    # generator update: `real` still holds the last image group from the loop above
    generator.train()
    discriminator.eval()
    fake = generator(latent(real))
    fake_logits = discriminator(fake)

    loss = functional.softplus(-fake_logits).mean()
    optimizer_step(gen_optimizer, loss, **optimizer_kwargs)
    return {**dmap(np.mean, losses), 'gen': to_np(loss).item()}
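A hedged call sketch rather than a runnable script, since `train_step` leans on the codebase's own `optimizer_step`, `dmap` and `sample_on_sphere`; every name below is hypothetical:

# image_groups: a few numpy batches of real images, shape (N, C, H, W) each
logs = train_step(
    image_groups,
    generator=generator, discriminator=discriminator,
    gen_optimizer=gen_optimizer, disc_optimizer=disc_optimizer,
    latent_dim=128, r1_weight=10,
    lr=2e-4,  # extra kwargs are forwarded to optimizer_step
)
# logs: {'dis_fake': ..., 'dis_real': ..., 'dis_r1': ..., 'gen': ...}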
Example #7
def q_update(states, actions, rewards, done, *, gamma, agent, target_agent=None, optimizer, max_grad_norm=None,
             norm_type='inf', **optimizer_params):
    check_shape_along_axis(actions, rewards, axis=1)
    n_steps = actions.shape[1]
    assert n_steps > 0
    assert states.shape[1] == n_steps + 1

    agent.train()
    if target_agent is None:
        target_agent = agent
    else:
        target_agent.eval()

    # first state and the state n steps ahead
    start, stop = states[:, 0], states[:, -1]
    # collapse the n-step rewards into a single discounted return
    rewards = discount_rewards(np.moveaxis(rewards, 1, 0), gamma)
    # only the first action of each sequence is updated
    actions = actions[:, [0]]
    # discount used when bootstrapping from the state n steps ahead
    gamma = gamma ** n_steps

    start, stop, actions, rewards, done = to_var(start, stop, actions, rewards, done, device=agent)

    predicted = agent(start).gather(1, actions).squeeze(1)
    with torch.no_grad():
        # n-step TD target: R + gamma^n * max_a Q_target(s', a), zeroed for terminal states
        values = target_agent(stop).detach().max(1).values
        expected = (1 - done.to(values)) * values * gamma + rewards

    loss = functional.mse_loss(predicted, expected)
    set_params(optimizer, **optimizer_params)
    optimizer.zero_grad()
    loss.backward()
    if max_grad_norm is not None:
        torch.nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm, norm_type)

    optimizer.step()
    return to_np(loss)
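A hedged usage sketch matching the shape assertions above (`states` carries one more step than `actions` and `rewards`); `discount_rewards`, `set_params` and the conversion helpers are assumed to ship with the function, and the toy agent below is hypothetical:

import numpy as np
import torch

# hypothetical 3-step batch of 32 transitions, 4 discrete actions
batch, n_steps, obs_dim = 32, 3, 8
states = np.random.rand(batch, n_steps + 1, obs_dim).astype('float32')
actions = np.random.randint(0, 4, size=(batch, n_steps))
rewards = np.random.rand(batch, n_steps).astype('float32')
done = np.zeros(batch, dtype='float32')

agent = torch.nn.Linear(obs_dim, 4)
optimizer = torch.optim.Adam(agent.parameters())
print(q_update(states, actions, rewards, done, gamma=0.99,
               agent=agent, optimizer=optimizer, lr=1e-3))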
Example #8
def get_q_values(state, agent):
    return to_np(agent(to_var(state[None], device=agent)))[0]
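A minimal sketch, assuming `state` is a single observation without a batch dimension and `agent` maps batches to per-action Q-values; the toy network is hypothetical:

import numpy as np
import torch

agent = torch.nn.Linear(8, 4)                # hypothetical 4-action Q-network
state = np.random.rand(8).astype('float32')

q = get_q_values(state, agent)               # shape (4,): one value per action
action = int(q.argmax())                     # greedy action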
Example #9
def latent(reference):
    # sample latent codes on the unit sphere and match `reference`'s device and dtype
    return to_var(sample_on_sphere(latent_dim,
                                   len(reference))).to(reference)
Example #10
def predict(x):
    model.eval()
    x = to_var(x, args.device)
    # the model consumes the input without its trailing singleton axis
    prediction = model(x[..., 0])
    # only the second output channel is passed through a sigmoid
    prediction[:, 1] = torch.sigmoid(prediction[:, 1])
    return to_np(prediction)[..., None]
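A hedged sketch of a call; `model` and `args.device` are module-level names the closure captures, and the input shape below (two output channels, trailing singleton axis) is an assumption:

import numpy as np

# hypothetical batch: 4 items, 2 channels, 256 samples, trailing singleton axis
x = np.random.rand(4, 2, 256, 1).astype('float32')
y = predict(x)
print(y.shape)  # the trailing singleton axis is restored on the output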