Example 1
def prepare_generator(writer, dataset, device):
    """Build the GAN generator and its training-step pipeline.

    Returns a ``(generator, step)`` tuple. The step selects the loss,
    runs zero-grad / backward / optimize via SGD (lr=0.0001), and a side
    branch pushes generated images to tensorboard under
    ``"Generator/Images"``.
    """
    gen = modules.Generator(in_channels=3)
    sgd = torch.optim.SGD(gen.parameters(), lr=0.0001)

    # ``**`` is right-associative; the chain is kept in the original
    # operand order so the pipeline composes exactly as before.
    gen_step = (
        steps.Generator(tt.loss.SmoothBinaryCrossEntropy(alpha=0.1), device)
        ** tt.Select(loss=0)
        ** tt.pytorch.ZeroGrad(sgd)
        ** tt.pytorch.Backward()
        ** tt.pytorch.Optimize(sgd)
        ** tt.pytorch.Detach()
    )

    # Side branch: log generated images to tensorboard and hand them to
    # AddFakeImages — presumably it feeds fakes into *dataset*; confirm.
    gen_step ** tt.Select(generated_images=2) ** tt.callbacks.tensorboard.Images(
        writer, "Generator/Images") ** operations.AddFakeImages(dataset)

    return gen, gen_step
Example 2
def accuracy_pipeline(iteration, writer, name: str):
    """Attach a multiclass-accuracy reporting branch to *iteration*.

    Selects predictions (index 1) and labels (index 2) from the
    iteration output, computes multiclass accuracy, accumulates a
    running mean, then splits the value to a log line
    (``f"{name} Accuracy"``) and a tensorboard scalar
    (``f"{name}/Accuracy"``).

    NOTE(review): Python evaluates chained ``>`` as pairwise
    comparisons joined by ``and`` — this relies on the library's
    overloaded, truthy pipe objects; do not restructure the chain.
    """
    return (iteration > tt.Select(predictions=1, labels=2) >
            tt.metrics.classification.multiclass.Accuracy() >
            tt.accumulators.Mean() > tt.Split(
                tt.callbacks.Log(f"{name} Accuracy"),
                tt.callbacks.tensorboard.Scalar(writer, f"{name}/Accuracy"),
            ))
Example 3
def prepare_discriminator(device):
    """Build the GAN discriminator and its training-step pipeline.

    Returns a ``(discriminator, step)`` tuple; the step minimizes a
    smoothed binary cross-entropy loss with SGD (lr=0.0004).
    """
    disc = modules.Discriminator(in_channels=3)
    sgd = torch.optim.SGD(disc.parameters(), lr=0.0004)

    # Chain kept in the original operand order (``**`` is
    # right-associative) so composition is unchanged.
    disc_step = (
        steps.Discriminator(tt.loss.SmoothBinaryCrossEntropy(alpha=0.1), device)
        ** tt.Select(loss=0)
        ** tt.pytorch.ZeroGrad(sgd)
        ** tt.pytorch.Backward()
        ** tt.pytorch.Optimize(sgd)
        ** tt.pytorch.Detach()
    )

    return disc, disc_step
Example 4
def test_step():
    """Smoke-test ``Step`` pipelines on a tiny linear regression.

    First constructs a full gradient-accumulation pipeline (never run —
    presumably construction itself is the check; confirm intent), then
    builds a minimal loss step with a MaxError side branch and runs it
    five times on random data.
    """
    linear = torch.nn.Linear(10, 1)
    adam = torch.optim.Adam(linear.parameters())

    # Built only to exercise construction with accumulate=2; the name
    # is rebound below, so this pipeline is never invoked.
    pipeline = (
        Step(torch.nn.MSELoss())
        ** tt.Select(loss=0)
        ** tt.pytorch.ZeroGrad(adam)
        ** tt.pytorch.Backward(accumulate=2)
        ** tt.pytorch.Optimize(adam, accumulate=2)
        ** tt.pytorch.Detach()
        ** tt.callbacks.Log("Loss")
    )

    pipeline = Step(torch.nn.MSELoss()) ** tt.Select(loss=0)

    # Side branch: log the maximum regression error of each call.
    pipeline ** tt.Select(
        predictions=1, target=2
    ) ** tt.metrics.regression.MaxError() ** tt.callbacks.Log(name="Max Error")

    for _ in range(5):
        pipeline(linear, (torch.randn(8, 10), torch.randn(8)))
Example 5
def prepare_iteration(
    writer,
    generator,
    generator_step,
    discriminator,
    discriminator_step,
    noise_dataset,
    cifar10_with_fake,
):
    """Wire both GAN steps into a single ``MultiIteration`` and attach
    per-model loss reporting and checkpointing.

    The generator step runs on ``noise_dataset`` and the discriminator
    step on ``cifar10_with_fake`` with intervals ``(4, 1)``. Each branch
    detaches the loss, moves it to CPU, mean-accumulates it, then logs
    to tensorboard, to the logger, and saves a best-so-far checkpoint
    (``comparator=operator.lt`` — lower loss wins).
    """
    iteration = tt.iterations.MultiIteration(
        steps=(generator_step, discriminator_step),
        modules=((generator, discriminator), discriminator),
        datas=(noise_dataset, cifar10_with_fake),
        intervals=(4, 1),
        log="INFO",
    )

    # Generator branch.
    iteration**tt.Select(
        loss=0)**tt.pytorch.Detach()**tt.device.CPU()**tt.Except(
            tt.accumulators.Mean(), 4)**tt.Split(
                tt.callbacks.tensorboard.Scalar(writer, "Generator/Loss"),
                tt.callbacks.Logger(name="Generator Mean"),
                tt.callbacks.Save(generator,
                                  "generator.pt",
                                  comparator=operator.lt),
            )
    # Discriminator branch. Fixed copy-paste defect: it previously
    # logged under "Generator Mean" and saved the discriminator to
    # "generator.pt", clobbering the generator checkpoint above.
    iteration**tt.Select(
        loss=0)**tt.pytorch.Detach()**tt.device.CPU()**tt.Except(
            tt.accumulators.Mean(), begin=0, end=4)**tt.Split(
                tt.callbacks.tensorboard.Scalar(writer, "Discriminator/Loss"),
                tt.callbacks.Logger(name="Discriminator Mean"),
                tt.callbacks.Save(discriminator,
                                  "discriminator.pt",
                                  comparator=operator.lt),
            )

    return iteration
Example 6
def train(optimizer, criterion, device):
    """Compose and return a gradient-enabled training pipeline.

    The step runs *criterion* on *device* with gradients on, selects the
    loss, then zero-grads, backpropagates and applies *optimizer*.
    """
    # Extracting the leftmost operand is safe: ``**`` is
    # right-associative, so a ** (b ** c ** d ** e) groups identically.
    forward = Step(criterion, gradient=True, device=device)
    update = (tt.Select(loss=0)
              ** tt.pytorch.ZeroGrad(optimizer)
              ** tt.pytorch.Backward()
              ** tt.pytorch.Optimize(optimizer))
    return forward ** update
Example 7
def loss_pipeline(iteration, writer, name: str):
    """Attach a mean-loss reporting branch to *iteration*.

    Selects the loss (index 0), accumulates a running mean, then splits
    it to a log line (``f"{name} Loss"``) and a tensorboard scalar
    (``f"{name}/Loss"``).

    NOTE(review): chained ``>`` evaluates as pairwise comparisons joined
    by ``and`` — this relies on the library's overloaded, truthy pipe
    objects; do not restructure the chain.
    """
    return (iteration > tt.Select(loss=0) > tt.accumulators.Mean() > tt.Split(
        tt.callbacks.Log(f"{name} Loss"),
        tt.callbacks.tensorboard.Scalar(writer, f"{name}/Loss", log="INFO"),
    ))