# Example #1 (score: 0)
    def __init__(self):
        """Initialize the module; this layer has no parameters of its own."""
        # Python 3 zero-argument super() — behaviorally equivalent to the old
        # super(StupidLayer, self) form but does not hard-code the class name,
        # so the class can be renamed or subclassed safely.
        super().__init__()

    def forward(self, x):
        """Crop a 2-pixel border from the last two (spatial) dims of ``x``.

        Assumes x is a 4-D tensor (batch, channels, H, W) — TODO confirm.
        """
        trim = slice(2, -2)
        return x[:, :, trim, trim]

def MLPBlock(dim):
    """Channel-wise MLP block wrapped in a residual skip connection.

    BatchNorm -> LeakyReLU -> 1x1 conv, all at ``dim`` channels.
    """
    norm = nn.BatchNorm2d(dim)
    act = nn.LeakyReLU()
    proj = nn.Conv2d(dim, dim, 1)
    return SkipConnection(norm, act, proj)

# Proposal (encoder-side) network: a stem conv, then alternating stacks of
# four residual blocks and 2x2 average-pool downsampling, finishing with
# four channel-wise MLP blocks at 128 channels.
proposal_network = nn.Sequential(
    # NOTE(review): kernel_size=1 with padding=2 enlarges the spatial dims
    # rather than preserving them — confirm the padding is intended.
    nn.Conv2d(2, 8, 1, padding=2),
    *[ResBlock(8, 8) for _ in range(4)],
    nn.AvgPool2d(2, 2),                         # halve spatial dims
    *[ResBlock(8, 8) for _ in range(4)],
    nn.AvgPool2d(2, 2), nn.Conv2d(8, 16, 1),    # halve dims, 8 -> 16 channels
    *[ResBlock(16, 8) for _ in range(4)],
    nn.AvgPool2d(2, 2), nn.Conv2d(16, 32, 1),   # halve dims, 16 -> 32 channels
    *[ResBlock(32, 16) for _ in range(4)],
    nn.AvgPool2d(2, 2), nn.Conv2d(32, 64, 1),   # halve dims, 32 -> 64 channels
    *[ResBlock(64, 32) for _ in range(4)],
    nn.AvgPool2d(2, 2), nn.Conv2d(64, 128, 1),  # halve dims, 64 -> 128 channels
    *[MLPBlock(128) for _ in range(4)],
)

prior_network = nn.Sequential(
# Example #2 (score: 0)
# Reconstruction term of the variational lower bound.
# GaussianLoss is project-defined — presumably a Gaussian log-likelihood;
# verify against its definition.
reconstruction_log_prob = GaussianLoss()

# improve train computational stability by dividing the loss
# by this scale factor right before backpropagation
vlb_scale_factor = 256**2  # presumably the pixel count of 256x256 inputs — TODO confirm


def MLPBlock(dim):
    """Residual channel-wise MLP: BatchNorm -> LeakyReLU -> 1x1 conv."""
    layers = (nn.BatchNorm2d(dim), nn.LeakyReLU(), nn.Conv2d(dim, dim, 1))
    return SkipConnection(*layers)


# Prior network: a 1x1 stem conv followed by stages of four ResBlocks,
# with 2x2 average pooling between stages.
# NOTE(review): this Sequential is truncated in this excerpt — the closing
# paren and any remaining layers are not visible here.
prior_network = nn.Sequential(
    # MemoryLayer('#0'),
    nn.Conv2d(1, 16, 1),  # stem: 1 input channel -> 16 feature maps
    ResBlock(16, 16),
    ResBlock(16, 16),
    ResBlock(16, 16),
    ResBlock(16, 16),
    # MemoryLayer('#1'),
    nn.AvgPool2d(2, 2),  # halve spatial dims
    ResBlock(16, 16),
    ResBlock(16, 16),
    ResBlock(16, 16),
    ResBlock(16, 16),
    # MemoryLayer('#1'),  NOTE(review): same '#1' label as above — possibly meant '#2'
    nn.AvgPool2d(2, 2),  # halve spatial dims again
    ResBlock(16, 16),
    ResBlock(16, 16),
    ResBlock(16, 16),
    ResBlock(16, 16),
# Example #3 (score: 0)
# Generator of random feature-dropout masks for training.
# DropoutMaskGenerator is project-defined — verify its sampling scheme
# against its definition.
mask_generator = DropoutMaskGenerator()

# improve train computational stability by dividing the loss
# by this scale factor right before backpropagation
vlb_scale_factor = 128**2  # presumably the pixel count of 128x128 inputs — TODO confirm


def MLPBlock(dim):
    """Residual block applying BatchNorm, LeakyReLU, then a 1x1 conv."""
    return SkipConnection(
        nn.BatchNorm2d(dim),
        nn.LeakyReLU(),
        nn.Conv2d(dim, dim, kernel_size=1),
    )


# Proposal network: 1x1 stem conv, then stages of four ResBlocks with
# 2x2 average pooling between stages; channel width doubles via the
# 1x1 convs after pooling.
# NOTE(review): this Sequential is truncated in this excerpt — the closing
# paren and remaining layers are not visible here.
proposal_network = nn.Sequential(
    nn.Conv2d(6, 8, 1),  # stem: 6 input channels -> 8 feature maps
    ResBlock(8, 8),
    ResBlock(8, 8),
    ResBlock(8, 8),
    ResBlock(8, 8),
    nn.AvgPool2d(2, 2),  # 64
    ResBlock(8, 8),
    ResBlock(8, 8),
    ResBlock(8, 8),
    ResBlock(8, 8),
    nn.AvgPool2d(2, 2),
    nn.Conv2d(8, 16, 1),  # 32
    ResBlock(16, 8),
    ResBlock(16, 8),
    ResBlock(16, 8),
    ResBlock(16, 8),
    nn.AvgPool2d(2, 2),
# Example #4 (score: 0)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from VAE import VAE, train, loss_function_BCE, loss_function_MSE
from nn_utils import Flatten, UnFlatten, ResBlock
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import make_grid

# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# VAE encoder: 1x1 stem conv, four stages of three ResBlocks each with
# 2x2 average pooling and channel doubling between stages, then a final
# 1x1 conv / ReLU / BatchNorm head flattened into a 512-dim vector.
# The Linear input of 4096 = 256*4*4 implies a 4x4 spatial map at the
# flatten point — presumably 64x64 inputs; TODO confirm.
encoder = nn.Sequential(
    nn.Conv2d(3, 16, 1),
    *[ResBlock(16, 16) for _ in range(3)],
    nn.AvgPool2d(2, 2),
    nn.Conv2d(16, 32, 1),
    *[ResBlock(32, 32) for _ in range(3)],
    nn.AvgPool2d(2, 2),
    nn.Conv2d(32, 64, 1),
    *[ResBlock(64, 64) for _ in range(3)],
    nn.AvgPool2d(2, 2),
    nn.Conv2d(64, 128, 1),
    *[ResBlock(128, 128) for _ in range(3)],
    nn.AvgPool2d(2, 2),
    nn.Conv2d(128, 256, 1),
    nn.ReLU(),
    nn.BatchNorm2d(256),
    Flatten(),
    nn.Linear(4096, 512),
)

# VAE decoder: mirror of the encoder — project a 256-dim latent
# (presumably half of the encoder's 512-dim output split into mean/logvar;
# TODO confirm) up to a 256x4x4 map, then upsample through ResBlock stages.
# NOTE(review): this Sequential is truncated in this excerpt — the closing
# paren and remaining layers are not visible here.
decoder = nn.Sequential(nn.Linear(256, 4096), UnFlatten(256, 4, 4),
                        nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 128, 1),
                        nn.Upsample(scale_factor=2), ResBlock(128, 128),
                        ResBlock(128, 128), ResBlock(128, 128),
                        nn.Conv2d(128, 64, 1), nn.Upsample(scale_factor=2),
                        ResBlock(64, 64), ResBlock(64, 64), ResBlock(64, 64),