Example #1
from pathlib import Path

import torch
import torch.nn as nn

import util


def load_model(model_path: Path, params_path: Path) -> nn.Module:
    """
    Load a PyTorch module with trained weights.

    Parameters
    ----------
    model_path : pathlib.Path
        Path to the pickled model object.

    params_path : pathlib.Path
        Path to the pickled state dict holding the trained weights.

    Returns
    -------
    model : nn.Module
        Trained model, moved to the current device and set to eval mode.
    """
    model = torch.load(model_path, map_location="cpu")
    params = torch.load(params_path, map_location="cpu")
    model.load_state_dict(params)
    model = model.to(util.current_device())
    model.device = util.current_device()
    model.eval()

    return model
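
A minimal usage sketch for the loader above; the artifact paths and the input shape are illustrative placeholders, not taken from the source:

model = load_model(Path("out/model.pkl"), Path("out/params.pkl"))
with torch.no_grad():
    # Hypothetical 64x64 single-channel input, matching the examples below.
    score = model(torch.randn(1, 1, 64, 64).to(model.device))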
Example #2
    def __init__(self, configs: Dict[str, Any]):
        super().__init__()

        self.configs = configs

        ndf = 64
        self.device = util.current_device()

        use_noise: bool = configs["use_noise"]
        noise_sigma: float = configs["noise_sigma"]

        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(1, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
        )
Example #3
    def __init__(self, latent_vars: Dict[str, LatentVariable]):
        super().__init__()

        self.latent_vars = latent_vars
        self.dim_input = sum(map(lambda x: x.cdim, latent_vars.values()))
        ngf = 64
        self.device = util.current_device()

        # main layers
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(self.dim_input, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, 1, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
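
The first ConvTranspose2d above expects a 1x1 spatial map, so a flat latent batch has to be reshaped to (N, dim_input, 1, 1) before entering the stack. A minimal sketch, assuming g is an instance of this generator (the names are illustrative):

z = torch.randn(16, g.dim_input, 1, 1).to(g.device)
fake = g.main(z)  # (16, 1, 64, 64) images in [-1, 1] after the final Tanh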
Example #4
    def __init__(
        self,
        dataloader: DataLoader,
        latent_vars: Dict[str, LatentVariable],
        models: Dict[str, nn.Module],
        optimizers: Dict[str, Any],
        losses: Dict[str, Any],
        configs: Dict[str, Any],
        logger: Logger,
    ):
        self.dataloader = dataloader
        self.latent_vars = latent_vars
        self.models = models
        self.optimizers = optimizers
        self.losses = losses
        self.configs = configs
        self.logger = logger

        self.device = util.current_device()

        self.grad_max_norm = configs["grad_max_norm"]

        self.n_log_samples = configs["n_log_samples"]

        self.gen_images_path = self.logger.path / "images"
        self.model_snapshots_path = self.logger.path / "models"
        for p in [self.gen_images_path, self.model_snapshots_path]:
            p.mkdir(parents=True, exist_ok=True)

        self.iteration = 0
        self.epoch = 0

        self.snapshot_models()
Example #5
    def __init__(self, categorical_dim):
        super().__init__()
        self.device = current_device()

        self.rnn = nn.LSTMCell(categorical_dim, 128)

        self.main = nn.Sequential(
            nn.Linear(128, 10),
            nn.Softmax(dim=-1)
        )
Example #6
    def __init__(self, dataloader, models, optimizers, losses):
        self.dataloader = dataloader
        self.models = models
        self.optimizers = optimizers
        self.losses = losses
        self.epoch = 1

        self.device = current_device()

        for model in models.keys():
            setattr(self, model, self.models[model])
Example #7
    def __init__(
        self,
        dim_z_content: int,
        dim_z_motion: int,
        channel: int,
        geometric_info: str,
        ngf: int = 64,
        video_length: int = 16,
    ):
        super(GeometricVideoGenerator, self).__init__()

        self.dim_z_content = dim_z_content
        self.dim_z_motion = dim_z_motion
        self.channel = channel
        self.geometric_info = geometric_info
        self.video_length = video_length
        self.ngf = ngf

        dim_z = dim_z_motion + dim_z_content
        self.dim_z = dim_z

        self.recurrent: nn.Module = nn.GRUCell(dim_z_motion, dim_z_motion)

        modules: List[nn.Module] = [
            nn.ConvTranspose2d(dim_z, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ngf, self.channel, 4, 2, 1, bias=False),
        ]
        if self.geometric_info == "segmentation":
            modules.append(nn.Softmax(dim=1))
        else:
            modules.append(nn.Tanh())

        self.main = nn.Sequential(*modules)

        self.device = util.current_device()
Example #8
    def __init__(self, dataloader, dataloader_subset, testloader, models,
                 optimizers, loss_functions):
        self.dataloader = dataloader
        self.dataloader_subset = dataloader_subset
        self.testloader = testloader
        self.models = models
        self.optimizers = optimizers
        self.loss_functions = loss_functions
        self.epoch = 1
        self.losses = []
        self.accuracies = []
        self.misclassifications = []

        self.device = current_device()

        for model in models.keys():
            setattr(self, model, self.models[model])
Example #9
    def __init__(
        self,
        dataloader: VideoDataLoader,
        logger: Logger,
        models: Dict[str, nn.Module],
        optimizers: Dict[str, Any],
        loss: Loss,
        configs: Dict[str, Any],
    ):
        self.dataloader = dataloader
        self.logger = logger
        self.models = models
        self.optimizers = optimizers
        self.loss = loss
        self.configs = configs
        self.device = util.current_device()
        self.geometric_info = configs["geometric_info"]["name"]

        self.num_log, self.rows_log, self.cols_log = 25, 5, 5

        self.eval_batchsize = configs["evaluation"]["batchsize"]
        self.eval_num_samples = configs["evaluation"]["num_samples"]
        self.eval_metrics = configs["evaluation"]["metrics"]

        # dataloader for logging real samples on tensorboard
        self.dataloader_log = VideoDataLoader(
            self.dataloader.dataset,
            batch_size=self.num_log,
            num_workers=4,
            shuffle=True,
            drop_last=True,
            pin_memory=True,
        )

        self.model_snapshots_path = self.logger.path / "models"
        self.model_snapshots_path.mkdir(parents=True, exist_ok=True)

        self.adv_loss = nn.BCEWithLogitsLoss(reduction="sum")

        # copy config file to log directory
        shutil.copy(configs["config_path"], str(self.logger.path / "config.yml"))

        self.iteration: int = 0
        self.epoch: int = 0
        self.save_classobj()
Example #10
    def __init__(
        self,
        ch1: int,
        ch2: int,
        use_noise: bool = False,
        noise_sigma: float = 0,
        ndf: int = 64,
    ):
        super(VideoDiscriminator, self).__init__()

        self.ch1, self.ch2 = ch1, ch2
        self.use_noise = use_noise
        self.noise_sigma = noise_sigma
        self.ndf = ndf

        self.conv_g = nn.Sequential(
            nn.Conv3d(
                ch1, ndf // 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False
            ),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.conv_c = nn.Sequential(
            nn.Conv3d(
                ch2, ndf // 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False
            ),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.main = nn.Sequential(
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ndf, ndf * 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
            nn.BatchNorm3d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(
                ndf * 2, ndf * 4, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False
            ),
            nn.BatchNorm3d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ndf * 4, 1, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
        )
        self.device = util.current_device()
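
    # Only __init__ appears in this example. A plausible forward (an
    # assumption, not from the source) concatenates the two branch outputs,
    # whose ndf // 2 channels each add up to main's ndf input channels:
    def forward(self, x_g: torch.Tensor, x_c: torch.Tensor) -> torch.Tensor:
        h = torch.cat([self.conv_g(x_g), self.conv_c(x_c)], dim=1)
        return self.main(h)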
Example #11
    def __init__(
        self,
        in_ch: int,
        dim_z: int,
        geometric_info: str,
        ngf: int = 64,
        video_length: int = 16,
    ):
        super(ColorVideoGenerator, self).__init__()

        self.in_ch = in_ch
        self.out_ch = 3
        self.dim_z = dim_z
        self.geometric_info = geometric_info

        self.inconv = Inconv(in_ch, ngf * 1)
        self.down_blocks = nn.ModuleList([
            DownBlock(ngf * 1, ngf * 1),
            DownBlock(ngf * 1, ngf * 2),
            DownBlock(ngf * 2, ngf * 4),
            DownBlock(ngf * 4, ngf * 4),
            DownBlock(ngf * 4, ngf * 4),
            DownBlock(ngf * 4, ngf * 4),
        ])

        self.up_blocks = nn.ModuleList([
            UpBlock(ngf * 4 + dim_z, ngf * 4, dropout=True),
            UpBlock(ngf * 8, ngf * 4, dropout=True),
            UpBlock(ngf * 8, ngf * 4),
            UpBlock(ngf * 8, ngf * 2),
            UpBlock(ngf * 4, ngf * 1),
            UpBlock(ngf * 2, ngf * 1),
        ])
        self.outconv = Outconv(ngf * 2, self.out_ch)

        self.n_down_blocks = len(self.down_blocks)
        self.n_up_blocks = len(self.up_blocks)

        self.device = util.current_device()

        self.channel = 3
        self.video_length = video_length
Example #12
    def __init__(self, categorical_dim):
        super().__init__()
        self.relu = nn.ReLU()
        self.categorical_dim = categorical_dim

        self.device = current_device()

        self.encoder = nn.Sequential(
            nn.Linear(784, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
        )

        self.rnn = nn.LSTMCell(categorical_dim, 128)

        self.fc2 = nn.Sequential(nn.Linear(128, 64), nn.ReLU(),
                                 nn.Linear(64, categorical_dim))
Example #13
    def __init__(
        self,
        ch1: int,
        ch2: int,
        use_noise: bool = False,
        noise_sigma: float = 0,
        ndf: int = 64,
    ):
        super(GradientDiscriminator, self).__init__()

        # ch2 is currently unused
        self.ch1, self.ch2 = ch1, ch2
        self.use_noise = use_noise
        self.noise_sigma = noise_sigma
        self.ndf = ndf

        self.main = nn.Sequential(
            # 1st
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ch1, ndf, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
            nn.BatchNorm3d(ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # 2nd
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ndf, ndf * 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
            nn.BatchNorm3d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # 3rd
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(
                ndf * 2, ndf * 4, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False
            ),
            nn.BatchNorm3d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # 4th
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ndf * 4, 1, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
        )
        self.device = util.current_device()
Example #14
    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.device = current_device()
Example #15
import torch

from dataloader import get_data

from util import weights_init_normal, current_device
from models.classifier import Classifier

from trainer import Trainer
import loss

def create_optimizer(models, lr):
    parameters = []
    for model in models:
        parameters += list(model.parameters())

    return torch.optim.Adam(parameters, lr)

device = current_device()

n_features = 256
latent_dim = 1
latent_dim_cont = 0
categorical_dim = 10
batch_size = 100
lr = 1e-4
n_epochs = 100

dataloader, _, _ = get_data(batch_size)

models = {
    'classifier': Classifier(latent_dim, categorical_dim),
}
Example #16
    def __init__(self):
        super().__init__()
        self.loss_func = nn.BCEWithLogitsLoss(reduction="sum")
        self.device = util.current_device()
Example #17
def sample_gumbel(shape, eps=1e-20):
    # Inverse-transform sampling of Gumbel(0, 1); eps guards the logs against 0.
    U = torch.rand(shape).to(current_device())
    return -torch.log(-torch.log(U + eps) + eps)
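
sample_gumbel draws Gumbel(0, 1) noise by inverse-transform sampling: if U ~ Uniform(0, 1), then -log(-log U) is Gumbel-distributed. A minimal sketch of the usual companion, a temperature-controlled Gumbel-Softmax sample (the function name and its temperature argument are assumptions, not from the source):

import torch.nn.functional as F

def gumbel_softmax_sample(logits, temperature=1.0):
    # Perturb the logits with Gumbel noise, then soften with the temperature.
    y = logits + sample_gumbel(logits.shape)
    return F.softmax(y / temperature, dim=-1)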
Example #18
    def __init__(self, use_noise: bool, sigma: float = 0.2):
        super().__init__()
        self.use_noise = use_noise
        self.sigma = sigma
        self.device = util.current_device()
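
    # Only the constructor is shown in this example. A plausible forward
    # (an assumption, not from the source) adds zero-mean Gaussian noise
    # scaled by sigma whenever the flag is set:
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.use_noise:
            return x + self.sigma * torch.randn_like(x)
        return x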
Example #19
    def __init__(self, latent_vars: Dict[str, LatentVariable]):
        self.latent_vars = latent_vars

        self.discrete_loss = nn.CrossEntropyLoss()
        self.continuous_loss = NormalNLLLoss()
        self.device = util.current_device()
Example #20
    def __init__(self):
        self.loss = nn.BCELoss(reduction="mean")
        self.device = util.current_device()
Example #21
    def test_current_device(self):
        # On a CUDA-free test machine the helper must fall back to the CPU.
        self.assertEqual(torch.device("cpu"), current_device())
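
Every example above depends on the current_device helper, and the test only pins down its CPU-only behavior. A common implementation consistent with that test (an assumption; the source never shows the body):

import torch

def current_device() -> torch.device:
    # Prefer a visible GPU, otherwise fall back to the CPU.
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")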
Example #22
def sample_normal(mu, logvar):
    # Reparameterization trick: z = mu + std * eps with eps ~ N(0, I).
    std = torch.exp(0.5 * logvar)
    normals = torch.randn(*mu.shape).to(current_device())
    return mu + std * normals
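
This is the reparameterization trick: with std = exp(logvar / 2), the result mu + std * eps follows N(mu, exp(logvar)) while staying differentiable in mu and logvar. A small usage sketch with made-up shapes:

mu = torch.zeros(8, 16, device=current_device(), requires_grad=True)
logvar = torch.zeros(8, 16, device=current_device(), requires_grad=True)
z = sample_normal(mu, logvar)  # one N(mu, exp(logvar)) sample per row
z.sum().backward()             # gradients reach both mu and logvar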