Example #1
# ApogeeDataset, Feedforward, ConditioningAutoencoder, `opt`, and `device`
# come from the surrounding project and are assumed to be in scope.
import pandas as pd
import torch
from torch import nn

###############################################
"""Load and batch the dataset"""

data = pd.read_pickle(opt.data_file)
dataset = ApogeeDataset(data[:50000], opt.n_bins)
loader = torch.utils.data.DataLoader(dataset=dataset,
                                     batch_size=opt.n_batch,
                                     shuffle=True,
                                     drop_last=True)

####################################################
"""Initialize the neural networks"""

encoder = Feedforward(
    [opt.n_bins + opt.n_conditioned, 2048, 512, 128, opt.n_z],
    activation=nn.SELU()).to(device)
decoder = Feedforward(
    [opt.n_z + opt.n_conditioned, 512, 2048, 8192, opt.n_bins],
    activation=nn.SELU()).to(device)
conditioning_autoencoder = ConditioningAutoencoder(encoder,
                                                   decoder,
                                                   n_bins=opt.n_bins,
                                                   n_embedding=0).to(device)
#conditioning_autoencoder = torch.load("../../feed/trueCont/exp1/feedN7214I1700")
#conditioning_autoencoder = torch.load("adN7214I560")

pred_u0_given_v = Feedforward(
    [opt.n_z, 512, 256, opt.n_cat**opt.n_conditioned],
    activation=nn.SELU()).to(device)
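
Not part of the original listing: a minimal follow-up sketch showing one way to attach optimizers to these networks and pull a single batch from the loader. The learning rate, the choice of separate optimizers, and the batch inspection are illustrative assumptions, not taken from the source project.

# Placeholder optimizer setup (assumed hyperparameters, not from the original script).
ae_optim = torch.optim.Adam(conditioning_autoencoder.parameters(), lr=1e-4)
pred_optim = torch.optim.Adam(pred_u0_given_v.parameters(), lr=1e-4)

# Pull one batch to check what ApogeeDataset yields before writing a training loop.
first_batch = next(iter(loader))
print(type(first_batch), opt.n_batch)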

Example #2
class Conv2dBlock(nn.Module):
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 norm='none',
                 activation='relu',
                 pad_type='zero'):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # initialize convolution
        if norm == 'sn':
            self.conv = SpectralNorm(
                nn.Conv2d(input_dim,
                          output_dim,
                          kernel_size,
                          stride,
                          bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(input_dim,
                                  output_dim,
                                  kernel_size,
                                  stride,
                                  bias=self.use_bias)
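
    # Not shown in this excerpt: a forward pass consistent with the constructor
    # above would apply padding, then convolution, then the optional
    # normalization and activation, roughly as follows.
    def forward(self, x):
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x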
Example #3
from typing import Union

import torch
from torch import nn

Activation = Union[str, nn.Module]

_str_to_activation = {
    'relu': nn.ReLU(),
    'tanh': nn.Tanh(),
    'leaky_relu': nn.LeakyReLU(),
    'sigmoid': nn.Sigmoid(),
    'selu': nn.SELU(),
    'softplus': nn.Softplus(),
    'identity': nn.Identity(),
}


def build_mlp(
    input_size: int,
    output_size: int,
    n_layers: int,
    size: int,
    activation: Activation = 'tanh',
    output_activation: Activation = 'identity',
) -> nn.Module:
    """
        Builds a feedforward neural network

        arguments:
            n_layers: number of hidden layers
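
A usage sketch for build_mlp; the sizes below are arbitrary placeholders chosen to show the call, not values from the original code.

net = build_mlp(input_size=10, output_size=3, n_layers=2, size=64,
                activation='selu')
print(net)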
Example #4
import os
import glob
import numpy as np
from torch import nn

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from EDSR import train

# plot and save results one run (activation) at a time
epoch_list = {'1': 3, '2': 3, '3': 3, '4': 3}
activate_list = {
    '1': [nn.ReLU(), 'ReLU'],
    '2': [nn.ELU(), 'ELU'],
    '3': [nn.SELU(), 'SELU'],
    '4': [nn.LeakyReLU(), 'LeakyReLU']
}
for i in range(len(activate_list)):
    train(epoch_list[str(i + 1)], activate_list[str(i + 1)][0],
          activate_list[str(i + 1)][1])

## plot all runs together, one figure per metric
status_type = ['train_loss', 'train_psnr', 'train_ssim']
activate_list = ['ReLU', 'ELU', 'SELU', 'LeakyReLU']
color_list = ['green', 'red', 'skyblue', 'blue']

status_list = []

for _status_type in status_type:
    fig = plt.figure()
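    # Hypothetical continuation, not from the original: overlay one curve per
    # activation for the current metric, assuming each run saved its history
    # as "<activation>_<metric>.npy" (an assumed naming scheme).
    for name, color in zip(activate_list, color_list):
        history = np.load('{}_{}.npy'.format(name, _status_type))
        plt.plot(history, color=color, label=name)
    plt.xlabel('epoch')
    plt.ylabel(_status_type)
    plt.legend()
    fig.savefig('{}.png'.format(_status_type))
    plt.close(fig)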
Example #5
class Conv2dLayer(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 pad_type='zero',
                 activation='lrelu',
                 norm='none',
                 sn=False,
                 bias=True):
        super(Conv2dLayer, self).__init__()
        # Initialize the padding scheme
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # Initialize the normalization type
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_channels)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_channels)
        elif norm == 'ln':
            self.norm = LayerNorm(out_channels)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # Initialize the activation function
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # Initialize the convolution layers
        if sn:
            self.conv2d = SpectralNorm(
                nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          padding=0,
                          dilation=dilation,
                          bias=bias))
        else:
            self.conv2d = nn.Conv2d(in_channels,
                                    out_channels,
                                    kernel_size,
                                    stride,
                                    padding=0,
                                    dilation=dilation,
                                    bias=bias)
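
A construction sketch for the layer above; channel counts and option values are arbitrary placeholders, picked only so the branches shown in the constructor (reflection padding, instance norm, SELU, no spectral norm) are exercised.

layer = Conv2dLayer(3, 64, kernel_size=3, stride=1, padding=1,
                    pad_type='reflect', activation='selu', norm='in', sn=False)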