Example #1
    def _make_conv_layer(self, in_c, out_c):

        conv_layer = nn.Sequential(
            nn.Conv3d(in_c, out_c, kernel_size=(3, 3, 3), padding=0),
            nn.ReLU(),
            nn.Conv3d(out_c, out_c, kernel_size=(3, 3, 3), padding=1),
            nn.ReLU(),
            nn.MaxPool3d((2, 2, 2)),
        )

        return conv_layer
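
A minimal shape check for the block above (a sketch, not from the original code; channel counts and input size are illustrative):

import torch
import torch.nn as nn

block = nn.Sequential(
    nn.Conv3d(1, 8, kernel_size=(3, 3, 3), padding=0),
    nn.ReLU(),
    nn.Conv3d(8, 8, kernel_size=(3, 3, 3), padding=1),
    nn.ReLU(),
    nn.MaxPool3d((2, 2, 2)),
)
x = torch.randn(2, 1, 32, 32, 32)  # [batch, channels, depth, height, width]
y = block(x)
print(y.shape)  # torch.Size([2, 8, 15, 15, 15]): 32 -> 30 (pad 0) -> 30 (pad 1) -> 15 (pool)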
Example #2
def deconv_blocks_size_5(in_dim,out_dim,Use_pool=True):
    layer = nn.Sequential()
    layer.add_module( "conv1",nn.ConvTranspose2d(in_dim , out_dim , 5 , 2, 1))
    layer.add_module('relu', nn.relu(True))
    layer.add_module('bn', nn.BatchNorm2d(out_dim))

    layer.add_module("conv2", nn.ConvTranspose2d(out_dim, out_dim ,5, 2, 1))
    layer.add_module('relu', nn.relu(True))
    layer.add_module('bn', nn.BatchNorm2d(out_dim))

    if Use_pool:
        layer.add_module("Upsamp",nn.UpsamplingNearest2d(scale_factor= 2))
    return layer
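
For reference, a hedged shape check of the block above (channel counts and input size are illustrative):

import torch

up = deconv_blocks_size_5(32, 16)
x = torch.randn(1, 32, 8, 8)
# Each ConvTranspose2d(kernel=5, stride=2, padding=1): out = (in - 1) * 2 - 2 + 5 = 2 * in + 1,
# so 8 -> 17 -> 35; UpsamplingNearest2d(scale_factor=2) then gives 70.
print(up(x).shape)  # torch.Size([1, 16, 70, 70])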
Example #3
    def forward(self, rdm_model, rl_input, rl_state):
        """
        rl_input: [batchsize, max_word_num, embedding_dim]
        rl_state: [1, batchsize, hidden_dim]
        """
        assert(rl_input.ndim==3)
        batchsize, max_word_num, embedding_dim = rl_input.shape
        assert(embedding_dim==self.embedding_dim)
        
        pooled_rl_input = self.PoolLayer(
            rl_input.reshape(
                [-1, 1, max_word_num, self.embedding_dim]
            )
        ).reshape([-1, 1, self.hidden_dim])
        
        print("pooled_rl_input:", pooled_rl_input.shape)
        print("rl_state:", rl_state.shape)
        rl_output, rl_new_state = rdm_model.gru_model(
                                            pooled_rl_input, 
                                            rl_state
                                        )
        rl_h1 = nn.functional.relu(
            self.DenseLayer(
#                 rl_state.reshape([len(rl_input), self.hidden_dim])  # unclear whether rl_state or rl_output should be used as the feature
                rl_output.reshape(
                    [len(rl_input), self.hidden_dim]
                )
            )
        )
        stopScore = self.Classifier(rl_h1)
        isStop = stopScore.argmax(axis=1)
        return stopScore, isStop, rl_new_state
Example #4
 def __init__(self):
     super().__init__()  # required before registering submodules on an nn.Module
     self.net = nn.Sequential(
         nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
         nn.BatchNorm2d(32),
         nn.ReLU()
     )
     self.fc = nn.Linear(32, 10)
Example #5
    def __init__(self, in_channels):
        super().__init__()

        self.relu = nn.ReLU(inplace=True)

        # B, C, H, W -> B, C, H, W

        self.norm1 = nn.BatchNorm2d(in_channels)
        self.selu = nn.SELU(inplace=True)

        self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, padding=1)
        self.norm2 = nn.BatchNorm2d(in_channels)
        self.selu2 = nn.SELU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels,
                               in_channels,
                               3,
                               1,
                               padding=2,
                               dilation=2)
Example #6
def eucl_non_lin(eucl_h, non_lin):
    # elementwise non-linearity on a tensor (assumes `import torch`)
    if non_lin == 'id':
        return eucl_h
    elif non_lin == 'relu':
        return torch.relu(eucl_h)
    elif non_lin == 'tanh':
        return torch.tanh(eucl_h)
    elif non_lin == 'sigmoid':
        return torch.sigmoid(eucl_h)
    return eucl_h
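
A quick sanity check of the helper above (a sketch; the tensor values are illustrative):

import torch

h = torch.tensor([-1.0, 0.0, 2.0])
print(eucl_non_lin(h, 'relu'))     # tensor([0., 0., 2.])
print(eucl_non_lin(h, 'sigmoid'))  # elementwise sigmoid
print(eucl_non_lin(h, 'id'))       # returned unchanged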
Example #7
def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \
                        pad_type='zero', norm_type=None, activation='relu'):
    conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias, \
                        pad_type=pad_type, norm_type=None, activation=None)
    pixel_shuffle = nn.PixelShuffle(upscale_factor)

    n = nn.BatchNorm2d(out_nc, affine=True) if norm_type else None
    a = None
    if activation == 'relu':
        a = nn.ReLU(inplace=True)
    elif activation == 'leakyrelu':
        a = nn.LeakyReLU(inplace=True)
    return BlockSequent(conv, pixel_shuffle, n, a)
Example #8
    def hybrid_forward(self, x):
        print("********************************")
        print("this is for checking ConvBnRelu Block Parameters")
        print("%s --- in_channels: %d, out_channels: %d, kernel_size: %d, strides: %d, padding: %d, groups: %d, is_bn_relu: %s"
            % (self.block_name, self.in_channels, self.out_channels, self.kernel, self.strides, self.padding, self.groups, self.is_bn_relu))

        x = self.conv(x)
        if self.is_bn_relu:
            x = self.relu(self.bn(x))
            print("Conv BN ReLU")
        else:
            x = self.bn(self.relu(x))
            print("Conv ReLU BN")
        return x
Example #9
    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input)

        attn_weights = torch.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = torch.relu(output)
        output, hidden = self.gru(output, hidden)

        output = torch.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights
Example #10
def conv_block(inputNC, outputNC, kernel_size, stride=1, dilation=1, groups=1, bias=True, \
               pad_type='zero', norm_type=None, activation='relu', mode='CNA'):

    kernel_size_t = kernel_size + (kernel_size - 1) * (dilation - 1)
    padding = (kernel_size_t - 1) // 2
    p = None  # only zero padding is handled here; other pad types fall back to unpadded convs
    padding = padding if pad_type == 'zero' else 0

    c = nn.Conv2d(inputNC, outputNC, kernel_size=kernel_size, stride=stride, padding=padding, \
            dilation=dilation, bias=bias, groups=groups)
    a = None
    if activation == 'relu':
        a = nn.ReLU(inplace=True)
    elif activation == 'leakyrelu':
        a = nn.LeakyReLU(inplace=True)
    n = nn.BatchNorm2d(outputNC, affine=True) if norm_type else None
    return BlockSequent(p, c, n, a)
Example #11
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 norm_layer=None):

        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d  # fall back to the standard BatchNorm layer if none is given
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
Example #12
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              norm_layer=None):
     # downsample: adjusts the shortcut's dimensions so it can be added to the residual branch
     # norm_layer: batch normalization layer
     super(BasicBlock, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d  # use the standard BatchNorm layer if none is given
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.stride = stride
Example #13
 def __init__(self, in_channels, out_channels, kernel_size, stride):
     super(ResidualBlock, self).__init__()
     self.conv1 = nn.Conv2d(in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            stride=stride)
     self.bn1 = nn.BatchNorm2d(out_channels)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = nn.Conv2d(in_channels=out_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size)
     self.bn2 = nn.BatchNorm2d(out_channels)
     self.residual = nn.Conv2d(in_channels,
                               out_channels,
                               kernel_size=1,
                               stride=stride,
                               bias=False)
     self.residual_bn = nn.BatchNorm2d(out_channels)
Example #14
    def __init__(self):
        super(DynamicsNet, self).__init__()

        self.l1 = nn.LSTMCell(16, 12)
        self.l2 = nn.LSTMCell(12, 12)
        self.l3_mean = nn.LSTMCell(12, 12)
        self.l3_dev = nn.LSTMCell(12, 12)

        nn.init.xavier_uniform_(self.l1.weight_hh)
        nn.init.xavier_uniform_(self.l2.weight_hh)
        nn.init.xavier_uniform_(self.l3_mean.weight_hh)
        nn.init.xavier_uniform_(self.l3_dev.weight_hh)

        nn.init.xavier_uniform_(self.l1.weight_ih)
        nn.init.xavier_uniform_(self.l2.weight_ih)
        nn.init.xavier_uniform_(self.l3_mean.weight_ih)
        nn.init.xavier_uniform_(self.l3_dev.weight_ih)

        self.l1 = nn.Sequential(self.l1, nn.ReLU())
        self.l2 = nn.Sequential(self.l2, nn.SELU())
        self.l3_mean = nn.Sequential(self.l3_mean, nn.Tanh())
        self.l3_dev = nn.Sequential(self.l3_dev, nn.Tanh())
Example #15
def define_PRSNet(input_nc,
                  output_nc,
                  conv_layers,
                  num_plane,
                  num_quat,
                  biasTerms,
                  useBn,
                  activation,
                  init_gain=0.02,
                  gpu_ids=[]):
    if activation == 'relu':
        ac_fun = nn.ReLU()
    elif activation == 'tanh':
        ac_fun = nn.Tanh()
    elif activation == 'lrelu':
        ac_fun = nn.LeakyReLU(0.2, True)
    if useBn:
        print('using batch normalization')

    net = PRSNet(input_nc, output_nc, conv_layers, num_plane, num_quat,
                 biasTerms, useBn, ac_fun)
    return init_net(net, init_gain, gpu_ids)
Example #16
 def __init__(self, seq_len):
     super(xd2, self).__init__()
     #Convolutions
     self.length = seq_len
     self.conv1 = nn.Conv2d(1, 8, kernel_size=(5, 4))
     self.pool1 = nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 1))
     self.conv2 = nn.Conv2d(8, 16, kernel_size=(1, 2))
     self.pool2 = nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 1))
     #Getting the dimension after convolutions
     self.dummy_param = nn.Parameter(torch.empty(0))
     self.name = 'xd2_' + str(seq_len)
     #Linear/Dense layers
     if seq_len >= 14:
         self.fc1 = nn.Sequential(nn.Linear(16 * (seq_len - 6), 96),
                                  nn.BatchNorm1d(96), nn.ReLU(),
                                  nn.Linear(96, 50))
     else:
         self.fc1 = nn.Linear(16 * (seq_len - 6), 50)
     self.bn1 = nn.BatchNorm1d(50)
     self.fc2 = nn.Linear(50, 10)
     self.bn2 = nn.BatchNorm1d(10)
     self.fc3 = nn.Linear(10, 2)
     self.dropout = nn.Dropout(0.4)
Example #17
    # Sample data
    z = Variable(torch.randn(mb_size, z_dim))
    X, _ = mnist.train.next_batch(mb_size)
    X = Variable(torch.from_numpy(X))

    if use_gpu:
        z = z.cuda()
        X = X.cuda()

    # Discriminator
    G_sample = G(z)
    D_real = D(X)
    D_fake = D(G_sample)

    # EBGAN D loss. D_real and D_fake are energies, i.e. scalars.
    D_loss = D_real + torch.relu(m - D_fake)

    # Reuse D_fake for generator loss
    D_loss.backward()
    D_solver.step()
    reset_grad()

    # Generator
    G_sample = G(z)
    D_fake = D(G_sample)

    G_loss = D_fake

    G_loss.backward()
    G_solver.step()
    reset_grad()
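
As a hedged illustration of the margin term used above (the numbers are made up; m and the energies are scalars):

import torch

m = 10.0
D_real = torch.tensor(2.0)   # energy assigned to a real sample (lower is better)
D_fake = torch.tensor(4.0)   # energy assigned to a generated sample
D_loss = D_real + torch.relu(m - D_fake)
print(D_loss)  # tensor(8.) = 2.0 + max(0, 10.0 - 4.0)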
Example #18
    def _build_fused(self, hp):
        n_input = hp['n_input']
        n_rnn = hp['n_rnn']
        n_output = hp['n_output']

        # Activation functions
        if hp['activation'] == 'power':
            f_act = lambda x: tf.square(tf.nn.relu(x))
        elif hp['activation'] == 'retanh':
            f_act = lambda x: tf.tanh(tf.nn.relu(x))
        elif hp['activation'] == 'relu+':
            f_act = lambda x: tf.nn.relu(x + tf.constant(1.))
        else:
            f_act = getattr(tf.nn, hp['activation'])

        # Recurrent activity
        if hp['rnn_type'] == 'LeakyRNN':
            n_in_rnn = self.x.get_shape().as_list()[-1]
            cell = LeakyRNNCell(n_rnn,
                                n_in_rnn,
                                hp['alpha'],
                                sigma_rec=hp['sigma_rec'],
                                activation=hp['activation'],
                                w_rec_init=hp['w_rec_init'],
                                rng=self.rng)
        elif hp['rnn_type'] == 'LeakyGRU':
            cell = LeakyGRUCell(n_rnn,
                                hp['alpha'],
                                sigma_rec=hp['sigma_rec'],
                                activation=f_act)
        elif hp['rnn_type'] == 'LSTM':
            cell = tf.contrib.rnn.LSTMCell(n_rnn, activation=f_act)

        elif hp['rnn_type'] == 'GRU':
            cell = tf.contrib.rnn.GRUCell(n_rnn, activation=f_act)
        else:
            raise NotImplementedError("""rnn_type must be one of LeakyRNN,
                    LeakyGRU, EILeakyGRU, LSTM, GRU
                    """)

        # Dynamic rnn with time major
        self.h, states = rnn.dynamic_rnn(cell,
                                         self.x,
                                         dtype=tf.float32,
                                         time_major=True)

        # Output
        with tf.variable_scope("output"):
            # Using default initialization `glorot_uniform_initializer`
            w_out = tf.get_variable('weights', [n_rnn, n_output],
                                    dtype=tf.float32)
            b_out = tf.get_variable('biases', [n_output],
                                    dtype=tf.float32,
                                    initializer=tf.constant_initializer(
                                        0.0, dtype=tf.float32))

        h_shaped = tf.reshape(self.h, (-1, n_rnn))
        y_shaped = tf.reshape(self.y, (-1, n_output))
        # y_hat_ shape (n_time*n_batch, n_unit)
        y_hat_ = tf.matmul(h_shaped, w_out) + b_out
        if hp['loss_type'] == 'lsq':
            # Least-square loss
            y_hat = tf.sigmoid(y_hat_)
            self.cost_lsq = tf.reduce_mean(
                tf.square((y_shaped - y_hat) * self.c_mask))
        else:
            y_hat = tf.nn.softmax(y_hat_)
            # Cross-entropy loss
            self.cost_lsq = tf.reduce_mean(
                self.c_mask * tf.nn.softmax_cross_entropy_with_logits(
                    labels=y_shaped, logits=y_hat_))

        self.y_hat = tf.reshape(y_hat, (-1, tf.shape(self.h)[1], n_output))
        y_hat_fix, y_hat_ring = tf.split(self.y_hat, [1, n_output - 1],
                                         axis=-1)
        self.y_hat_loc = tf_popvec(y_hat_ring)
Example #19
 def __init__(self, input_size, hidden_size, num_classes):
     super(NeuralNet, self).__init__()
     self.l1 = nn.Linear(input_size, hidden_size)
     self.l2 = nn.Linear(hidden_size, hidden_size)
     self.l3 = nn.Linear(hidden_size, num_classes)
     self.relu = nn.ReLU()
Example #20
# GAN training
for t in range(n_epoch):
    s_x, s_z = torch.zeros(1), torch.zeros(1)

    for it in range(n_iter):
        # Sample data
        z = Variable(torch.randn(mb_size, z_dim))
        X, _ = mnist.train.next_batch(mb_size)
        X = Variable(torch.from_numpy(X))

        # Discriminator
        G_sample = G(z)
        D_real = D(X)
        D_fake = D(G_sample)

        D_loss = torch.mean(D_real) + torch.relu(m - torch.mean(D_fake))

        D_loss.backward()
        D_solver.step()

        # Update real samples statistics
        s_x += torch.sum(D_real.data)

        reset_grad()

        # Generator
        z = Variable(torch.randn(mb_size, z_dim))
        G_sample = G(z)
        D_fake = D(G_sample)

        G_loss = torch.mean(D_fake)
Example #21
 def forward(self, x):
     x = F.relu(self.net1(x))
     x = F.relu(self.net2(x))
     x = F.relu(self.net3(x))
     x = self.net4(x)
     return F.log_softmax(x, dim=1)
Example #22
def convtrans_block_gen(in_f, out_f, *args, **kwargs):
    return nn.Sequential(
        nn.ConvTranspose2d(in_f, out_f, *args, **kwargs),
        nn.BatchNorm2d(out_f),
        nn.ReLU()
    )
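
A hedged usage sketch (argument values are illustrative; with kernel_size=4, stride=2, padding=1 the spatial size doubles):

import torch
import torch.nn as nn

up = convtrans_block_gen(64, 32, kernel_size=4, stride=2, padding=1)
x = torch.randn(1, 64, 16, 16)
print(up(x).shape)  # torch.Size([1, 32, 32, 32])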
Example #23
# GAN training
for t in range(n_epoch):
    s_x, s_z = torch.zeros(1), torch.zeros(1)

    for it in range(n_iter):
        # Sample data
        z = Variable(torch.randn(mb_size, z_dim))
        X, _ = mnist.train.next_batch(mb_size)
        X = Variable(torch.from_numpy(X))

        # Discriminator
        G_sample = G(z)
        D_real = D(X)
        D_fake = D(G_sample)

        D_loss = torch.mean(D_real) + torch.relu(m - torch.mean(D_fake))

        D_loss.backward()
        D_solver.step()

        # Update real samples statistics
        s_x += torch.sum(D_real.data)

        reset_grad()

        # Generator
        z = Variable(torch.randn(mb_size, z_dim))
        G_sample = G(z)
        D_fake = D(G_sample)

        G_loss = torch.mean(D_fake)
Example #24
D_solver = optim.Adam(D_.parameters(), lr=lr)


for it in range(1000000):
    # Sample data
    z = Variable(torch.randn(mb_size, z_dim))
    X, _ = mnist.train.next_batch(mb_size)
    X = Variable(torch.from_numpy(X))

    # Discriminator
    G_sample = G(z)
    D_real = D(X)
    D_fake = D(G_sample)

    # EBGAN D loss. D_real and D_fake are energies, i.e. scalars.
    D_loss = D_real + torch.relu(m - D_fake)

    # Reuse D_fake for generator loss
    D_loss.backward()
    D_solver.step()
    reset_grad()

    # Generator
    G_sample = G(z)
    D_fake = D(G_sample)

    G_loss = D_fake

    G_loss.backward()
    G_solver.step()
    reset_grad()
Example #25
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torchvision.transforms.transforms import Normalize

# Implement the sequential module for feature extraction
torch.manual_seed(50)
network1 = nn.Sequential(
    nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Flatten(start_dim=1),
    nn.Linear(in_features=12 * 4 * 4, out_features=120),
    nn.ReLU(),
    nn.Linear(in_features=120, out_features=60),  # must match the 120 features produced above
    nn.ReLU(),
    nn.Linear(in_features=60, out_features=10))
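
A quick shape check for network1 (a sketch; assumes a 1x28x28 MNIST-style input):

x = torch.randn(4, 1, 28, 28)
print(network1(x).shape)  # torch.Size([4, 10]); spatially 28 -> 24 -> 12 -> 8 -> 4, so 12 * 4 * 4 = 192 flattened features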
torch.manual_seed(50)
network2 = nn.Sequential(
    nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(6),
    nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5),
Example #26
 def forward(self, input):
     fc1 = self.fc1(input)
     relu1 = nn.functional.relu(fc1)
     fc2 = self.fc2(relu1)
     relu2 = nn.functional.relu(fc2)
     return relu2