Example #1
    def __init__(self, in_planes, planes, stride=1, act='relu'):
        super(Bottleneck, self).__init__()
        # `expansion` is a class attribute (4 in the reference ResNet
        # Bottleneck); it is defined outside this excerpt.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes,
                               self.expansion * planes,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        if act == 'relu':
            self.act = nn.ReLU()
        elif act == 'tanh':
            self.act = nn.Tanh()
        elif act == 'sigmoid':
            self.act = nn.Sigmoid()
        else:
            raise ValueError('Unsupported activation: {}'.format(act))

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))
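The excerpt shows only the constructor. A minimal forward pass consistent with it (an assumption, not part of the original) would be:

    def forward(self, x):
        # 1x1 reduce, 3x3, 1x1 expand, each followed by BatchNorm;
        # the configured activation is applied after every stage
        out = self.act(self.bn1(self.conv1(x)))
        out = self.act(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + self.shortcut(x)  # residual connection
        return self.act(out)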
Example #2
    def __init__(self, layer_size, activation):
        super(DNN, self).__init__()
        layers = []
        self.activation = activation

        # Defaults to sigmoid
        if self.activation == 'relu':
            self.activation_func = nn.ReLU()
        elif self.activation == 'tanh':
            self.activation_func = nn.Tanh()
        else:
            self.activation_func = nn.Sigmoid()

        for l_id in range(len(layer_size) - 1):
            if l_id == len(layer_size) - 2:  # final layer: no activation
                layers.append(
                    nn.Sequential(
                        nn.BatchNorm1d(num_features=layer_size[l_id],
                                       affine=False),
                        nn.Linear(layer_size[l_id], layer_size[l_id + 1]),
                    ))
            else:  # all other layers
                layers.append(
                    nn.Sequential(
                        nn.Linear(layer_size[l_id], layer_size[l_id + 1]),
                        self.activation_func,
                        nn.BatchNorm1d(num_features=layer_size[l_id + 1],
                                       affine=False),
                    ))

        self.layers = nn.ModuleList(layers)
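The matching forward pass is not shown; a minimal sketch under the assumption that the blocks are applied in order:

    def forward(self, x):
        # Each entry in self.layers is a Sequential block; the last one
        # ends in a plain Linear with no activation
        for layer in self.layers:
            x = layer(x)
        return x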
Example #3
def load_model(arch='vgg16', num_labels=102, hidden_units=4096):

    if arch == 'vgg16':
        model = models.vgg16(pretrained=True)
    else:
        raise ValueError('Unexpected network architecture', arch)

    for param in model.parameters():
        param.requires_grad = False

    features = list(model.classifier.children())[:-1]
    num_filters = model.classifier[len(features)].in_features

    features.extend([
        nn.Dropout(),
        nn.Linear(num_filters, hidden_units),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(hidden_units, hidden_units),
        nn.ReLU(True),
        nn.Linear(hidden_units, num_labels),  # final layer maps to the class count
    ])

    model.classifier = nn.Sequential(*features)

    return model
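A short usage sketch (the surrounding training code is not part of the example):

# Requires `from torchvision import models` and `import torch.nn as nn`.
# Builds a frozen VGG16 feature extractor with a new classifier head
# ending in `num_labels` outputs.
model = load_model(arch='vgg16', num_labels=102, hidden_units=4096)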
Example #4
    def __init__(self, in_channels):
        super().__init__()
        self.block = nn.Sequential(nn.ReflectionPad2d(1),
                                   nn.Conv2d(in_channels, in_channels, 3),
                                   nn.InstanceNorm2d(in_channels),
                                   nn.ReLU(inplace=True),
                                   nn.ReflectionPad2d(1),
                                   nn.Conv2d(in_channels, in_channels, 3),
                                   nn.InstanceNorm2d(in_channels))
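This is the reflection-padded residual block used in CycleGAN-style generators; the forward pass (not shown) typically adds the input back:

    def forward(self, x):
        # ReflectionPad2d(1) + 3x3 conv keeps the spatial size,
        # so the skip connection shapes match
        return x + self.block(x)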
Example #5
    def __init__(self, in_channels, out_channels, identity_downsample=None, stride=1):
        super(block, self).__init__()
        self.expansion = 4
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # padding=1 keeps the 3x3 conv shape-preserving, so the residual addition can match
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, out_channels*self.expansion, kernel_size=1, stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(out_channels*self.expansion)
        self.relu = nn.ReLU()
        self.identity_downsample = identity_downsample
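A forward pass consistent with this constructor (an assumption; only __init__ appears in the example):

    def forward(self, x):
        identity = x
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        if self.identity_downsample is not None:
            # Match the identity's shape when stride or channel count changes
            identity = self.identity_downsample(identity)
        x = x + identity
        return self.relu(x)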
Example #6
    def __init__(self):
        super().__init__()

        self.hidden1 = nn.Sequential(
            nn.Conv2d(8, 32, kernel_size=(7,7), padding=3, bias=False),  # unclear about input dim
            # nn.BatchNorm2d(32)  # how to add BatchNorm when going from (N,H,W,C) to (N,C,H,W)?
            nn.ReLU()
        )
        self.hidden2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=(3,3), padding=1, bias=False),
            # BatchNorm2d
            nn.ReLU()
        )
        self.hidden3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=(3,3), padding=1, bias=False),
            # BatchNorm2d
            nn.ReLU()
        )
        '''
        ADD Residual Block
        '''
        '''
        ADD deconv blocks
        self.hidden4 = nn.Sequential(
            nn.ConvTranspose2d(128, 64),
            #BatchNorm2d
            nn.ReLU()
        )
        self.hidden5 = nn.Sequential(
            nn.ConvTranspose2d(64, 32),
            #BatchNorm2d
            nn.ReLU()
        )
        '''
        self.hidden6 = nn.Sequential(
            nn.Conv2d(32, 3, kernel_size=(7,7))
        )
Example #7
    def __init__(self):
        super().__init__()
        self.input_layer = nn.Conv2d(in_channels=1,out_channels=64,
                               kernel_size=3,stride=1,padding=1,bias=False)

        residual_block = []
        for _ in range(18):
            residual_block.append(nn.Conv2d(in_channels=64,out_channels=64,
                                  kernel_size=3,stride=1,padding=1,bias=False))
            residual_block.append(nn.ReLU(inplace=True))
        self.residual_block = nn.Sequential(*residual_block)

        self.output_layer = nn.Conv2d(in_channels=64,out_channels=1,
                                      kernel_size=3,stride=1,
                                      padding=1,bias=False)

        self.weight_initialize()
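`self.weight_initialize()` is called but not defined in the excerpt; a common Kaiming-style initializer for a ReLU conv stack might look like this (hypothetical, not the original implementation):

    def weight_initialize(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He initialization is the usual choice before ReLU
                nn.init.kaiming_normal_(m.weight, nonlinearity='relu')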
Example #8
    def __init__(self, num_classes=2):

        super(CNNNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        # (6, 6) matches the 256 * 6 * 6 input expected by the classifier below
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(nn.Dropout(),
                                        nn.Linear(256 * 6 * 6, 4096),
                                        nn.ReLU(), nn.Dropout(),
                                        nn.Linear(4096, 4096), nn.ReLU(),
                                        nn.Linear(4096, num_classes))
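An AlexNet-style forward pass that matches these modules (assumed; requires `import torch`):

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)  # (N, 256 * 6 * 6), as the classifier expects
        return self.classifier(x)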
Example #9
    def __init__(self, source_image_shape, target_image_shape, latent_vars):
        super(SimpleGenerator, self).__init__()
        self.target_size = target_image_shape
        self.latent_vars = latent_vars

        ###################################################
        # Transfer the source images to the target style. #
        ###################################################
        self.conv_layers = []
        in_channel = list(source_image_shape)[1]
        for i in range(0, params.simple_num_conv_layers):
            # Collect every block; a ModuleList registers them as submodules
            self.conv_layers.append(nn.Sequential(
                nn.Conv2d(in_channel,
                          params.simple_conv_filters,
                          kernel_size=params.generator_kernel_size),
                nn.BatchNorm2d(params.simple_conv_filters), nn.ReLU()))
            in_channel = params.simple_conv_filters
        self.conv_layers = nn.ModuleList(self.conv_layers)

        # Project back to the right # image channels
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channel, list(target_image_shape)[1], kernel_size=1),
            nn.Tanh())
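A forward sketch under the assumption that the conv blocks are applied in sequence before the 1x1 projection:

    def forward(self, x):
        for conv in self.conv_layers:
            x = conv(x)
        # Project back to the target image's channel count, squashed to [-1, 1]
        return self.conv2(x)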
Example #10
    def __init__(self, n_classes, batch_size, reuse_params=False):
        super(DeepSleepNet, self).__init__()
        self.sampling_rate = 100
        self.input_size = 3000
        self.in_chan_size = 1
        self.n_classes = n_classes
        self.reuse_params = reuse_params
        self.input_dim = (24576 // batch_size)
        self.n_rnn_layers = 2
        self.hidden_size = 512
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self.seq_length = 10
        self.softmax = nn.Softmax(dim=1)
        self.lstm = nn.LSTM(512,
                            self.hidden_size,
                            self.n_rnn_layers,
                            dropout=0.5,
                            bidirectional=True)
        self.fc = nn.Sequential(nn.Linear(3456, 1024), nn.ReLU())
        self.output = nn.Linear(512, self.n_classes)
Example #11
    def __init__(self, stroke_number):
        super(AutoEncoder, self).__init__()
        # VGG16 Code
        self.block_1 = nn.Sequential(
            nn.Conv2d(
                in_channels=3,
                out_channels=64,
                kernel_size=(3, 3),
                stride=(1, 1),
                # same padding: (1*(32-1) - 32 + 3) / 2 = 1
                padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)))

        self.block_2 = nn.Sequential(
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=128,
                      out_channels=128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)))

        self.block_3 = nn.Sequential(
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)))

        self.block_4 = nn.Sequential(
            nn.Conv2d(in_channels=256,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)))

        self.block_5 = nn.Sequential(
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1), nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)))

        self.feature_extractor = nn.Sequential(nn.Linear(512 * 4 * 4, 4096),
                                               nn.ReLU(True),
                                               nn.Linear(4096, 1024),
                                               nn.ReLU(True),
                                               nn.Linear(1024, 64))

        self.RNN_hidden = nn.Sequential(nn.Linear(141, 64), nn.ReLU())

        self.RNN_stroke = nn.Sequential(nn.Linear(64, 13))
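The encoder half can be exercised on its own; a hedged sketch of a forward pass through the VGG blocks and the feature head (the recurrent heads are presumably driven by stroke-decoding logic not shown here):

    def forward(self, x):
        # 512 * 4 * 4 implies a 128x128 input after five 2x2 poolings
        for block in (self.block_1, self.block_2, self.block_3,
                      self.block_4, self.block_5):
            x = block(x)
        x = x.view(x.size(0), -1)
        return self.feature_extractor(x)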
Example #12
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6):
        super(Generator, self).__init__()
        self._name = 'generator_wgan'

        layers = []
        layers.append(
            nn.Conv2d(3 + c_dim,
                      conv_dim,
                      kernel_size=7,
                      stride=1,
                      padding=3,
                      bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
        layers.append(nn.ReLU(inplace=True))

        # Down-Sampling
        curr_dim = conv_dim
        for i in range(2):
            layers.append(
                nn.Conv2d(curr_dim,
                          curr_dim * 2,
                          kernel_size=4,
                          stride=2,
                          padding=1,
                          bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim * 2, affine=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2

        # Bottleneck
        for i in range(repeat_num):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))

        # Up-Sampling
        for i in range(2):
            layers.append(
                nn.ConvTranspose2d(curr_dim,
                                   curr_dim // 2,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim // 2, affine=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim // 2

        self.main = nn.Sequential(*layers)

        layers = []
        layers.append(
            nn.Conv2d(curr_dim,
                      3,
                      kernel_size=7,
                      stride=1,
                      padding=3,
                      bias=False))
        layers.append(nn.Tanh())
        self.img_reg = nn.Sequential(*layers)

        layers = []
        layers.append(
            nn.Conv2d(curr_dim,
                      1,
                      kernel_size=7,
                      stride=1,
                      padding=3,
                      bias=False))
        layers.append(nn.Sigmoid())
        self.attention_reg = nn.Sequential(*layers)
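The forward pass is not included; in this GANimation-style generator the condition vector is usually tiled over the spatial dimensions and both heads share the trunk. A sketch under those assumptions (requires `import torch`):

    def forward(self, x, c):
        # Broadcast the c_dim condition over H x W and concatenate with the image
        c = c.view(c.size(0), c.size(1), 1, 1).expand(-1, -1, x.size(2), x.size(3))
        features = self.main(torch.cat([x, c], dim=1))
        return self.img_reg(features), self.attention_reg(features)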