Example #1
    def __init__(self):
        super(KDD_LCAE, self).__init__()

        self.x_dim = 115
        self.encoder_h_dim = [30]
        self.z_dim = 5
        self.decoder_h_dim = [30]

        self.encoder_layers = []
        self.encoder_layers.append(nn.Linear(self.x_dim,
                                             self.encoder_h_dim[0]))
        self.encoder_layers.append(nn.ReLU())
        for i in range(len(self.encoder_h_dim) - 1):
            self.encoder_layers.append(
                nn.Linear(self.encoder_h_dim[i], self.encoder_h_dim[i + 1]))
            self.encoder_layers.append(nn.ReLU())
        self.encoder_layers.append(
            nn.Linear(self.encoder_h_dim[len(self.encoder_h_dim) - 1],
                      self.z_dim))
        self.encoder_layers = nn.ModuleList(self.encoder_layers)

        self.decoder_layers = []
        self.decoder_layers.append(nn.Linear(self.z_dim,
                                             self.decoder_h_dim[0]))
        self.decoder_layers.append(nn.ReLU())
        for i in range(len(self.decoder_h_dim) - 1):
            self.decoder_layers.append(
                nn.Linear(self.decoder_h_dim[i], self.decoder_h_dim[i + 1]))
            self.decoder_layers.append(nn.ReLU())
        self.decoder_layers.append(
            nn.Linear(self.decoder_h_dim[len(self.decoder_h_dim) - 1],
                      self.x_dim))
        self.decoder_layers.append(nn.Tanh())
        self.decoder_layers = nn.ModuleList(self.decoder_layers)
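    # A minimal forward-pass sketch (an addition, not part of the original
    # snippet), assuming the ModuleLists above are meant to be applied in order
    # to a batch of 115-dimensional feature vectors.
    def forward(self, x):
        z = x
        for layer in self.encoder_layers:
            z = layer(z)          # encode: 115 -> 30 -> 5
        x_hat = z
        for layer in self.decoder_layers:
            x_hat = layer(x_hat)  # decode: 5 -> 30 -> 115, Tanh-squashed output
        return x_hat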
Example #2
    def __init__(self, num_players, rnn_size, comm_size, num_actions,
                 observation_size, init_param_range):

        super(DRQNet, self).__init__()
        self.num_players = 3
        self.rnn_size = 128
        self.comm_size = 2
        self.num_actions = num_actions
        self.observation_size = observation_size
        self.init_param_range = (-0.08, 0.08)

        #Embedding matrix for DRQN
        self.action_matrix = nn.Embedding(self.num_players, self.rnn_size)
        self.observation_matrix = nn.Embedding(self.observation_size,
                                               self.rnn_size)
        self.previous_action_matrix = nn.Embedding(self.num_actions,
                                                   self.rnn_size)

        #Single layer NN for producing embeddings for messages
        self.message = nn.Sequential(nn.BatchNorm1d(self.comm_size),
                                     nn.Linear(self.comm_size, self.rnn_size),
                                     nn.ReLU(inplace=True))

        #RNN component for history over POMDP
        self.rnn = nn.GRU(input_size=self.rnn_size,
                          hidden_size=self.rnn_size,
                          num_layers=2,
                          batch_first=True)

        #Output from RNN layer
        self.output = nn.Sequential(
            nn.Linear(self.rnn_size, self.rnn_size),
            nn.BatchNorm1d(self.rnn_size), nn.ReLU(),
            nn.Linear(self.rnn_size, self.observation_size))
Example #3
 def __init__(self, gridSize=25):
     super(FaceGridModel, self).__init__()
     self.fc = nn.Sequential(
         nn.Linear(gridSize * gridSize, 256),
         nn.ReLU(inplace=True),
         nn.Linear(256, 128),
         nn.ReLU(inplace=True),
     )
Example #4
 def __init__(self):
     super(ItrackerImageModel, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(96, 64, kernel_size=1, stride=1, padding=0),
         nn.ReLU(inplace=True),
     )
Example #5
    def forward(self, x):
        identity = x                       # keep the block input for the skip connection
        x = F.relu(self.conv1(x))          # F is torch.nn.functional
        x = self.bn1(x)
        x = F.relu(self.conv2(x))
        x = self.bn2(x)
        x = x + self.skip_layer(identity)  # residual connection
        x = F.relu(x)

        return x
Example #6
    def __init__(self, num_features: int):
        self.num_features = num_features
        super().__init__()
        self.block1 = nn.Sequential(nn.Linear(num_features, 256),
                                    nn.BatchNorm1d(256), nn.ReLU())

        self.block2 = nn.Sequential(nn.Linear(256, 128),
                                    nn.BatchNorm1d(128), nn.ReLU())

        self.out = nn.Linear(128, 1)
Example #7
    def __init__(self):
        super(LeNet, self).__init__()
        self.CN = nn.Sequential(nn.Conv2d(3, 6, 5), nn.ReLU(), nn.MaxPool2d(2),
                                nn.Conv2d(6, 16, 5), nn.ReLU(),
                                nn.MaxPool2d(2))

        self.FC = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10),
        )
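A hedged shape check (an addition, not from the original listing), assuming the surrounding LeNet class defines only the layers above: with a 3x32x32 input, the two conv/pool stages leave 16 feature maps of size 5x5, which is why the first linear layer takes 16 * 5 * 5 inputs.

import torch

net = LeNet()
feats = net.CN(torch.randn(1, 3, 32, 32))
print(feats.shape)  # torch.Size([1, 16, 5, 5]), flattened before self.FC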
Example #8
    def __init__(self, input_size, output_size, requires_grad=True):
        super().__init__()

        # vehicle embedding
        self.vehicle_embedding = nn.Sequential(nn.Linear(8, 16), nn.ReLU(),
                                               nn.Linear(16, 32), nn.ReLU(),
                                               nn.Linear(32, 64), nn.ReLU(),
                                               nn.Linear(64, 32), nn.ReLU(),
                                               nn.Linear(32, 32))

        # create a neural network
        self.network = nn.Sequential(
            nn.Linear(32, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            #nn.Linear(512, 512),
            #nn.ReLU(),
            nn.Linear(512, output_size))
        self.loss_fn = F.mse_loss
        self.optimiser = torch.optim.Adam(self.network.parameters())

        if not requires_grad:
            for parameter in self.network.parameters():
                parameter.requires_grad = False
Example #9
 def __init__(self):
     super(FaceImageModel, self).__init__()
     self.conv = ItrackerImageModel()
     self.fc = nn.Sequential(
         nn.Linear(43264, 64),
         nn.ReLU(inplace=True),
     )
Example #10
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              norm_layer=None):
     super(BasicBlock, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only support groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             'Dilation > 1 not supported in BasicBlock')
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.stride = stride
Example #11
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers,
                 dropout,
                 use_gpu=False,
                 output_dim=1):
        super(LSTM, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_dim = output_dim
        self.lstm = nn.LSTM(input_size=int(input_size),
                            hidden_size=int(hidden_size),
                            num_layers=int(num_layers),
                            dropout=float(dropout),
                            batch_first=True)
        self.use_gpu = use_gpu

        #Deal with Pytorch initialization for LSTMs being flawed
        for name, param in self.lstm.named_parameters():
            if 'weight' in name:
                init.kaiming_uniform_(param)

        #TODO: Layer Norm?

        self.fc1 = nn.Linear(hidden_size, 50)
        self.fc2 = nn.Linear(50, 1)
        self.relu = nn.ReLU()
        self.float = torch.FloatTensor
        if use_gpu:
            self.float = torch.cuda.FloatTensor
Example #12
    def __init__(self, total_size, ics, init_weights=True):
        super(DenseNet, self).__init__()
        self.total_size = total_size
        self.init_weights = init_weights
        self.ics = ics
        self.num_ics = sum(self.ics)
        self.num_class = 10
        self.num_output = 0

        self.train_func = mf.iter_training_0
        self.test_func = mf.sdn_test

        self.input_size = 32
        self.in_channels = 16
        self.cum_in_channels = self.in_channels

        self.init_conv = nn.Sequential(*[
            nn.Conv2d(3, self.in_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(self.in_channels),
            nn.ReLU()
        ])

        self.end_layers = nn.Sequential(*[
            nn.AvgPool2d(kernel_size=8),
            af.Flatten(),
            nn.Linear(2560, self.num_class)
        ])
        self.grow()

        if self.init_weights:
            self._init_weights(self.modules())
Example #13
def single_conv_module(in_chs,
                       out_chs,
                       kernel,
                       deconv=False,
                       activation=True,
                       leaky=True):
    assert kernel % 2 == 1

    layers = [
        nn.Conv2d(in_chs,
                  out_chs,
                  kernel_size=kernel,
                  padding=(kernel - 1) // 2),
        nn.BatchNorm2d(out_chs),
        nn.LeakyReLU(0.2, inplace=True)
    ]

    if deconv:
        layers[0] = nn.ConvTranspose2d(in_chs,
                                       out_chs,
                                       kernel_size=kernel,
                                       padding=(kernel - 1) // 2)
    if not leaky:
        layers[2] = nn.ReLU(inplace=True)
    if not activation:
        del layers[-1]

    return nn.Sequential(*layers)
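A brief usage sketch (an addition, not from the original): with an odd kernel the padding keeps the spatial size, so only the channel count changes.

import torch

block = single_conv_module(3, 16, kernel=3)
x = torch.randn(2, 3, 64, 64)
print(block(x).shape)  # torch.Size([2, 16, 64, 64])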
Example #14
    def __init__(self, block, blocks_num, num_class=1000, include_top=True):
        super(ResNet, self).__init__()
        self.include_top = include_top
        self.in_channel = 64
        self.relu = nn.ReLU()

        self.conv1 = nn.Conv2d(3,
                               self.in_channel,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_channel)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, blocks_num[0])
        self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)
        self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)
        self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)
        if self.include_top:
            # avgpool
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            # Linear
            self.fc = nn.Linear(512 * block.expansion, num_class)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
Example #15
        def conv(i, normalization="batch", dropout=None, activ='relu'):
            in_dim = n_input_ch if i == 0 else n_filt[i - 1]
            out_dim = n_filt[i]
            cnn.add_module(
                "conv{0}".format(i),
                nn.Conv2d(in_dim, out_dim, kernel[i], stride[i], pad[i]))
            if normalization == "batch":
                cnn.add_module(
                    "batchnorm{0}".format(i),
                    nn.BatchNorm2d(out_dim, eps=0.001, momentum=0.99))
            elif normalization == "layer":
                cnn.add_module("layernorm{0}".format(i),
                               nn.GroupNorm(1, out_dim))

            if activ.lower() == "leakyrelu":
                cnn.add_module("Relu{0}".format(i), nn.LeakyReLu(0.2))
            elif activ.lower() == "relu":
                cnn.add_module("Relu{0}".format(i), nn.ReLu())
            elif activ.lower() == "glu":
                cnn.add_module("glu{0}".format(i), GLU(out_dim))
            elif activ.lower() == "cg":
                cnn.add_module("cg{0}".format(i), ContextGating(out_dim))

            if dropout is not None:
                cnn.add_module("dropout{0}".format(i), nn.Dropout(dropout))
Example #16
 def __init__(self, state_dim, action_dim):
     super(MLSHMasterPolicy, self).__init__()
     self._A = action_dim
     self._mlp = nn.Sequential(_init(nn.Linear(state_dim, 64)), nn.ReLU(),
                               _init(nn.Linear(64, 64)), nn.ReLU(),
                               _init(nn.Linear(64, self._A * 2)))
     return
Example #17
    def __init__(self,in_f,out_f,p_size=2,k_size=3,
        stride=1,activation=nn.ReLU()):
        super(BNConv1D,self).__init__()

        self.conv = nn.Conv1d(in_f,out_f,k_size,stride,padding=k_size//2)
        self.pool = nn.MaxPool1d(p_size)
        self.bn = nn.BatchNorm1d(out_f)
        self.activation = activation
Example #18
 def __init__(self, in_planes, out_planes, stride=1):
     super(ResNetBlock, self).__init__()
     self.res = nn.Sequential(nn.BatchNorm2d(in_planes),
                              nn.ReLU(inplace=True),
                              nn.Conv2d(in_planes, out_planes, 3, stride),
                              nn.BatchNorm2d(out_planes),
                              nn.ReLU(inplace=True),
                              nn.Conv2d(out_planes, out_planes, 3, stride))
Example #19
 def __init__(self):
     super(CNN, self).__init__()
     self.layer1 = nn.Sequential(
         nn.Conv2d(1, 16, kernel_size=3),
         nn.BatchNorm2d(16),
         nn.ReLU(inplace=True),
     )
     self.layer2 = nn.Sequential(nn.Conv2d(16, 32, kernel_size=3),
                                 nn.BatchNorm2d(32), nn.ReLU(inplace=True),
                                 nn.MaxPool2d(kernel_size=2, stride=2))
     self.layer3 = nn.Sequential(nn.Conv2d(32, 64, kernel_size=3),
                                 nn.BatchNorm2d(64), nn.ReLU(inplace=True))
     self.layer4 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3),
                                 nn.BatchNorm2d(128), nn.ReLU(inplace=True),
                                 nn.MaxPool2d(kernel_size=2, stride=2))
     self.fc = nn.Sequential(nn.Linear(128 * 4 * 4, 1024),
                             nn.ReLU(inplace=True), nn.Linear(1024, 128),
                             nn.ReLU(inplace=True), nn.Linear(128, 10))
Example #20
 def __init__(self, in_channel, out_channel, kernel=3, stride=2):
     super().__init__()  # assumed nn.Module subclass (class line not shown); needed before assigning submodules
     self.conv = nn.Conv2d(in_channel,
                           out_channel,
                           kernel,
                           stride,
                           padding=1,
                           padding_mode='reflect')
     self.norm = nn.InstanceNorm2d(out_channel)
     self.act = nn.ReLU()
Example #21
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = nn.BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = nn.BatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
Example #22
 def __init__(self, inchannnel, outchannel, stride=1, shortcut=None):
     super(ResidualBlock, self).__init__()
     self.left = nn.Sequential(
         nn.Conv2d(inchannnel, outchannel, 3, stride, 1, bias=False),
         nn.BatchNorm2d(outchannel),
         nn.ReLU(inplace=True),
         nn.Conv2d(outchannel, outchannel, 3, 1, 1, bias=False),
         nn.BatchNorm2d(outchannel)
     )
     self.right = shortcut
Example #23
    def __init__(self,
                 src_vocab_size,
                 tgt_vocab_size,
                 word_emb_size,
                 src_vocab,
                 tgt_vocab,
                 use_cuda=False):
        super(Discriminator, self).__init__()

        self.src_vocab_size = src_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        self.word_emb_size = word_emb_size
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.use_cuda = use_cuda

        self.embedding_s = nn.Embedding(src_vocab_size, word_emb_size)
        self.embedding_t = nn.Embedding(tgt_vocab_size, word_emb_size)

        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=word_emb_size * 2,
                out_channels=64,
                kernel_size=3,
                stride=1,
                padding=1,
            ), nn.BatchNorm2d(64), nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))

        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=64,
                out_channels=20,
                kernel_size=3,
                stride=1,
                padding=1,
            ), nn.BatchNorm2d(20), nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        #why 1280
        self.mlp = nn.Linear(1280, 20)
        self.ll = nn.Linear(20, 2)
        self.sigmoid = nn.Sigmoid()
Example #24
    def __init__(self):
        super(NeuralNetwor, self).__init__()

        self.number_of_actions = 2
        self.gamma = 0.95
        self.final_epsilon = 0.0001
        self.initial_epsilon = 0.1
        self.number_of_iterations = 2000000
        self.replay_memory_size = 10000
        self.minibatch_size = 32

        self.conv1 = nn.Conv2d(4, 32, 8, 4)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 4, 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(64, 64, 3, 1)
        self.relu3 = nn.ReLU(inplace=True)
        self.fc4 = nn.Linear(3136, 512)
        self.relu4 = nn.ReLU(inplace=True)
        self.fc5 = nn.Linear(512, self.number_of_actions)
Example #25
 def __init__(self, class_num, filter_num, num):
    super().__init__()
    self.layer1 = nn.Sequential(nn.Conv2d(in_channels = 1,
                                     out_channels = 32,
                                     kernel_size = 5,
                                     stride = 1,
                                     padding = 2),
                        nn.BatchNorm2d(num_features = 32),
                        nn.ReLU(),
                        nn.MaxPool2d(kernel_size = 2, stride = 2))
    self.layer2 = nn.Sequential(nn.Conv2d(in_channels = 32,
                                     out_channels = 64,
                                     kernel_size = 5,
                                     stride = 1,
                                     padding = 2),
                        nn.BatchNorm2d(num_features = 64),
                        nn.ReLU(),
                        nn.MaxPool2d(kernel_size = 2, stride = 2))
    self.fc = nn.Linear(64 * num * num, class_num)  # 64 channels out of layer2
    self.dropout = nn.Dropout()
Example #26
 def __init__(self, n_neurons):
     super(BasicNN, self).__init__()
     self.hidden_block = nn.Sequential(
         nn.Linear(21, n_neurons),
         nn.ReLU(inplace=True)
     )
     
     self.out_block = nn.Sequential(
         nn.Linear(n_neurons, 2),
         nn.Sigmoid()
     )
Example #27
def make_conv_layers(vgg16_config):
    layers = []
    in_channels = 3
    for ii in vgg16_config:
        if ii == 'Max':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, ii, kernel_size=3, padding=1)
            layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = ii
    return nn.Sequential(*layers)
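A hedged usage sketch (an addition): the cfg list below is a made-up stand-in, not the real VGG-16 configuration, which is not shown here.

import torch

cfg = [64, 64, 'Max', 128, 128, 'Max']  # hypothetical config
features = make_conv_layers(cfg)
print(features(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 128, 8, 8])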
Example #28
 def __init__(self, in_channels, out_channels, add_ic=False, num_classes=10, input_size=32):
     super(ConvBNRLUnit, self).__init__()
     self.layers = nn.Sequential(*[
         nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1),
         nn.BatchNorm2d(out_channels),
         nn.ReLU()
     ])
     
     if add_ic:
         self.ic = af.InternalClassifier(input_size, out_channels, num_classes)
     else:
         self.ic = None
Example #29
 def __init__(self):
     super(ITrackerModel, self).__init__()
     self.eyeModel = ItrackerImageModel()
     self.faceModel = FaceImageModel()
     self.gridModel = FaceGridModel()
     # Joining both eyes
     self.eyesFC = nn.Sequential(
         nn.Linear(2 * 12 * 12 * 64, 128),
         nn.ReLU(inplace=True),
     )
     # Joining everything
     self.fc = nn.Sequential(nn.Linear(128 + 64 + 128, 2), )
Example #30
def downsampling_module(in_chs, pooling_kenel, leaky=True):
    layers = [
        nn.AvgPool2d(pooling_kenel),
        nn.Conv2d(in_chs, in_chs * 2, kernel_size=1, padding=0),
        nn.BatchNorm2d(in_chs * 2),
        nn.LeakyReLU(0.2, inplace=True)
    ]

    if not leaky:
        layers[3] = nn.ReLU(inplace=True)

    return nn.Sequential(*layers)
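A short usage sketch (an addition, not from the original): the module halves the spatial resolution with average pooling and doubles the channel count with a 1x1 convolution.

import torch

down = downsampling_module(16, 2)
x = torch.randn(2, 16, 32, 32)
print(down(x).shape)  # torch.Size([2, 32, 16, 16])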