Example #1
 def __init__(self):
     super(Network, self).__init__()
     self.layers = nn.Sequential(nn.Flatten(), BinaryLinear(28 * 28, 1024),
                                 nn.BatchNorm1d(1024), nn.ReLU(),
                                 BinaryLinear(1024, 1024),
                                 nn.BatchNorm1d(1024), nn.ReLU(),
                                 BinaryLinear(1024, 10), nn.LogSoftmax(dim=1))
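BinaryLinear is assumed to be defined elsewhere in this project; a minimal sketch of one common definition (BinaryNet-style sign binarization with a clipped straight-through estimator) could look like this:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BinarizeSTE(torch.autograd.Function):
    # sign() in the forward pass, straight-through gradient in the backward pass
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return torch.sign(x)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        # BinaryNet-style clipping: pass gradients only where |x| <= 1
        return grad_output * (x.abs() <= 1).float()

class BinaryLinear(nn.Linear):
    # linear layer whose weights are binarized to +/-1 on the fly
    def forward(self, input):
        return F.linear(input, BinarizeSTE.apply(self.weight), self.bias)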
Example #2
	def __init__(self):
		super(CNN, self).__init__()
		self.layer1 = nn.Sequential(
			nn.Conv2d(1, 25, kernel_size=3),
			nn.BatchNorm2d(25),
			nn.ReLU(inplace=True)
			)

		self.layer2 = nn.Sequential(
			nn.MaxPool2d(kernel_size=2, stride=2)
			)
		self.layer3 = nn.Sequential(
			nn.Conv2d(25, 50, kernel_size=3),
			nn.BatchNorm2d(50),
			nn.ReLU(inplace=True)
			)
		self.layer4 = nn.Sequential(
			nn.MaxPool2d(kernel_size=2, stride=2)
			)
		self.fc = nn.Sequential(
			nn.Linear(50 * 5 * 5, 1024),
			nn.ReLU(inplace=True),
			nn.Linear(1024, 128),
			nn.ReLU(inplace=True),
			nn.Linear(128, 10)
			)
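A hedged shape check for the 50 * 5 * 5 fc input, assuming 28 x 28 MNIST images: the unpadded 3x3 convolutions shrink each side by 2 and each pool halves it (28 -> 26 -> 13 -> 11 -> 5):

import torch

net = CNN()
x = torch.randn(2, 1, 28, 28)
feats = net.layer4(net.layer3(net.layer2(net.layer1(x))))
print(feats.shape)  # torch.Size([2, 50, 5, 5]) -> flattens to 50 * 5 * 5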
Example #3
 def __init__(self):
     super(deepCNN, self).__init__()
     self.layer1 = nn.Sequential(
         nn.Conv2d(in_channels=1,
                   out_channels=32,
                   kernel_size=3,
                   stride=1,
                   padding=1), nn.ReLU(), nn.MaxPool2d(2, 2))
     self.layer2 = nn.Sequential(
         nn.Conv2d(in_channels=32,
                   out_channels=64,
                   kernel_size=3,
                   stride=1,
                   padding=1), nn.ReLU(), nn.MaxPool2d(2, 2))
     self.layer3 = nn.Sequential(
         nn.Conv2d(in_channels=64,
                   out_channels=128,
                   kernel_size=3,
                   stride=1,
                   padding=1), nn.ReLU(), nn.MaxPool2d(2, 2))
     self.fc1 = nn.Linear(4 * 4 * 128, 625, bias=True)
     torch.nn.init.xavier_uniform_(self.fc1.weight)
     self.layer4 = nn.Sequential(self.fc1, nn.ReLU(),
                                 nn.Dropout(p=1 - self.keep_prob))
     self.fc2 = nn.Linear(625, 10, bias=True)
     torch.nn.init.xavier_uniform_(self.fc2.weight)
     self.softmax = nn.Softmax(dim=1)
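A hedged sanity check for the 4 * 4 * 128 fc1 input: the padding=1 convolutions preserve spatial size and each 2x2 max-pool halves it, so the flattened size implies 32 x 32 inputs. The check below rebuilds layer1-3 standalone (the snippet's self.keep_prob is assumed to be set elsewhere in the class):

import torch
import torch.nn as nn

layers = nn.Sequential(
    nn.Conv2d(1, 32, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2),
    nn.Conv2d(32, 64, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2),
    nn.Conv2d(64, 128, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2),
)
print(layers(torch.randn(1, 1, 32, 32)).shape)  # torch.Size([1, 128, 4, 4])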
Example #4
  def forward(self, x):

    c1 = nn.ReLU()(self._lab(x)  + x)
    c2 = nn.ReLU()(self._lcd(c1) + c1)
    c3 = nn.ReLU()(self._lef(c2) + c2)
    out1 = self.l_k(c3)

    return out1
Example #5
 def __init__(self, args):
     super(Policy, self).__init__()
     self.name, self.input_size, self.output_size, self.mem_size, self.device = args
     self.net = nn.Sequential(
         nn.Linear(self.input_size, 128),
         nn.ReLU(),
         nn.Linear(128, 64),
         nn.ReLU(),
         nn.Linear(64, self.output_size)
     )
Example #6
	def __init__(self, inch, outch):
		super(conv_module, self).__init__()
		self.conv = nn.Sequential(
			nn.Conv2d(inch, outch, 3, padding=1),
			nn.BatchNorm2d(outch),
			nn.ReLU(inplace=True),

			nn.Conv2d(outch, outch, 3, padding=1),
			nn.BatchNorm2d(outch),
			nn.ReLU(inplace=True)
			)
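With the channel fix above (the second convolution consumes outch, not inch, since that is what the first convolution produces), the block behaves like a UNet-style double conv. A quick check with illustrative sizes:

import torch

block = conv_module(3, 32)
x = torch.randn(2, 3, 16, 16)
print(block.conv(x).shape)  # torch.Size([2, 32, 16, 16]); padding=1 preserves H and W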
Example #7
 def __init__(self):
     super(CNN, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(in_channels=1,
                   out_channels=16,
                   kernel_size=5,
                   stride=1,
                   padding=2), nn.ReLU(), nn.MaxPool2d(kernel_size=2))
     self.conv2 = nn.Sequential(nn.Conv2d(16, 32, 5, 1, 2), nn.ReLU(),
                                nn.MaxPool2d(2))
     self.out = nn.Linear(32 * 7 * 7, 10)
Example #8
def downsample_conv(in_planes, out_planes, kernel_size=3):
    return nn.Sequential(
        nn.Conv2d(in_planes,
                  out_planes,
                  kernel_size=kernel_size,
                  stride=2,
                  padding=(kernel_size - 1) // 2), nn.ReLU(inplace=True),
        nn.BatchNorm2d(out_planes),
        nn.Conv2d(out_planes,
                  out_planes,
                  kernel_size=kernel_size,
                  padding=(kernel_size - 1) // 2), nn.ReLU(inplace=True))
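A quick usage sketch for downsample_conv: the first stride-2 convolution halves the spatial resolution, the second preserves it (the 3 -> 64 channel count is illustrative):

import torch

block = downsample_conv(3, 64)
x = torch.randn(1, 3, 64, 64)
print(block(x).shape)  # torch.Size([1, 64, 32, 32])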
Example #9
    def __init__(self,
                 n_vocab,
                 conv_hidden=24,
                 embed_hidden=32,
                 lstm_hidden=128,
                 mlp_hidden=256,
                 classes=29):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(3,
                      conv_hidden,
                      kernel_size=3,
                      stride=2,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(conv_hidden),
            nn.ReLU(),
            nn.Conv2d(conv_hidden,
                      conv_hidden,
                      kernel_size=3,
                      stride=2,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(conv_hidden),
            nn.ReLU(),
            nn.Conv2d(conv_hidden,
                      conv_hidden,
                      kernel_size=3,
                      stride=2,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(conv_hidden),
            nn.ReLU(),
            nn.Conv2d(conv_hidden,
                      conv_hidden,
                      kernel_size=3,
                      stride=2,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(conv_hidden),
            nn.ReLU(),
        )
        self.embed = nn.Embedding(n_vocab, embed_hidden)
        self.lstm = nn.LSTM(embed_hidden, lstm_hidden, batch_first=True)
        self.n_concat = conv_hidden * 2 + lstm_hidden + 2 * 2
        self.g = nn.Sequential()

        coords = torch.linspace(-4, 4, 8)
        x = coords.unsqueeze(0).repeat(8, 1)
        y = coords.unsqueeze(1).repeat(1, 8)
        coords = torch.stack([x, y]).unsqueeze(0)
Example #10
    def _make_gen_block(in_channels: int,
                        out_channels: int,
                        kernel_size: int = 4,
                        stride: int = 2,
                        padding: int = 1,
                        bias: bool = False,
                        last_block: bool = False,
                        use_relu: bool = False) -> nn.Sequential:
        if not last_block:
            gen_block = nn.Sequential(
                nn.ConvTranspose2d(in_channels,
                                   out_channels,
                                   kernel_size,
                                   stride,
                                   padding,
                                   bias=bias),
                nn.BatchNorm2d(out_channels),
                nn.ReLU() if use_relu else nn.Mish(),
            )
        else:
            gen_block = nn.Sequential(
                nn.ConvTranspose2d(in_channels,
                                   out_channels,
                                   kernel_size,
                                   stride,
                                   padding,
                                   bias=bias),
                nn.Sigmoid(),
            )

        return gen_block
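Assuming _make_gen_block is reachable as a module-level or static helper, a hypothetical DCGAN-style generator could stack the blocks like this (the latent size 100 and channel widths are illustrative):

import torch
import torch.nn as nn

gen = nn.Sequential(
    _make_gen_block(100, 256, stride=1, padding=0),  # 1x1 latent -> 4x4
    _make_gen_block(256, 128),                       # 4x4 -> 8x8
    _make_gen_block(128, 64),                        # 8x8 -> 16x16
    _make_gen_block(64, 1, last_block=True),         # 16x16 -> 32x32, sigmoid output
)
z = torch.randn(8, 100, 1, 1)
print(gen(z).shape)  # torch.Size([8, 1, 32, 32])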
Example #11
    def __init__(self):
        super(ResidualAtentionNet, self).__init__()
        self.Conv1 = nn.Conv2d(in_channels=3,
                               out_channels=64,
                               kernel_size=7,
                               stride=2,
                               padding=0)
        self.MaxPooling = nn.MaxPool2d(kernel_size=3, stride=2)
        self.ResUnit1 = ResUnitUp(64, 256)
        self.Attention1 = AttentionModule(256)
        self.ResUnit2 = ResUnitUp(256, 512)
        self.Attention2 = AttentionModule(512)
        self.ResUnit3 = ResUnitUp(512, 1024)
        self.Attention3 = AttentionModule(1024)
        self.ResUnit4 = ResUnitUp(1024, 2048)
        self.AveragePooling = nn.AvgPool2d((7, 7))
        self.FC = nn.Sequential(nn.Linear(2048, 1024), nn.ReLU(),
                                nn.Linear(1024, 1), nn.Sigmoid())

    def forward(self, x):
        C1 = self.Conv1(x)
        P1 = self.MaxPooling(C1)
        R1 = self.ResUnit1(P1)
        A1 = self.Attention1(R1)
        # R2 = self.ResUnit2(A1)
        # A2 = self.Attention2(R2)
        # R3 = self.ResUnit3(A2)
        # A3 = self.Attention3(R3)
        # R4 = self.ResUnit4(A3)
        # P2 = self.AveragePooling(R4)
        # y = self.FC(P2)
        return A1
Example #12
def parse_activation(activation):
    if activation == 'relu':
        return nn.ReLU()
    if activation == 'tanh':
        return nn.Tanh()
    if activation == 'sigmoid':
        return nn.Sigmoid()
    raise ValueError(f"unknown activation: {activation!r}")
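A usage sketch for parse_activation (the layer sizes are illustrative):

import torch.nn as nn

mlp = nn.Sequential(nn.Linear(16, 32), parse_activation('tanh'), nn.Linear(32, 1))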
Example #13
 def __init__(self):
     super(AE, self).__init__()
     self.fc1 = nn.Linear(num_games, 256)
     self.fc2 = nn.Linear(256, 100)
     self.fc3 = nn.Linear(100, 256)
     self.fc4 = nn.Linear(256, num_games)
     self.activation = nn.ReLU()
Example #14
 def __init__(self, num_obs, num_action, num_hidden_1, num_hidden_2):
     super(Critic, self).__init__()
     self.fc_o = nn.Linear(num_obs, num_hidden_1)
     self.fc_a = nn.Linear(num_action, num_hidden_1)
     self.fc_2 = nn.Linear(num_hidden_1 * 2, num_hidden_2)
     self.out = nn.Linear(num_hidden_2, 1)
     self.Relu = nn.ReLU()
Example #15
 def __init__(self, input_size=27, hidden_size=64, output_size=9):
     super(Policy, self).__init__()
     # TODO
     self.features = nn.Sequential(nn.Linear(input_size, hidden_size),
                                   nn.ReLU(inplace=True),
                                   nn.Linear(hidden_size, output_size),
                                   nn.Softmax(dim=-1))
Example #16
 def __init__(self, num_obs, num_action, num_hidden_1, num_hidden_2):
     super(Actor, self).__init__()
     self.base = nn.Sequential(nn.Linear(num_obs, num_hidden_1), nn.ReLU(),
                               nn.Linear(num_hidden_1, num_hidden_2),
                               nn.ReLU(), nn.Linear(num_hidden_2,
                                                    num_action), nn.Tanh())
     self.train()
Example #17
def load(path):
    # import everything
    import torch
    from torch import nn
    from torch import optim
    import torch.nn.functional as F
    from torchvision import datasets, transforms, models

    # define the model architecture
    model = models.resnet18(pretrained=True)

    # freeze parameters
    for param in model.parameters():
        param.requires_grad = False

    # resnet18 exposes its classification head as `fc` (512 input features)
    model.fc = nn.Sequential(nn.Linear(512, 256),
                             nn.ReLU(),
                             nn.Dropout(0.2),
                             nn.Linear(256, 2),
                             nn.LogSoftmax(dim=1))

    # load weights for the new head
    model.fc.load_state_dict(torch.load(path, map_location="cpu"))

    # return the model with loaded weights
    return model
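For context, a hypothetical counterpart that would produce the checkpoint load() expects (the variable model is the fine-tuned network from a training script, and the filename is an assumption):

import torch

# model: a resnet18 whose fc head was fine-tuned as in load() above
torch.save(model.fc.state_dict(), "classifier.pt")
restored = load("classifier.pt")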
Example #18
    def __init__(self, img_in_channels, num_layers_list, num_classes):
        super(Resnet, self).__init__()
        self.in_chans = 64
        self.conv1 = nn.Conv2d(img_in_channels,
                               out_channels=64,
                               kernel_size=7,
                               stride=2,
                               padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer_block_1 = self._make_layer_block(num_layers_list[0],
                                                    out_chan=64,
                                                    stride=1)
        self.layer_block_2 = self._make_layer_block(num_layers_list[1],
                                                    out_chan=128,
                                                    stride=2)
        self.layer_block_3 = self._make_layer_block(num_layers_list[2],
                                                    out_chan=256,
                                                    stride=2)
        self.layer_block_4 = self._make_layer_block(num_layers_list[3],
                                                    out_chan=512,
                                                    stride=2)

        self.avgpool = nn.AvgPool2d(1, 1)
        self.fc = nn.Linear(512 * 4, num_classes)
        self.softmax = nn.Softmax(dim=1)
Example #19
    def __init__(self, num_classes=10):
        super(LeNet5_nmp, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=(3,3), stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=(3,3), stride=1, padding=1),
            nn.ReLU(),
            # nn.MaxPool2d(kernel_size=2),
            # nn.Flatten(),
            nn.Conv2d(64, 64, kernel_size=(4,4), stride=2, padding=1),
            nn.ReLU()
        )
        self.classifier = nn.Sequential(
            nn.Linear(64*14*14, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, num_classes),
        )

        self.param_info = [
            {'layer_type': 'Conv2d', 'kernel_size': (3, 3), 'stride': 1, 'padding': 1, 'name': 'Conv1'},
            {'layer_type': 'Conv2d', 'kernel_size': (3, 3), 'stride': 1, 'padding': 1, 'name': 'Conv2'},
            {'layer_type': 'Conv2d', 'kernel_size': (4, 4), 'stride': 2, 'padding': 1, 'name': 'Conv3'},
            {'layer_type': 'Linear', 'name': 'Linear1'},
            {'layer_type': 'Linear', 'name': 'Linear2'},
            {'layer_type': 'Linear', 'name': 'Linear3'},
        ]
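A hedged shape check for the 64 * 14 * 14 classifier input: with 28 x 28 inputs (e.g. MNIST) the two padded 3x3 convolutions preserve spatial size and the stride-2 4x4 convolution halves it to 14 x 14:

import torch

net = LeNet5_nmp()
feats = net.features(torch.randn(1, 1, 28, 28))
print(feats.shape)  # torch.Size([1, 64, 14, 14]) -> flattens to 64 * 14 * 14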
Example #20
 def __init__(self, args):
     super(HIDIO, self).__init__()
     self.input_size, self.output_size, self.option_num, self.mem_size, self.lr, self.device = args
     self.option_phi = Policy(args = ("option_phi", self.input_size, self.output_size, self.mem_size, self.device))
     self.policy_sche = Policy(args = ("scheduler_policy", self.input_size + self.output_size + 1, self.option_num, self.mem_size, self.device))
     self.persi_net = nn.Sequential(
         nn.Linear(self.input_size + self.output_size, 128),
         nn.ReLU(),
         nn.Linear(128, 64),
         nn.ReLU(),
         nn.Linear(64, 1)
     )
     self.optimizer_sche = optim.Adam(self.policy_sche.parameters(), self.lr)
     self.optimizer_option_phi = optim.Adam([{'params': self.option_phi.parameters()}, {'params': self.persi_net.parameters()}], self.lr)
     self.sche_replay_buffer = ReplayBuffer(args=("sche_rb", self.mem_size, self.device))
     self.option_replay_buffer = ReplayBuffer(args=("option_rb", self.mem_size, self.device))
Example #21
    def __init__(self, num_classes=10):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(1, 32, 3, 1, 1), nn.ReLU())
        self.conv2 = nn.Sequential(nn.Conv2d(32, 32, 3), nn.ReLU(),
                                   nn.MaxPool2d(2, 2), nn.Dropout(0.25))

        self.conv3 = nn.Sequential(nn.Conv2d(32, 64, 3, 1, 1), nn.ReLU())
        self.conv4 = nn.Sequential(
            nn.Conv2d(64, 64, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # 24*24
            nn.Dropout(0.25))
        self.fc1 = nn.Sequential(nn.Linear(64 * 24 * 24, 512),
                                 nn.BatchNorm1d(512), nn.ReLU(),
                                 nn.Dropout(0.25))
        self.fc2 = nn.Sequential(nn.Linear(512, num_classes),
                                 nn.BatchNorm1d(num_classes), nn.Softmax(dim=1))
Example #22
 def init_phi(self):
     phi_type = self.phi_type
     if phi_type == "linear":
         self.phi = nn.Identity()
     elif phi_type == 'tanh':
         self.phi = nn.Tanh()
     elif phi_type == 'relu':
         self.phi = nn.ReLU()
Example #23
 def __init__(self, models):
     super(Net, self).__init__()
     self.models = models
     self.inputs = inputs
     self.output = outputs
     self.linear = nn.Sequential(nn.Linear(7 * 7 * 512, 256), nn.ReLU(),
                                 nn.Dropout(0.5))
     self.linear2 = nn.Sequential(nn.Linear(512, num_classes), nn.Softmax(dim=1))
Example #24
    def __init__(self, input_dim, output_dim):
        super(Discriminator, self).__init__()

        fc = [
            nn.Linear(input_dim, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, output_dim),
            nn.Sigmoid(),
        ]

        self.fc = nn.Sequential(*fc)

        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.02)
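A usage sketch for the Discriminator above (the dimensions are illustrative):

import torch

disc = Discriminator(input_dim=784, output_dim=1)
scores = disc.fc(torch.randn(4, 784))
print(scores.shape)  # torch.Size([4, 1]), values in (0, 1) from the sigmoid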
Example #25
    def __init__(self, n_classes=1):
        super().__init__()

        self.n_classes = n_classes
        self.rolling_times = 4
        self.rolling_ratio = 0.075

        self.Base = VGG16()
        self.Extra = nn.Sequential(OrderedDict([
            ('extra1_1', nn.Conv2d(1024, 256, 1)),
            ('extra1_2', nn.Conv2d(256, 256, 3, padding=1, stride=2)),
            ('extra2_1', nn.Conv2d(256, 128, 1)),
            ('extra2_2', nn.Conv2d(128, 256, 3, padding=1, stride=2)),
            ('extra3_1', nn.Conv2d(256, 128, 1)),
            ('extra3_2', nn.Conv2d(128, 256, 3, padding=1, stride=2))]))
        self.pred_layers = ['conv4_3', 'conv7', 'extra1_2', 'extra2_2', 'extra3_2']

        self.L2Norm = nn.ModuleList([L2Norm(512, 20)])
        self.l2norm_layers = ['conv4_3']

        # intermediate layers
        self.Inter = nn.ModuleList([
            nn.Sequential(nn.Conv2d(512, 256, 3, padding=1), nn.ReLU(inplace=True)),
            nn.Sequential(nn.Conv2d(1024, 256, 3, padding=1), nn.ReLU(inplace=True)),
            nn.Sequential(),
            nn.Sequential(),
            nn.Sequential()])
        n_channels = [256, 256, 256, 256, 256]

        # Recurrent Rolling
        self.RollLeft = nn.ModuleList([])
        self.RollRight = nn.ModuleList([])
        self.Roll = nn.ModuleList([])
        for i in range(len(n_channels)):
            n_out = int(n_channels[i] * self.rolling_ratio)
            if i > 0:
                self.RollLeft.append(nn.Sequential(
                    nn.Conv2d(n_channels[i-1], n_out, 1),
                    nn.ReLU(inplace=True),
                    nn.MaxPool2d(2, ceil_mode=True)))
            if i < len(n_channels) - 1:
                self.RollRight.append(nn.Sequential(
                    nn.Conv2d(n_channels[i+1], n_out, 1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(n_out, n_out, kernel_size=4, stride=2, padding=1)))

            n_out = n_out * (int(i>0) + int(i<len(n_channels)-1))
            self.Roll.append(nn.Sequential(
                    nn.Conv2d(n_channels[i] + n_out, n_channels[i], 1), 
                    nn.ReLU(inplace=True)))

        # Prediction
        self.Loc = nn.ModuleList([])
        self.Conf = nn.ModuleList([])
        for i in range(len(n_channels)):
            n_boxes = len(self.config['aspect_ratios'][i]) + 1
            self.Loc.append(nn.Conv2d(n_channels[i], n_boxes * 4, 3, padding=1))
            self.Conf.append(nn.Conv2d(n_channels[i], n_boxes * (self.n_classes + 1), 3, padding=1))
Example #26
    def __init__(self, num_classes=2):
        super(AlexNet, self).__init__()

        self.model_name = 'alexnet'

        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )
Example #27
 def __init__(self):
     super(FeatureModule, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True),
     )
     self.conv2 = nn.Sequential(
         nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True),
     )
     self.conv3 = nn.Sequential(
         nn.Conv2d(64, 128, 3, stride=1, padding=1, bias=False),
         nn.BatchNorm2d(128),
         nn.ReLU(inplace=True),
     )
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
Example #28
    def forward(self, input, hidden):
        embd_input = self.embedding(input)
        rnn_output, rnn_hidden = self.gru(embd_input, hidden)
        rnn_output = torch.relu(rnn_output)
        rnn_output = self.linear1(rnn_output)
        rnn_output = self.linear2(rnn_output)
        log_soft_output = self.log_softmax(rnn_output)

        return log_soft_output
Example #29
 def __init__(self, ind_in, cond_in, hidden_sizes, nout, device="cpu"):
     self.net = []
     for i in range(ind_in):
         net = []
         hs = [cond_in + 1] + hidden_sizes + [nout]
         for l_in, l_out in zip(hs[:-1], hs[1:]):
             net += [nn.Linear(l_in, l_out), nn.ReLU()]
         net.pop()  # drop the trailing activation so the output layer stays linear
         self.net += [nn.Sequential(*net)]
Example #30
 def __init__(self,inchannel,outchannel,stride=1,shortcut=None):
     super(ResidualBlock, self).__init__()
     self.left = nn.Sequential(
         nn.Conv2d(inchannel, outchannel, 3, stride, 1, bias=False),
         nn.BatchNorm2d(outchannel),
         nn.ReLU(inplace=True),
         nn.Conv2d(outchannel, outchannel, 3, 1, 1, bias=False),
         nn.BatchNorm2d(outchannel),
     )
     self.right = shortcut
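The snippet ends with __init__; a typical forward for this residual pattern (assumed, not from the source) would add the shortcut branch and apply the final activation:

import torch.nn.functional as F

def forward(self, x):
    out = self.left(x)
    # identity shortcut unless a projection was supplied
    residual = x if self.right is None else self.right(x)
    return F.relu(out + residual)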