Example #1
    def __init__(self, imgsz, ch):
        """

        :param imgsz:
        :param ch: base channels
        """
        super(Encoder, self).__init__()

        x = torch.randn(2, 3, imgsz, imgsz)
        print('Encoder:', list(x.shape), end='=>')

        layers = [
            nn.Conv2d(3, ch, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(ch),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(2, stride=None, padding=0),
        ]
        # run the layers once just to print the intermediate shape
        out = nn.Sequential(*layers)(x)
        print(list(out.shape), end='=>')

        # [b, ch_cur, imgsz, imgsz] => [b, ch_next, mapsz, mapsz]
        mapsz = imgsz // 2
        ch_cur = ch
        ch_next = ch_cur * 2

        while mapsz > 4:  # until the feature map is [b, ch_cur, 4, 4]
            # add resblk
            layers.extend([
                ResBlk([1, 3, 3], [ch_cur, ch_next, ch_next, ch_next]),
                nn.AvgPool2d(kernel_size=2, stride=None)
            ])
            mapsz = mapsz // 2
            ch_cur = ch_next
            ch_next = ch_next * 2 if ch_next < 512 else 512  # set max ch=512

            # print the intermediate shape
            out = nn.Sequential(*layers)(x)
            print(list(out.shape), end='=>')

        layers.extend([
            ResBlk([3, 3], [ch_cur, ch_next, ch_next]),
            nn.AvgPool2d(kernel_size=2, stride=None),
            ResBlk([3, 3], [ch_next, ch_next, ch_next]),
            nn.AvgPool2d(kernel_size=2, stride=None),
            Flatten()
        ])

        self.net = nn.Sequential(*layers)

        # for printing
        out = nn.Sequential(*layers)(x)
        print(list(out.shape))
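None of the examples on this page define the Flatten helper they rely on; it is a small module that collapses every dimension after the batch dimension (roughly what the later nn.Flatten(start_dim=1) does). A minimal sketch, assuming the size argument some examples pass (e.g. Flatten(16*5*5)) is informational only:

import torch.nn as nn


class Flatten(nn.Module):
    """Collapse [b, c, h, w] (or any trailing dims) into [b, c*h*w]."""

    def __init__(self, flat_size=None):
        super().__init__()
        # flat_size is optional documentation of the expected output width;
        # the reshape itself does not need it
        self.flat_size = flat_size

    def forward(self, x):
        return x.view(x.size(0), -1)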
Example #2
 def __init__(self, dropout=0):
     super().__init__(
         nn.Conv2d(3, 6, kernel_size=5),
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=2, stride=2),
         nn.Conv2d(6, 16, kernel_size=5),
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=2, stride=2),
         Flatten(16*5*5),
         nn.Linear(16*5*5, 120), nn.ReLU(), nn.Dropout(dropout),
         nn.Linear(120, 84), nn.ReLU(), nn.Dropout(dropout),
         nn.Linear(84, 10))
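With these kernel sizes the LeNet-style network above expects 3x32x32 inputs (CIFAR-sized): 32 -> 28 -> 14 after the first conv/pool pair, then 14 -> 10 -> 5 after the second, which is exactly where the 16*5*5 = 400 features passed to Flatten and the first Linear layer come from.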
Example #3
 def __init__(self, n_input, n_classes, n_hidden=512, p=0.1):
     super(MLPClassifier, self).__init__()
     self.n_input = n_input
     self.n_classes = n_classes
     self.n_hidden = n_hidden
     if n_hidden is None:
         # use linear classifier
         self.block_forward = nn.Sequential(
             Flatten(), nn.Dropout(p=p),
             nn.Linear(n_input, n_classes, bias=True))
     else:
         # use simple MLP classifier
         self.block_forward = nn.Sequential(
             Flatten(),
             nn.Dropout(p=p),
             nn.Linear(n_input, n_hidden, bias=False),
             nn.BatchNorm1d(n_hidden),
             nn.ReLU(inplace=True),
             nn.Dropout(p=p),
             nn.Linear(n_hidden, n_classes, bias=True),
         )
Example #4
def main():
    trained_model = resnet18(pretrained=True)
    # Take resnet18's layers with children(), convert them to a list and drop the final fc layer with [:-1];
    # Sequential expects individual modules rather than a list, hence the * unpacking
    model = nn.Sequential(
        *list(trained_model.children())[:-1],
        Flatten(),  # [b,512,1,1]->[b,512]
        nn.Linear(512, 5))  # transfer learning: new 5-way classifier head
    model = model.to(device)
    # x = torch.randn(2,3,224,224)
    # print(model(x).shape)

    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0

    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))

    for epoch in range(epoches):
        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)
            logits = model(x)
            loss = criterion(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        # validation
        if epoch % 1 == 0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                torch.save(model.state_dict(), 'best_for_transfer.mdl')  # save the best model

                viz.line([val_acc], [global_step],
                         win='val_acc',
                         update='append')

    print('best acc: ', best_acc, ' best epoch: ', best_epoch)
    model.load_state_dict(torch.load('best_for_transfer.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)
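Several training loops on this page call an evalute(model, loader) helper that is never shown. A minimal sketch, assuming it simply returns top-1 accuracy over a DataLoader and reuses the same global device as main():

import torch


def evalute(model, loader):
    # assumed behaviour: fraction of correctly classified samples
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            pred = model(x).argmax(dim=1)
            correct += torch.eq(pred, y).sum().item()
            total += y.size(0)
    return correct / total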
Example #5
def main():
    # Load the pretrained model
    trained_model = resnet18(pretrained=True)
    # For transfer learning, keep the backbone and replace the classification head
    model = nn.Sequential(
        *list(trained_model.children())[:-1],  # [b, 512, 1, 1]
        Flatten(),  # [b, 512, 1, 1] => [b, 512]
        nn.Linear(512, 6)).to(device)
    # Start test
    test_acc = test(model, test_loader)
    # print final accuracy
    print('test acc:', test_acc)
Example #6
def main():
    # model = ResNet18(5).to(device)
    trained_model = resnet18(pretrained=True)
    model = nn.Sequential(*list(trained_model.children())[:-1],  # [b, 512, 1, 1]
                          Flatten(),  # [b, 512, 1, 1] => [b, 512]
                          nn.Linear(512, 5)
                          ).to(device)
    # x = torch.randn(2, 3, 224, 224)
    # print(model(x).shape)

    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):

        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)

            model.train()
            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        if epoch % 1 == 0:

            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc

                torch.save(model.state_dict(), 'best.mdl')

                viz.line([val_acc], [global_step], win='val_acc', update='append')

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)
Example #7
def full_sharing(task_outputs, num_modules):
    nt = len(task_outputs)
    return nn.Sequential(
        IgnoreTaskRouting(conv_layer(3, 32)),
        IgnoreTaskRouting(conv_layer(32, 32)),
        IgnoreTaskRouting(conv_layer(32, 32)),
        IgnoreTaskRouting(conv_layer(32, 32)),
        IgnoreTaskRouting(Flatten()),
        IgnoreTaskRouting(dense_layer(128, 128)),
        IgnoreTaskRouting(dense_layer(128, 128)),
        IgnoreTaskRouting(dense_layer(128, 128)),
        StaticTaskRouting(nt, [nn.Linear(128, s) for s in task_outputs]),
    )
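The task-routing examples (this one and Example #12 below) use conv_layer and dense_layer helpers that are not shown. A plausible minimal sketch, assuming each conv block halves the spatial size with a 2x2 max-pool, which is consistent with the feature counts used here (32x32 RGB input: four blocks give 32 channels at 2x2 = 128 features) and in Example #12 (28x28 single-channel input: three blocks give 32 x 3 x 3 = 288 features):

import torch.nn as nn


def conv_layer(ch_in, ch_out):
    # assumed block: 3x3 conv + BN + ReLU + 2x2 max-pool
    return nn.Sequential(
        nn.Conv2d(ch_in, ch_out, kernel_size=3, padding=1),
        nn.BatchNorm2d(ch_out),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(2),
    )


def dense_layer(n_in, n_out):
    # assumed block: fully connected layer + ReLU
    return nn.Sequential(nn.Linear(n_in, n_out), nn.ReLU(inplace=True))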
Example #8
	def __init__(self, Nid=105, Npcode=48, nInputCh=4):
		super(discriminator3D, self).__init__()
		self.nInputCh = nInputCh

		self.conv = nn.Sequential(
			nn.Conv3d(nInputCh, 32, 4, 2, 1, bias=False), # 128 -> 64
			nn.BatchNorm3d(32),
			nn.LeakyReLU(0.2),
			nn.Conv3d(32, 64, 4, 2, 1, bias=False), # 64 -> 32
			nn.BatchNorm3d(64),
			nn.LeakyReLU(0.2),
			nn.Conv3d(64, 128, 4, 2, 1, bias=False), # 32 -> 16
			nn.BatchNorm3d(128),
			nn.LeakyReLU(0.2),
			nn.Conv3d(128, 256, 4, 2, 1, bias=False), # 16 -> 8
			nn.BatchNorm3d(256),
			nn.LeakyReLU(0.2),
			nn.Conv3d(256, 512, 4, 2, 1, bias=False), # 8 -> 4
			nn.BatchNorm3d(512),
			nn.LeakyReLU(0.2)
		)

		self.convGAN = nn.Sequential(
			nn.Conv3d(512, 1, 4, bias=False),
			nn.Sigmoid(),
			Flatten()
		)

		self.convID = nn.Sequential(
			nn.Conv3d(512, Nid, 4, bias=False),
			Flatten()
		)

		self.convPCode = nn.Sequential(
			nn.Conv3d(512, Npcode, 4, bias=False),
			Flatten()
		)
		utils.initialize_weights(self)
Example #9
	def __init__(self, Nid=105, Npcode=48, nInputCh=3):
		super(discriminator2D, self).__init__()
		self.nInputCh = nInputCh

		self.conv = nn.Sequential(
			nn.Conv2d(nInputCh, 64, 11, 4, 1, bias=True), # 256 -> 64
			nn.BatchNorm2d(64),
			nn.ReLU(),
			nn.Conv2d(64, 128, 5, 2, 1, bias=True), # 64 -> 32
			nn.BatchNorm2d(128),
			nn.ReLU(),
			nn.Conv2d(128, 256, 5, 2, 1, bias=True), # 32 -> 16
			nn.BatchNorm2d(256),
			nn.ReLU(),
			nn.Conv2d(256, 512, 5, 2, 1, bias=True), # 16 -> 8
			nn.BatchNorm2d(512),
			nn.ReLU(),
			nn.Conv2d(512, 320, 8, 1, 1, bias=True),
			nn.Sigmoid(),
			Flatten(),
		)

		self.fcGAN = nn.Sequential(
			nn.Linear(320, 1),
			nn.Sigmoid(),
			Flatten()
		)

		self.fcID = nn.Sequential(
			nn.Linear(320, Nid),
			Flatten()
		)

		self.fcPCode = nn.Sequential(
			nn.Linear(320, Npcode),
			Flatten()
		)
		utils.initialize_weights(self)
Example #10
    def __init__(self,
                 lstm_hidden=64,
                 init_hidden=50,
                 lstm_linear=256,
                 MHC_len=34,
                 Pep_len=15,
                 lstm_layers=2,
                 full_lstm=False):
        super(Frozen_resnet, self).__init__()
        self.full_lstm = full_lstm
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.lstm_hidden = lstm_hidden
        self.init_hidden = init_hidden
        self.MHC_len = MHC_len
        self.Pep_len = Pep_len
        self.final_linear_dim = 1024
        self.lstm_layers = lstm_layers

        # Linear Init
        if full_lstm:
            self.MHC_init = BidirectionalLSTM(MHC_len,
                                              hidden_shape=init_hidden,
                                              n_layers=2)
            self.pep_init = BidirectionalLSTM(Pep_len,
                                              hidden_shape=init_hidden,
                                              n_layers=2)
        else:
            self.MHC_init = nn.Sequential(nn.Linear(MHC_len, init_hidden),
                                          nn.ReLU())
            self.pep_init = nn.Sequential(nn.Linear(Pep_len, init_hidden),
                                          nn.ReLU())

        # LSTM
        if full_lstm:
            self.LSTM = BidirectionalLSTM(init_hidden * 4,
                                          hidden_shape=lstm_hidden,
                                          n_layers=lstm_layers)
        else:
            self.LSTM = BidirectionalLSTM(init_hidden * 2,
                                          hidden_shape=lstm_hidden,
                                          n_layers=lstm_layers)
        self.LSTM_linear = nn.Sequential(
            Flatten(),
            nn.Linear(2 * lstm_hidden * 40, lstm_linear),
            nn.BatchNorm1d(lstm_linear),
            nn.ReLU(),
        )

        self.final_linear = nn.Linear(lstm_linear + 2, 1)
Example #11
def generate_aux(input_size: int, input_features: int, num_classes: int,
                 num_fully_conv: int = 3, num_fully_connected: int = 3,
                 num_fully_connected_features: int = 32):
    base = [nn.AdaptiveAvgPool2d(input_size // 4)]
    base += [nn.Sequential(nn.Conv2d(input_features, input_features, 1, bias=False),
                           nn.BatchNorm2d(input_features),
                           nn.ReLU())
             for _ in range(num_fully_conv)]
    base += [nn.AdaptiveAvgPool2d(2), Flatten()]
    base += [nn.Sequential(nn.Linear(4 * input_features if i == 0 else num_fully_connected_features,
                                     num_fully_connected_features),
                           nn.BatchNorm1d(num_fully_connected_features))
             for i in range(num_fully_connected - 1)]
    base += [nn.Linear(4 * input_features if num_fully_connected == 1 else num_fully_connected_features, num_classes)]
    return nn.Sequential(*base)
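For a concrete instantiation, the CIFAR-10 script in Example #30 below calls generate_aux(32, 16, 10, ...): the head adaptively pools the 16-channel feature map to 8x8 (input_size // 4), runs the 1x1 conv blocks, pools to 2x2 and flattens, which yields the 4 * input_features inputs expected by the first fully connected layer, and finishes with a 10-way linear classifier.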
Example #12
def learned_sharing(task_outputs, num_modules):
    nt = len(task_outputs)
    return nn.Sequential(
        LearnedTaskRouting(nt,
                           [conv_layer(1, 32) for _ in range(num_modules)]),
        LearnedTaskRouting(nt,
                           [conv_layer(32, 32) for _ in range(num_modules)]),
        LearnedTaskRouting(nt,
                           [conv_layer(32, 32) for _ in range(num_modules)]),
        IgnoreTaskRouting(Flatten()),
        LearnedTaskRouting(nt,
                           [dense_layer(288, 128)
                            for _ in range(num_modules)]),
        StaticTaskRouting(nt, [nn.Linear(128, s) for s in task_outputs]),
    )
Example #13
 def __init__(self, n_decoder_input, n_decoder_hidden=512, original_size=64, original_channels=3, p=0.1):
     super(MLP_Decoder, self).__init__()
     self.n_decoder_input = n_decoder_input
     self.n_decoder_hidden = n_decoder_hidden
     self.original_size = original_size
     self.original_channels = original_channels
     self.decoder_block_forward = nn.Sequential(
         Flatten(),
         nn.Dropout(p=p),
         nn.Linear(n_decoder_input, n_decoder_hidden, bias=False),
         nn.BatchNorm1d(n_decoder_hidden),
         nn.ReLU(inplace=True),
         nn.Dropout(p=p),
         nn.Linear(n_decoder_hidden, original_size * original_size * original_channels)
     )
Example #14
 def __init__(self):
     super(Classifier, self).__init__()
     self.model = nn.Sequential(
         nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
         nn.Conv2d(16, 32, kernel_size=3),
         # pooling layers only accept symmetric padding, so apply the asymmetric pad explicitly
         nn.ZeroPad2d((0, 1, 0, 1)),
         nn.MaxPool2d(3, stride=2),
         nn.Conv2d(32, 32, kernel_size=7, stride=1), nn.ReLU(inplace=True),
         nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(128, 64, kernel_size=5, stride=1, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(64, 32, kernel_size=1, stride=1, padding=1),
         nn.ZeroPad2d((0, 1, 0, 1)),
         nn.AvgPool2d(7, stride=2), Flatten(),
         nn.Linear(96, 10))
Example #15
def predicts(img):

    device = torch.device('cuda')
    torch.manual_seed(1234)
    resize = 224
    className = {
        '0': 'bulbasaur',
        '1': 'charmander',
        '2': 'mewtwo',
        '3': 'pikachu',
        '4': 'squirtle'
    }

    # model = ResNet18(5).to(device)
    trained_model = resnet18(pretrained=True)
    model = nn.Sequential(
        *list(trained_model.children())[:-1],  # [b, 512, 1, 1]
        Flatten(),  # [b, 512, 1, 1] => [b, 512]
        nn.Linear(512, 5)).to(device)
    # x = torch.randn(2, 3, 224, 224)
    # print(model(x).shape)
    basepath = os.path.dirname(__file__)  # directory containing the current file
    ckpt_path = os.path.join(basepath, 'best.mdl')
    print(ckpt_path)
    model.load_state_dict(torch.load(ckpt_path))
    print('loaded from ckpt!')

    tf = transforms.Compose([
        lambda x: Image.open(x).convert('RGB'),  # string path => image data
        transforms.Resize((int(resize), int(resize))),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    # img = 'pokeman\\pikachu\\00000003.jpg'
    x = tf(img)
    x = x.unsqueeze(0)
    x = x.to(device)

    model.eval()
    with torch.no_grad():
        logits = model(x)
        pred = logits.argmax(dim=1).item()
        prob = np.max(softmax(logits.cpu().numpy()), axis=1)[0]
    # print('Our model predicts : %s'%className[str(pred)])
    return className[str(pred)], str(round(prob * 100, 2)) + '%'
Example #16
    def __init__(self, n_decoder_input, n_decoder_hidden=1024, in_channels=16, out_channels=3):
        super(Resnet_Decoder, self).__init__()
        self.n_decoder_input = n_decoder_input
        self.n_decoder_hidden = n_decoder_hidden
        self.decoder_block_forward = nn.Sequential(
            Flatten(),
            nn.Linear(n_decoder_input, n_decoder_hidden, bias=False),
            nn.BatchNorm1d(n_decoder_hidden),
            nn.ReLU(inplace=True)
        )

        self.residual1 = _make_residual(in_channels) # Nawid - Performs a 3x3 followed by a 1x1 convolution
        self.residual2 = _make_residual(in_channels)
        self.conv1 = nn.ConvTranspose2d(in_channels, in_channels, 4, stride=2, padding=1) # Nawid - Increases the size
        self.conv2 = nn.ConvTranspose2d(in_channels, in_channels, 4, stride=2, padding=1) # Nawid - Increases the size further
        self.residual3 = _make_residual(in_channels)
        self.conv3 = nn.ConvTranspose2d(in_channels, out_channels, 4, stride=2, padding=1) # Nawid - Increases the size further
Example #17
    def __init__(self):
        super(Std_Model, self).__init__()

        self.body = torchvision.models.mobilenet_v2(pretrained=True)
        self.body = nn.Sequential(*(list(self.body.children())[:-1]))
        self.head = nn.Sequential(
            nn.MaxPool2d(7),
            Flatten(), 
            nn.Linear(1280, 1)
        )
        
        # freeze the early backbone weights (only the first 100 parameter tensors)
        count = 0
        for name, param in self.body.named_parameters():
            count += 1
            if(count < 100):
                param.requires_grad = False
Example #18
 def __init__(self, inputs=3, outputs=10):
     super().__init__(
         nn.Conv2d(inputs, 64, kernel_size=11, stride=4, padding=5),
         nn.Softplus(),
         nn.MaxPool2d(kernel_size=2, stride=2),
         nn.Conv2d(64, 192, kernel_size=5, padding=2),
         nn.Softplus(),
         nn.MaxPool2d(kernel_size=2, stride=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.Softplus(),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.Softplus(),
         nn.Conv2d(256, 128, kernel_size=3, padding=1),
         nn.Softplus(),
         nn.MaxPool2d(kernel_size=2, stride=2),
         Flatten(1 * 1 * 128), 
         nn.Linear(128, outputs))
Example #19
    def __init__(self, input_shape):
        super(MNISTR_Encoder, self).__init__()
        _, col, row = input_shape
        latent_col = ((col - 4) - 4) // 2
        latent_row = ((row - 4) - 4) // 2
        self.latent_dim = 48 * latent_col * latent_row
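        # e.g. for a 1x28x28 MNIST-rotations image (assumed input size): ((28 - 4) - 4) // 2 = 10, so latent_dim = 48 * 10 * 10 = 4800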

        self.feature = nn.Sequential()
        self.feature.add_module('f_conv1', nn.Conv2d(1, 32, kernel_size=5))
        self.feature.add_module('f_relu1', nn.ReLU(True))
        self.feature.add_module('f_conv2', nn.Conv2d(32, 48, kernel_size=5))
        self.feature.add_module('f_relu2', nn.ReLU(True))
        self.feature.add_module('f_pool2', nn.MaxPool2d(2))
        self.feature.add_module('f_flat', Flatten())
        self.feature.add_module('fc_fc1', nn.Linear(self.latent_dim, 100))
        self.feature.add_module('fc_relu1', nn.ReLU(True))
        self.feature.add_module('fc_fc2', nn.Linear(100, 100))
        self.feature.add_module('fc_relu2', nn.ReLU(True))
Example #20
 def __init__(self, task_outputs, num_modules):
     super().__init__()
     self.num_tasks = len(task_outputs)
     self.num_modules = num_modules
     self.ch_in = 64
     self.conv1 = IgnoreTaskRouting(
         nn.Sequential(
             nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False),
             nn.BatchNorm2d(64),
             nn.ReLU(),
         ))
     self.layer1 = self.make_layer(64, 2, stride=1)
     self.layer2 = self.make_layer(128, 2, stride=2)
     self.layer3 = self.make_layer(256, 2, stride=2)
     self.layer4 = self.make_layer(512, 2, stride=2)
     self.pool_flatten = IgnoreTaskRouting(
         nn.Sequential(nn.AvgPool2d(4), Flatten()))
     self.fc = StaticTaskRouting(self.num_tasks,
                                 [nn.Linear(512, s) for s in task_outputs])
Example #21
def main():
    # Load the pretrained model
    trained_model = resnet18(pretrained=True)
    # For transfer learning, keep the backbone and replace the classification head
    model = nn.Sequential(
        *list(trained_model.children())[:-1],  # [b, 512, 1, 1]
        Flatten(),  # [b, 512, 1, 1] => [b, 512]
        nn.Linear(512, 6)).to(device)
    # Set optimizer and loss function
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    # Start training
    best_acc, best_epoch = 0, 0
    # Iterate over the epochs
    for epoch in range(epochs):
        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)
            model.train()
            logits = model(x)
            loss = criteon(logits, y)

            # Back propagation
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Validate once per epoch
        if epoch % 1 == 0:
            val_acc = evalute(model, val_loader)
            print('Epoch:', epoch, '/', epochs - 1, 'acc_val:', val_acc,
                  'loss:', loss.item())
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                print(best_acc)
                # Save the model weights
                torch.save(model.state_dict(), 'mydata/weights_resnet18.mdl')
    # Print the best training result
    print('best acc:', best_acc, 'best epoch:', best_epoch)
Example #22
    def __init__(self, num_inputs, use_gru):
        super(CNNBase, self).__init__()

        init_ = lambda m: init(m, nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0))

        self.main = nn.Sequential(
            init_(nn.Conv2d(num_inputs, 16, 8, stride=4)), nn.ReLU(),
            init_(nn.Conv2d(16, 32, 4, stride=2)), nn.ReLU(), Flatten(),
            init_(nn.Linear(32 * 9 * 9, 256)), nn.ReLU())

        if use_gru:
            self.gru = nn.GRUCell(256, 256)  # match the 256-dim features produced by self.main
            nn.init.orthogonal_(self.gru.weight_ih.data)
            nn.init.orthogonal_(self.gru.weight_hh.data)
            self.gru.bias_ih.data.fill_(0)
            self.gru.bias_hh.data.fill_(0)

        self.critic_linear = init_(nn.Linear(256, 1))

        self.train()
Example #23
    def __init__(self,
                 channels,
                 hidden_size=512,
                 scale_factor=2,
                 loss_weights={},
                 attention=False):
        super().__init__()
        self.save_hyperparameters()
        self.hidden_size = hidden_size
        self.frames_in = 3
        self.frames_out = 3
        self.loss_weights = loss_weights

        down_channels = [
            self.frames_in
        ] + channels if self.frames_in is not None else channels
        self.downscale = nn.ModuleList()
        for i in range(len(down_channels) - 1):
            self.downscale.append(
                ConvBlock2D(down_channels[i],
                            down_channels[i + 1],
                            pooling=scale_factor))

        self.flatten = Flatten()
        self.latent = nn.Linear(hidden_size, hidden_size)
        self.unflatten = Unflatten(size=hidden_size)

        up_channels = [
            self.frames_out
        ] + channels if self.frames_out is not None else channels
        self.upscale = nn.ModuleList()
        for i in range(len(up_channels) - 1):
            self.upscale.append(
                ConvTransposeBlock2D(up_channels[-i - 1],
                                     up_channels[-i - 2],
                                     pooling=scale_factor,
                                     attention=attention))
        return
Example #24
	def __init__( self ):
		super(Encoder2D, self).__init__()
		self.input_dim = 3

		self.conv = nn.Sequential(
			nn.Conv2d(self.input_dim, 64, 11, 4, 1, bias=True),
			nn.BatchNorm2d(64),
			nn.ReLU(),
			nn.Conv2d(64, 128, 5, 2, 1, bias=True),
			nn.BatchNorm2d(128),
			nn.ReLU(),
			nn.Conv2d(128, 256, 5, 2, 1, bias=True),
			nn.BatchNorm2d(256),
			nn.ReLU(),
			nn.Conv2d(256, 512, 5, 2, 1, bias=True),
			nn.BatchNorm2d(512),
			nn.ReLU(),
			nn.Conv2d(512, 320, 8, 1, 1, bias=True),
			nn.Sigmoid(),
			Flatten(),
		)

		utils.initialize_weights(self)
Example #25
    def __init__(self,
                 filters=256,
                 n_layers=5,
                 seq_len=49,
                 block_type=None,
                 lstm_hidden=128,
                 lstm_linear=256):
        super(DeepLigand, self).__init__()

        # Convolutional network
        stride = 1
        self.lstm_hidden = lstm_hidden
        self.lstm_linear = lstm_linear
        self.seq_len = seq_len
        self.stride = stride
        self.filters = filters
        self.n_layers = n_layers
        self.block_type = block_type

        # LSTM
        self.ELMo = BidirectionalLSTM(seq_len,
                                      hidden_shape=lstm_hidden,
                                      n_layers=n_layers)
        self.ELMo_Linear = nn.Sequential(
            Flatten(),
            nn.Linear(2 * lstm_hidden * 40, lstm_linear),
            nn.BatchNorm1d(lstm_linear),
            nn.ReLU(),
        )

        self.final_linear = nn.Sequential(
            nn.Linear(self.final_linear_dim, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Linear(128, 1),
        )
Example #26
 def __init__(self, num_classes=10, dropout=0):
     super().__init__(
         nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2),
         nn.Conv2d(64, 192, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2),
         Flatten(256*2*2),
         nn.Dropout(dropout),
         nn.Linear(256 * 2 * 2, 1024),
         nn.ReLU(inplace=True),
         nn.Dropout(dropout),
         nn.Linear(1024, 512),
         nn.Dropout(dropout),
         nn.ReLU(inplace=True),
         nn.Linear(512, num_classes))
Example #27
# One-hot encode the labels
depth = 6


def one_hot(label, depth=6):
    out = torch.zeros(label.size(0), depth)
    idx = torch.LongTensor(label).view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out
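# e.g. for label = [1, 3] and depth = 6, out is a [2, 6] tensor with ones at columns 1 and 3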


# load model
trained_model = densenet121(pretrained=True)
model = nn.Sequential(
    *list(trained_model.children())[:-1],  # [b, 512, 1, 1]
    Flatten(),  # [b, 512, 1, 1] => [b, 512]
    nn.Linear(512, 6))
# load weights
model.load_state_dict(torch.load('mydata/weights_densenet121.mdl'))
print('loaded from ckpt!')

# plot ROC curve
for x, y in test_loader:
    x, y = x.to(device), y.to(device)
    y_onehot = one_hot(y, depth)  # y.shape: [280, 6]
    with torch.no_grad():
        logits = model(x)

        # Compute the per-class FPR/TPR coordinates and thresholds used for the ROC curve / AUC
        fpr_EDH, tpr_EDH, thresholds_EDH = roc_curve(y_onehot[:, 0], logits[:, 0])
Example #28
 def projection_conv(self, input_dim):
     pca = torch.nn.Sequential(*[
         Flatten(),
         nn.Conv2d(input_dim, self.embedding_size, (1, 1), bias=False),
     ])
     return pca
Example #29
import torch
import torch.nn as nn

from utils import Flatten, Unflatten, Visualizer, get_dataloader

import settings

# Discriminator network  -----------------------------------------
D = nn.Sequential(
    nn.Conv2d(1, 4, 4, stride=2),
    nn.LeakyReLU(0.2),
    nn.Conv2d(4, 8, 4, stride=2),
    nn.LeakyReLU(0.2),
    Flatten(),
    nn.Linear(200, 10),
    nn.LeakyReLU(0.2),
    nn.Linear(10, 1),
    nn.Sigmoid(),
)

# Generator network  -----------------------------------------
G = nn.Sequential(
    nn.Linear(10, 200),
    nn.LeakyReLU(0.2),
    Unflatten(),
    nn.ConvTranspose2d(8, 4, 5, stride=2),
    nn.LeakyReLU(0.2),
    nn.ConvTranspose2d(4, 1, 4, stride=2),
    nn.Sigmoid(),
)
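Unflatten is imported from utils alongside Flatten but is likewise not shown. For this generator it has to undo the Linear(10, 200) output into an 8x5x5 feature map so that the two transposed convolutions can upsample back to the 1x28x28 images the discriminator consumes (D flattens 8x5x5 = 200 features into its Linear(200, 10)). A minimal sketch under that assumption:

class Unflatten(nn.Module):
    # assumed behaviour for this GAN: reshape [b, 200] -> [b, 8, 5, 5]
    def __init__(self, shape=(8, 5, 5)):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(x.size(0), *self.shape)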
Example #30
                 "sgd": optim.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-4)}[args.optimizer]

    train_loader, test_loader = cifar10_loaders(args.batch_size)
    resnet = module_converter(resnet56(num_classes=10), keys=["conv1", "bn1", "relu", "layer1", "layer2", "layer3"])
    aux = nn.ModuleDict(OrderedDict({k: v for k, v in {
        # 32x32
        "conv1": generate_aux(32, 16, 10, args.num_convs, args.num_fcs),
        # 32x32
        "layer1": generate_aux(32, 16, 10, args.num_convs, args.num_fcs),
        # 16x16
        "layer2": generate_aux(16, 32, 10, args.num_convs, args.num_fcs),
        # 8x8
        "layer3": generate_aux(8, 64, 10, args.num_convs, args.num_fcs),
    }.items() if k in args.group}))
    model = NaiveGreedyModule(resnet, aux=aux,
                              tail=nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(64, 10)))

    # import torch
    # from homura.debug import module_debugger as simple_debugger
    # simple_debugger(model, (torch.randn(1, 3, 32, 32), "layer1"), target=torch.tensor([1]),
    #                 loss=lambda x, y: F.cross_entropy(x.pred, y))

    print(args)
    # print(model)
    greedy_loss = [greedy_loss_by_name(name) for name in args.group]
    tb = reporter.TensorboardReporter([_callbacks.LossCallback(), _callbacks.AccuracyCallback()] + greedy_loss,
                                      "../results")
    # tb.enable_report_params()
    trainer = Trainer(model, optimizer, F.cross_entropy, callbacks=tb,
                      scheduler=lr_scheduler.StepLR(args.step, 0.2) if args.optimizer == "sgd" else None)
    for _ in trange(args.epochs, ncols=80):