Example #1
 def __init__(self, nc, n, ps=0.5):
     super().__init__()
     layers = [AdaptiveConcatPool2d(), Mish(), Flatten()] + \
         bn_drop_lin(nc*2, 512, True, ps, Mish()) + \
         bn_drop_lin(512, n, True, ps)
     self.fc = nn.Sequential(*layers)
     self._init_weight()
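
All of these snippets assume a Mish module is already importable; Example #1 additionally uses fastai-v1-style helpers (AdaptiveConcatPool2d, Flatten, bn_drop_lin). A minimal PyTorch sketch of the activation itself, mish(x) = x * tanh(softplus(x)):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Mish(nn.Module):
    # Mish activation (Misra, 2019): x * tanh(softplus(x))
    def forward(self, x):
        return x * torch.tanh(F.softplus(x))

For reference, bn_drop_lin is a fastai-v1 helper that returns a list of [BatchNorm1d, Dropout, Linear, activation] layers; a rough sketch under that assumption:

def bn_drop_lin(n_in, n_out, bn=True, p=0.0, actn=None):
    # optional BatchNorm -> optional Dropout -> Linear -> optional activation
    layers = [nn.BatchNorm1d(n_in)] if bn else []
    if p != 0:
        layers.append(nn.Dropout(p))
    layers.append(nn.Linear(n_in, n_out))
    if actn is not None:
        layers.append(actn)
    return layers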
Example #2
def build_model():
    model = Sequential()
    model.add(Dense(96, input_dim=state_size, kernel_initializer='he_uniform'))
    model.add(Mish())
    model.add(Dense(48, kernel_initializer='he_uniform'))
    model.add(Mish())
    model.add(Dense(24, kernel_initializer='he_uniform'))
    model.add(Mish())
    model.add(Dense(action_size, kernel_initializer='he_uniform'))
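    # note: `lr` was renamed to `learning_rate` in newer Keras releases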
    model.compile(Adam(lr=0.001), loss='mse')
    return model
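
Mish is not a stock Keras layer, so Example #2 assumes a custom layer along these lines (a minimal sketch, assuming TensorFlow 2.x):

import tensorflow as tf

class Mish(tf.keras.layers.Layer):
    # mish(x) = x * tanh(softplus(x))
    def call(self, inputs):
        return inputs * tf.math.tanh(tf.math.softplus(inputs))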
Example #3
    def __init__(self,
                 features=[10, 256, 256],
                 bn=False,
                 activation_fn='mish',
                 p=0):
        super().__init__()
        self.net = nn.Sequential()

        self.activation_fn = Mish()
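        # Note: this single Mish instance is reused for every layer below;
        # Mish is stateless, so sharing one module object is harmless. The
        # `activation_fn` argument is only recorded as a name, never consulted.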
        self.features = features
        self.bn = bn
        self.activation_fn_name = activation_fn
        self.p = p

        for layer in range(1, len(features)):
            self.net.add_module(
                'fc%d' % layer, nn.Linear(features[layer - 1],
                                          features[layer]))
            self.net.add_module('sig%d' % layer, self.activation_fn)
            if p > 0:
                self.net.add_module('dp%d' % layer, nn.Dropout(p))
        self.net.add_module('out', nn.Linear(features[-1], 1))
        #         self.net.add_module('out-nolinear', nn.Softplus(beta=5))

        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data = nn.init.xavier_normal_(m.weight.data)
                m.bias.data = torch.nn.init.zeros_(m.bias.data)
Example #4
def test_function(test_input):
    x1, x2 = (test_input.clone().requires_grad_() for _ in range(2))
    m1 = Mish()
    y1 = m1(x1)
    l1 = y1.mean()
    exp, = torch.autograd.grad(l1, x1)

    y2 = MishImplementation.apply(x2)
    l2 = y2.mean()
    res, = torch.autograd.grad(l2, x2)
    assert_allclose(res, exp)
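
MishImplementation above is a custom torch.autograd.Function whose gradient the test compares against the plain module. A hypothetical sketch of such a memory-efficient variant, which saves only the input and recomputes the intermediates in backward (the actual MishImplementation in the source may differ):

import torch
import torch.nn.functional as F

class MishSketch(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)  # store only the input, no intermediates
        return x * torch.tanh(F.softplus(x))

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        sx = torch.sigmoid(x)            # = d/dx softplus(x)
        tsp = torch.tanh(F.softplus(x))  # recomputed instead of stored
        # d/dx [x * tanh(softplus(x))] = tsp + x * sigmoid(x) * (1 - tsp^2)
        return grad_output * (tsp + x * sx * (1 - tsp * tsp))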
Example #5
 def __init__(self, vgg_name):
     super(VGG, self).__init__()
     self.features = self._make_layers(cfg[vgg_name])
     self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
     self.classifier = nn.Sequential(
         nn.Linear(512 * 7 * 7, 4096),
         act_fn,
         nn.Dropout(),
         nn.Linear(4096, 4096),
         Mish(),
         nn.Dropout(),
         nn.Linear(4096, 2), )
Example #6
def test_module(test_input):
    x1, x2 = (test_input.clone().requires_grad_() for _ in range(2))

    m1 = Mish()
    y1 = m1(x1)
    l1 = y1.mean()
    exp, = torch.autograd.grad(l1, x1)

    m2 = MemoryEfficientMish()
    y2 = m2(x2)
    l2 = y2.mean()
    res, = torch.autograd.grad(l2, x2)
    assert_allclose(y1, y2)
    assert_allclose(res, exp)
Example #7
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              norm_layer=None):
     super(BasicBlock, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu = Mish()
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.stride = stride
Example #8
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 norm_layer=None,
                 First=False):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.first = First
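        # Naming (presumed from usage): OctaveCBR = octave Conv + BN + activation;
        # OctaveCB omits the activation, as it sits before the residual add.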
        if self.first:
            self.ocb1 = FirstOctaveCBR(inplanes,
                                       planes,
                                       kernel_size=(1, 1),
                                       norm_layer=norm_layer,
                                       padding=0)
        else:
            self.ocb1 = OctaveCBR(inplanes,
                                  planes,
                                  kernel_size=(1, 1),
                                  norm_layer=norm_layer,
                                  padding=0)

        self.ocb2 = OctaveCBR(planes,
                              planes,
                              kernel_size=(3, 3),
                              stride=stride,
                              groups=groups,
                              norm_layer=norm_layer)

        self.ocb3 = OctaveCB(planes,
                             planes,
                             kernel_size=(1, 1),
                             norm_layer=norm_layer,
                             padding=0)
        self.relu = Mish()
        self.downsample = downsample
        self.stride = stride
Example #9
  def __init__(self):
    super().__init__()

    act = lambda: Mish()

    self.model = Sequential(
      Conv2d(in_channels=3, out_channels=96,
             kernel_size=(11, 11), stride=(4, 4), padding=2),
      LocalResponseNorm(size=5, k=2, alpha=1e-4, beta=0.75),
      MaxPool2d(kernel_size=(3, 3), stride=2),
      act(),

      Conv2d(in_channels=96, out_channels=256,
             kernel_size=(5, 5), padding=2),
      LocalResponseNorm(size=5, k=2, alpha=1e-4, beta=0.75),
      MaxPool2d(kernel_size=(3, 3), stride=2),
      act(),

      Conv2d(in_channels=256, out_channels=384,
             kernel_size=(3, 3), padding=1),
      act(),

      Conv2d(in_channels=384, out_channels=384,
             kernel_size=(3, 3), padding=1),
      act(),

      Conv2d(in_channels=384, out_channels=256,
             kernel_size=(3, 3), padding=1),
      LocalResponseNorm(size=5, k=2, alpha=1e-4, beta=0.75),
      MaxPool2d(kernel_size=(3, 3), stride=2),
      act(),

      Flatten(),
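      # 256 feature maps at 6x6 after the last pool (224x224 input): 256*6*6 = 9216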
      Linear(in_features=9216, out_features=4096),
      act(),
      Linear(in_features=4096, out_features=4096),
      act(),
      Linear(in_features=4096, out_features=10))
Example #10
    def __init__(self, arch, num_classes=1, pretrained=True):
        super().__init__()

        # load the chosen backbone (SE-ResNeXt or Inception variants)
        if arch == 'se_resnext50_32x4d':
            if pretrained:
                self.base = se_resnext50_32x4d()
            else:
                self.base = se_resnext50_32x4d(pretrained=None)
            self.nc = self.base.last_linear.in_features

        elif arch == 'se_resnext101_32x4d':
            if pretrained:
                self.base = se_resnext101_32x4d()
            else:
                self.base = se_resnext101_32x4d(pretrained=None)
            self.nc = self.base.last_linear.in_features

        elif arch == 'inceptionv4':
            if pretrained:
                self.base = inceptionv4()
            else:
                self.base = inceptionv4(pretrained=None)
            self.nc = self.base.last_linear.in_features

        elif arch == 'inceptionresnetv2':
            if pretrained:
                self.base = inceptionresnetv2()
            else:
                self.base = inceptionresnetv2(pretrained=None)
            self.nc = self.base.last_linear.in_features

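        # AdaptiveConcatPool2d concatenates average- and max-pooled features,
        # doubling the channel count, hence 2 * self.nc below.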
        self.logit = nn.Sequential(AdaptiveConcatPool2d(1), Flatten(),
                                   nn.BatchNorm1d(2 * self.nc),
                                   nn.Dropout(0.5),
                                   nn.Linear(2 * self.nc, 512), Mish(),
                                   nn.BatchNorm1d(512), nn.Dropout(0.5),
                                   nn.Linear(512, 1))
Example #11
def mish_test_cpu():
    x = torch.randn(*shape, requires_grad=True)

    m = Mish()
    y = m(x)
Example #12
    def __init__(self,
                 block,
                 layers,
                 zero_init_residual=False,
                 groups=1,
                 norm_layer=None):
        super(OctaveIID, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.inplanes = 32
        self.groups = groups
        self.conv1 = nn.Conv2d(3,
                               self.inplanes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = Mish()
        self.layer1 = self._make_layer(block,
                                       64,
                                       layers[0],
                                       norm_layer=norm_layer,
                                       First=True)
        self.octaveCBR1 = OctaveCBR(64, 128)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       norm_layer=norm_layer)
        self.layer3 = self._make_layer(block,
                                       128,
                                       layers[2],
                                       norm_layer=norm_layer)
        self.octaveCBR2 = OctaveCBR(128, 64)
        self.layer4 = self._make_layer(block,
                                       64,
                                       layers[3],
                                       norm_layer=norm_layer)
        self.basic_block_h = BasicBlock(32, 32)
        self.basic_block_l = BasicBlock(32, 32)
        self.Upsample = nn.Upsample(scale_factor=2)
        self.conv2_h = nn.Conv2d(32,
                                 3,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1,
                                 bias=False)
        self.conv2_l = nn.Conv2d(32,
                                 3,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1,
                                 bias=False)
        self.sigmoid = nn.Sigmoid()

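        # nn.init has no gain preset for Mish, so 'leaky_relu' is passed as
        # the nonlinearity when computing the Kaiming gain.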
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='leaky_relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
Example #13
    i = torch.Tensor(1, 3, 512, 512).cuda()
    FOCconv = FirstOctaveConv(kernel_size=(3, 3),
                              in_channels=3,
                              out_channels=128,
                              dilation=3).cuda()
    x_out, y_out = FOCconv(i)
    print("First: ", x_out.size(), y_out.size())
    # test last Octave Conv
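    # `high` and `low` below are two-frequency feature maps prepared earlier
    # in the source file (their construction is not shown in this snippet).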
    LOCconv = LastOctaveConv(kernel_size=(3, 3),
                             in_channels=256,
                             out_channels=128,
                             alpha=0.75).cuda()
    i = high, low
    out = LOCconv(i)
    print("Last: ", out.size())

    # test OCB
    ocb = OctaveCB(in_channels=256,
                   out_channels=128,
                   alpha=0.75,
                   norm_layer=nn.GroupNorm).cuda()
    i = high, low
    x_out_h, y_out_l = ocb(i)
    print("OCB:", x_out_h.size(), y_out_l.size())

    # test last OCB
    ocb_last = LastOCtaveCBR(256, 128, alpha=0.75, activation=Mish()).cuda()
    i = high, low
    x_out_h = ocb_last(i)
    print("Last OCB", x_out_h.size())
Example #14
#! ---------------------
#! PATHS
#! ---------------------

DATA_PATH = '../data/'
ADJ_PATH = '../data/MAPK-adjacency_matrix.pt'
GENEORDER_PATH = '../data/MAPK-gene_order.pkl'
EXPR_PATH = '../data/expr/'
OUTPUT_PATH = '../output/'
GO_MATRIX_PATH = '../data/MAPK&overlap_pathway_matrix.pt'

#! ---------------------
#! MODEL ARCHITECTURE
#! ---------------------

ACTIVATION = Mish()  # alternative: F.leaky_relu

#! ---------------------
#! PRETRAINING - WEIGHT TRANSFER
#! ---------------------

USE_PRETRAINED_WEIGHTS = True
STATE_DICT_PATH = '../pretrained_weights/model_state_dict-EPOCH_18.pkl'
#GCN_WEIGHTS_PATH = '../pretrained_weights/cancer127_state_dict.pkl'

#! ---------------------
#! TRAINING PARAMS
#! ---------------------

USE_SEED = False
SEED = 0
Example #15
    def __init__(self, ngpu, use_decoder, use_mixconv, act_func):
        super(InvGenerator, self).__init__()
        self.ngpu = ngpu
        self.use_decoder = use_decoder
        self.use_mixconv = use_mixconv

        self.enc_act_func, self.dec_act_func = act_func
        if self.enc_act_func == 'mish':
            EncActFuncs = [Mish() for i in range(4)]
        elif self.enc_act_func == 'relu':
            EncActFuncs = [nn.LeakyReLU(0.3, inplace=True) for i in range(4)]

        if self.dec_act_func == 'mish':
            DecActFuncs = [Mish() for i in range(4)]
        elif self.dec_act_func == 'relu':
            DecActFuncs = [nn.ReLU(inplace=True) for i in range(4)]

        C1 = C2 = C3 = C4 = 5
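        # nc, ndf, nz, ngf, args and num_img_channels are module-level
        # globals defined elsewhere in the original source file.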
        encoder = [
            # input is (nc=1) x 40 x 40
            nn.Conv2d(nc, ndf * C1, 6, 1, 0, bias=False),
            nn.BatchNorm2d(ndf * C1),
            EncActFuncs[0],
            # state size. (ndf * 2) x 35 x 35
            nn.Conv2d(ndf * C1, ndf * C2, 5, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * C2),
            EncActFuncs[1],
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * C2, ndf * C3, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * C3),
            EncActFuncs[2],
            # state size. (ndf*2) x 8 x 8
            nn.Conv2d(ndf * C3, ndf * C4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * C4),
            EncActFuncs[3],
            # state size. (ndf*4) x 4 x 4
            # nn.Conv2d(ndf * C4, nz, 4, 1, 0, bias=False),
            # nn.BatchNorm2d(ndf * 4),
            # nn.ReLU(True),
            # state size. (ndf*8) x 3 x 3
            # nn.Conv2d(ndf * 4, nz, 3, 1, 0, bias=False),
            # 480 * 1 * 1
        ]

        if self.use_decoder:
            encoder.append(nn.Conv2d(ndf * C4, nz, 4, 1, 0, bias=False))
        else:
            encoder.append(nn.Conv2d(ndf * C4, 40 * 12, 4, 1, 0, bias=False))
            encoder.append(Flatten(40, 12))

        self.encoder = nn.Sequential(*encoder)

        decoder = [
            nn.ConvTranspose2d(nz, ngf * 8, 3, 1, 0, bias=False),  # 13
            nn.BatchNorm2d(ngf * 8),
            DecActFuncs[0],
            # image size: (ngf*8) x 3 x 3
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 3, (2, 1), 1,
                               bias=False),  # 16
            nn.BatchNorm2d(ngf * 4),
            DecActFuncs[1],
            # image size: (ngf*4) x 5 x 3
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),  # 19
            nn.BatchNorm2d(ngf * 2),
            DecActFuncs[2],
            # image size: (ngf*2) x 10 x 6
            nn.ConvTranspose2d(ngf * 2, ngf, (4, 3), (2, 1), 1,
                               bias=False),  # 22
            Power(2),
            nn.BatchNorm2d(ngf),
            DecActFuncs[3],
        ]

        if self.use_mixconv:
            # image size: (ngf*2) x 20 x 5
            decoder.append(
                nn.ConvTranspose2d(ngf,
                                   args.num_mix_comps,
                                   4,
                                   2,
                                   1,
                                   bias=False))  # 25
        else:
            decoder.append(
                nn.ConvTranspose2d(ngf, num_img_channels, 4, 2, 1,
                                   bias=False))  # 25
            decoder.append(nn.Tanh())
            # 1 x 40 x 12
            # Flatten()

        self.decoder = nn.Sequential(*decoder)
Example #16
import torch
import torch.nn as nn

# __all__=['']
# VGG layer configurations. BatchNorm is by now standard in almost every CNN, so the BN-free variants are not implemented here.
from mish import Mish

cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}

act_fn = Mish()


class VGG(nn.Module):
    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            act_fn,
            nn.Dropout(),
            nn.Linear(4096, 4096),
            Mish(),
            nn.Dropout(),
            nn.Linear(4096, 2), )