def mobv1_ssdlite_create(num_classes, alpha=1., is_test=False):
    # changes when an LSTM is used
    alpha_base = alpha
    alpha_ssd = alpha
    alpha_lstm = alpha

    base_net = MobileNetV1(1001).model

    source_layer_indexes = [
        12,
        14,
    ]

    extras = nn.ModuleList([
        nn.Sequential(
            nn.Conv2d(in_channels=int(1024 * alpha_ssd),
                      out_channels=int(256 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(256 * alpha_ssd),
                            out_channels=int(512 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        nn.Sequential(
            nn.Conv2d(in_channels=int(256 * alpha_ssd),
                      out_channels=int(128 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(128 * alpha_ssd),
                            out_channels=int(256 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        nn.Sequential(
            nn.Conv2d(in_channels=int(256 * alpha_ssd),
                      out_channels=int(128 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(128 * alpha_ssd),
                            out_channels=int(256 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        nn.Sequential(
            nn.Conv2d(in_channels=int(256 * alpha_ssd),
                      out_channels=int(128 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(128 * alpha_ssd),
                            out_channels=int(256 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        )
    ])

    regression_headers = nn.ModuleList([
        SeperableConv2d(in_channels=int(512 * alpha_base),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(1024 * alpha_base),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(512 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(256 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(256 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        nn.Conv2d(in_channels=int(256 * alpha_ssd),
                  out_channels=6 * 4,
                  kernel_size=1),
    ])

    classification_headers = nn.ModuleList([
        SeperableConv2d(in_channels=int(512 * alpha_base),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(1024 * alpha_base),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(512 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(256 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(256 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        nn.Conv2d(in_channels=int(256 * alpha_ssd),
                  out_channels=6 * num_classes,
                  kernel_size=1),
    ])

    ssd = SSD(num_classes=num_classes,
              base_net=base_net,
              source_layer_indexes=source_layer_indexes,
              extras=extras,
              classification_headers=classification_headers,
              regression_headers=regression_headers,
              is_test=is_test,
              config=config)

    return ssd
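
# Minimal usage sketch (not part of the original source): assuming the SSD class,
# MobileNetV1 backbone, SeperableConv2d and the prior-box `config` imported by this
# module are available, the factory is called like any other model constructor.
if __name__ == '__main__':
    # 21 classes (20 foreground classes + background) is only an illustrative choice
    detector = mobv1_ssdlite_create(num_classes=21, alpha=1.0, is_test=True)
    print(detector)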
Example #2
        'name': 'yolo',
        'classes': 1,
        'ignore_thresh': .5,
        'bbox_loss': 'giou',
        'l1_loss_gain': 0.1,
    },
    'dropout': {
        'probability': 0.5,
    }
}

ACTIVATION_MAP = {
    'logistic': nn.Sigmoid,
    'leaky': lambda: nn.LeakyReLU(0.1, inplace=True),
    'relu': lambda: nn.ReLU(inplace=True),
    'relu6': lambda: nn.ReLU6(inplace=True),
    'tanh': nn.Tanh,
}
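
# Illustrative helper (not part of the original snippet): ACTIVATION_MAP stores
# classes and factories rather than instances, so a fresh module is built per call.
def build_activation(name: str) -> nn.Module:
    # fall back to a plain ReLU for unknown names; this default is an assumption
    return ACTIVATION_MAP.get(name, lambda: nn.ReLU(inplace=True))()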

def str2value(ns):
    try:
        if '.' not in ns:
            return int(ns)
        return float(ns)
    except ValueError:
        return ns

class FC(nn.Module):
    def __init__(self, input: int, output: int, activation: str):
        super().__init__()
        self.fc = nn.Linear(input, output)
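        # the excerpt is cut off here; a plausible completion (an assumption) wires
        # the named activation from ACTIVATION_MAP above into the layer
        self.act = ACTIVATION_MAP[activation]()

    def forward(self, x):
        return self.act(self.fc(x))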
Example #3
def conv_1x1_bn(inp, oup):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )
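
# Illustrative shape check (not part of the original snippet): a pointwise 1x1
# conv + BN + ReLU6 block changes the channel count but keeps spatial resolution.
def _demo_conv_1x1_bn():
    import torch  # local import so the sketch stays self-contained
    block = conv_1x1_bn(32, 64)
    x = torch.randn(2, 32, 56, 56)
    assert block(x).shape == (2, 64, 56, 56)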
Example #4
    def __init__(self, input_size, num_classes=100):
        super(SimpleMLP, self).__init__()

        self.simple = nn.Sequential(nn.Linear(input_size, 128),
                                    nn.BatchNorm1d(128), nn.ReLU6(),
                                    nn.Linear(128, num_classes))
Example #5
	def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
				 dilation=1, groups=1, relu6=False):
		super(_ConvBNReLU, self).__init__()
		self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
		self.bn = nn.BatchNorm2d(out_channels)
		self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)
Example #6
def conv_bn(inp, oup, stride):
    padding = (1, 1, 1, 1) if stride == 1 else (0, 1, 0, 1)
    return nn.Sequential(nn.ConstantPad2d(padding, value=0.),
                         nn.Conv2d(inp, oup, 3, stride, 0, bias=False),
                         nn.BatchNorm2d(oup), nn.ReLU6(inplace=False))
Example #7
 def conv2d(self, inp, outp, stride):
     kernel = 1 if stride == 1 else 3
     padding = (kernel - 1) // 2
     return nn.Sequential(
         nn.Conv2d(inp, outp, kernel, stride, padding, bias=False),
         nn.BatchNorm2d(outp), nn.ReLU6(True))
Example #8
    def __init__(self,
                 kernel_size,
                 in_channels,
                 out_channels,
                 stride,
                 expand_ratio=6,
                 se=False,
                 se_paras='s_4',
                 ratio=1.0):
        super(MBInvertedConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        if kernel_size == 1:
            pad = 0
        elif kernel_size == 3:
            pad = 1
        elif kernel_size == 5:
            pad = 2
        elif kernel_size == 7:
            pad = 3
        feature_dim = round(in_channels * self.expand_ratio)
        middle_inc = int(feature_dim * ratio)
        if middle_inc % 2 != 0:
            middle_inc += 1
        #print(feature_dim,middle_inc)
        if se == False:
            self.op = nn.Sequential(
                nn.Conv2d(in_channels, middle_inc, 1, 1, 0, bias=False),
                nn.BatchNorm2d(middle_inc),
                nn.ReLU6(inplace=True),
                nn.Conv2d(middle_inc,
                          middle_inc,
                          kernel_size,
                          stride,
                          pad,
                          groups=middle_inc,
                          bias=False),
                nn.BatchNorm2d(middle_inc),
                nn.ReLU6(inplace=True),
                nn.Conv2d(middle_inc, out_channels, 1, 1, 0, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.op = nn.Sequential(
                nn.Conv2d(in_channels, middle_inc, 1, 1, 0, bias=False),
                nn.BatchNorm2d(middle_inc),
                nn.ReLU6(inplace=True),
                nn.Conv2d(middle_inc,
                          middle_inc,
                          kernel_size,
                          stride,
                          pad,
                          groups=middle_inc,
                          bias=False),
                nn.BatchNorm2d(middle_inc),
                SEModule(middle_inc, se_paras),
                nn.ReLU6(inplace=True),
                nn.Conv2d(middle_inc, out_channels, 1, 1, 0, bias=False),
                nn.BatchNorm2d(out_channels),
            )
Example #9
    def __init__(self, activation, num_layers, in_dim, hidden_dim, out_dim):
        super().__init__()
        self.num_layers = num_layers
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim

        if activation == 'none':
            self.activation = None
        elif activation == 'hardtanh':
            self.activation = nn.Hardtanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'relu6':
            self.activation = nn.ReLU6()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'tanhshrink':
            self.activation = nn.Tanhshrink()
        elif activation == 'hardshrink':
            self.activation = nn.Hardshrink()
        elif activation == 'leakyrelu':
            self.activation = nn.LeakyReLU()
        elif activation == 'softshrink':
            self.activation = nn.Softshrink()
        elif activation == 'softsign':
            self.activation = nn.Softsign()
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'softplus':
            self.activation = nn.Softplus()
        elif activation == 'elu':
            self.activation = nn.ELU()
        elif activation == 'selu':
            self.activation = nn.SELU()
        else:
            raise ValueError("[!] Invalid activation function.")


        layers = []
        if self.activation is not None:
            layers.extend([
                nn.Linear(in_dim, hidden_dim),
                self.activation,
            ])
        else:
            layers.append(nn.Linear(in_dim, hidden_dim))
        for i in range(num_layers - 2):
            if self.activation is not None:
                layers.extend([
                    nn.Linear(hidden_dim, hidden_dim),
                    self.activation,
                ])
            else:
                layers.append(nn.Linear(hidden_dim, hidden_dim))
        layers.append(nn.Linear(hidden_dim, out_dim))

        self.model = nn.Sequential(*layers)

        # init
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
                fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight)
                bound = 1 / math.sqrt(fan_in)
                nn.init.uniform_(m.bias, -bound, bound)
Example #10
    def __init__(self, num_classes=2300):
        super(MobileNetV2, self).__init__()
        layers = []
        ## First Layer:
        # [n, 3, 32, 32]
        #input_conv = nn.Conv2d(3, 32, kernel_size = 3, stride = 2, padding = 1, bias = False)
        input_conv = nn.Conv2d(3, 32, kernel_size = 1, stride = 1, padding = 0, bias = False)
        input_bn = nn.BatchNorm2d(32)
        input_relu = nn.ReLU6(inplace = True)
        layers.append(input_conv)
        layers.append(input_bn)
        layers.append(input_relu)

        # [n, 32, 16, 16]
        # BottleNeck1
        #layers.append(InvertedResidual(32, 16, t=1, kernel_size=3, stride=1))
        layers.append(InvertedResidual(32, 16, t=1, kernel_size=1, stride=1))
        #layers.append(nn.Dropout(p=0.2))



        # [n, 16, 16, 16]
        expansion = 6
        # BottleNeck2
        layers.append(InvertedResidual(16, 24, t=expansion, kernel_size=1, stride=1))
        #layers.append(InvertedResidual(24, 24, t=expansion, kernel_size=1, stride=1))
        #layers.append(InvertedResidual(16, 24, t=expansion, kernel_size=3, stride=2))
        #layers.append(InvertedResidual(24, 24, t=expansion, kernel_size=3, stride=1))
        #layers.append(InvertedResidual(24, 24, t=6, kernel_size=3, stride=1))
        #layers.append(nn.Dropout(p=0.2))


        # [n, 24, 8, 8]
        # BottleNeck3
        layers.append(InvertedResidual(24, 32, t=expansion, kernel_size=3, stride=1))
        layers.append(InvertedResidual(32, 32, t=expansion, kernel_size=3, stride=1))
        #layers.append(InvertedResidual(32, 32, t=expansion, kernel_size=3, stride=1))
        #layers.append(nn.Dropout(p=0.1))


        # [n, 32, 4, 4]
        # BottleNeck4
        layers.append(InvertedResidual(32, 64, t=expansion, kernel_size=3, stride=2))
        layers.append(InvertedResidual(64, 64, t=expansion, kernel_size=3, stride=1))
        #layers.append(InvertedResidual(64, 64, t=expansion, kernel_size=3, stride=1))
        #layers.append(InvertedResidual(64, 64, t=expansion, kernel_size=3, stride=1))

        # [n, 64, 4, 4]
        # BottleNeck5
        layers.append(InvertedResidual(64, 96, t=expansion, kernel_size=3, stride=2))
        #layers.append(InvertedResidual(96, 96, t=expansion, kernel_size=3, stride=1))
        #layers.append(InvertedResidual(96, 96, t=expansion, kernel_size=3, stride=1))

        # [n, 96, 4, 4]
        # BottleNeck6
        #layers.append(InvertedResidual(96, 160, t=expansion, kernel_size=3, stride=2))
        #layers.append(InvertedResidual(160, 160, t=expansion, kernel_size=3, stride=1))
        #layers.append(InvertedResidual(160, 160, t=expansion, kernel_size=3, stride=1))

        # [n, 160, 4, 4]
        # BottleNeck7
        #layers.append(InvertedResidual(160, 320, t=expansion, kernel_size=3, stride=1))

        output_conv = nn.Conv2d(96, 512, kernel_size = 3, stride = 1, padding = 1, bias = False)
        output_bn = nn.BatchNorm2d(512)
        output_relu = nn.ReLU6(inplace = True)
        layers.append(output_conv)
        layers.append(output_bn)
        layers.append(output_relu)



        self.last_channel = 512
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.3),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)


        self.feature = nn.Sequential(*layers)

        ### THIS PART IS FOR CENTER LOSS
        self.linear_closs = nn.Linear(512, 64, bias=False)
        self.relu_closs = nn.ReLU(inplace=True)
Example #11
    def __init__(self,
                 num_classes=1000,
                 width_mult=1.0,
                 inverted_residual_setting=None,
                 round_nearest=8,
                 block=None,
                 norm_layer=None,
                 last_CN=None):
        """
        MobileNet V2 main class
        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(
                inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(
                                 inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult,
                                        round_nearest)
        self.last_channel = _make_divisible(
            last_channel * max(1.0, width_mult), round_nearest)
        features = [
            ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)
        ]
        # building inverted residual blocks
        total = 0
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                total += 1
                stride = s if i == 0 else 1
                features.append(
                    block(input_channel,
                          output_channel,
                          stride,
                          expand_ratio=t,
                          norm_layer=norm_layer))
                input_channel = output_channel
        #building last several layers
        features.append(
            ConvBNReLU(input_channel,
                       self.last_channel,
                       kernel_size=1,
                       norm_layer=norm_layer))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        self.features_first = self.features[:9]
        self.features_second = self.features[9:]

        if not last_CN:
            self.last_CN = self.last_channel
        else:
            self.last_CN = last_CN

        # building classifier

        self.num_ori = 12
        self.num_shape = 40
        self.num_exp = 10
        self.num_texture = 40
        self.num_bin = 121
        self.num_scale = 1
        self.num_trans = 3

        if last_CN is not None:
            self.connector = nn.Sequential(
                nn.Linear(self.last_CN, self.last_CN // 16),
                nn.ReLU6(inplace=True),
                nn.Linear(self.last_CN // 16, self.last_CN),
                nn.ReLU6(inplace=True), nn.Sigmoid())
            self.adjuster = nn.Sequential(
                nn.Linear(self.last_CN, self.last_CN),
                nn.BatchNorm1d(self.last_CN))

        self.classifier_ori = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_CN, self.num_ori),
        )
        self.classifier_shape = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_CN, self.num_shape),
        )
        self.classifier_exp = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_CN, self.num_exp),
        )
        self.classifier_texture = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_CN, self.num_texture),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
Example #12
 def __init__(self, hidden_dim, input_dim=1):
     super(ReadVideoEncoder, self).__init__()
     self.W = nn.Parameter(torch.randn(hidden_dim, input_dim))
     self.b = nn.Parameter(torch.randn(hidden_dim))
     self.nonlinearity = nn.ReLU6()
Example #13
def mobv2_ssdlite_lstm4_create(num_classes,
                               alpha=1.0,
                               use_batch_norm=True,
                               batch_size=None,
                               is_test=False):

    # the ratios below are baked directly into the channel numbers, so plain alpha can be used here
    alpha_base = alpha
    alpha_ssd = 0.5 * alpha
    alpha_lstm = 0.25 * alpha

    base_net = MobileNetV2(width_mult=alpha_base,
                           use_batch_norm=use_batch_norm).features

    source_layer_indexes = [
        GraphPath(14, 'conv', 3),
        19,
    ]
    # input_channels take the width multiplier of the preceding layer type!

    extras = nn.ModuleList([
        # the input/output channel ratio of a bottleneck LSTM is 1/4 because of its 4 gates
        # bottleneck LSTM after the last layer of base_net
        BottleneckLSTM(input_channels=int(1280 * alpha),
                       hidden_channels=int(320 * alpha),
                       height=10,
                       width=10,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(320 * alpha),
                      out_channels=int(160 * alpha),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(160 * alpha),
                            out_channels=int(320 * alpha),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        BottleneckLSTM(input_channels=int(320 * alpha),
                       hidden_channels=int(80 * alpha),
                       height=5,
                       width=5,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(80 * alpha),
                      out_channels=int(40 * alpha),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(40 * alpha),
                            out_channels=int(80 * alpha),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        BottleneckLSTM(input_channels=int(80 * alpha),
                       hidden_channels=int(20 * alpha),
                       height=3,
                       width=3,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(20 * alpha),
                      out_channels=int(10 * alpha),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(10 * alpha),
                            out_channels=int(20 * alpha),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        BottleneckLSTM(input_channels=int(20 * alpha),
                       hidden_channels=int(20 * alpha),
                       height=2,
                       width=2,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(20 * alpha),
                      out_channels=int(10 * alpha),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(10 * alpha),
                            out_channels=int(20 * alpha),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        BottleneckLSTM(input_channels=int(20 * alpha),
                       hidden_channels=int(20 * alpha),
                       height=1,
                       width=1,
                       batch_size=batch_size),
    ])

    regression_headers = nn.ModuleList([
        SeperableConv2d(in_channels=int(576 * alpha),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(320 * alpha),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(80 * alpha),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(20 * alpha),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(20 * alpha),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        nn.Conv2d(in_channels=int(20 * alpha),
                  out_channels=6 * 4,
                  kernel_size=1)
    ])

    classification_headers = nn.ModuleList([
        SeperableConv2d(in_channels=int(576 * alpha),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(320 * alpha),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(80 * alpha),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(20 * alpha),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(20 * alpha),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        nn.Conv2d(in_channels=int(20 * alpha),
                  out_channels=6 * num_classes,
                  kernel_size=1)
    ])
    """
	extras = nn.ModuleList([
		# the input/output channel ratio of a bottleneck LSTM is 1/4 because of its 4 gates
		BottleneckLSTM(input_channels=int(1280*alpha_base), hidden_channels=int(320*alpha_lstm), height=10, width=10, batch_size=batch_size),
		
		nn.Sequential(
			nn.Conv2d(in_channels=int(320*alpha_lstm), out_channels=int(160*alpha_ssd), kernel_size=1),
			nn.ReLU6(inplace=False),
			SeperableConv2d(in_channels=int(160*alpha_ssd), out_channels=int(320*alpha_ssd), kernel_size=3, stride=2, padding=1),
		),
		BottleneckLSTM(input_channels=int(320*alpha_ssd), hidden_channels=int(80*alpha_lstm), height=5, width=5, batch_size=batch_size),
		
		nn.Sequential(
			nn.Conv2d(in_channels=int(80*alpha_lstm), out_channels=int(40*alpha_ssd), kernel_size=1),
			nn.ReLU6(inplace=False),
			SeperableConv2d(in_channels=int(40*alpha_ssd), out_channels=int(80*alpha_ssd), kernel_size=3, stride=2, padding=1),
		),
		BottleneckLSTM(input_channels=int(80*alpha_ssd), hidden_channels=int(20*alpha_lstm), height=3, width=3, batch_size=batch_size),
		
		nn.Sequential(
			nn.Conv2d(in_channels=int(20*alpha_lstm), out_channels=int(10*alpha_ssd), kernel_size=1),
			nn.ReLU6(inplace=False),
			SeperableConv2d(in_channels=int(10*alpha_ssd), out_channels=int(20*alpha_ssd), kernel_size=3, stride=2, padding=1),
		),
		BottleneckLSTM(input_channels=int(20*alpha_ssd), hidden_channels=int(20*alpha_lstm), height=2, width=2, batch_size=batch_size),
		
		nn.Sequential(
			nn.Conv2d(in_channels=int(20*alpha_ssd), out_channels=int(10*alpha_ssd), kernel_size=1),
			nn.ReLU6(inplace=False),
			SeperableConv2d(in_channels=int(10*alpha_ssd), out_channels=int(20*alpha_ssd), kernel_size=3, stride=2, padding=1),
		)
	])
	
	regression_headers = nn.ModuleList([
		SeperableConv2d(in_channels=int(576*alpha_base), out_channels=6 * 4, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(1280*alpha_base), out_channels=6 * 4, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(320*alpha_ssd), out_channels=6 * 4, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(80*alpha_ssd), out_channels=6 * 4, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(20*alpha_ssd), out_channels=6 * 4, kernel_size=3, padding=1),
		nn.Conv2d(in_channels=int(20*alpha_ssd), out_channels=6 * 4, kernel_size=1)
	])

	classification_headers = nn.ModuleList([
		SeperableConv2d(in_channels=int(576*alpha_base), out_channels=6 * num_classes, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(1280*alpha_base), out_channels=6 * num_classes, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(320*alpha_ssd), out_channels=6 * num_classes, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(80*alpha_ssd), out_channels=6 * num_classes, kernel_size=3, padding=1),
		SeperableConv2d(in_channels=int(20*alpha_ssd), out_channels=6 * num_classes, kernel_size=3, padding=1),
		nn.Conv2d(in_channels=int(20*alpha_ssd), out_channels=6 * num_classes, kernel_size=1)
	])"""

    ssd = SSD(num_classes=num_classes,
              base_net=base_net,
              source_layer_indexes=source_layer_indexes,
              extras=extras,
              classification_headers=classification_headers,
              regression_headers=regression_headers,
              is_test=is_test,
              config=config)

    return ssd
Example #14
def mobv1_ssdlite_lstm4_create(num_classes,
                               alpha=1.,
                               batch_size=None,
                               is_test=False):
    alpha_base = alpha
    alpha_ssd = 0.5 * alpha
    alpha_lstm = 0.25 * alpha

    base_net = MobileNetV1(1001).model

    source_layer_indexes = [
        12,
        14,
    ]

    extras = nn.ModuleList([
        BottleneckLSTM(input_channels=int(1024 * alpha_lstm),
                       hidden_channels=int(256 * alpha_lstm),
                       height=10,
                       width=10,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(256 * alpha_ssd),
                      out_channels=int(128 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(128 * alpha_ssd),
                            out_channels=int(256 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        BottleneckLSTM(input_channels=int(256 * alpha_lstm),
                       hidden_channels=int(64 * alpha_lstm),
                       height=5,
                       width=5,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(64 * alpha_ssd),
                      out_channels=int(32 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(32 * alpha_ssd),
                            out_channels=int(64 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        BottleneckLSTM(input_channels=int(64 * alpha_lstm),
                       hidden_channels=int(16 * alpha_lstm),
                       height=3,
                       width=3,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(16 * alpha_ssd),
                      out_channels=int(8 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(8 * alpha_ssd),
                            out_channels=int(16 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        BottleneckLSTM(input_channels=int(16 * alpha_lstm),
                       hidden_channels=int(16 * alpha_lstm),
                       height=1,
                       width=1,
                       batch_size=batch_size),
        nn.Sequential(
            nn.Conv2d(in_channels=int(16 * alpha_ssd),
                      out_channels=int(8 * alpha_ssd),
                      kernel_size=1),
            nn.ReLU6(inplace=False),
            SeperableConv2d(in_channels=int(8 * alpha_ssd),
                            out_channels=int(16 * alpha_ssd),
                            kernel_size=3,
                            stride=2,
                            padding=1),
        )
    ])

    # apply these to every feature map or only to the last one?
    regression_headers = nn.ModuleList([
        SeperableConv2d(in_channels=int(512 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(256 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(64 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(16 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(16 * alpha_ssd),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        nn.Conv2d(in_channels=int(16 * alpha_ssd),
                  out_channels=6 * 4,
                  kernel_size=1),
    ])

    classification_headers = nn.ModuleList([
        SeperableConv2d(in_channels=int(512 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(256 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(64 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(16 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=int(16 * alpha_ssd),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        nn.Conv2d(in_channels=int(16 * alpha_ssd),
                  out_channels=6 * num_classes,
                  kernel_size=1),
    ])

    ssd = SSD(num_classes=num_classes,
              base_net=base_net,
              source_layer_indexes=source_layer_indexes,
              extras=extras,
              classification_headers=classification_headers,
              regression_headers=regression_headers,
              is_test=is_test,
              config=config)

    return ssd
Example #15
def conv_dw(inp, oup, stride, pad1=0, bias_ena=False):
    padding = (1, 1, 1, 1) if stride == 1 else (0, 1, 0, 1)
    return nn.Sequential(
        nn.ConstantPad2d(padding, value=0.),
        nn.Conv2d(inp, inp, 3, stride, 0, groups=inp, bias=bias_ena),
        nn.BatchNorm2d(inp), nn.ReLU6(inplace=False))
Example #16
def conv_block(inp, oup, stride, batch_norm):
    return nn.Sequential(nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                         batch_norm(oup), nn.ReLU6(inplace=True))
Example #17
def conv_pw(inp, oup, stride, bias_ena=False):
    padding = (0, 0, 0, 0)
    return nn.Sequential(nn.ConstantPad2d(padding, value=0.),
                         nn.Conv2d(inp, oup, 1, 1, 0, bias=bias_ena),
                         nn.BatchNorm2d(oup), nn.ReLU6(inplace=False))
Example #18
    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Stem
        in_channels = 3  # rgb
        out_channels = round_filters(
            32, self._global_params)  # number of output channels
        self._conv_stem = Conv2d(in_channels,
                                 out_channels,
                                 kernel_size=3,
                                 stride=2,
                                 bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels,
                                   momentum=bn_mom,
                                   eps=bn_eps)

        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:

            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters,
                                            self._global_params),
                output_filters=round_filters(block_args.output_filters,
                                             self._global_params),
                num_repeat=round_repeats(block_args.num_repeat,
                                         self._global_params))

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(
                    input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(
                    MBConvBlock(block_args, self._global_params))

        # Head
        in_channels = block_args.output_filters  # output of final block
        out_channels = round_filters(1280, self._global_params)
        self._conv_head = Conv2d(in_channels,
                                 out_channels,
                                 kernel_size=1,
                                 bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels,
                                   momentum=bn_mom,
                                   eps=bn_eps)

        # Final linear layer
        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        self._dropout = nn.Dropout(self._global_params.dropout_rate)
        self._fc = nn.Linear(out_channels, self._global_params.num_classes)
        self._relu6 = nn.ReLU6()
        self._swish = MemoryEfficientSwish()
Example #19
def conv_bn(inp, oup, stride, BatchNorm):
    return nn.Sequential(
        nn.Conv3d(inp, oup, 3, stride, 1, bias=False), #in_ch, out_ch, kernel, stride, pad
        BatchNorm(oup),
        nn.ReLU6(inplace=True)
    )
Example #20
    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio
                       is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect
        self.feature_input_size = [
            56, 28, 14, 7
        ]  # output sizes we are looking for overhaul distillation

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Expansion phase
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp,
                                       out_channels=oup,
                                       kernel_size=1,
                                       bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup,
                                       momentum=self._bn_mom,
                                       eps=self._bn_eps)

        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        self._depthwise_conv = Conv2d(
            in_channels=oup,
            out_channels=oup,
            groups=oup,  # groups makes it depthwise
            kernel_size=k,
            stride=s,
            bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup,
                                   momentum=self._bn_mom,
                                   eps=self._bn_eps)

        # Squeeze and Excitation layer, if desired
        if self.has_se:
            num_squeezed_channels = max(
                1,
                int(self._block_args.input_filters *
                    self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup,
                                     out_channels=num_squeezed_channels,
                                     kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels,
                                     out_channels=oup,
                                     kernel_size=1)

        # Output phase
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup,
                                    out_channels=final_oup,
                                    kernel_size=1,
                                    bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup,
                                   momentum=self._bn_mom,
                                   eps=self._bn_eps)
        self._relu6 = nn.ReLU6()
        self._swish = MemoryEfficientSwish()
Example #21
    def __init__(self, inp, oup, stride, expand_ratio, keep_3x3=False):
        super(I2RBlock, self).__init__()
        assert stride in [1, 2]

        hidden_dim = inp // expand_ratio
        if hidden_dim < oup / 6.:
            hidden_dim = math.ceil(oup / 6.)
            hidden_dim = _make_divisible(hidden_dim, 16)  # + 16

        #self.relu = nn.ReLU6(inplace=True)
        self.identity = False
        self.identity_div = 1
        self.expand_ratio = expand_ratio
        if expand_ratio == 2:
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(oup, oup, 3, stride, 1, groups=oup, bias=False),
                nn.BatchNorm2d(oup),
            )
        elif inp != oup and stride == 1 and keep_3x3 == False:
            self.conv = nn.Sequential(
                # pw-linear
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU6(inplace=True),
            )
        elif inp != oup and stride == 2 and keep_3x3 == False:
            self.conv = nn.Sequential(
                # pw-linear
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(oup, oup, 3, stride, 1, groups=oup, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:
            if keep_3x3 == False:
                self.identity = True
            self.conv = nn.Sequential(
                # dw
                # nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
                # nn.BatchNorm2d(inp),
                # nn.ReLU6(inplace=True),
                # pw
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                #nn.ReLU6(inplace=True),
                # pw
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(oup, oup, 3, 1, 1, groups=oup, bias=False),
                nn.BatchNorm2d(oup),
            )
Example #22
 def __init__(self, inplace=True):
     super(h_sigmoid, self).__init__()
     self.relu = nn.ReLU6(inplace=inplace)
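
 # forward is not shown in this excerpt; the usual ReLU6-based hard sigmoid
 # (consistent with h_sigmoid_fn in Example #30 below) is a reasonable assumption:
 def forward(self, x):
     return self.relu(x + 3) / 6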
Example #23
 def __init__(self):
     super(HardSwish, self).__init__()
     self.relu6 = nn.ReLU6()
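
 # forward is omitted in this excerpt; a standard ReLU6-based hard swish is a
 # reasonable sketch (an assumption, not taken from the original source):
 def forward(self, x):
     return x * self.relu6(x + 3) / 6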
Example #24
    def __init__(self,
                 baseWidth,
                 cardinality,
                 layers,
                 num_classes,
                 preactivation=True,
                 stochastic=True,
                 personalized=True,
                 activ_fun='relu'):
        """ Constructor
        Args:
            baseWidth: baseWidth for ResNeXt.
            cardinality: number of convolution groups.
            layers: config of layers, e.g., [3, 4, 6, 3]
            num_classes: number of classes
        """
        super(ResNeXt, self).__init__()
        block = Bottleneck

        self.cardinality = cardinality
        self.baseWidth = baseWidth
        self.num_classes = num_classes
        self.inplanes = 64
        self.output_size = 64
        self.depth = sum(layers)
        self.preactivation = preactivation
        self.stochastic = stochastic
        self.personalized = personalized
        self.activ_fun = activ_fun

        self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        if activ_fun == 'relu':
            self.relu = nn.ReLU(inplace=True)
        else:
            self.relu = nn.ReLU6(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # overwrites the max-pool above with a strided 3x3 convolution
        self.maxpool1 = nn.Conv2d(64, 64, 3, 2, 1, bias=False)

        rel_depth = 0
        self.layer1 = self._make_layer(block, 64, layers[0], rel_depth, 1,
                                       self.preactivation, self.stochastic,
                                       self.personalized)
        rel_depth += layers[0]
        self.layer2 = self._make_layer(block, 128, layers[1], rel_depth, 2,
                                       self.preactivation, self.stochastic,
                                       self.personalized)
        rel_depth += layers[1]
        self.layer3 = self._make_layer(block, 256, layers[2], rel_depth, 2,
                                       self.preactivation, self.stochastic,
                                       self.personalized)
        rel_depth += layers[2]
        self.layer4 = self._make_layer(block, 512, layers[3], rel_depth, 2,
                                       self.preactivation, self.stochastic,
                                       self.personalized)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #25
 def __init__(self, inplace=True):
     super(HSwish, self).__init__()
     self.inplace = inplace
     self.relu = nn.ReLU6(inplace=self.inplace)
Example #26
    def __init__(self,
                 inplanes,
                 planes,
                 baseWidth,
                 cardinality,
                 stride=1,
                 downsample=None,
                 drop_rate=0,
                 preactivation=True,
                 stochastic=True,
                 personalized=True,
                 activ_fun='relu'):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            baseWidth: base width.
            cardinality: num of convolution groups.
            stride: conv stride. Replaces pooling layer.
        """
        super(Bottleneck, self).__init__()

        D = int(math.floor(planes * (baseWidth / 64)))
        C = cardinality

        self.conv1 = nn.Conv2d(inplanes,
                               D * C,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(D * C)
        self.bn1p = nn.BatchNorm2d(inplanes)
        self.conv2a = nn.Conv2d(D * C,
                                D * C // 2,
                                kernel_size=3,
                                stride=stride,
                                padding=1,
                                groups=C,
                                bias=False)
        self.conv2b = nn.Sequential(
            nn.Conv2d(D * C,
                      D * C,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      groups=C,
                      bias=False),
            nn.Conv2d(D * C,
                      D * C // 2,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      groups=C,
                      bias=False))
        self.conv2c = nn.Conv2d(D * C,
                                D * C,
                                kernel_size=3,
                                stride=stride,
                                padding=1,
                                groups=C,
                                bias=False)
        self.bn2 = nn.BatchNorm2d(D * C)
        self.bn2p = nn.BatchNorm2d(D * C)
        self.conv3 = nn.Conv2d(D * C,
                               planes * 4,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3p = nn.BatchNorm2d(D * C)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.activ_fun = activ_fun

        if activ_fun == 'relu':
            self.relu = nn.ReLU(inplace=True)
        else:
            self.relu = nn.ReLU6(inplace=True)
        self.stocdepth = nn.Dropout(p=drop_rate)
        self.activ_fun = activ_fun

        self.downsample = downsample
        self.preactivation = preactivation
        self.stochastic = stochastic
        self.personalized = personalized
Example #27
def conv_3x3_bn(inp, oup, stride):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )
Example #28
 def __init__(self):
     super().__init__()
     self.relu6 = nn.ReLU6()
Example #29
        self.activations = nn.ModuleDict([
                ['lrelu', nn.LeakyReLU()],
                ['prelu', nn.PReLU()]
        ])

    def forward(self, x, choice, act):
        x = self.choices[choice](x)
        x = self.activations[act](x)
        return x
"""

_activations = nn.ModuleDict([
    ["lrelu", nn.LeakyReLU()],
    ["prelu", nn.PReLU()],
    ["relu", nn.ReLU(inplace=True)],
    ["relu6", nn.ReLU6(inplace=True)],
    ["hswish", activations.h_swish()],
    ["hsigmoid", activations.h_sigmoid()],
])


class BatchNorm2d(nn.Module):
    def __init__(self, D, momentum=0.01):
        super(BatchNorm2d, self).__init__()
        self.D = D
        self.momentum = momentum
        self.batch_norm = nn.BatchNorm2d(self.D, momentum=self.momentum)

    def forward(self, x):
        return self.batch_norm(x)
Example #30
def h_sigmoid_fn(x, inplace=True):
    relu = nn.ReLU6(inplace=inplace)
    return relu(x + 3) / 6
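
# Quick sanity check (illustrative, not from the original source): the hard
# sigmoid saturates at 0 below x = -3 and at 1 above x = +3.
if __name__ == '__main__':
    import torch
    print(h_sigmoid_fn(torch.tensor([-4.0, 0.0, 4.0])))  # tensor([0.0000, 0.5000, 1.0000])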