Example #1
    def __init__(
        self,
        in_channels=1,
        out_channels=32,
        input_dim=312,
        hidden_dim=32,
        output_dim=10,
    ):
        super(cnn1d_ser, self).__init__()
        self.classifier = nn.Sequential(
            nn.Conv1d(in_channels, out_channels, 5, stride=1, padding=2),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv1d(out_channels, out_channels, 5, stride=1, padding=2),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Flatten(),
            nn.Linear(input_dim * out_channels, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, output_dim),
        )
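Both Conv1d layers above use kernel 5 with stride 1 and padding 2, so the sequence length of 312 is preserved; after Flatten the feature size is 312 * 32, which is exactly what the first Linear layer expects. A minimal shape check (a sketch, assuming cnn1d_ser is importable and its forward simply applies self.classifier):

import oneflow as flow

model = cnn1d_ser()               # defaults: in_channels=1, input_dim=312, output_dim=10
model.eval()                      # BatchNorm1d uses running statistics in eval mode
x = flow.randn(4, 1, 312)         # (batch, channels, length)
print(model.classifier(x).shape)  # (4, 10)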
Example #2
    def __init__(self, num_speakers=2) -> None:
        super(simple_CNN, self).__init__()
        self.convs = nn.Sequential(
            nn.Conv1d(1, 16, 100, stride=10),
            nn.BatchNorm1d(16),
            nn.ReLU(),
            nn.Conv1d(16, 64, 21, stride=10),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 64, 5, stride=5),
            nn.BatchNorm1d(64),
            nn.ReLU(),
        )
        self.linears = nn.Sequential(nn.Linear(1 * 6 * 64, 128),
                                     nn.Linear(128, num_speakers))
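The hard-coded 1 * 6 * 64 in the first Linear layer pins down the expected input length: with no padding, each Conv1d maps a length L to floor((L - kernel) / stride) + 1, so a 3200-sample waveform gives 3200 -> 311 -> 30 -> 6 frames of 64 channels. A quick check of that arithmetic (a sketch; the forward pass, which presumably flattens between convs and linears, is not shown above):

import oneflow as flow
from oneflow import nn

convs = nn.Sequential(                 # same stack as simple_CNN.convs
    nn.Conv1d(1, 16, 100, stride=10), nn.BatchNorm1d(16), nn.ReLU(),
    nn.Conv1d(16, 64, 21, stride=10), nn.BatchNorm1d(64), nn.ReLU(),
    nn.Conv1d(64, 64, 5, stride=5), nn.BatchNorm1d(64), nn.ReLU(),
)
convs.eval()
out = convs(flow.randn(2, 1, 3200))    # -> (2, 64, 6)
print(out.flatten(1).shape)            # (2, 384), i.e. 1 * 6 * 64 features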
Example #3
def build_norm(norm, dim):
    """
    Build a normalization layer.
    LN costs more memory than BN.
    """
    if norm not in ["cLN", "gLN", "BN"]:
        raise RuntimeError("Unsupported normalize layer: {}".format(norm))
    if norm == "cLN":
        return ChannelWiseLayerNorm(dim, elementwise_affine=True)
    elif norm == "BN":
        return nn.BatchNorm1d(dim)
    else:
        return GlobalChannelLayerNorm(dim, elementwise_affine=True)
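With norm="BN" this simply returns an nn.BatchNorm1d, which normalizes a (batch, channels, length) input over batch and time for each channel; the cLN/gLN branches depend on the project's own ChannelWiseLayerNorm and GlobalChannelLayerNorm classes. A small sketch of the BN branch:

import oneflow as flow

norm = build_norm("BN", 256)           # -> nn.BatchNorm1d(256)
y = norm(flow.randn(8, 256, 100))      # each of the 256 channels normalized over batch and time
print(y.shape)                         # (8, 256, 100)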
Example #4
def get_bn_graph():
    model = nn.BatchNorm1d(6)
    model.eval()
    model.to_global(flow.env.all_device_placement("cpu"), flow.sbp.broadcast)

    class Testgraph(flow.nn.Graph):
        def __init__(self, model):
            super(Testgraph, self).__init__()
            self.module = model

        def build(self, x):
            return self.module(x)

    test_graph = Testgraph(model)
    return test_graph
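nn.Graph compiles the wrapped module into a static graph; because the module was made global with a broadcast SBP over all CPU devices, the input must be a matching global tensor. A usage sketch:

import oneflow as flow

graph = get_bn_graph()
x = flow.randn(4, 6).to_global(
    flow.env.all_device_placement("cpu"), flow.sbp.broadcast
)
y = graph(x)       # the first call compiles the graph
print(y.shape)     # (4, 6); in eval mode BatchNorm1d applies its running statistics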
Example #5
    def __init__(self, num_classes):
        super(ResReid, self).__init__()
        self.in_planes = 2048
        self.model = ResNet(block=Bottleneck,
                            layers=[3, 4, 6, 3],
                            last_stride=1)
        self.gap = nn.AdaptiveAvgPool2d(1)

        self.bnneck = nn.BatchNorm1d(self.in_planes)
        self.bnneck.bias.requires_grad_(False)  # no shift
        self.num_classes = num_classes
        nn.init.normal_(self.bnneck.weight, 1, 0.02)
        nn.init.constant_(self.bnneck.bias, 0)

        self.classifier = nn.Linear(self.in_planes,
                                    self.num_classes,
                                    bias=False)

        nn.init.normal_(self.classifier.weight, 0, 0.01)
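The bnneck here is the BNNeck from the bag-of-tricks re-ID baseline: a BatchNorm1d over the pooled 2048-d feature whose shift (bias) is frozen, so only a per-channel scale is learned before the bias-free classifier. The pattern in isolation (a sketch with a hypothetical number of identities; the actual forward of ResReid is not shown above):

import oneflow as flow
from oneflow import nn

bnneck = nn.BatchNorm1d(2048)
bnneck.bias.requires_grad_(False)              # no shift, as in ResReid
classifier = nn.Linear(2048, 751, bias=False)  # 751 identities is a hypothetical value

feat = flow.randn(16, 2048)                    # globally pooled backbone features
logits = classifier(bnneck(feat))              # the BN-ed feature feeds the ID loss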
Example #6
    def __init__(self,
                 in_planes,
                 out_planes,
                 kernel_size=3,
                 stride=1,
                 groups=1):
        padding = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            nn.Conv1d(
                in_planes,
                out_planes,
                kernel_size,
                stride,
                padding,
                groups=groups,
                bias=False,
            ),
            nn.BatchNorm1d(out_planes),
            nn.ReLU6(),
        )
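Because ConvBNReLU subclasses nn.Sequential, it needs no explicit forward; padding = (kernel_size - 1) // 2 keeps the length unchanged at stride 1 and roughly halves it at stride 2. For example (assuming the class above is in scope):

import oneflow as flow

stem = ConvBNReLU(1, 32, stride=2)    # Conv1d(kernel=3, stride=2, padding=1) + BatchNorm1d + ReLU6
y = stem(flow.randn(2, 1, 16000))
print(y.shape)                        # (2, 32, 8000)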
Example #7
    def __init__(self, channels, kernel_size, bias=True, dropout=0.0):
        super(ConformerConvolutionModule, self).__init__()

        assert kernel_size % 2 == 1

        self.pointwise_conv1 = nn.Linear(channels, 2 * channels, bias=bias)

        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
            groups=channels,
            bias=bias,
        )

        self.batch_norm = nn.BatchNorm1d(channels)

        self.pointwise_conv2 = nn.Linear(channels, channels, bias=bias)

        self.dropout = nn.Dropout(dropout)
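The forward pass is not shown; in the standard Conformer convolution module the two pointwise Linear layers operate on (batch, time, channels), while the depthwise Conv1d and BatchNorm1d expect (batch, channels, time), so the tensor is transposed around them, with a GLU gate after the first pointwise projection and a Swish activation after the batch norm. A sketch of that usual ordering (an assumption, not necessarily this repository's exact forward):

import oneflow as flow

def forward(self, x):                  # x: (batch, time, channels)
    x = self.pointwise_conv1(x)        # -> (batch, time, 2 * channels)
    a, b = x.chunk(2, dim=-1)
    x = a * flow.sigmoid(b)            # GLU gate -> (batch, time, channels)
    x = x.transpose(1, 2)              # (batch, channels, time) for Conv1d / BatchNorm1d
    x = self.depthwise_conv(x)
    x = self.batch_norm(x)
    x = x * flow.sigmoid(x)            # Swish activation
    x = x.transpose(1, 2)              # back to (batch, time, channels)
    x = self.pointwise_conv2(x)
    return self.dropout(x)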
Example #8
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pw
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            # dw
            ConvBNReLU(hidden_dim,
                       hidden_dim,
                       stride=stride,
                       groups=hidden_dim),
            # pw-linear
            nn.Conv1d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm1d(oup),
        ])
        self.conv = nn.Sequential(*layers)
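In MobileNetV2-style blocks the residual connection is added only when the stride is 1 and the input and output channel counts match, which is exactly what use_res_connect records. The usual one-line forward (a sketch; the method itself is not shown above):

def forward(self, x):
    if self.use_res_connect:
        return x + self.conv(x)    # identity shortcut around the dw/pw-linear stack
    return self.conv(x)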
Example #9
    def __init__(self, options):
        super(SincNet, self).__init__()

        self.cnn_N_filt = options["cnn_N_filt"]
        self.cnn_len_filt = options["cnn_len_filt"]
        self.cnn_max_pool_len = options["cnn_max_pool_len"]

        self.cnn_act = options["cnn_act"]
        self.cnn_drop = options["cnn_drop"]

        self.cnn_use_laynorm = options["cnn_use_laynorm"]
        self.cnn_use_batchnorm = options["cnn_use_batchnorm"]
        self.cnn_use_laynorm_inp = options["cnn_use_laynorm_inp"]
        self.cnn_use_batchnorm_inp = options["cnn_use_batchnorm_inp"]

        self.input_dim = int(options["input_dim"])

        self.fs = options["fs"]

        self.N_cnn_lay = len(options["cnn_N_filt"])
        self.conv = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.ln = nn.ModuleList([])
        self.act = nn.ModuleList([])
        self.drop = nn.ModuleList([])

        if self.cnn_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)

        if self.cnn_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)

        current_input = self.input_dim

        for i in range(self.N_cnn_lay):

            N_filt = int(self.cnn_N_filt[i])
            len_filt = int(self.cnn_len_filt[i])

            # dropout
            self.drop.append(nn.Dropout(p=self.cnn_drop[i]))

            # activation
            self.act.append(act_fun(self.cnn_act[i]))

            # layer norm initialization
            self.ln.append(
                LayerNorm((
                    N_filt,
                    int((current_input - self.cnn_len_filt[i] + 1) /
                        self.cnn_max_pool_len[i]),
                )))

            self.bn.append(
                # BatchNorm1d is parameterized by the channel count only; the
                # time dimension is not an argument.
                nn.BatchNorm1d(N_filt, momentum=0.05))

            if i == 0:
                self.conv.append(
                    SincConv_fast(self.cnn_N_filt[0], self.cnn_len_filt[0],
                                  self.fs))
            else:
                self.conv.append(
                    nn.Conv1d(self.cnn_N_filt[i - 1], self.cnn_N_filt[i],
                              self.cnn_len_filt[i]))

            current_input = int((current_input - self.cnn_len_filt[i] + 1) /
                                self.cnn_max_pool_len[i])

        self.out_dim = current_input * N_filt
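out_dim depends only on the filter lengths and pooling factors, not on whether BatchNorm or LayerNorm is selected. For instance, with the configuration of the original SincNet recipe (200 ms windows at 16 kHz, i.e. input_dim = 3200, cnn_N_filt = [80, 60, 60], cnn_len_filt = [251, 5, 5], cnn_max_pool_len = [3, 3, 3]) the length shrinks 3200 -> 983 -> 326 -> 107, giving out_dim = 107 * 60 = 6420, which is exactly the nn.BatchNorm1d(6420) that appears in Example #11 below. A quick check of that arithmetic:

N_filt, len_filt, pool = [80, 60, 60], [251, 5, 5], [3, 3, 3]
current = 3200                            # 200 ms at 16 kHz
for l, p in zip(len_filt, pool):
    current = int((current - l + 1) / p)  # 983, 326, 107
print(current * N_filt[-1])               # 6420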
Example #10
    def __init__(self, options):
        super(MLP, self).__init__()

        self.input_dim = int(options["input_dim"])
        self.fc_lay = options["fc_lay"]
        self.fc_drop = options["fc_drop"]
        self.fc_use_batchnorm = options["fc_use_batchnorm"]
        self.fc_use_laynorm = options["fc_use_laynorm"]
        self.fc_use_laynorm_inp = options["fc_use_laynorm_inp"]
        self.fc_use_batchnorm_inp = options["fc_use_batchnorm_inp"]
        self.fc_act = options["fc_act"]

        self.wx = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.ln = nn.ModuleList([])
        self.act = nn.ModuleList([])
        self.drop = nn.ModuleList([])

        # input layer normalization
        if self.fc_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)

        # input batch normalization
        if self.fc_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)

        self.N_fc_lay = len(self.fc_lay)

        current_input = self.input_dim

        # Initialization of hidden layers
        for i in range(self.N_fc_lay):

            # dropout
            self.drop.append(nn.Dropout(p=self.fc_drop[i]))

            # activation
            self.act.append(act_fun(self.fc_act[i]))

            add_bias = True

            # layer norm initialization
            self.ln.append(LayerNorm(self.fc_lay[i]))
            self.bn.append(nn.BatchNorm1d(self.fc_lay[i], momentum=0.05))

            if self.fc_use_laynorm[i] or self.fc_use_batchnorm[i]:
                add_bias = False

            # Linear operations
            self.wx.append(nn.Linear(current_input, self.fc_lay[i], bias=add_bias))

            # weight initialization
            self.wx[i].weight = nn.Parameter(
                flow.Tensor(self.fc_lay[i], current_input).uniform_(
                    -np.sqrt(0.01 / (current_input + self.fc_lay[i])),
                    np.sqrt(0.01 / (current_input + self.fc_lay[i])),
                )
            )
            self.wx[i].bias = nn.Parameter(flow.zeros(self.fc_lay[i]))

            current_input = self.fc_lay[i]
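The constructor expects a single flat options dict; every fc_* entry except the two _inp flags is a per-layer list of equal length. A minimal illustrative configuration (the values are hypothetical and only show the expected keys; the surrounding project's act_fun and LayerNorm helpers are assumed to be in scope):

opts = {
    "input_dim": 6420,                  # e.g. the SincNet out_dim from Example #9
    "fc_lay": [2048, 512],
    "fc_drop": [0.0, 0.0],
    "fc_use_batchnorm": [True, True],
    "fc_use_laynorm": [False, False],
    "fc_use_laynorm_inp": False,
    "fc_use_batchnorm_inp": False,
    "fc_act": ["leaky_relu", "linear"],
}
mlp = MLP(opts)                         # two layers: 6420 -> 2048 -> 512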
Example #11
    def __init__(
        self,
        num_classes=1000,
        width_mult=1.0,
        inverted_residual_setting=None,
        round_nearest=8,
    ):
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if (len(inverted_residual_setting) == 0
                or len(inverted_residual_setting[0]) != 4):
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(
                                 inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult,
                                        round_nearest)
        self.last_channel = _make_divisible(
            last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(1, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(
                    block(input_channel,
                          output_channel,
                          stride,
                          expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(
            ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, num_classes),
            nn.LogSoftmax(dim=1),
        )

        self.normalize = nn.BatchNorm1d(6420)
        self.maxpool1d = nn.MaxPool1d(3, stride=2)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
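nn.BatchNorm1d(6420) matches the 6420-dimensional frontend feature derived in Example #9; on a 2-D (batch, features) input, BatchNorm1d standardizes each feature over the batch. In isolation (how normalize and maxpool1d are wired into the forward is not shown above):

import oneflow as flow
from oneflow import nn

norm = nn.BatchNorm1d(6420)
x = flow.randn(32, 6420)    # one 6420-dim frontend vector per training chunk
y = norm(x)                 # per-feature batch statistics; shape stays (32, 6420)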
Example #12
    def __init__(
        self,
        block,
        layers,
        dropout=0,
        num_features=512,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        fp16=False,
    ):
        super(IResNet, self).__init__()
        self.fp16 = fp16
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3,
                               self.inplanes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
        self.prelu = nn.PReLU(self.inplanes)  # one learnable slope per channel
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.bn2 = nn.BatchNorm2d(
            512 * block.expansion,
            eps=1e-05,
        )
        self.dropout = nn.Dropout(p=dropout, inplace=True)
        self.fc = nn.Linear(512 * block.expansion * self.fc_scale,
                            num_features)
        self.features = nn.BatchNorm1d(num_features, eps=1e-05)
        nn.init.constant_(self.features.weight, 1.0)
        self.features.weight.requires_grad = False

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0, 0.1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, IBasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
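The final nn.BatchNorm1d(num_features) is applied to the flattened embedding, i.e. a 2-D (batch, num_features) tensor, with its scale frozen at 1 so only the running statistics and the bias adapt, an ArcFace-style trick for stabilizing the face embedding. The head in isolation (a sketch with the default num_features=512):

import oneflow as flow
from oneflow import nn

feat_bn = nn.BatchNorm1d(512, eps=1e-05)
nn.init.constant_(feat_bn.weight, 1.0)
feat_bn.weight.requires_grad = False   # fixed scale, as in IResNet above
emb = feat_bn(flow.randn(32, 512))     # one 512-d embedding per row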