Example 1
 def __init__(self, classes=7, **kwargs):
     super(GluonCrepe, self).__init__(**kwargs)
     self.NUM_FILTERS = 256  # number of convolutional filters per convolutional layer
     self.NUM_OUTPUTS = classes  # number of classes
     self.FULLY_CONNECTED = 1024  # number of units in each fully connected dense layer
     self.features = nn.HybridSequential()
     with self.name_scope():
         self.features.add(
             nn.Conv1D(channels=self.NUM_FILTERS,
                       kernel_size=7,
                       activation='relu'),
             nn.MaxPool1D(pool_size=3, strides=3),
             nn.Conv1D(channels=self.NUM_FILTERS,
                       kernel_size=7,
                       activation='relu'),
             nn.MaxPool1D(pool_size=3, strides=3),
             nn.Conv1D(channels=self.NUM_FILTERS,
                       kernel_size=3,
                       activation='relu'),
             nn.Conv1D(channels=self.NUM_FILTERS,
                       kernel_size=3,
                       activation='relu'),
             nn.Conv1D(channels=self.NUM_FILTERS,
                       kernel_size=3,
                       activation='relu'),
             nn.Conv1D(channels=self.NUM_FILTERS,
                       kernel_size=3,
                       activation='relu'),
             nn.MaxPool1D(pool_size=3, strides=3),
             nn.Flatten(),
             nn.Dense(self.FULLY_CONNECTED, activation='relu'),
             nn.Dense(self.FULLY_CONNECTED, activation='relu'),
         )
         self.output = nn.Dense(self.NUM_OUTPUTS)
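
A minimal smoke test for the block above. The 69-character alphabet and 1014-wide text window are the canonical Crepe settings, not fixed by this snippet, and the class's hybrid_forward is not shown, so the sketch drives self.features and self.output directly. Conv1D expects NCW layout: (batch, alphabet, width).

import mxnet as mx

net = GluonCrepe(classes=7)
net.initialize(mx.init.Xavier())
x = mx.nd.random.uniform(shape=(2, 69, 1014))  # assumed one-hot-style input
feats = net.features(x)                        # (2, 1024) after the dense layers
print(net.output(feats).shape)                 # (2, 7)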
Example 2
def get_netD1():
    netD = nn.Sequential()
    netD.add(
        nn.Conv1D(channels=8, kernel_size=4, strides=1, activation='relu'),
        nn.MaxPool1D(pool_size=4, strides=1),
        nn.Conv1D(channels=128, kernel_size=512, strides=512, activation='relu'),
        nn.MaxPool1D(pool_size=4, strides=4),
        nn.Conv1D(channels=256, kernel_size=4, strides=4, activation='relu'),
        nn.MaxPool1D(pool_size=4, strides=4),
        nn.Dense(128),
        nn.Dense(10))
    return netD
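
One detail worth flagging in get_netD1: there is no nn.Flatten() before nn.Dense(128). That still works because Gluon's nn.Dense flattens all trailing axes by default (flatten=True), as this standalone check shows:

import mxnet as mx
from mxnet.gluon import nn

dense = nn.Dense(128)
dense.initialize()
out = dense(mx.nd.ones((2, 256, 7)))  # (batch, channels, width) collapses to (batch, 256 * 7)
print(out.shape)                      # (2, 128)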
Example 3
 def __init__(self, num_points = 2500):
     super(STN3d, self).__init__()
     self.num_points = num_points
     with self.name_scope():
         self.STN3d = nn.HybridSequential(prefix='')
         with self.STN3d.name_scope():
             self.STN3d.add(nn.Conv1D(64, 1), nn.BatchNorm(in_channels=64), nn.Activation('relu'),
                            nn.Conv1D(128, 1), nn.BatchNorm(in_channels=128), nn.Activation('relu'),
                            nn.Conv1D(1024, 1), nn.BatchNorm(in_channels=1024), nn.Activation('relu'),
                            nn.MaxPool1D(num_points), nn.Flatten(),
                            nn.Dense(512), nn.BatchNorm(in_channels=512), nn.Activation('relu'),
                            nn.Dense(256), nn.BatchNorm(in_channels=256), nn.Activation('relu'),
                            nn.Dense(9))
          self.iden = self.params.get_constant(
              'iden',
              value=nd.array([1, 0, 0, 0, 1, 0, 0, 0, 1],
                             dtype='float32').reshape(1, 9))
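
A shape walk-through of the trunk, assuming the usual PointNet input layout (batch, 3, num_points): the 1x1 convolutions lift channels 3 -> 64 -> 128 -> 1024, MaxPool1D(num_points) collapses the point axis, and the dense head maps 1024 -> 512 -> 256 -> 9, a flattened 3x3 transform to which the iden constant is added in the forward pass (not shown here).

import mxnet as mx

net = STN3d(num_points=2500)
net.initialize()
x = mx.nd.random.uniform(shape=(2, 3, 2500))  # (batch, xyz, points), assumed layout
print(net.STN3d(x).shape)                     # (2, 9), before iden is added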
Example 4
    def __init__(self, ctx=mx.cpu(), warmup=5, runs=25, inputs=None):
        # Set the default Inputs
        default_parameters = {
            "data": (32, 3, 256),
            "data_initializer": nd.normal,
            "pool_size": 2,
            "strides": None,
            "padding": 0,
            "layout": "NCW",
            "run_backward": True,
            "dtype": "float32"
        }

        super().__init__(ctx=ctx,
                         warmup=warmup,
                         runs=runs,
                         default_parameters=default_parameters,
                         custom_parameters=inputs)

        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=self.inputs["data"],
                                   dtype=self.inputs["dtype"],
                                   initializer=self.inputs["data_initializer"],
                                   attach_grad=self.inputs["run_backward"])

        self.block = nn.MaxPool1D(pool_size=self.inputs["pool_size"],
                                  strides=self.inputs["strides"],
                                  padding=self.inputs["padding"],
                                  layout=self.inputs["layout"])
        self.block.initialize(ctx=self.ctx)
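
The benchmark base class and get_mx_ndarray are not shown, so here is a hedged stand-alone pass matching the defaults above: one forward/backward step through nn.MaxPool1D on NCW data.

from mxnet import autograd, nd
from mxnet.gluon import nn

data = nd.random.normal(shape=(32, 3, 256))  # the default "data" shape above
data.attach_grad()                           # mirrors run_backward=True
block = nn.MaxPool1D(pool_size=2, strides=None, padding=0, layout="NCW")
block.initialize()
with autograd.record():
    out = block(data)
out.backward()
print(out.shape)  # (32, 3, 128): strides=None falls back to pool_size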
Example 5
 def __init__(self, dilation_depth=2, n_repeat=5, **kwargs):
     super(TCN, self).__init__(**kwargs)
     self.dilations = [1, 2, 4, 8, 16]
     self.net = nn.Sequential()
     with self.name_scope():
         ## The embedding part
         self.conv1 = nn.Conv1D(kernel_size=24,
                                channels=1,
                                activation='relu',
                                strides=2)
         self.conv2 = nn.Conv1D(kernel_size=3,
                                channels=1,
                                activation='relu',
                                strides=2)
         self.pool1 = nn.MaxPool1D(pool_size=3)
         self.store_embedding = nn.Embedding(370, 10)
         self.nMonth_embedding = nn.Embedding(12, 2)
         self.nYear_embedding = nn.Embedding(3, 2)
         self.mDay_embedding = nn.Embedding(31, 3)
         self.wday_embedding = nn.Embedding(7, 3)
         self.nHour_embedding = nn.Embedding(24, 3)
         self.post_res = futureResidual(xDim=14)
         self.net.add(nn.Dense(64, flatten=False))
         self.net.add(nn.BatchNorm(axis=2))
         self.net.add(nn.Activation(activation='relu'))
         self.net.add(nn.Dropout(.2))
         self.net.add(nn.Dense(1, activation='relu', flatten=False))
Example 6
 def __init__(self, vocab, embed_size, num_channels, pooling_ints,
              dropout_rate, dense):
     """
     :param vocab: 词汇量
     :param embed_size: 200/300维
     :param num_channels: 多少
     :param pooling_ints: 池化数
     :param dropout_rate: dropout比率
     :param dense: dense层
     """
     super(Textcnn, self).__init__()
     with self.name_scope():
         self.embedding = nn.Embedding(vocab, embed_size)
         self.cnn1 = nn.Conv1D(num_channels,
                               kernel_size=3,
                               padding=2,
                               activation='relu')
         self.cnn2 = nn.Conv1D(num_channels,
                               kernel_size=4,
                               padding=3,
                               activation='relu')
         self.cnn3 = nn.Conv1D(num_channels,
                               kernel_size=5,
                               padding=4,
                               activation='relu')
         self.batchnorm = nn.BatchNorm()
         self.poolings = nn.MaxPool1D(pooling_ints)
         self.dropouts = nn.Dropout(dropout_rate)
         self.flattern = nn.Flatten()
         self.dense = nn.Sequential()
         for each in dense[:-1]:
             self.dense.add(nn.Dense(each, activation="softrelu"))
             self.dense.add(nn.BatchNorm())
             self.dense.add(nn.Dropout(dropout_rate))
         self.dense.add(nn.Dense(dense[-1], activation="sigmoid"))
Example 7
def test_pool():
    layers1d = [
        nn.MaxPool1D(),
        nn.MaxPool1D(3),
        nn.MaxPool1D(3, 2),
        nn.AvgPool1D(),
        nn.AvgPool1D(count_include_pad=False),
        nn.GlobalAvgPool1D(),
        ]
    for layer in layers1d:
        check_layer_forward(layer, (1, 2, 10))


    layers2d = [
        nn.MaxPool2D(),
        nn.MaxPool2D((3, 3)),
        nn.MaxPool2D(3, 2),
        nn.AvgPool2D(),
        nn.AvgPool2D(count_include_pad=False),
        nn.GlobalAvgPool2D(),
        ]
    for layer in layers2d:
        check_layer_forward(layer, (1, 2, 10, 10))

    layers3d = [
        nn.MaxPool3D(),
        nn.MaxPool3D((3, 3, 3)),
        nn.MaxPool3D(3, 2),
        nn.AvgPool3D(),
        nn.AvgPool3D(count_include_pad=False),
        nn.GlobalAvgPool3D(),
        ]
    for layer in layers3d:
        check_layer_forward(layer, (1, 2, 10, 10, 10))

    # test ceil_mode
    x = mx.nd.zeros((2, 2, 10, 10))

    layer = nn.MaxPool2D(3, ceil_mode=False)
    layer.collect_params().initialize()
    assert layer(x).shape == (2, 2, 3, 3)

    layer = nn.MaxPool2D(3, ceil_mode=True)
    layer.collect_params().initialize()
    assert layer(x).shape == (2, 2, 4, 4)
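
The ceil_mode asserts follow from the pooling output-size formula: with no padding, the output width is floor((W - pool) / stride) + 1, or ceil(...) + 1 when ceil_mode=True, and strides defaults to the pool size. For W = 10 and pool = 3 that gives floor(7/3) + 1 = 3 versus ceil(7/3) + 1 = 4. The same arithmetic holds in 1D:

x = mx.nd.zeros((2, 2, 10))
for ceil_mode, expect in [(False, 3), (True, 4)]:
    layer = nn.MaxPool1D(3, ceil_mode=ceil_mode)
    layer.collect_params().initialize()
    assert layer(x).shape == (2, 2, expect)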
Example 8
 def __init__(self, num_points = 2500, global_feat = True):
     super(PointNetfeat, self).__init__()
     self.stn = STN3d(num_points = num_points)
     self.conv1 = nn.Conv1D(64, 1)
     self.conv2 = nn.Conv1D(128, 1)
     self.conv3 = nn.Conv1D(1024, 1)
     self.bn1 = nn.BatchNorm(in_channels=64)
     self.bn2 = nn.BatchNorm(in_channels=128)
     self.bn3 = nn.BatchNorm(in_channels=1024)
     self.mp1 = nn.MaxPool1D(num_points)
     self.num_points = num_points
     self.global_feat = global_feat
Example 9
 def __init__(self, num_points=2500, global_feat=True, routing=None):
     super(PointNetfeat_vanilla, self).__init__()
     self.stn = input_transform_net(num_points=num_points)
     self.routing = routing
     self.conv1 = nn.Conv1D(64, 1)
     self.conv2 = nn.Conv1D(128, 1)
     self.conv3 = nn.Conv1D(1024, 1)
     self.bn1 = nn.BatchNorm(in_channels=64)
     self.bn2 = nn.BatchNorm(in_channels=128)
     self.bn3 = nn.BatchNorm(in_channels=1024)
     self.mp1 = nn.MaxPool1D(num_points)
     self.num_points = num_points
     self.global_feat = global_feat
Example 10
 def __init__(self, opt):
     super(CNNText, self).__init__()
     self.opt = opt
     with self.name_scope():
         self.drop = nn.Dropout(opt.drop)
         #self.encoder = nn.Embedding(input_dim=opt.vocab_size,output_dim=opt.embed_dim)
         self.conv_block = HybrideConcurrent(concat_dim=1)
         for i, ngram_filter in enumerate(opt.ngram_filters):
             net = nn.HybridSequential(prefix='filter' + str(i))
             net.add(nn.Conv1D(opt.num_hidden, ngram_filter))
             #net.add(nn.BatchNorm())
             net.add(nn.Activation('relu'))
             net.add(nn.MaxPool1D(opt.seq_len - ngram_filter + 1))
             self.conv_block.add(net)
Example 11
def CNN():
    net = nn.Sequential()
    with net.name_scope():
        net.add(
            nn.Conv1D(channels=32, kernel_size=33),
            nn.BatchNorm(axis=1),
            nn.Activation('relu'),
            nn.MaxPool1D(pool_size=13),
            
            nn.Flatten(),
            nn.Dense(33, activation='relu'),
            nn.Dropout(0.2),
            nn.Dense(1, activation='sigmoid'),
        )
    return net
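
A quick forward-shape check for CNN(). The input width is not fixed by the snippet, so 200 below is an assumption, chosen so the kernel_size=33 convolution and the pool_size=13 pooling both see valid widths.

import mxnet as mx

net = CNN()
net.initialize(mx.init.Xavier())
x = mx.nd.random.uniform(shape=(4, 1, 200))  # (batch, channels, width), width assumed
print(net(x).shape)                          # (4, 1): conv -> 168, pool -> 12, dense head -> 1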
Example 12
 def __init__(self, num_points = 2500, global_feat = True):
     super(PointNetfeat_sim, self).__init__()
     self.k = 30
     self.stn = STN3d(num_points = num_points)
     self.sim = nn.Dense(16, flatten=False)
     self.sim_bn = nn.BatchNorm(in_channels=16)
     self.sim_t = nn.Dense(16, flatten=False)
     self.sim_tbn = nn.BatchNorm(in_channels=16)
     self.conv1 = nn.Conv1D(64, 1)
     self.conv2 = nn.Conv1D(128, 1)
     self.conv3 = nn.Conv1D(1024, 1)
     self.bn1 = nn.BatchNorm(in_channels=64)
     self.bn2 = nn.BatchNorm(in_channels=128)
     self.bn3 = nn.BatchNorm(in_channels=1024)
     self.mp1 = nn.MaxPool1D(num_points)
     self.num_points = num_points
     self.global_feat = global_feat
Example 13
 def __init__(self, vocab, embed_size, headers, unit, num_channels,
              pooling_ints, dropout_rate, dense):
     """
     :param vocab: 词汇量
     :param embed_size: 200/300维
     :param num_channels: 多少
     :param pooling_ints: 池化数
     :param dropout_rate: dropout比率
     :param dense: dense层
     """
     super(Textcnn_attention, self).__init__()
     with self.name_scope():
         self.embedding = nn.Embedding(vocab, embed_size)
         if headers > 0:
             cell = DotProductAttentionCell(scaled=True, dropout=0.2)
             cell = MultiHeadAttentionCell(base_cell=cell,
                                           use_bias=False,
                                           query_units=unit,
                                           key_units=unit,
                                           value_units=unit,
                                           num_heads=headers)
             self.att = cell
         else:
             self.att = None
         self.cnn1 = nn.Conv1D(num_channels,
                               kernel_size=3,
                               padding=2,
                               activation='relu')
         self.cnn2 = nn.Conv1D(num_channels,
                               kernel_size=4,
                               padding=3,
                               activation='relu')
         self.cnn3 = nn.Conv1D(num_channels,
                               kernel_size=5,
                               padding=4,
                               activation='relu')
         self.batchnorm = nn.BatchNorm()
         self.poolings = nn.MaxPool1D(pooling_ints)
         self.dropouts = nn.Dropout(dropout_rate)
         self.flattern = nn.Flatten()
         self.dense = nn.Sequential()
         for each in dense[:-1]:
             self.dense.add(nn.Dense(each, activation="softrelu"))
             self.dense.add(nn.BatchNorm())
             self.dense.add(nn.Dropout(dropout_rate))
         self.dense.add(nn.Dense(dense[-1], activation="sigmoid"))
Example 14
    def __init__(self, num_points=2500):
        super(STN3d, self).__init__()
        self.num_points = num_points
        self.conv1 = nn.Conv1D(64, 1)
        self.conv2 = nn.Conv1D(128, 1)
        self.conv3 = nn.Conv1D(1024, 1)
        self.mp1 = nn.MaxPool1D(num_points)
        self.fc1 = nn.Dense(512)
        self.fc2 = nn.Dense(256)
        self.fc3 = nn.Dense(9)
        self.relu = nn.Activation('relu')

        self.bn1 = nn.BatchNorm(in_channels=64)
        self.bn2 = nn.BatchNorm(in_channels=128)
        self.bn3 = nn.BatchNorm(in_channels=1024)
        self.bn4 = nn.BatchNorm(in_channels=512)
        self.bn5 = nn.BatchNorm(in_channels=256)
        self.iden = self.params.get_constant(
            'iden',
            value=nd.array([1, 0, 0, 0, 1, 0, 0, 0, 1],
                           dtype='float32').reshape(1, 9))
Example 15
 def __init__(self, num_factors, num_users, num_items, L=5, d=16,
              d_prime=4, drop_ratio=0.05, **kwargs):
     super(Caser, self).__init__(**kwargs)
     self.P = nn.Embedding(num_users, num_factors)
     self.Q = nn.Embedding(num_items, num_factors)
     self.d_prime, self.d = d_prime, d
     # Vertical convolution layer
     self.conv_v = nn.Conv2D(d_prime, (L, 1), in_channels=1)
     # Horizontal convolution layer
     h = [i + 1 for i in range(L)]
     self.conv_h, self.max_pool = nn.Sequential(), nn.Sequential()
     for i in h:
         self.conv_h.add(nn.Conv2D(d, (i, num_factors), in_channels=1))
         self.max_pool.add(nn.MaxPool1D(L - i + 1))
     # Fully-connected layer
     self.fc1_dim_v, self.fc1_dim_h = d_prime * num_factors, d * len(h)
     self.fc = nn.Dense(in_units=d_prime * num_factors + d * L,
                        activation='relu', units=num_factors)
      # Why another embedding here? Q_prime is a separate target-item embedding for
      # the prediction step: its width (num_factors * 2) matches the concatenation
      # of the convolutional output (num_factors) with the user embedding P.
     self.Q_prime = nn.Embedding(num_items, num_factors * 2)
     self.b = nn.Embedding(num_items, 1)
     self.dropout = nn.Dropout(drop_ratio)
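
Why MaxPool1D(L - i + 1): a height-i horizontal convolution over the (L, num_factors) interaction "image" yields L - i + 1 time positions, so a pooling window of exactly that width is a global max over time, one value per filter. A minimal check with assumed L = 5, num_factors = 16, d = 4, i = 2:

import mxnet as mx
from mxnet.gluon import nn

L, num_factors, d, i = 5, 16, 4, 2
conv = nn.Conv2D(d, (i, num_factors), in_channels=1)
pool = nn.MaxPool1D(L - i + 1)
conv.initialize()
x = mx.nd.random.uniform(shape=(1, 1, L, num_factors))
y = conv(x).squeeze(axis=3)  # (1, d, L - i + 1): the width axis collapses
print(pool(y).shape)         # (1, d, 1): global max over the time axis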
Example 16
 def __init__(self, index=0, **kwargs):
     super(Simple, self).__init__(**kwargs)
     # use name_scope to give child Blocks appropriate names.
     self.index = index
     with self.name_scope():
         self.output = nn.HybridSequential()
         if index == 0:
             self.output.add(nn.Dense(16))
         elif index == 1:
             self.output.add(nn.Dense(500), nn.Dense(256), nn.Dropout(0.8),
                             nn.Dense(16))
         elif index == 2:
             self.output.add(
                 nn.Conv1D(8, kernel_size=5, activation='relu'),
                 nn.Conv1D(16, kernel_size=5, activation='relu'),
                 nn.BatchNorm(momentum=0.8), nn.MaxPool1D(pool_size=2),
                 nn.Conv1D(16, kernel_size=1, activation='relu'),
                 nn.Conv1D(16, kernel_size=5, activation='relu'),
                 nn.Flatten(), nn.Dense(256, activation='relu'),
                 nn.Dropout(0.25), nn.Dense(16, activation='relu'))
         else:
             pass
Example 17
 def __init__(self,
              block,
              layers,
              cardinality=1,
              bottleneck_width=64,
              classes=1000,
              dilated=False,
              dilation=1,
              norm_layer=BatchNorm,
              norm_kwargs=None,
              last_gamma=False,
              deep_stem=False,
              stem_width=32,
              avg_down=False,
              final_drop=0.0,
              use_global_stats=False,
              name_prefix='',
              dropblock_prob=0,
              input_size=224,
              use_splat=False,
              radix=2,
              avd=False,
              avd_first=False,
              split_drop_ratio=0,
              in_channels=3):
     self.cardinality = cardinality
     self.bottleneck_width = bottleneck_width
     self.inplanes = stem_width * 2 if deep_stem else 64
     self.radix = radix
     self.split_drop_ratio = split_drop_ratio
     self.avd_first = avd_first
     super(ResNet, self).__init__(prefix=name_prefix)
     norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
     if use_global_stats:
         norm_kwargs['use_global_stats'] = True
     self.norm_kwargs = norm_kwargs
     with self.name_scope():
         if not deep_stem:
             self.conv1 = nn.Conv1D(channels=64,
                                    kernel_size=7,
                                    strides=2,
                                    padding=3,
                                    use_bias=False,
                                    in_channels=in_channels)
         else:
             self.conv1 = nn.HybridSequential(prefix='conv1')
             self.conv1.add(
                 nn.Conv1D(channels=stem_width,
                           kernel_size=3,
                           strides=2,
                           padding=1,
                           use_bias=False,
                           in_channels=in_channels))
             self.conv1.add(
                 norm_layer(in_channels=stem_width, **norm_kwargs))
             self.conv1.add(nn.Activation('relu'))
             self.conv1.add(
                 nn.Conv1D(channels=stem_width,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           use_bias=False,
                           in_channels=stem_width))
             self.conv1.add(
                 norm_layer(in_channels=stem_width, **norm_kwargs))
             self.conv1.add(nn.Activation('relu'))
             self.conv1.add(
                 nn.Conv1D(channels=stem_width * 2,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           use_bias=False,
                           in_channels=stem_width))
         input_size = _update_input_size(input_size, 2)
         self.bn1 = norm_layer(
             in_channels=64 if not deep_stem else stem_width * 2,
             **norm_kwargs)
         self.relu = nn.Activation('relu')
         self.maxpool = nn.MaxPool1D(pool_size=3, strides=2, padding=1)
         input_size = _update_input_size(input_size, 2)
         self.layer1 = self._make_layer(1,
                                        block,
                                        64,
                                        layers[0],
                                        avg_down=avg_down,
                                        norm_layer=norm_layer,
                                        last_gamma=last_gamma,
                                        use_splat=use_splat,
                                        avd=avd)
         self.layer2 = self._make_layer(2,
                                        block,
                                        128,
                                        layers[1],
                                        strides=2,
                                        avg_down=avg_down,
                                        norm_layer=norm_layer,
                                        last_gamma=last_gamma,
                                        use_splat=use_splat,
                                        avd=avd)
         input_size = _update_input_size(input_size, 2)
         if dilated or dilation == 4:
             self.layer3 = self._make_layer(3,
                                            block,
                                            256,
                                            layers[2],
                                            strides=1,
                                            dilation=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
             self.layer4 = self._make_layer(4,
                                            block,
                                            512,
                                            layers[3],
                                            strides=1,
                                            dilation=4,
                                            pre_dilation=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
         elif dilation == 3:
              # special case: layer3 dilated at stride 1, layer4 keeps stride 2 with dilation 2
             self.layer3 = self._make_layer(3,
                                            block,
                                            256,
                                            layers[2],
                                            strides=1,
                                            dilation=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
             self.layer4 = self._make_layer(4,
                                            block,
                                            512,
                                            layers[3],
                                            strides=2,
                                            dilation=2,
                                            pre_dilation=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
         elif dilation == 2:
             self.layer3 = self._make_layer(3,
                                            block,
                                            256,
                                            layers[2],
                                            strides=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
             self.layer4 = self._make_layer(4,
                                            block,
                                            512,
                                            layers[3],
                                            strides=1,
                                            dilation=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
         else:
             self.layer3 = self._make_layer(3,
                                            block,
                                            256,
                                            layers[2],
                                            strides=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
             input_size = _update_input_size(input_size, 2)
             self.layer4 = self._make_layer(4,
                                            block,
                                            512,
                                            layers[3],
                                            strides=2,
                                            avg_down=avg_down,
                                            norm_layer=norm_layer,
                                            last_gamma=last_gamma,
                                            dropblock_prob=dropblock_prob,
                                            input_size=input_size,
                                            use_splat=use_splat,
                                            avd=avd)
             input_size = _update_input_size(input_size, 2)
         self.avgpool = nn.GlobalAvgPool1D()
         self.flat = nn.Flatten()
         self.drop = None
         if final_drop > 0.0:
             self.drop = nn.Dropout(final_drop)
         self.fc = nn.Dense(in_units=512 * block.expansion, units=classes)
Example 18
                                   last_batch='rollover')
val_data = gluon.data.DataLoader(dataset=IndianDataset(train=False),
                                 batch_size=batch_size,
                                 shuffle=False)

# model
net = nn.Sequential()
net.add(nn.Conv1D(8, kernel_size=5, activation='relu'),
        nn.Conv1D(16, kernel_size=5, activation='relu'),
        nn.BatchNorm(momentum=0.8), nn.MaxPool1D(pool_size=2),
        nn.Conv1D(16, kernel_size=1, activation='relu'),
        nn.Conv1D(16, kernel_size=5, activation='relu'), nn.Flatten(),
        nn.Dense(256, activation='relu'), nn.Dropout(0.25),
        nn.Dense(out_put_num, activation='relu'))
# parameters are loaded from file below rather than randomly initialized
net.load_parameters(para_filepath)
net.collect_params().reset_ctx(ctx)

# solve
loss = gloss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
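
A hedged evaluation sketch to round the fragment off: the names val_data, net, metric and ctx come from the script above, while the loop itself is an assumption about how they are meant to be wired together.

for X, y in val_data:
    X, y = X.as_in_context(ctx), y.as_in_context(ctx)
    metric.update(labels=y, preds=net(X))
print(metric.get())  # ('accuracy', <float>)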

Example 19
def get_netG():
    netG = nn.Sequential()
    netG.add(nn.Embedding(256, 8),
             GLU(channels=128, kernel_size=512, stride=512),
             nn.MaxPool1D(128, 128), nn.Dense(1024))
    return netG
Example 20

    def hybrid_forward(self, F, x, *args, **kwargs):
        return F.swapaxes(x, self.dim1, self.dim2)


with mx.Context(mx.cpu(0)):
    model = nn.HybridSequential()
    model.add(
        SwapAxes(1, 2),
        CBR(40, 1),
        CBR(40),
        CBR(40),
        nn.MaxPool1D(2),
        CBR(80, 1),
        CBR(80),
        CBR(80),
        nn.MaxPool1D(2),
        CBR(160, 1),
        nn.Dropout(0.3),
        CBR(160),
        CBR(160),
        CBR(160),
        nn.MaxPool1D(2),
        CBR(240, 1),
        nn.Dropout(0.3),
        # CBR(200),
        # CBR(200),
        # CBR(200),