Example #1
def preNeuralNet(fs, T, ctx, template_block, margin, learning_rate=0.003):
    net = gluon.nn.Sequential()
    with net.name_scope():  # Used to disambiguate saving and loading net parameters
        net.add(
            MatchedFilteringLayer(
                mod=fs * T,
                fs=fs,
                template_H1=template_block[:, :1],  #.as_in_context(ctx),
                template_L1=template_block[:, -1:]  #.as_in_context(ctx) 
            ))
        net.add(CutHybridLayer(margin=margin))
        net.add(Conv2D(channels=16, kernel_size=(1, 3), activation='relu'))
        net.add(MaxPool2D(pool_size=(1, 4), strides=2))
        net.add(Conv2D(channels=32, kernel_size=(1, 3), activation='relu'))
        net.add(MaxPool2D(pool_size=(1, 4), strides=2))
        net.add(Flatten())
        net.add(Dense(32))
        net.add(Activation('relu'))
        net.add(Dense(2))

    net.initialize(mx.init.Xavier(magnitude=2.24),
                   ctx=ctx[-1],
                   force_reinit=True)  # Initialize on a single context so net.summary can run
    net.summary(nd.random.randn(1, 2, 2, 1, fs * T, ctx=ctx[-1]))
    net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx,
                   force_reinit=True)  # Re-initialize parameters of all layers on every context
    # Cross-entropy loss function
    # loss = gloss.SoftmaxCrossEntropyLoss()
    # The cross-entropy loss for binary classification.
    bloss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate})
    return net, bloss, trainer
Example #2
File: rl.py  Project: tsuberim/RL
    def __init__(self, n_dims=128, **kwargs):
        PersistentBlock.__init__(self, **kwargs)
        if n_dims < 16:
            raise ValueError('`n_dims` must be at least 16 (given: %d)' %
                             n_dims)

        self.encoder = Sequential()
        self.encoder.add(
            BatchNorm(), Conv2D(int(n_dims / 16), 6, (4, 3)), Activation('relu'),
            Conv2D(int(n_dims / 8), 3), Activation('relu'),
            Conv2D(int(n_dims / 2), 3), BatchNorm(), MaxPool2D(), Activation('relu'),
            Conv2D(int(n_dims), 3), MaxPool2D(), Activation('relu'),
            Conv2D(int(n_dims), 3), MaxPool2D(), Activation('relu'),
            Flatten())
Example #3
    def __init__(self, layers=None, channels=None, version=None):
        super(Bifpn, self).__init__()

        self._layers = layers
        self._effbase = get_efficientbase(version=version)  # Efficient Net

        with self.name_scope():

            self._p7lateral = lateral_conv(channels, 1, 1, 0)
            self._p6lateral = lateral_conv(channels, 1, 1, 0)
            self._p5lateral = lateral_conv(channels, 1, 1, 0)
            self._p4lateral = lateral_conv(channels, 1, 1, 0)
            self._p3lateral = lateral_conv(channels, 1, 1, 0)

            self._p7 = HybridSequential()
            self._p6 = HybridSequential()
            self._p5 = HybridSequential()
            self._p4 = HybridSequential()
            self._p3 = HybridSequential()
            self._downsample = HybridSequential()

            for _ in range(layers):
                self._p7.add(HybridSequential())
                self._p6.add(HybridSequential())
                self._p5.add(HybridSequential())
                self._p4.add(HybridSequential())
                self._p3.add(HybridSequential())
                self._downsample.add(HybridSequential())

            for i in range(layers):
                self._p7[i].add(intermediateFF(channels))
                self._p6[i].add(intermediateFF(channels))
                self._p6[i].add(OutputFF(channels))
                self._p5[i].add(intermediateFF(channels))
                self._p5[i].add(OutputFF(channels))
                self._p4[i].add(intermediateFF(channels))
                self._p4[i].add(OutputFF(channels))
                self._p3[i].add(intermediateFF(channels))

                # down -> top
                self._downsample[i].add(
                    MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
                self._downsample[i].add(
                    MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
                self._downsample[i].add(
                    MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
                self._downsample[i].add(
                    MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
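Why each BiFPN layer gets exactly four MaxPool2D(2, 2) blocks: the bottom-up pass needs one 2x downsample per step P3->P4->P5->P6->P7. A quick illustrative check (the 64-pixel P3 size below is an assumption, not from the original code):

size = 64                      # e.g. the P3 feature map of a 512-pixel input
for level in ("P4", "P5", "P6", "P7"):
    size //= 2                 # MaxPool2D(pool_size=(2, 2), strides=(2, 2))
    print(level, size)         # P4 32, P5 16, P6 8, P7 4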
Example #4
def vgg_block(num_convs, num_channels):
    blk = Sequential()
    for _ in range(num_convs):
        blk.add(Conv2D(num_channels, kernel_size=3,
                       padding=1, activation='relu'))
    blk.add(MaxPool2D(pool_size=2, strides=2))
    return blk
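vgg_block above is the standard building block from the d2l VGG recipe. A minimal sketch of stacking it into a complete network (the VGG-11 layer counts and the classifier head are assumptions based on that recipe, not part of the original example):

from mxnet.gluon.nn import Sequential, Dense, Dropout

def vgg(conv_arch):
    net = Sequential()
    # Convolutional part: one vgg_block per (num_convs, num_channels) pair
    for num_convs, num_channels in conv_arch:
        net.add(vgg_block(num_convs, num_channels))
    # Classifier head; Dense flattens its input automatically
    net.add(Dense(4096, activation='relu'), Dropout(0.5),
            Dense(4096, activation='relu'), Dropout(0.5),
            Dense(10))
    return net

conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))  # VGG-11
net = vgg(conv_arch)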
Example #5
def get_pool(pool_type, pool_size, strides, **kwargs):
    """ Wrapper for Pooling operations"""
    if pool_type == "maxpool":
        return MaxPool2D(pool_size=pool_size, strides=strides, **kwargs)
    if pool_type == "avgpool":
        return AvgPool2D(pool_size=pool_size, strides=strides, **kwargs)
    raise NotImplementedError
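A short usage sketch of the wrapper above (the input shape is illustrative):

import mxnet as mx

pool = get_pool("maxpool", pool_size=(2, 2), strides=(2, 2))
x = mx.nd.random.uniform(shape=(1, 3, 8, 8))
print(pool(x).shape)  # (1, 3, 4, 4)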
Example #6
    def __init__(self, block, layers, channels, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():

            # To use pretrained weights, there is no choice but to use self.features = nn.HybridSequential(prefix='whatever').
            self.features = HybridSequential(prefix='')
            self.features.add(BatchNorm(
                scale=False, center=False))  # One open question: why a BatchNorm at the very front???
            self.features.add(Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))
            self.features.add(MaxPool2D(3, 2, 1))  # 4th

            in_channels = channels[0]
            # 5(c2),6(c3),7(c4),8
            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(
                    self._make_layer(block,
                                     num_layer,
                                     channels[i + 1],
                                     stride,
                                     i + 1,
                                     in_channels=in_channels))
                in_channels = channels[i + 1]
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))  # 10(c5)
Example #7
def get_pool(pool_type, pool_size, strides, **kwargs):
    if pool_type == "maxpool":
        return MaxPool2D(pool_size=pool_size, strides=strides, **kwargs)
    elif pool_type == "avgpool":
        return AvgPool2D(pool_size=pool_size, strides=strides, **kwargs)
    else:
        raise NotImplementedError
Example #8
    def __init__(self, batch_size=1, topk=100, scale=4.0):
        super(Prediction, self).__init__()
        self._batch_size = batch_size
        self._topk = topk
        self._scale = scale
        self._heatmap_nms = MaxPool2D(pool_size=(3, 3),
                                      strides=(1, 1),
                                      padding=(1, 1))
Example #9
    def __init__(self, batch_size=1, topk=100, scale=4.0, nms=False, except_class_thresh=0.01, nms_thresh=0.5):
        super(Prediction, self).__init__()
        self._batch_size = batch_size
        self._topk = topk
        self._scale = scale
        self._heatmap_nms = MaxPool2D(pool_size=(3, 3), strides=(1, 1), padding=(1, 1))
        self._nms = nms
        self._nms_thresh = nms_thresh
        self._except_class_thresh = except_class_thresh
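The 3x3 MaxPool2D with stride 1 and padding 1 in both Prediction blocks is the usual CenterNet-style heatmap peak suppression. A minimal sketch of that use, with an illustrative random heatmap (the shapes are assumptions):

import mxnet as mx
from mxnet.gluon.nn import MaxPool2D

heatmap_nms = MaxPool2D(pool_size=(3, 3), strides=(1, 1), padding=(1, 1))

heatmap = mx.nd.random.uniform(shape=(1, 80, 128, 128))  # (batch, classes, H, W)
pooled = heatmap_nms(heatmap)                            # same shape as heatmap
keep = (heatmap == pooled)                               # 1 only at 3x3 local maxima
peaks = heatmap * keep                                   # non-peak responses zeroed out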
Example #10
    def __init__(self, opts, num_unit, num_filters, trans_block=True):
        super(EncoderBlock, self).__init__()
        self.eblock = HybridSequential()
        if trans_block:
            self.eblock.add(TransitionBlock(opts, num_filters=num_filters))
        else:
            self.eblock.add(
                MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding=(1, 1)))
        opts.trans_block = trans_block
        self.eblock.add(DenseBlock(opts, num_unit))
Example #11
    def __init__(self, **kwargs):
        super(Model, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = Conv2D(32, (3, 3))
            self.conv2 = Conv2D(64, (3, 3))
            self.pool = MaxPool2D(pool_size=(2, 2))
            self.dropout1 = Dropout(0.25)
            self.flatten = Flatten()
            self.dense1 = Dense(128)
            self.dropout2 = Dropout(0.5)
            self.dense2 = Dense(NUM_CLASSES)
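The snippet only declares the layers; the forward pass is not shown. A self-contained sketch of one plausible wiring (the ordering mirrors the classic Keras MNIST CNN; the ReLU activations and NUM_CLASSES = 10 are assumptions, not part of the original):

import mxnet as mx
from mxnet.gluon import nn

NUM_CLASSES = 10  # assumption for this sketch

class SketchModel(nn.HybridBlock):
    """Plausible wiring of the layers declared above; not the original forward pass."""

    def __init__(self, **kwargs):
        super(SketchModel, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = nn.Conv2D(32, (3, 3), activation='relu')
            self.conv2 = nn.Conv2D(64, (3, 3), activation='relu')
            self.pool = nn.MaxPool2D(pool_size=(2, 2))
            self.dropout1 = nn.Dropout(0.25)
            self.flatten = nn.Flatten()
            self.dense1 = nn.Dense(128, activation='relu')
            self.dropout2 = nn.Dropout(0.5)
            self.dense2 = nn.Dense(NUM_CLASSES)

    def hybrid_forward(self, F, x):
        x = self.pool(self.conv2(self.conv1(x)))
        x = self.flatten(self.dropout1(x))
        x = self.dropout2(self.dense1(x))
        return self.dense2(x)

net = SketchModel()
net.initialize()
print(net(mx.nd.random.uniform(shape=(1, 1, 28, 28))).shape)  # (1, 10)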
Example #12
    def _make_features(self, layers, filters, batch_norm):
        featurizer = mx.gluon.nn.HybridSequential(prefix='')
        for i, num in enumerate(layers):
            for _ in range(num):
                featurizer.add(
                    Conv2D(filters[i],
                           kernel_size=3,
                           padding=1,
                           weight_initializer=Xavier(rnd_type='gaussian',
                                                     factor_type='out',
                                                     magnitude=2),
                           bias_initializer='zeros'))
                if batch_norm:
                    featurizer.add(BatchNorm())
                featurizer.add(Activation('relu'))
            featurizer.add(MaxPool2D(strides=2))
        return featurizer
Example #13
    def __init__(self, **kwargs):
        super(ResDiscriminator, self).__init__(**kwargs)

        # out = (in - ks) * strides - 2 * padding + ks + out_padding
        with self.name_scope():
            self.add(
                Conv2D(64, kernel_size=7, strides=2, padding=3,
                       use_bias=False),
                BatchNorm(),
                LeakyReLU(0.2),
                MaxPool2D(pool_size=(2, 2)),
                # out (bs, 64, 64, 64)
                ResidualBlock(64, in_channels=64, downsample=False),
                ResidualBlock(64, in_channels=64, downsample=False),
                # out (bs, 64, 32, 32)
                ResidualBlock(128,
                              in_channels=64,
                              strides=(2, 1),
                              downsample=True),
                ResidualBlock(128, in_channels=128, downsample=False),
                # out (bs, 128, 16, 16)
                ResidualBlock(256,
                              in_channels=128,
                              strides=(2, 1),
                              downsample=True),
                ResidualBlock(256, in_channels=256, downsample=False),
                # out (bs, 256, 8, 8)
                ResidualBlock(512,
                              in_channels=256,
                              strides=(2, 1),
                              downsample=True),
                ResidualBlock(512, in_channels=512, downsample=False),
                # out (bs, 512, 4, 4)
                BatchNorm(),
                LeakyReLU(0.2),
                GlobalAvgPool2D(),
                Dense(128),
                LeakyReLU(0.2),
                Dense(1))
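The shape comments above ("out (bs, 64, 64, 64)" and so on) are consistent with a 256x256 input. A quick check of the stem using the standard convolution output-size formula out = floor((in + 2*pad - k) / stride) + 1 (the 256-pixel input size is an assumption):

def conv_out(size, kernel, stride, padding):
    # standard convolution / pooling output-size formula
    return (size + 2 * padding - kernel) // stride + 1

s = 256
s = conv_out(s, kernel=7, stride=2, padding=3)  # Conv2D(64, ks=7, s=2, p=3) -> 128
s = conv_out(s, kernel=2, stride=2, padding=0)  # MaxPool2D(pool_size=(2, 2)) -> 64
print(s)  # 64, matching the "# out (bs, 64, 64, 64)" comment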
Example #14
File: train.py  Project: PurbasaKar/MLFLOW
batch_size = 256
train_data = gluon.data.DataLoader(mnist_train,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   num_workers=4)

mnist_valid = gluon.data.vision.FashionMNIST(train=False)
valid_data = gluon.data.DataLoader(mnist_valid.transform_first(transformer),
                                   batch_size=batch_size,
                                   num_workers=4)

# Only hybrid-based networks can be exported
net = HybridSequential()
net.add(
    Conv2D(channels=6, kernel_size=5, activation="relu"),
    MaxPool2D(pool_size=2, strides=2),
    Conv2D(channels=16, kernel_size=3, activation="relu"),
    MaxPool2D(pool_size=2, strides=2),
    Flatten(),
    Dense(120, activation="relu"),
    Dense(84, activation="relu"),
    Dense(10),
)
net.initialize(init=init.Xavier())
# Only after hybridization can a model be exported with its architecture included
net.hybridize()

trainer = Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})

est = estimator.Estimator(net=net,
                          loss=SoftmaxCrossEntropyLoss(),
Example #15
import d2lzh as d2l
from mxnet import gluon, init, nd
from mxnet.gluon.nn import Sequential, Conv2D, Dense, MaxPool2D, Dropout

net = Sequential()
net.add(Conv2D(channels=6, kernel_size=5, activation='sigmoid'),
        MaxPool2D(pool_size=2, strides=2),
        Conv2D(channels=16, kernel_size=5, activation='sigmoid'),
        MaxPool2D(pool_size=2, strides=2), Dense(120, activation='sigmoid'),
        Dropout(0.05), Dense(84, activation='sigmoid'), Dropout(0.05),
        Dense(10))

batch_size = 256
train_iter, test_iter = d2l.load_data_mnist(batch_size=batch_size)

lr, num_epochs = 0.9, 20
ctx = d2l.try_gpu()

net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())

X = nd.random.uniform(shape=(1, 1, 28, 28))

trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
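The dummy batch X above is otherwise unused; a layer-by-layer shape printout in the usual d2l style is one plausible use for it (this loop is an addition, and moving X to ctx assumes the network was initialized on that context):

out = X.as_in_context(ctx)
for layer in net:
    out = layer(out)
    print(layer.name, 'output shape:', out.shape)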
Example #16
    def __init__(self, block, layers, channels, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            # To use pretrained weights, there is no choice but to use self.features = nn.HybridSequential(prefix='whatever').
            self.features = HybridSequential(prefix='')
            self.features.add(BatchNorm(
                scale=False, center=False))  # One open question: why a BatchNorm at the very front???
            self.features.add(Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))
            self.features.add(MaxPool2D(3, 2, 1))  # 4th

            in_channels = channels[0]
            # 5(c2),6(c3),7(c4),8
            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(
                    self._make_layer(block,
                                     num_layer,
                                     channels[i + 1],
                                     stride,
                                     i + 1,
                                     in_channels=in_channels))
                in_channels = channels[i + 1]
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))  # 10(c5)

            # From the FPN paper: outputs are taken from each stage's last residual block
            # Top-Down
            self.lateral3 = Conv2D(channels=256,
                                   kernel_size=1,
                                   strides=1,
                                   padding=0,
                                   use_bias=False,
                                   prefix='lateral3')
            self.lateral4 = Conv2D(channels=256,
                                   kernel_size=1,
                                   strides=1,
                                   padding=0,
                                   use_bias=False,
                                   prefix='lateral4')
            self.lateral5 = Conv2D(channels=256,
                                   kernel_size=1,
                                   strides=1,
                                   padding=0,
                                   use_bias=False,
                                   prefix='lateral5')
            # extra convolution
            self.conv6 = Conv2D(channels=256,
                                kernel_size=3,
                                strides=2,
                                padding=1,
                                use_bias=False,
                                activation="relu",
                                prefix='extraconv1')
            self.conv7 = Conv2D(channels=256,
                                kernel_size=3,
                                strides=2,
                                padding=1,
                                activation="relu",
                                use_bias=False,
                                prefix='extraconv2')
Example #17
batch_size = 256
train_data = gluon.data.DataLoader(mnist_train,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   num_workers=4)

mnist_valid = gluon.data.vision.FashionMNIST(train=False)
valid_data = gluon.data.DataLoader(mnist_valid.transform_first(transformer),
                                   batch_size=batch_size,
                                   num_workers=4)

# Only hybrid-based networks can be exported
net = HybridSequential()
net.add(Conv2D(channels=6, kernel_size=5, activation="relu"),
        MaxPool2D(pool_size=2, strides=2),
        Conv2D(channels=16, kernel_size=3, activation="relu"),
        MaxPool2D(pool_size=2, strides=2), Flatten(),
        Dense(120, activation="relu"), Dense(84, activation="relu"), Dense(10))
net.initialize(init=init.Xavier())
# Only after hybridization can a model be exported with its architecture included
net.hybridize()

trainer = Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})

est = estimator.Estimator(net=net,
                          loss=SoftmaxCrossEntropyLoss(),
                          metrics=Accuracy(),
                          trainer=trainer)
est.fit(train_data=train_data, epochs=2, val_data=valid_data)
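With the network hybridized and trained, exporting is a one-liner; a minimal sketch (the "lenet" file name and the dummy input shape are assumptions, not part of the original example):

import mxnet as mx

x = mx.nd.random.uniform(shape=(1, 1, 28, 28))  # Fashion-MNIST-sized dummy batch
net(x)                                          # one forward pass builds the cached graph
net.export("lenet", epoch=0)                    # writes lenet-symbol.json and lenet-0000.params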
Example #18
# Step 1: Load the MNIST dataset

(x_train, y_train), (x_test, y_test), min_pixel_value, max_pixel_value = load_mnist()

# Step 1a: Swap axes to MXNet's NCHW format

x_train = np.swapaxes(x_train, 1, 3)
x_test = np.swapaxes(x_test, 1, 3)

# Step 2: Create the model

model = mxnet.gluon.nn.Sequential()
with model.name_scope():
    model.add(Conv2D(channels=4, kernel_size=5, activation="relu"))
    model.add(MaxPool2D(pool_size=2, strides=1))
    model.add(Conv2D(channels=10, kernel_size=5, activation="relu"))
    model.add(MaxPool2D(pool_size=2, strides=1))
    model.add(Flatten())
    model.add(Dense(100, activation="relu"))
    model.add(Dense(10))
    model.initialize()

loss = mxnet.gluon.loss.SoftmaxCrossEntropyLoss()
trainer = mxnet.gluon.Trainer(model.collect_params(), "adam", {"learning_rate": 0.01})

# Step 3: Create the ART classifier

classifier = MXClassifier(
    model=model,
    clip_values=(min_pixel_value, max_pixel_value),
Example #19
net = HybridSequential()
with net.name_scope():
    net.add(
        # layer 1
        Conv2D(channels=32,
               kernel_size=(5, 5),
               padding=(5 // 2, 5 // 2),
               activation='relu'),
        BatchNorm(axis=1, momentum=0.9),
        # layer 2
        Conv2D(channels=32,
               kernel_size=(5, 5),
               padding=(5 // 2, 5 // 2),
               activation='relu'),
        BatchNorm(axis=1, momentum=0.9),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        # layer 3
        Conv2D(channels=64,
               kernel_size=(3, 3),
               padding=(3 // 2, 3 // 2),
               activation='relu'),
        BatchNorm(axis=1, momentum=0.9),
        # layer 4
        Conv2D(channels=64,
               kernel_size=(3, 3),
               padding=(3 // 2, 3 // 2),
               activation='relu'),
        BatchNorm(axis=1, momentum=0.9),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        # layer 5
        Flatten(),
Example #20
def train(hyperparameters, channel_input_dirs, num_gpus, hosts):
    batch_size = hyperparameters.get("batch_size", 64)
    epochs = hyperparameters.get("epochs", 3)

    mx.random.seed(42)

    training_dir = channel_input_dirs['training']

    with open("{}/train/data.p".format(training_dir), "rb") as pickle:
        train_nd = load(pickle)
    with open("{}/validation/data.p".format(training_dir), "rb") as pickle:
        validation_nd = load(pickle)

    train_data = gluon.data.DataLoader(train_nd, batch_size, shuffle=True)
    validation_data = gluon.data.DataLoader(validation_nd,
                                            batch_size,
                                            shuffle=True)

    net = Sequential()
    # http://gluon.mxnet.io/chapter03_deep-neural-networks/plumbing.html#What's-the-deal-with-name_scope()?
    with net.name_scope():
        net.add(
            Conv2D(channels=32,
                   kernel_size=(3, 3),
                   padding=0,
                   activation="relu"))
        net.add(
            Conv2D(channels=32,
                   kernel_size=(3, 3),
                   padding=0,
                   activation="relu"))
        net.add(MaxPool2D(pool_size=(2, 2)))
        net.add(Dropout(.25))
        net.add(Flatten())
        net.add(Dense(8))

    ctx = mx.gpu() if num_gpus > 0 else mx.cpu()

    # Also known as Glorot
    net.collect_params().initialize(Xavier(magnitude=2.24), ctx=ctx)

    loss = SoftmaxCrossEntropyLoss()

    # kvstore type for multi-gpu and distributed training.
    if len(hosts) == 1:
        kvstore = "device" if num_gpus > 0 else "local"
    else:
        kvstore = "dist_device_sync" if num_gpus > 0 else "dist_sync"

    trainer = Trainer(net.collect_params(), optimizer="adam", kvstore=kvstore)

    smoothing_constant = .01

    for e in range(epochs):
        moving_loss = 0
        for i, (data, label) in enumerate(train_data):
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            with autograd.record():
                output = net(data)
                loss_result = loss(output, label)
            loss_result.backward()
            trainer.step(batch_size)

            curr_loss = nd.mean(loss_result).asscalar()
            moving_loss = (curr_loss if ((i == 0) and (e == 0)) else
                           (1 - smoothing_constant) * moving_loss +
                           smoothing_constant * curr_loss)

        validation_accuracy = measure_performance(net, ctx, validation_data)
        train_accuracy = measure_performance(net, ctx, train_data)
        print("Epoch %s. Loss: %s, Train_acc %s, Test_acc %s" %
              (e, moving_loss, train_accuracy, validation_accuracy))

    return net
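measure_performance is called above but not shown in this snippet. A plausible sketch, assuming it returns plain classification accuracy over a DataLoader (this helper is a reconstruction, not the project's original code):

from mxnet import metric

def measure_performance(net, ctx, data_iter):
    acc = metric.Accuracy()
    for data, label in data_iter:
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        acc.update(labels=[label], preds=[net(data)])
    return acc.get()[1]  # get() returns (name, value)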