Example #1
    def test_stblock(self):

        from model.base_layers import St_conv_block, Output_layer

        num_of_vertices = 228
        cheb_polys = nd.random_uniform(shape=(num_of_vertices,
                                              num_of_vertices * 3))
        net = gluon.nn.Sequential()
        net.add(St_conv_block(3, 3, [1, 32, 64], 1.0, cheb_polys),
                St_conv_block(3, 3, [64, 32, 128], 1.0, cheb_polys),
                Output_layer(128, 4))
        net.initialize()

        x = nd.random_uniform(shape=(8, 1, 12, num_of_vertices))
        o = net(x)
        y = nd.random_uniform(shape=o.shape)

        trainer = gluon.Trainer(net.collect_params(), 'adam')
        trainer.set_learning_rate(1e-3)
        loss = gluon.loss.L2Loss()
        with autograd.record():
            l = loss(net(x), y)
        l.backward()
        trainer.step(8)
        self.assertEqual((8, 1, 1, num_of_vertices), o.shape)
        self.assertIsInstance(l.mean().asscalar().item(), float)
Example #2
    def test_stgcn(self):

        from model import base_model

        ctx = mx.gpu(1)
        num_of_vertices = 228
        batch_size = 8
        cheb_polys = nd.random_uniform(shape=(num_of_vertices,
                                              num_of_vertices * 3),
                                       ctx=ctx)
        blocks = [[1, 32, 64], [64, 32, 128]]
        x = nd.random_uniform(shape=(batch_size, 1, 12, num_of_vertices),
                              ctx=ctx)
        y = nd.random_uniform(shape=(batch_size, 1, 1, num_of_vertices),
                              ctx=ctx)

        net = base_model.STGCN(12, 3, 3, blocks, 1.0, cheb_polys)
        net.initialize(ctx=ctx)
        self.assertEqual((batch_size, 1, 1, num_of_vertices), net(x).shape)

        trainer = gluon.Trainer(net.collect_params(), 'adam')
        trainer.set_learning_rate(1e-3)
        loss = gluon.loss.L2Loss()

        for i in range(5):
            with autograd.record():
                l = loss(net(x), y)
            l.backward()
            trainer.step(batch_size)
            self.assertIsInstance(l.mean().asscalar().item(), float)
            print(l.mean().asscalar())
Example #3
    def test_evaluate(self):
        from model import hybrid_model
        from model import trainer
        from data_loader.data_utils import data_gen
        import numpy as np
        from mxboard import SummaryWriter
        import os
        import shutil

        ctx = mx.gpu(1)
        num_of_vertices = 897
        batch_size = 50

        PeMS_dataset = data_gen('datasets/PeMSD7_V_897.csv', 24)
        print('>> Loading dataset with Mean: {0:.2f}, STD: {1:.2f}'.format(
            PeMS_dataset.mean, PeMS_dataset.std))

        test = PeMS_dataset['test'].transpose((0, 3, 1, 2))
        test_x, test_y = test[:100, :, :12, :], test[:100, :, 12:, :]
        test_loader = gluon.data.DataLoader(gluon.data.ArrayDataset(
            nd.array(test_x), nd.array(test_y)),
                                            batch_size=batch_size,
                                            shuffle=False)
        print(test_x.shape, test_y.shape)

        cheb_polys = nd.random_uniform(shape=(num_of_vertices,
                                              num_of_vertices * 3))
        blocks = [[1, 32, 64], [64, 32, 128]]
        x = nd.random_uniform(shape=(batch_size, 1, 12, num_of_vertices),
                              ctx=ctx)

        net = hybrid_model.STGCN(12, 3, 3, blocks, 1.0, num_of_vertices,
                                 cheb_polys)
        net.initialize(ctx=ctx)
        net.hybridize()
        net(x)

        ground_truth = (
            np.concatenate([y.asnumpy() for x, y in test_loader], axis=0) *
            PeMS_dataset.std + PeMS_dataset.mean)[:100]

        if os.path.exists('test_logs'):
            shutil.rmtree('test_logs')
        sw = SummaryWriter('test_logs', flush_secs=5)

        trainer.evaluate(net, ctx, ground_truth, test_loader, 12,
                         PeMS_dataset.mean, PeMS_dataset.std, sw, 0)
        self.assertTrue(os.path.exists('test_logs'))
        sw.close()
        if os.path.exists('test_logs'):
            shutil.rmtree('test_logs')
Example #4
    def query(self, image_text_pairs):
        if self.pool_size == 0:
            return image_text_pairs
        ret_images = []
        ret_text_feats = []
        images, text_feats = image_text_pairs

        for i in range(images.shape[0]):
            image = nd.expand_dims(images[i], axis=0)
            text_feat = nd.expand_dims(text_feats[i], axis=0)
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                self.text_feats.append(text_feat)
                ret_images.append(image)
                ret_text_feats.append(text_feat)
            else:
                p = nd.random_uniform(0, 1, shape=(1, )).asscalar()  # uniform draw, so the 0.5 threshold below is a fair coin flip
                if p < 0.5:
                    random_index = nd.random_uniform(0, self.pool_size-1, shape=(1, )).astype(np.uint8).asscalar()
                    tmp_img = self.images[random_index].copy()
                    tmp_text_feat = self.text_feats[random_index].copy()
                    self.images[random_index] = image
                    self.text_feats[random_index] = text_feat
                    ret_images.append(tmp_img)
                    ret_text_feats.append(tmp_text_feat)
                else:
                    ret_images.append(image)
                    ret_text_feats.append(text_feat)
        ret_images = nd.concat(*ret_images, dim=0)
        ret_text_feats = nd.concat(*ret_text_feats, dim=0)
        return [ret_images, ret_text_feats]
Example #5
def test_predict2():
    from model.astgcn import ASTGCN
    from model.model_config import get_backbones
    import mxnet as mx
    import numpy as np
    ctx = mx.cpu()
    all_backbones = get_backbones('configurations/PEMS08.conf',
                                  'data/PEMS08/distance.csv', ctx)

    net = ASTGCN(12, all_backbones)
    net.initialize(ctx=ctx)
    test_w = nd.random_uniform(shape=(8, 170, 3, 12), ctx=ctx)
    test_d = nd.random_uniform(shape=(8, 170, 3, 12), ctx=ctx)
    test_r = nd.random_uniform(shape=(8, 170, 3, 36), ctx=ctx)
    output = net([test_w, test_d, test_r])
    assert output.shape == (8, 170, 12)
    assert type(output.mean().asscalar()) == np.float32
Example #6
def test_anchor(h=40, w=40):
    x = nd.random_uniform(shape=(1, 3, h, w))
    y = MultiBoxPrior(x, sizes=[0.5, 0.25, 0.1], ratios=[1, 2, 0.5])

    boxes = y.reshape((h, w, -1, 4))
    print('The first anchor box at row 21, column 21:', boxes[20, 20, 0, :])
    return boxes
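A quick count check to go with test_anchor (an added sketch, reusing the nd and MultiBoxPrior imports the snippet assumes): MultiBoxPrior emits len(sizes) + len(ratios) - 1 anchors per pixel, 3 + 3 - 1 = 5 here, so the flat output holds h * w * 5 boxes.

def test_anchor_count(h=40, w=40):
    x = nd.random_uniform(shape=(1, 3, h, w))
    y = MultiBoxPrior(x, sizes=[0.5, 0.25, 0.1], ratios=[1, 2, 0.5])
    # 3 sizes + 3 ratios - 1 = 5 anchors per pixel, 4 coordinates each
    assert y.shape == (1, h * w * 5, 4)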
Example #7
     def Initial(self):
         # Initialize your session or something: restore neural net parameters
         # self.buffer_size = 0
         self.ctx = try_gpu(GPU_INDEX)
         self.frame_cnt = 0
         self.train_count = 0
         self.loss_sum = 0

         self.q_count = 0
         self.q_sum = 0
         self.dtype = DTYPE
         INPUT_SAMPLE = nd.random_uniform(0, 1, shape=(1, FRAME_SKIP, 11), ctx=self.ctx, dtype=self.dtype)
         self.target_net = self.get_net(INPUT_SAMPLE)
         self.policy_net = self.get_net(INPUT_SAMPLE)

         if MODEL_FILE is not None:
             print('%s: read trained results from [%s]' % (tm.strftime("%Y-%m-%d %H:%M:%S"), MODEL_FILE))
             self.policy_net.load_params(MODEL_FILE, ctx=self.ctx)
         self.update_target_net()
         # RMSProp
         self.trainer = Trainer(self.policy_net.collect_params(),
                                optimizer=mx.optimizer.RMSProp(LEARNING_RATE, 0.95, 0.95))
         self.loss_func = loss.L2Loss()

         self.epsilon = EPSILON_START
         self.epsilon_min = EPSILON_MIN
         self.epsilon_rate = (EPSILON_START - EPSILON_MIN) / EPSILON_DECAY
         self.rng = np.random.RandomState(int(time() * 1000) % 100000000)
Example #8
def dropout(X, drop_prob):
    keep_prob = 1.0 - drop_prob
    mask = nd.random_uniform(0, 1.0, X.shape, ctx=X.context) < keep_prob

    if keep_prob > 0.0:
        scale = (1 / keep_prob)
    else:
        scale = 0.0
    return mask * X * scale
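A quick sanity check for this inverted-dropout implementation (an added sketch, not part of the original snippet): survivors are scaled by 1 / keep_prob, so the output mean stays close to the input mean.

from mxnet import nd

X = nd.ones((1000, 1000))
Y = dropout(X, 0.4)
# roughly 40% of entries are zeroed and the rest scaled by 1/0.6,
# so Y.mean() should stay close to X.mean() == 1.0
print(Y.mean())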
Example #9
def dropout(X, drop_probability):
    keep_probability = 1 - drop_probability
    assert 0 <= keep_probability <= 1
    if keep_probability == 0:
        return X.zeros_like()

    mask = nd.random_uniform(0, 1.0, X.shape, ctx=X.context) < keep_probability
    scale = 1 / keep_probability
    return mask * X * scale
Example #10
    def test_cpu(self):
        x = nd.random_uniform(0, 1, (10000, ))
        start = time.time()
        c = 0.1
        for i in range(10000):
            x += c

        t = time.time() - start
        print(x)
        print("time: " + str(t))
def dropout(X, drop_probability):
    keep_probability = 1 - drop_probability
    mask = nd.random_uniform(0, 1.0, X.shape, ctx=X.context) < keep_probability
    #############################
    #  Avoid division by 0 when scaling
    #############################
    if keep_probability > 0.0:
        scale = (1 / keep_probability)
    else:
        scale = 0.0
    return mask * X * scale
Example #12
def main():
    parse = argparse.ArgumentParser()
    parse.add_argument('--epoches', type=int, default=10)
    args = parse.parse_args()
    net = model.ASENet(10)
    net.initialize(mx.init.Xavier())
    net.hybridize()
    #print(net)

    rgb = nd.random_uniform(0, 1, (1, 3, 512, 512))
    depth = nd.random_uniform(0, 1, (1, 1, 512, 512))

    y1, y2, y3, y4, y5 = net(rgb, depth)
    #y1, y2 = net(rgb, depth)

    print('y1.shape: ', y1.shape)
    print('y2.shape: ', y2.shape)
    print('y3.shape: ', y3.shape)
    print('y4.shape: ', y4.shape)
    print('y5.shape: ', y5.shape)
Example #13
    def test_predict(self):
        from model import hybrid_model
        from model import trainer

        ctx = mx.gpu(1)
        num_of_vertices = 228
        batch_size = 8
        cheb_polys = nd.random_uniform(shape=(num_of_vertices,
                                              num_of_vertices * 3),
                                       ctx=ctx)
        blocks = [[1, 32, 64], [64, 32, 128]]
        x = nd.random_uniform(shape=(batch_size, 1, 12, num_of_vertices))

        net = hybrid_model.STGCN(12, 3, 3, blocks, 1.0, num_of_vertices,
                                 cheb_polys)
        net.initialize(ctx=ctx)
        net.hybridize()

        y = trainer.predict_batch(net, ctx, x, 12)

        self.assertEqual(y.shape, (batch_size, 1, 12, num_of_vertices))
Example #14
 def forward(self, is_train, req, in_data, out_data, aux):
     x = in_data[0]
     if is_train:
         self._spatial_dropout_mask = nd.broadcast_greater(
             nd.random_uniform(low=0, high=1, shape=(1, self._num_filters, 1, 1), ctx=self._ctx),
             nd.ones(shape=(1, self._num_filters, 1, 1), ctx=self._ctx) * self._p,
             ctx=self._ctx
         )
         y = nd.broadcast_mul(x, self._spatial_dropout_mask, ctx=self._ctx) / (1 - self._p)
         self.assign(out_data[0], req[0], y)
     else:
         self.assign(out_data[0], req[0], x)
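The mask built above zeroes entire feature maps: one Bernoulli(1 - p) draw per channel, broadcast over height and width, then rescaled by 1 / (1 - p). A standalone sketch of the same idea outside the CustomOp machinery (shapes and p are illustrative):

from mxnet import nd

p = 0.3
x = nd.random_uniform(shape=(2, 8, 4, 4))          # NCHW batch
mask = nd.random_uniform(shape=(1, 8, 1, 1)) > p   # keep each channel with prob 1 - p
y = nd.broadcast_mul(x, mask) / (1 - p)            # whole channels survive or vanish together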
Example #15
 def query(self, images):
     if self.pool_size == 0:
         return images
     ret_imgs = []
     for i in range(images.shape[0]):
         image = nd.expand_dims(images[i], axis=0)
         if self.num_imgs < self.pool_size:
             self.num_imgs = self.num_imgs + 1
             self.images.append(image)
             ret_imgs.append(image)
         else:
             p = nd.random_uniform(0, 1, shape=(1,)).asscalar()
             if p > 0.5:
                 random_id = nd.random_uniform(0, self.pool_size - 1, shape=(1,)).astype(np.uint8).asscalar()
                 tmp = self.images[random_id].copy()
                 self.images[random_id] = image
                 ret_imgs.append(tmp)
             else:
                 ret_imgs.append(image)
     ret_imgs = nd.concat(*ret_imgs, dim=0)
     return ret_imgs
Example #16
def test_ASTGCN_submodule():
    from model.astgcn import ASTGCN_submodule
    import numpy as np
    x = nd.random_uniform(shape=(32, 307, 3, 24))
    K = 3
    cheb_polynomials = [nd.random_uniform(shape=(307, 307)) for i in range(K)]
    backbone = [{
        "K": K,
        "num_of_chev_filters": 64,
        "num_of_time_filters": 64,
        "time_conv_strides": 2,
        "cheb_polynomials": cheb_polynomials
    }, {
        "K": K,
        "num_of_chev_filters": 64,
        "num_of_time_filters": 64,
        "time_conv_strides": 1,
        "cheb_polynomials": cheb_polynomials
    }]
    net = ASTGCN_submodule(12, backbone)
    net.initialize()
    output = net(x)
    assert output.shape == (32, 307, 12)
    assert type(output.mean().asscalar()) == np.float32
Example #17
def dropout(X, drop_probability):
    keep_probability = 1 - drop_probability
    assert 0 <= keep_probability <= 1

    if keep_probability == 0:
        return X.zeros_like()

    # random_uniform draws values uniformly from [0, 1), one per element,
    # then compares them with keep_probability (the threshold): positions below it become 1 (keep), the rest 0
    mask = nd.random_uniform(0, 1.0, X.shape, ctx=X.context) < keep_probability

    # rescaling factor
    scale = 1 / keep_probability

    # finally return the masked X, rescaled so its expected value matches the input
    return mask * X * scale
Example #18
 def _init_weight(self, _, arr):
     # initialize weights; with out=arr there is no need to specify a shape
     nd.random_uniform(low=5, high=10, out=arr)
Example #19
from mxnet import nd
from mxnet.gluon import nn

net = nn.Sequential()
with net.name_scope():
    net.add(nn.Dense(256, activation='relu'))
    net.add(nn.Dense(10))
print(net)


# Define the same network with nn.Block
class MLP(nn.Block):
    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        with self.name_scope():
            self.dense0 = nn.Dense(256)
            self.dense1 = nn.Dense(10)  # output dimension is 10

    # define the forward computation: Dense0 -> relu -> Dense1
    def forward(self, x):
        return self.dense1(nd.relu(self.dense0(x)))


net2 = MLP()
print(net2)
net2.initialize()
x = nd.random_uniform(shape=(4, 20))  # batch of 4 inputs, 20 features each
y = net2(x)
print(y)  # batch of 4 inputs, 10 outputs each
Example #20
import mxnet as mx
from mxnet import nd
from mxnet.contrib.ndarray import MultiBoxPrior
import matplotlib.pyplot as plt

n = 40
# shape: batch x channel x height x width
x = nd.random_uniform(shape=(1, 3, n, n))

y = MultiBoxPrior(x, sizes=[.5, .25, .1], ratios=[1, 2, .5])

# the first anchor box generated for pixel at (20,20)
# its format is (x_min, y_min, x_max, y_max)
boxes = y.reshape((n, n, -1, 4))
print('The first anchor box at row 21, column 21:', boxes[20, 20, 0, :])

from mxnet.gluon import nn
def class_predictor(num_anchors, num_classes):
    """return a layer to predict classes"""
    return nn.Conv2D(num_anchors * (num_classes + 1), 3, padding=1)

cls_pred = class_predictor(5, 10)
cls_pred.initialize()
x = nd.zeros((2, 3, 20, 20))
print('Class prediction', cls_pred(x).shape)

def box_predictor(num_anchors):
    """return a layer to predict delta locations"""
    return nn.Conv2D(num_anchors * 4, 3, padding=1)

box_pred = box_predictor(10)
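For symmetry with cls_pred above, a quick shape check for box_pred (added, not in the original snippet): 10 anchors times 4 offsets gives 40 output channels, and the padded 3x3 conv keeps the spatial size.

box_pred.initialize()
x = nd.zeros((2, 3, 20, 20))
print('Box prediction', box_pred(x).shape)  # (2, 40, 20, 20)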
Example #21
    out = nn.Sequential()
    for _ in range(num_convs):
        out.add(
            nn.Conv2D(channels=channel,
                      kernel_size=3,
                      padding=1,
                      activation='relu'))

    out.add(nn.MaxPool2D(pool_size=2, strides=2))
    return out


blk = vggBlock(2, 128)
blk.initialize()

x = nd.random_uniform(shape=(2, 3, 16, 16))
y = blk(x)
print(y.shape)
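The two padded 3x3 convolutions keep the 16x16 spatial size while mapping to 128 channels, and the 2x2 max-pool then halves it, so the print above shows (2, 128, 8, 8).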


# define VGG11
def vggStack(architecture):
    out = nn.Sequential()
    for (num_convs, channel) in architecture:
        out.add(vggBlock(num_convs, channel))
    return out


num_outputs = 10
architecture = ((1, 64), (1, 128), (2, 256), (2, 256), (2, 512))
net = nn.Sequential()
Example #22
        super(DenseBlock, self).__init__(**kwargs)
        self.net = nn.Sequential()
        for i in range(layers):
            self.net.add(conv_block(growth_rate))

    def forward(self, x):
        for layers in self.net:
            out = layers(x)
            x = nd.concat(x, out, dim=1)
        return x


dblk = DenseBlock(2, 10)
dblk.initialize()

x = nd.random_uniform(shape=(4, 3, 8, 8))
print(dblk(x).shape)


# Transition Block
def transition_block(channels):
    out = nn.Sequential()
    out.add(nn.BatchNorm(), nn.Activation('relu'),
            nn.Conv2D(channels, kernel_size=1),
            nn.AvgPool2D(pool_size=2, strides=2))

    return out


tblk = transition_block(10)
tblk.initialize()
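tblk is created but never applied in this excerpt. A minimal follow-up check (a sketch, assuming conv_block emits growth_rate channels as in the standard DenseNet tutorial): dblk grows the 3 input channels to 3 + 2 * 10 = 23, and the transition block then compresses to 10 channels and halves the spatial size.

print(tblk(dblk(x)).shape)  # (4, 10, 4, 4)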
Example #23
        
        return nd.dot(final_conv_output, self.final_fc_weight.data()) + self.final_fc_bias.data()
    
if __name__ == "__main__":
    ctx = mx.cpu()
    distance_df = pd.read_csv('../data/METR-LA/preprocessed/distance.csv', dtype={'from': 'int', 'to': 'int'})
    num_of_vertices = 207
    A = get_adjacency_matrix(distance_df, num_of_vertices, 0.1)
    L_tilde = scaled_Laplacian(A)
    cheb_polys = [nd.array(i, ctx=ctx) for i in cheb_polynomial(L_tilde, 3)]
    backbones = [
    {
        'num_of_time_conv_filters1': 64,
        'num_of_time_conv_filters2': 64,
        'K_t': 3,
        'num_of_cheb_filters': 32,
        'K': 3,
        'cheb_polys': cheb_polys
    },
    {
        'num_of_time_conv_filters1': 64,
        'num_of_time_conv_filters2': 64,
        'K_t': 3,
        'num_of_cheb_filters': 32,
        'K': 3,
        'cheb_polys': cheb_polys
    }]
    net = STGCN(backbones, 64)
    net.initialize(ctx=ctx)
    print(net(nd.random_uniform(shape=(16, 1, 207, 12))).shape)
Example #24
#!/usr/bin/env python
#-*- coding:utf-8 -*-

import mxnet as mx
from mxnet import nd
from mxnet.contrib.ndarray import MultiBoxPrior  # MultiBoxPrior generates anchor (prior) boxes

n = 40
# input shape: batch x channel x height x width
x = nd.random_uniform(shape=(1, 3, n, n))  
#               image    n preset sizes     m preset aspect ratios -> n + m - 1 boxes per pixel
y = MultiBoxPrior(x, sizes=[.5, .25, .1], ratios=[1, 2, .5])

# take the first anchor box at pixel (20, 20)
# box format is (x_min, y_min, x_max, y_max), given as fractions of the image
boxes = y.reshape((n, n, -1, 4))
print('The first anchor box at row 21, column 21:', boxes[20, 20, 0, :])

import matplotlib.pyplot as plt
#"""convert an anchor box to a matplotlib rectangle"""
def box_to_rect(box, color, linewidth=3):
    box = box.asnumpy()
    return plt.Rectangle(
        (box[0], box[1]), (box[2]-box[0]), (box[3]-box[1]),
        fill=False, edgecolor=color, linewidth=linewidth)
colors = ['blue', 'green', 'red', 'black', 'magenta']  # 3 + 3 - 1 = 5 boxes
plt.imshow(nd.ones((n, n, 3)).asnumpy())
anchors = boxes[20, 20, :, :]
for i in range(anchors.shape[0]):
    plt.gca().add_patch(box_to_rect(anchors[i,:]*n, colors[i]))
plt.show()
Example #25
                                   activation='relu')
        self.p4_pool_1 = nn.MaxPool2D(pool_size=3, padding=1, strides=1)
        self.p4_conv_2 = nn.Conv2D(n4, kernel_size=1, activation='relu')

    def forward(self, x):
        p1 = self.p1_conv_1(x)
        p2 = self.p2_conv_2(self.p2_conv_1(x))
        p3 = self.p3_conv_2(self.p3_conv_1(x))
        p4 = self.p4_conv_2(self.p4_pool_1(x))
        return nd.Concat(p1, p2, p3, p4, dim=1)


incp = Inception(64, 96, 128, 16, 32, 32)
incp.initialize()

x = nd.random_uniform(shape=(32, 3, 64, 64))
print(incp(x).shape)
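Assuming the usual tutorial channel split for Inception(64, 96, 128, 16, 32, 32), the four branches contribute 64 + 128 + 32 + 32 = 256 channels after concatenation, so the print above shows (32, 256, 64, 64).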


# define GoogLeNet
class GoogLeNet(nn.Block):
    def __init__(self, num_classes, verbose=False, **kwargs):
        super(GoogLeNet, self).__init__(**kwargs)
        self.verbose = verbose
        with self.name_scope():
            b1 = nn.Sequential()
            b1.add(
                nn.Conv2D(64,
                          kernel_size=7,
                          strides=2,
                          padding=3,
Example #26
def train(epoch, train_loader, model, loss, optimizer, opt, ctx, train_loss, train_iou):
    """
    one epoch training for program executor
    """
    loss_sum, iou_sum, n = 0.0, 0.0, 0
    for idx, data in enumerate(train_loader):
        start_t = time.time()

        shape, label, param = data
        bsz = shape.shape[0]
        n_step = label.shape[1]
        #print("label.shape:",label)
        #print("n_step:",n_step,"bsz:",bsz,"stop_id:",stop_id)
        
        index = np.array(list(map(lambda x: n_step, label)))-1
        #index = label
        
        # add noise during training, making the executor accept
        # continuous output from program generator
        label = label.reshape(-1,1).asnumpy()
        pgm_vector = 0.2 * np.random.uniform(0,1,(bsz * n_step, stop_id))
        pgm_noise = 0.2 *np.random.uniform(0,1,label.shape)
        pgm_value = 1 - pgm_noise
        #print('pgm_val.shape:',pgm_value.shape,'label.shape:',label.shape,'label.shape:',label.shape)
        pgm_vector = scatter_numpy(pgm_vector,1,label,pgm_value).reshape(bsz,n_step,stop_id)
        
        
        param_noise = nd.random_uniform(0,1,shape=param.shape)
        param_vector = param + 0.6 * (param_noise - 0.5)
        #print("param_vector.shape:",param_vector.shape)
        gt = shape.as_in_context(ctx)
        #print(pgm_vector.dtype)
        index = nd.from_numpy(index).astype('int64').as_in_context(ctx)
        pgm_vector = nd.from_numpy(pgm_vector).astype('float32').as_in_context(ctx)
        param_vector = param_vector.as_in_context(ctx)


        with autograd.record():
            pred = model(pgm_vector, param_vector, index)
            scores = nd.log_softmax(pred,axis=1)
            pred0 = scores[:,0].squeeze()*opt.n_weight
            pred1 = scores[:,1].squeeze()*opt.p_weight
            l = -nd.where(gt, pred1, pred0).mean((1,2,3))
            #l = -(nd.pick(scores1, gt, axis=1, keepdims=True)*opt.n_weight
            #    +nd.pick(scores2,(1-gt), axis=1, keepdims=True)*opt.p_weight).mean((1,2,3,4))
        l.backward()
                                        
        #clip_gradient(optimizer, opt.grad_clip)
        #optimizer._allreduce_grads();

        optimizer.step(l.shape[0],ignore_stale_grad=True)
        
        l = l.mean().asscalar()
        
        pred = nd.softmax(pred,axis = 1)
        pred = pred[:, 1, :, :, :]
        s1 = gt.reshape(-1, 32, 32, 32).astype('float32').as_in_context(mx.cpu())
        s2 = pred.squeeze().as_in_context(mx.cpu())
        #print(s2.shape)
        s2 = (s2 > 0.5)

        batch_iou = BatchIoU(s1, s2)
        iou = batch_iou.mean()
        end_t = time.time()
        loss_sum += l
        n += 1
        iou_sum += iou

        if idx % (opt.info_interval * 10) == 0:
            print("Train: epoch {} batch {}/{}, loss13 = {:.3f}, iou = {:.3f}, time = {:.3f}"
                  .format(epoch, idx, len(train_loader), l, iou, end_t - start_t))
            sys.stdout.flush()
        
    train_loss.append(loss_sum/n)
    train_iou.append(iou_sum/n)
Example #27
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        flattened = self.flatten(layer4)
        dropout1 = self.dropout1(self.denselayer1(flattened))
        dense2 = self.denselayer2(dropout1)
        outputs = self.outputlayer(dense2)
        return outputs


# for test the model
if __name__ == '__main__':
    net = ResNet3D(10)
    net.initialize(mx.init.Xavier())
    net.hybridize()
    x = nd.random_uniform(0, 1, shape=(1, 1, 200, 9, 9))  # the shape is the same as the training samples
    y = net(x)
    print(y.shape)  # output (1, 10)
'''
net = gluon.nn.Sequential()
net.add(gluon.nn.Conv3D(16, (3, 3, 3), padding=(1, 1, 1), activation='relu'))    # layout: NCDHW, weight's shape: (out_channels, input_channels, 3, 3, 3)
net.initialize()

#x = nd.random_uniform(0, 1, shape=(1, 1, 200, 9, 9))    # output: (1, 16, 200, 9, 9)
x = nd.random_uniform(0, 1, shape=(1, 1, 200, 9, 9))    # output: (1, 16, 200, 9, 9)

y = net(x)
print(net.collect_params())
print(y.shape)
'''
Example #28
import numpy as np
from mxnet import nd
from mxnet import gluon


data_in = nd.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
print(data_in)

kernel = nd.array([[0, 1], [2, 3]])

net = gluon.nn.Sequential()
net.add(gluon.nn.Conv2D(channels=1, kernel_size=(3, 3), strides=(1, 1)),
        gluon.nn.MaxPool2D())

net.initialize()

x = nd.random_uniform(shape=(1, 1, 10, 10))
y = net(x)
print(net[0].params)

print(net[0].weight)

print(net[0].weight.data())

print('x = ', x)
print('y = ', y)

Decov = gluon.nn.Sequential()
Decov.add(gluon.nn.Conv2DTranspose(channels=1, kernel_size=(4, 4), strides=(2, 2)))

Decov.initialize()
y_ = Decov(y)
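A closing shape note (added): the 3x3 conv maps (1, 1, 10, 10) to (1, 1, 8, 8), the 2x2 max-pool halves that to (1, 1, 4, 4), and the stride-2 transposed conv upsamples back by (4 - 1) * 2 + 4 = 10.

print(y_.shape)  # (1, 1, 10, 10)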
Example #29
    nn.Conv2D(256, kernel_size=5, padding=2, activation="relu"),
    nn.MaxPool2D(pool_size=3, strides=2),
    nn.BatchNorm(),
    # 13*13*256
    nn.Conv2D(384, padding=1, kernel_size=3, activation="relu"),
    nn.Conv2D(384, padding=1, kernel_size=3, activation="relu"),
    nn.Conv2D(256, kernel_size=3, padding=1, activation="relu"),
    nn.MaxPool2D(pool_size=3, strides=2),
    # fc 1
    nn.Dense(4096, activation="relu"),
    nn.Dropout(.5),
    nn.Dense(4096, activation="relu"),
    nn.Dropout(0.5),
    nn.Dense(10))
net.initialize()
x = nd.random_uniform(shape=[1, 1, 224, 224])
print(net(x))


def load_data_fashion_mnist(batch_size,
                            resize=None,
                            root=os.path.join('~', '.mxnet', 'datasets',
                                              'fashion-mnist')):
    root = os.path.expanduser(root)  # expand the user path '~'
    transformer = []
    if resize:
        transformer += [gdata.vision.transforms.Resize(resize)]
    transformer += [gdata.vision.transforms.ToTensor()]
    transformer = gdata.vision.transforms.Compose(transformer)
    mnist_train = gdata.vision.FashionMNIST(root=root, train=True)
    mnist_test = gdata.vision.FashionMNIST(root=root, train=False)
Example #30
    out = nd.reshape(in_data, shape=(0, 0, -4, -1, factor,
                                     -2))  # the two args after -4 split the h axis into h/2 and factor (2)
    #print('out shape = ', out.shape)                           # 1, 3, 208, 2, 416
    out = nd.transpose(out, axes=(0, 1, 3, 2, 4))
    #print('out shape = ', out.shape)                           # 1, 3, 2, 208, 416
    out = nd.reshape(out, shape=(0, -3, -1, -2))
    #print('out shape = ', out.shape)                           # 1, 6, 208, 416
    out = nd.reshape(out, shape=(0, 0, 0, -4, -1, factor))
    #print('out shape = ', out.shape)                           # 1, 6, 208, 208, 2
    out = nd.transpose(out, axes=(0, 1, 4, 2, 3))
    #print('out shape = ', out.shape)                           # 1, 6, 2, 208, 208
    out = nd.reshape(out, shape=(0, -3, -1, -2))  # output: 1, 12, 208, 208

    return out


x = nd.random_uniform(shape=(1, 3, 416, 416))
#x = mx.symbol.var('data')

print('x type = ', type(x))
#y = model.stack_neightbor(x)
#y = test_stack_neightbor(x)

# test the net
y = net(x)  # correct, y = (1, 125, 13, 13)

print('y type = ', type(y))

print('y = ', y)
print('y shape = ', y.shape)
Example #31
y = net(nd.random.uniform(shape=(4, 8)))
print(y.mean())

params = gluon.ParameterDict(prefix='block1_')
params.get("param2", shape=(2, 3))
print(params)

class MyDense(nn.Block):
    def __init__(self, units, in_units, prefix=None, params=None):
        super().__init__(prefix, params)
        with self.name_scope():
            self.weight = self.params.get('weight', shape=(in_units, units))
            self.bias = self.params.get('bias', shape=(units,))

    def forward(self, x):
        linear = nd.dot(x, self.weight.data()) + self.bias.data()
        return nd.relu(linear)

dense = MyDense(5, in_units = 10, prefix='o_my_dense_')
print(dense.params)

dense.initialize()
dense(nd.random_uniform(shape=(2, 10)))


net = nn.Sequential()
with net.name_scope():
    net.add(MyDense(32, in_units=64))
    net.add(MyDense(2, in_units=32))
net.initialize()
net(nd.random.uniform(shape=(2, 64)))
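A final shape check (added sketch): the stacked MyDense layers map (2, 64) -> (2, 32) -> (2, 2), with the ReLU applied inside each forward.

print(net(nd.random.uniform(shape=(2, 64))).shape)  # (2, 2)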