Example #1
def test_broadcast():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)
    res = nd.broadcast_to(b, shape=(b.shape[0], SMALL_Y))
    # note: the last row of b holds LARGE_X - 1, but in the default float32
    # dtype that value rounds to LARGE_X for sizes this large (the large-tensor
    # tests use LARGE_X = 10**8), so the comparison holds element-wise
    assert np.sum(res[-1].asnumpy() == LARGE_X) == res.shape[1]
    res = mx.nd.broadcast_like(b, a)
    assert np.sum(res[-1].asnumpy() == LARGE_X) == a.shape[1]
Example #2
def linspace(start=0., stop=1., num=1, end=False, ctx=None, dtype=None):
    # evenly spaced values from start to stop; end=True includes the stop
    # value itself, like np.linspace(..., endpoint=True)
    if end and num > 1:
        num -= 1
        step = (stop-start)/num
        stop += step
    else:
        step = (stop-start)/num
    return nd.arange(start, stop, step, ctx=ctx, dtype='float32').astype(dtype)
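A quick check of the endpoint behaviour (a sketch, assuming nd is mxnet.ndarray):

print(linspace(0., 1., 5, end=True))  # [0. 0.25 0.5 0.75 1.]
print(linspace(0., 1., 5))            # [0. 0.2 0.4 0.6 0.8]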
Example #3
def random_counter(num, K, ctx, d=100, ohkw={}):
    nrow, ncol = K, (num-1)//K+1
    cond_col   = nd.arange(nrow, ctx=ctx).reshape([1, nrow])

    noise      = nd.random_normal(shape=(num, d), ctx=ctx)
    cond       = cond_col.tile([ncol, 1]).one_hot(K, **ohkw).reshape([ncol*nrow, K])[:num]

    return noise, cond
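A shape check for illustration (assuming mxnet is imported as mx): the rows of cond cycle through the K one-hot classes, truncated to num rows.

noise, cond = random_counter(7, 3, mx.cpu())
print(noise.shape, cond.shape)  # (7, 100) (7, 3)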
Example #4
def test_where():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)
    b = nd.broadcast_to(b, shape=(b.shape[0], SMALL_Y))
    res = nd.where(b > 100, a, b)
    assert np.sum(res[-1].asnumpy() == 1) == b.shape[1]

    csr_cond = nd.sparse.cast_storage(b < 10, 'csr')
    res = nd.sparse.where(csr_cond, a, b)
    assert np.sum(res[0].asnumpy() == 1) == b.shape[1]
Example #5
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1, ctx=None):
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = int((H + 2 * padding - field_height) / stride + 1)
    out_width = int((W + 2 * padding - field_width) / stride + 1)

    i0 = nd.repeat(nd.arange(field_height, ctx=ctx), field_width)
    i0 = nd.tile(i0, C)
    i1 = stride * nd.repeat(nd.arange(out_height, ctx=ctx), out_width)
    j0 = nd.tile(nd.arange(field_width, ctx=ctx), field_height * C)
    j1 = stride * nd.tile(nd.arange(out_width, ctx=ctx), out_height)
    i = i0.reshape((-1, 1)) + i1.reshape((1, -1))
    j = j0.reshape((-1, 1)) + j1.reshape((1, -1))

    k = nd.repeat(nd.arange(C, ctx=ctx), field_height * field_width).reshape((-1, 1))

    return (k.astype('int32'), i.astype('int32'), j.astype('int32'))
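A small shape check with hypothetical sizes: for a (2, 3, 4, 4) input, a 3x3 field, padding 1 and stride 1, the output grid is 4x4, so the index arrays have C * field_height * field_width = 27 rows and out_height * out_width = 16 columns.

k, i, j = get_im2col_indices((2, 3, 4, 4), 3, 3, padding=1, stride=1)
print(k.shape, i.shape, j.shape)  # (27, 1) (27, 16) (27, 16)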
Example #6
def smooth(label, classes, eta=0.1):
    if isinstance(label, nd.NDArray):
        label = [label]
    smoothed = []
    for l in label:
        ind = l.astype('int')
        res = nd.zeros((ind.shape[0], classes), ctx=l.context)
        res += eta/classes
        res[nd.arange(ind.shape[0], ctx=l.context), ind] = 1 - eta + eta/classes
        smoothed.append(res)
    return smoothed
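For example, with the default eta=0.1 and three classes, each row gets 1 - eta + eta/3 ≈ 0.933 at the true class and eta/3 ≈ 0.033 elsewhere (assuming nd is mxnet.ndarray):

print(smooth(nd.array([0, 2]), classes=3)[0])
# rows are ~[0.933, 0.033, 0.033] and ~[0.033, 0.033, 0.933]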
Example #7
def grow(num, K, ctx, d=100, ohkw={}):
    nrow, ncol  = K, (num-1)//K+1
    noise_one   = nd.random_normal(shape=(1, 1, d), ctx=ctx)
    noise       = noise_one.tile([ncol, nrow]).reshape([ncol*nrow, d])[:num]

    onval = ohkw.get('on_value', 1.0)
    offval = ohkw.get('off_value', -1.0)

    cond_col_d  = nd.arange(nrow, ctx=ctx).reshape([1, nrow]).tile([ncol, 1])
    cond_col    = cond_col_d.one_hot(K, **ohkw)
    alpha       = linspace(offval, onval, ncol, end=True, ctx=ctx).reshape([ncol, 1, 1]) * cond_col_d.one_hot(K) + offval * cond_col_d.one_hot(K, off_value=1., on_value=0.)
    cond        = alpha.reshape([ncol*nrow, K])[:num]

    return noise, cond
Example #8
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
                   stride=1, ctx=None):
    """ An implementation of col2im based on fancy indexing and np.add.at """
    N, C, H, W = x_shape
    H_padded, W_padded = H + 2 * padding, W + 2 * padding
    x_padded = nd.zeros((N, C, H_padded, W_padded), dtype=cols.dtype, ctx=ctx)
    k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding, stride, ctx=ctx)
    cols_reshaped = cols.reshape((C * field_height * field_width, -1, N))
    cols_reshaped = cols_reshaped.transpose((2, 0, 1))
    # The for loop is probably a bottleneck, but cannot be avoided without a nd.add.at function
    #for l in nd.arange(cols.shape[1]):
    #    x_padded[:,k,i[:,l], j[:,l]] += cols_reshaped[:,:,l]
    for col in range(cols.shape[0]):
        x_padded[:, k[col], i[col, :], j[col, :]] += cols_reshaped[:, col, :]
    #np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
    if padding == 0:
        return x_padded
    return x_padded[:, :, padding:-padding, padding:-padding]
Example #9
def transform(num, K, ctx, d=100, ohkw={}):
    nrow, ncol  = K, (num-1)//K+1
    noise_one   = nd.random_normal(shape=(1, 1, d), ctx=ctx)
    noise       = noise_one.tile([ncol, nrow]).reshape([ncol*nrow, d])[:num]

    onval = ohkw.get('on_value', 1.0)
    offval = ohkw.get('off_value', -1.0)

    cond_col_ds = nd.arange(nrow, ctx=ctx).reshape([1, nrow]).tile([ncol, 1])
    cond_col_dt = cond_col_ds[:, list(range(1,nrow)) + [0]]
    cond_col_s  = cond_col_ds.one_hot(K)
    cond_col_t  = cond_col_dt.one_hot(K)

    alpha       = linspace(offval, onval, ncol, end=True, ctx=ctx).reshape([ncol, 1, 1]) * cond_col_t
    beta        = linspace(onval, offval, ncol, end=True, ctx=ctx).reshape([ncol, 1, 1]) * cond_col_s
    offvals     = offval * cond_col_ds.one_hot(K, off_value=1., on_value=0.) * cond_col_dt.one_hot(K, off_value=1., on_value=0.)
    cond        = (beta + alpha + offvals).reshape([ncol*nrow, K])[:num]

    return noise, cond
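A shape check for both conditioning generators (a sketch, assuming mxnet is imported as mx and the linspace helper from Example #2 is in scope):

noise, cond = grow(6, 3, mx.cpu())
print(noise.shape, cond.shape)  # (6, 100) (6, 3)
noise, cond = transform(6, 3, mx.cpu())
print(noise.shape, cond.shape)  # (6, 100) (6, 3)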
Example #10
def render(gfunc, stepsize=0.1, momentum=0.9, maxstep=24000):
    K = 10
    num = 30
    bbox = config.data.bbox
    cond = nd.one_hot(nd.repeat(nd.arange(K, ctx=ctx), (num-1)//K+1)[:num], K).reshape((num, K, 1, 1))
    anoi = nd.random.normal(shape=(num,100,1,1), ctx=ctx)
    bnoi = nd.random.normal(shape=(num,100,1,1), ctx=ctx)
    slast = 0.
    for step in range(maxstep):
        snoi = anoi - bnoi

        sdist = snoi.norm(axis=1,keepdims=True)
        if sdist.min().asscalar() < .5:
            anoi = nd.random.normal(shape=(30,100,1,1), ctx=ctx)
        snoi /= sdist
        slast = stepsize*snoi + momentum*slast
        bnoi += slast

        gen = gfunc(noise=bnoi, cond=cond)
        indat = ((gen - bbox[0]) * 255/(bbox[1]-bbox[0])).asnumpy().clip(0, 255).astype(np.uint8)
        indat = align_images(indat, 5, 6, 32, 32, 3)
        yield indat
Example #11
def label_transform(label, classes):
    ind = label.astype('int')
    res = nd.zeros((ind.shape[0], classes), ctx=label.context)
    res[nd.arange(ind.shape[0], ctx=label.context), ind] = 1
    return res
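A quick usage check (assuming nd is mxnet.ndarray):

print(label_transform(nd.array([0, 2]), 3))
# [[1. 0. 0.]
#  [0. 0. 1.]]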
Example #12
import mxnet as mx
from mxnet import nd
import numpy as np
a = nd.arange(1, 13).reshape(3, 4)
b = a.as_in_context(mx.gpu())
c = nd.array(np.arange(1, 13).reshape(3, 4), ctx=mx.gpu())
d = nd.arange(1, 13, ctx=mx.gpu()).reshape((3, 4))
print(a, b, c, d, sep='\n')
Example #13
from mxnet import autograd, nd
from utils import plot

x = nd.arange(-8, 8, 0.1)
x.attach_grad()

with autograd.record():
    y = x.relu()
    y.backward()
plot(x, y, 'relu')
plot(x, x.grad, 'grad of relu')

with autograd.record():
    y = x.sigmoid()
    y.backward()
plot(x, y, 'sigmoid')
plot(x, x.grad, 'grad of sigmoid')

with autograd.record():
    y = x.tanh()
    y.backward()
plot(x, y, 'tanh')
plot(x, x.grad, 'grad of tanh')
Example #14
def test_take():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    idx = nd.arange(LARGE_X-1000, LARGE_X)
    res = nd.take(a, idx)
    assert np.sum(res[-1].asnumpy() == 1) == res.shape[1]
Example #15
from mxnet import nd

x = nd.arange(12)
#print(x)
x = x.reshape(3, 4)
#print(x)
y = nd.zeros((2, 3, 4))
#print(y)
z = nd.ones((2, 8, 9))
#print(z)
z *= 3
#print(z)
k = nd.normal(0, 1, shape=(2, 3, 4))
#print(k)
k = k + y
#print(k)
#k = k/y
#print(k.exp())
a = nd.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
#b = nd.array([1, 2, 3, 4], [ 5, 6, 7, 8], [9, 10, 11, 12])
#a = nd.dot(b, a.T)
b = nd.arange(12)
b = b.reshape((4, 3))
#print(b.shape)
#print(a.shape)
#print(b)
e = b.T
#print(b)
#print(b.shape)
# matrix multiplication with the transpose
c = nd.dot(a, e)
Example #16
# char_to_idx    maps each character to an index
# idx_to_char    maps each index back to its character
# vocab_size is the number of distinct characters
(corpus_indices, char_to_idx, idx_to_char,
 vocab_size) = d2l.load_data_jay_lyrics()

# one-hot vectors
print(nd.one_hot(nd.array([1, 2]), vocab_size))  # each one-hot row has a single 1 -- here at positions 1 and 2


def to_onehot(X, size):
    return [nd.one_hot(x, size) for x in X.T]  # columns of X are features, rows are samples


# Test
X = nd.arange(10).reshape((2, 5))  # 2: batch_size, 5: num_steps
inputs = to_onehot(X, vocab_size)  # num_steps arrays, each of shape (batch_size, vocab_size)
np.set_printoptions(edgeitems=6)  # print more edge entries (the default is 3)
print(len(inputs), inputs[0])  # 5 arrays, each 2 x 1027

################################################# TODO: initialize model parameters #####################################################
num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
ctx = d2l.try_gpu()
print('use ', ctx)


def get_param():
    def _one(shape):
        return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)

    # hidden layer parameters
Example #17
                                  padding=1,
                                  activation="relu")
            self.__f1 = nn.MaxPool2D()
            self.__f2 = nn.Flatten()
            self.__d2 = nn.Dense(units=32, activation="relu")
            self.__d3 = nn.Dense(units=64, activation="relu")
            self.__d4 = nn.Dense(units=1)
            # self.__p1 = nn.AvgPool2D(pool_size=4)
            # self.__f1 = nn.Flatten()

    def forward(self, x):
        x = self.__d1(x)
        x = x.reshape((-1, 1, 4, 4))
        x = self.__c1(x)
        x = self.__c2(x)
        x = self.__c3(x)
        x = self.__f1(x)
        x = self.__f2(x)
        x = self.__d2(x)
        x = self.__d3(x)
        x = self.__d4(x)

        return x


if __name__ == "__main__":
    x = nd.arange(25).reshape((1, 1, 5, 5))
    lbk = LostRepairBlock()
    lbk.initialize()
    print(lbk(x).shape)
Example #18
# row 0: the 1 is at position 0
# row 1: the 1 is at position 2
# 2 x 1027
tmp = nd.one_hot(nd.array([0, 2]), vocab_size)
print(tmp)


#
def to_onehot(X, size):  # this function is also saved in the d2lzh package for later use
    # 5 x 2
    #
    return [nd.one_hot(x, size) for x in X.T]


# 2 x 5
X = nd.arange(10).reshape((2, 5))

# 2 x 1027
# 2 x 1027
# 2 x 1027
# 2 x 1027
# 2 x 1027
inputs = to_onehot(X, vocab_size)

#
print(len(inputs))

# 2 x 1027
print(inputs[0].shape)

# num_inputs    1027
Example #19
from mxnet import nd
w = nd.arange(4).reshape((1, 1, 2, 2))
b = nd.array([1])
data = nd.arange(9).reshape((1, 1, 3, 3))
out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])  # num_filter is the number of output channels, w.shape[0]
print('input:', data, '\n\nweight:', w, '\n\nbias:', b, '\n\noutput:', out)

out = nd.Convolution(data,
                     w,
                     b,
                     kernel=w.shape[2:],
                     num_filter=w.shape[0],
                     stride=(2, 2),
                     pad=(1, 1))
print('input:', data, '\n\nweight:', w, '\n\nbias:', b, '\n\noutput:', out)

w = nd.arange(8).reshape((1, 2, 2, 2))
data = nd.arange(18).reshape((1, 2, 3, 3))
out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])
print('input:', data, '\n\nweight:', w, '\n\nbias:', b, '\n\noutput:', out)

w = nd.arange(16).reshape((2, 2, 2, 2))
data = nd.arange(18).reshape((1, 2, 3, 3))
b = nd.array([1, 2])
out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])
print('input:', data, '\n\nweight:', w, '\n\nbias:', b, '\n\noutput:', out)

data = nd.arange(18).reshape((1, 2, 3, 3))
max_pool = nd.Pooling(data=data, pool_type="max", kernel=[2, 2])
avg_pool = nd.Pooling(data=data, pool_type="avg", kernel=[2, 2])
print('data:', data, '\n\nmax pooling:', max_pool, '\n\navg pooling:', avg_pool)
Example #20
import mxnet as mx
from RPN import RPN
from Dataset import getDataset
from mxnet import autograd, nd
num_cls = 20
batch_size = 64
train_data, test_data = getDataset()
ctx = mx.gpu(1)
net = RPN(num_cls, ctx) # TODO: num_class need to modify
for epoch in range(20):
    print('epoch: {}'.format(epoch))
    for i, batch in enumerate(train_data):
        print('batch')
        x = batch[0].as_in_context(ctx) # x.shape = (64, 3, 224, 224)
        y = batch[1].as_in_context(ctx) # y.shape = (64,)
        with autograd.record():
            anchors, class_pred_origin = net(x) # anchors.shape = (1, 784, 4), class_pred_origin.shape = (64, 84, 14, 14)
            # TODO: softmax ?
            # class_pred_origin.shape = (batch, anchor_num * (num_cls+1), height, width)
            class_pred_flatten = class_pred_origin.reshape(batch_size, -1) # class_pred_flatten.shape = (batch, anchor_num * (num_cls+1) * height * width)
            picked_anchors_index = class_pred_flatten.argmax(axis=1, keepdims=True) # shape = (batch, 1)
            temp1 = picked_anchors_index - picked_anchors_index % (num_cls+1)
            idx0 = nd.arange(batch_size, ctx=ctx).reshape(-1,1)
            idx1 = temp1 + nd.arange(num_cls+1, ctx=ctx)
            class_pred = class_pred_flatten[idx0, idx1]# class_pred.shape = (batch, num_cls + 1)
            nd.waitall()
            # class_target.shape = (batch, num_cls + 1)


Example #21
from functools import reduce
from operator import mul
from mxnet.test_utils import rand_shape_nd

def gen(dimensions):
    shape = rand_shape_nd(dimensions, 4)
    nelems = reduce(mul, shape)
    x = nd.arange(nelems).reshape(shape)
    return x
Example #22
def arange_shape_like(y):
    shape = y.shape
    nelems = reduce(mul, shape)
    x = nd.arange(nelems).reshape(shape)
    return x
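A one-line usage check:

print(arange_shape_like(nd.zeros((2, 3))))
# [[0. 1. 2.]
#  [3. 4. 5.]]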

Example #23
def dropout(X, drop_probability):
    keep_probability = 1 - drop_probability
    mask = nd.random_uniform(0, 1.0, X.shape, ctx=X.context) < keep_probability
    #############################
    #  Avoid division by 0 when scaling
    #############################
    if keep_probability > 0.0:
        scale = (1 / keep_probability)
    else:
        scale = 0.0
    return mask * X * scale


A = nd.arange(20).reshape((5, 4))
dropout(A, 0.0)

dropout(A, 0.5)

dropout(A, 1.0)


def softmax(y_linear):
    exp = nd.exp(y_linear - nd.max(y_linear))
    partition = nd.nansum(exp, axis=0, exclude=True).reshape((-1, 1))
    return exp / partition


def softmax_cross_entropy(yhat_linear, y):
    return -nd.nansum(y * nd.log_softmax(yhat_linear), axis=0, exclude=True)
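Sanity check: each row of the softmax output is a probability distribution and sums to 1 (assuming nd is mxnet.ndarray):

probs = softmax(nd.array([[1., 2., 3.], [0., 0., 0.]]))
print(probs.sum(axis=1))  # [1. 1.]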
Example #24
def test_tile():
    a = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)
    b = nd.tile(a, reps=(1, SMALL_Y))
    assert np.sum(b[-1].asnumpy() == LARGE_X) == b.shape[1]
Example #25
def test_argmin():
    a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)
    idx = mx.nd.argmin(a, axis=0)
    assert idx.shape[0] == SMALL_Y
Example #26
from mxnet import nd

w = nd.arange(4).reshape((1, 1, 2, 2))
b = nd.array([1])
data = nd.arange(9).reshape((1, 1, 3, 3))
out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[1], stride=(2, 2), pad=(1, 1))

print(w)
print(w.shape[2])
print(w.shape[3])
print(b)
print(data)
print(out)

Example #27
def test_clip():
    a = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)
    b = nd.broadcast_to(a, shape=(a.shape[0], SMALL_Y))
    res = nd.clip(b, a_min=100, a_max=1000)
    assert np.sum(res[-1].asnumpy() == 1000) == b.shape[1]
Example #28
def dropout2(X, drop_rate):
    # reconstructed header (the snippet was truncated): the function is called
    # below as dropout2(x, 0.5); the preallocated output Z is an assumption
    Z = nd.zeros_like(X)
    nd.Dropout(X, p=drop_rate, out=Z)
    return Z


def dropout_gluon():
    drop_prob1, drop_prob2, lr, batch_size, num_epochs = 0.2, 0.5, 0.1, 64, 50

    net = nn.Sequential()
    net.add(
        nn.Dense(256, activation="relu"),
        nn.Dropout(drop_prob1),  # 在第一个全连接层后添加丢弃层
        nn.Dense(256, activation="relu"),
        nn.Dropout(drop_prob2),  # 在第二个全连接层后添加丢弃层
        nn.Dense(10))
    net.initialize(init.Normal(sigma=0.01))

    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

    loss = gloss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                  None, None, trainer)


if __name__ == "__main__":
    x = nd.arange(64).reshape(8, 8)
    logger.info(dropout(x, 0.5))
    logger.info(dropout2(x, 0.5) / 2)

    dropout_gluon()
Example #30
def test_clip():
    a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)
    res = nd.clip(a, a_min=100, a_max=1000)
    assert np.sum(res[-1].asnumpy() == 1000) == a.shape[1]
Example #31
# %matplotlib inline

import d2lzh as d2l
from mxnet import autograd, nd


def xyplot(x_vals, y_vals, name):
    d2l.set_figsize(figsize=(5, 2.5))
    d2l.plt.plot(x_vals.asnumpy(), y_vals.asnumpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')
    d2l.plt.show()


x = nd.arange(-8.0, 8.0)
x.attach_grad()
with autograd.record():
    y = x.relu()
xyplot(x, y, 'relu')
print(y)

y.backward()

xyplot(x, x.grad, 'grad of relu')
Example #33
def check_mean():
    a = nd.arange(-LARGE_X // 2, LARGE_X // 2 + 1, dtype=np.int64)
    b = nd.mean(a, axis=0)
    assert b == 0
Example #34
import d2lzh as d2l
from mxnet import autograd, nd


# xyplot
def xyplot(x, y, name):
    d2l.set_figsize(figsize=(15, 5))
    d2l.plt.figure()
    d2l.plt.plot(x.asnumpy(), y.asnumpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')


# ReLU
x = nd.arange(-8.0, 8.0, 0.1)  # works much like MATLAB's colon syntax
x.attach_grad()
with autograd.record():
    y = x.relu()
y.backward()
xyplot(x, y, 'ReLU')
xyplot(x, x.grad, 'grad of ReLU')

# sigmoid
with autograd.record():
    y = x.sigmoid()
y.backward()
xyplot(x, y, 'sigmoid')
xyplot(x, x.grad, 'grad of sigmoid')

# tanh
with autograd.record():
Example #35
def create_input_for_rounding_ops():
    # Creates a vector with values (-LARGE/2 .... -2, -1, 0, 1, 2, .... , LARGE/2-1)
    # then divides each element by 2 i.e. (-LARGE/4 .... -1, -0.5, 0, 0.5, 1, .... , LARGE/4-1)
    inp = nd.arange(-LARGE_X // 2, LARGE_X // 2, dtype=np.float64)
    inp = inp / 2
    return inp
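An illustration with a small stand-in value (hypothetical; the real tests define LARGE_X as a very large constant and import numpy as np):

LARGE_X = 8  # stand-in so the output fits on one line
print(create_input_for_rounding_ops())
# [-2.  -1.5 -1.  -0.5  0.   0.5  1.   1.5]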
Example #36
    def forward(self, x, gt_boxes=None):
        """
        :param x: ndarray (B,C,H,W)
        :return: 
        """
        def _split_box(x, num_outputs, axis, squeeze_axis=False):
            a = nd.split(x,
                         axis=axis,
                         num_outputs=num_outputs,
                         squeeze_axis=squeeze_axis)
            if not isinstance(a, (list, tuple)):
                return [a]
            return a

        # first extract features with the base network
        feat = self.features(x)

        # feed them into the RPN
        if autograd.is_training():
            # training path
            img = nd.zeros_like(x)
            rpn_score, rpn_box, raw_rpn_score, raw_rpn_box, anchors = self.rpn(
                feat, img)
            # sample the outputs
            rpn_box, samples, matches = self.sampler(rpn_box, rpn_score,
                                                     gt_boxes)
        else:
            # inference path
            # output shape (B,N,4)
            _, rpn_box = self.rpn(feat, x)
        # sample the output region proposals
        # these RoIs are passed on to the later stages
        # rois shape = (B,self._num_sampler,4),

        num_roi = (self._num_sample if autograd.is_training()
                   else self._rpn_test_post_nms)

        # flatten the rois to 2D and prepend the batch index
        with autograd.pause():
            roi_batchid = nd.arange(0,
                                    self._max_batch,
                                    repeat=num_roi,
                                    ctx=rpn_box.context)

            rpn_roi = nd.concat(
                *[roi_batchid.reshape((-1, 1)),
                  rpn_box.reshape((-1, 4))],
                dim=-1)
            rpn_roi = nd.stop_gradient(rpn_roi)

        # RoI pooling layer
        if self._roi_mode == 'pool':
            # (Batch*num_roi,channel,H,W)
            pool_feat = nd.ROIPooling(feat, rpn_roi, self._roi_size,
                                      1 / self._stride)

        elif self._roi_mode == 'align':
            pool_feat = nd.contrib.ROIAlign(feat,
                                            rpn_roi,
                                            self._roi_size,
                                            1 / self._stride,
                                            sample_ratio=2)
        else:
            raise ValueError("Invalid roi mode: {}".format(self._roi_mode))

        top_feat = self.top_features(pool_feat)
        avg_feat = self.global_avg_pool(top_feat)
        # class prediction and box regression
        # output shape (B*num_roi,(num_cls+1)) -> (B,N,C)
        cls_pred = self.class_predictor(avg_feat)
        # output shape (B*num_roi,(num_cls)*4) -> (B,N,C,4)
        box_pred = self.bbox_predictor(avg_feat)

        cls_pred = cls_pred.reshape(
            (self._max_batch, num_roi, self.num_class + 1))
        box_pred = box_pred.reshape(
            (self._max_batch, num_roi, self.num_class, 4))

        # training path
        if autograd.is_training():

            return (cls_pred, box_pred, rpn_box, samples, matches,
                    raw_rpn_score, raw_rpn_box, anchors)
        # inference path
        # remaining step: fold the predicted classes and offsets back into the input RoIs
        else:
            # output predictions for every class directly
            # cls_id (B,N,C) scores(B,N,C)
            cls_ids, scores = self.cls_decoder(nd.softmax(cls_pred, axis=-1))

            # move all C classes to the second dimension
            # (B,N,C)  -----> (B,N,C,1) -------> (B,C,N,1)
            cls_ids = cls_ids.transpose((0, 2, 1)).reshape((0, 0, 0, 1))
            # (B,N,C)  -----> (B,N,C,1) -------> (B,C,N,1)
            scores = scores.transpose((0, 2, 1)).reshape((0, 0, 0, 1))
            # (B,N,C,4) -----> (B,C,N,4),
            box_pred = box_pred.transpose((0, 2, 1, 3))

            rpn_boxes = _split_box(rpn_box,
                                   num_outputs=self._max_batch,
                                   axis=0,
                                   squeeze_axis=False)
            cls_ids = _split_box(cls_ids,
                                 num_outputs=self._max_batch,
                                 axis=0,
                                 squeeze_axis=True)
            scores = _split_box(scores,
                                num_outputs=self._max_batch,
                                axis=0,
                                squeeze_axis=True)
            box_preds = _split_box(box_pred,
                                   num_outputs=self._max_batch,
                                   axis=0,
                                   squeeze_axis=True)

            results = []
            # run the decoder and NMS separately for each batch element
            for cls_id, score, box_pred, rpn_box in zip(
                    cls_ids, scores, box_preds, rpn_boxes):
                # box_pred(C,N,4)   rpn_box(1,N,4)   box (C,N,4)
                box = self.box_decoder(box_pred, self.box_to_center(rpn_box))

                # cls_id (C,N,1) score (C,N,1) box (C,N,4)
                # result (C,N,6)
                res = nd.concat(*[cls_id, score, box], dim=-1)
                # NMS -> (C,self.nms_topk,6)
                res = nd.contrib.box_nms(res,
                                         overlap_thresh=self.nms_thresh,
                                         valid_thresh=0.0001,
                                         topk=self.nms_topk,
                                         coord_start=2,
                                         score_index=1,
                                         id_index=0,
                                         force_suppress=True)

                res = res.reshape((-3, 0))
                results.append(res)

            results = nd.stack(*results, axis=0)
            ids = nd.slice_axis(results, axis=-1, begin=0, end=1)
            scores = nd.slice_axis(results, axis=-1, begin=1, end=2)
            bboxes = nd.slice_axis(results, axis=-1, begin=2, end=6)

        # outputs: score, bbox
        return ids, scores, bboxes
Example #37
from mxnet import autograd, nd

x = nd.arange(4).reshape((4, 1))

x.attach_grad()

print(autograd.is_training())
with autograd.record():
    print(autograd.is_training())
    y = 2 * nd.dot(x.T, x)
y.backward()
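Since y = 2 * x.T @ x, the gradient should be exactly 4 * x, which can be verified:

assert (x.grad - 4 * x).norm().asscalar() == 0
print(x.grad)  # [[0.], [4.], [8.], [12.]]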
Example #38
def produce_dataset(size):
    X = nd.arange(start=1, stop=size+1, step=1.0)
    return X
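For example:

print(produce_dataset(4))  # [1. 2. 3. 4.]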
Example #39
"""
Created on Mon Oct 19 14:43:02 2020

@author: DER
"""
""" 3.8.1隐藏层 """
""" 3.8.2激活函数 """
import d2lzh as d2l
from mxnet import autograd, nd


def xyplot(x_vals, y_vals, name):
    d2l.set_figsize(figsize=(5, 2.5))
    d2l.plt.plot(x_vals.asnumpy(), y_vals.asnumpy())
    d2l.plt.xlabel("x")
    d2l.plt.ylabel(name + "(x)")


x = nd.arange(-8.0, 8.0, 0.1)
x.attach_grad()
with autograd.record():
    y = x.relu()
xyplot(x, y, "relu")

y.backward()
xyplot(x, x.grad, "grad of relu")

with autograd.record():
    y = x.sigmoid()
xyplot(x, y, "sigmoid")
Example #40
from mxnet import nd
from mxnet.gluon import nn


def pool2d(X, pool_size, mode='max'):  # header reconstructed from the calls below
    p_h, p_w = pool_size
    Y = nd.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            if mode == 'max':
                Y[i, j] = X[i:i + p_h, j:j + p_w].max()
            elif mode == 'avg':
                Y[i, j] = X[i:i + p_h, j:j + p_w].mean()
    return Y


X = nd.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
print(pool2d(X, (2, 2)))

print(pool2d(X, (2, 2), 'avg'))

X = nd.arange(16).reshape((1, 1, 4, 4))
print(X)

pool2d = nn.MaxPool2D(3)
pool2d(X)  # the pooling layer has no model parameters, so no initialization call is needed

pool2d = nn.MaxPool2D(3, padding=1, strides=2)
print(pool2d(X))

pool2d = nn.MaxPool2D((2, 3), padding=(1, 2), strides=(2, 3))
print(pool2d(X))

X = nd.concat(X, X + 1, dim=1)
print(X)

pool2d = nn.MaxPool2D(3, padding=1, strides=2)
Example #41
def test_split():
    a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)
    outs = nd.split(a, num_outputs=SMALL_Y, axis=1)
    result = sum(1 for i, v in enumerate(outs) if i == v[0].asnumpy())
    assert result == a.shape[1]
Example #42
def check_take():
    a = nd.ones(shape=LARGE_X)
    idx = nd.arange(LARGE_X - 1000, LARGE_X)
    res = nd.take(a, idx)
    assert np.sum(res.asnumpy() == 1) == res.shape[0]
Example #43
# x = nd.arange(12).reshape(3, 4)
# print(x, x ** 2, (x ** 2).sum())
# print(x < 5)


def dropout(X, drop_prob):
    assert 0 <= drop_prob <= 1

    keep_prob = 1 - drop_prob
    if keep_prob == 0:
        return X.zeros_like()
    mask = nd.random.uniform(0, 1, X.shape) < keep_prob
    return mask * X / keep_prob


X = nd.arange(16).reshape((2, 8))
print(dropout(X, 0))
print(dropout(X, 0.5))
print(dropout(X, 1))

num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256

W1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens1))
b1 = nd.zeros(num_hiddens1)
W2 = nd.random.normal(scale=0.01, shape=(num_hiddens1, num_hiddens2))
b2 = nd.zeros(num_hiddens2)
W3 = nd.random.normal(scale=0.01, shape=(num_hiddens2, num_outputs))
b3 = nd.zeros(num_outputs)

params = [W1, b1, W2, b2, W3, b3]
for param in params:
    param.attach_grad()