Example #1
def gru(_inputs, initial_state, *parameters):
    # _inputs: a list with length num_steps,
    # corresponding element: batch_size * input_dim matrix

    H = initial_state

    [W_xz, W_hz, b_z,
     W_xr, W_hr, b_r,
     W_xh, W_hh, b_h,
     W_hy, b_y] = parameters

    _outputs = []

    for X in _inputs:
        # compute update gate from input and last/initial hidden state
        update_gate = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)
        # compute reset gate from input and last/initial hidden state
        reset_gate = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)
        # compute candidate hidden state from input, reset gate and last/initial hidden state
        H_candidate = nd.tanh(nd.dot(X, W_xh) + reset_gate * nd.dot(H, W_hh) + b_h)
        # compute hidden state from candidate hidden state and last hidden state
        H = update_gate * H + (1 - update_gate) * H_candidate
        # compute output from hidden state
        Y = nd.dot(H, W_hy) + b_y
        _outputs.append(Y)

    return _outputs, H
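A minimal usage sketch for the gru function above; the shapes, the random parameter initialization, and the names input_dim/hidden_dim/output_dim are illustrative assumptions, not part of the original example.

# Hypothetical usage of gru() above (shapes chosen only for illustration).
from mxnet import nd

batch_size, num_steps, input_dim, hidden_dim, output_dim = 4, 5, 10, 16, 10
params = [
    nd.random.normal(scale=0.01, shape=(input_dim, hidden_dim)),   # W_xz
    nd.random.normal(scale=0.01, shape=(hidden_dim, hidden_dim)),  # W_hz
    nd.zeros(hidden_dim),                                          # b_z
    nd.random.normal(scale=0.01, shape=(input_dim, hidden_dim)),   # W_xr
    nd.random.normal(scale=0.01, shape=(hidden_dim, hidden_dim)),  # W_hr
    nd.zeros(hidden_dim),                                          # b_r
    nd.random.normal(scale=0.01, shape=(input_dim, hidden_dim)),   # W_xh
    nd.random.normal(scale=0.01, shape=(hidden_dim, hidden_dim)),  # W_hh
    nd.zeros(hidden_dim),                                          # b_h
    nd.random.normal(scale=0.01, shape=(hidden_dim, output_dim)),  # W_hy
    nd.zeros(output_dim),                                          # b_y
]
inputs = [nd.random.normal(shape=(batch_size, input_dim)) for _ in range(num_steps)]
H0 = nd.zeros((batch_size, hidden_dim))
outputs, H = gru(inputs, H0, *params)  # outputs: num_steps matrices of shape (batch_size, output_dim)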
Example #2
def yolo2_forward(x, num_class, anchor_scales):
    """Transpose/reshape/organize convolution outputs."""
    stride = num_class + 5
    # transpose and reshape, 4th dim is the number of anchors
    x = x.transpose((0, 2, 3, 1))
    x = x.reshape((0, 0, 0, -1, stride))
    # now x is (batch, m, n, num_anchors, stride), stride = num_class + 1 (object score) + 4 (coordinates)
    # class probs
    cls_pred = x.slice_axis(begin=0, end=num_class, axis=-1)
    # object score
    score_pred = x.slice_axis(begin=num_class, end=num_class + 1, axis=-1)
    score = nd.sigmoid(score_pred)
    # center prediction, in range(0, 1) for each grid
    xy_pred = x.slice_axis(begin=num_class + 1, end=num_class + 3, axis=-1)
    xy = nd.sigmoid(xy_pred)
    # width/height prediction
    wh = x.slice_axis(begin=num_class + 3, end=num_class + 5, axis=-1)
    # convert x, y to positions relative to image
    x, y = transform_center(xy)
    # convert w, h to width/height relative to image
    w, h = transform_size(wh, anchor_scales)
    # cid is the argmax channel
    cid = nd.argmax(cls_pred, axis=-1, keepdims=True)
    # convert to corner format boxes
    half_w = w / 2
    half_h = h / 2
    left = nd.clip(x - half_w, 0, 1)
    top = nd.clip(y - half_h, 0, 1)
    right = nd.clip(x + half_w, 0, 1)
    bottom = nd.clip(y + half_h, 0, 1)
    output = nd.concat(*[cid, score, left, top, right, bottom],
                       dim=4)  # why do left and top contain so many zeros?
    return output, cls_pred, score, nd.concat(*[xy, wh], dim=4)
Example #3
def class_and_score_forward(x):
    class_part = nd.slice_axis(x, begin=0, end=3, axis=-1)
    concentration_part = nd.slice_axis(x, begin=3, end=5, axis=-1)

    class_part = nd.sigmoid(class_part)
    concentration_part = nd.sigmoid(concentration_part)
    return class_part, concentration_part
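A quick illustrative call, assuming only that the last axis of x carries 5 channels (3 class channels followed by 2 concentration channels); the input shape below is made up.

from mxnet import nd

x = nd.random.normal(shape=(2, 4, 5))
class_part, concentration_part = class_and_score_forward(x)
# class_part: (2, 4, 3); concentration_part: (2, 4, 2); both squashed into (0, 1) by the sigmoid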
Example #4
def lstm_rnn(inputs, state_h, state_c, *params):
    '''

    :param inputs: inputs, one matrix per time step
    :param state_h: hidden state (output) from the previous time step
    :param state_c: cell state from the previous time step
    :param params: parameter list
    :return: outputs
    Input gate:           I_t = σ(X_t * W_xi + H_{t-1} * W_hi + b_i)
    Forget gate:          F_t = σ(X_t * W_xf + H_{t-1} * W_hf + b_f)
    Output gate:          O_t = σ(X_t * W_xo + H_{t-1} * W_ho + b_o)
    Candidate cell state: C~_t = tanh(X_t * W_xc + H_{t-1} * W_hc + b_c)
    Output:               Y_t = H_t * W_hy + b_y
    '''
    [
        W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,
        W_hy, b_y
    ] = params
    H = state_h  # hidden state from the previous time step
    C = state_c  # cell state, carried over to the next time step
    outputs = []
    for X in inputs:
        I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)  # input gate
        C_tilda = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) +
                          b_c)  # candidate cell state: how much new information may enter the memory
        F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) +
                       b_f)  # forget gate: how much of the previous cell state is kept
        O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)  # output gate
        C = F * C + C_tilda * I  # updated cell state, passed to the next time step
        H = O * C.tanh()
        Y = nd.dot(H, W_hy) + b_y  # output at this time step
        outputs.append(Y)
    return (outputs, H, C)
Example #5
    def forward_single(self, feature, data, begin_state):
        """ unroll one step

        Parameters
        ----------
        feature: an NDArray with shape [n, d].
        data: an NDArray with shape [n, b, d].
        begin_state: an NDArray with shape [n, b, d].

        Returns
        -------
        output: output of the cell, which is an NDArray with shape [n, b, d]
        states: a list of hidden states (list of hidden units with shape [n, b, d]) of RNNs.
        
        """
        if begin_state is None:
            num_nodes, batch_size, _ = data.shape
            begin_state = [nd.zeros((num_nodes, batch_size, self.hidden_size), ctx=feature.context)]

        prev_state = begin_state[0]
        data_and_state = nd.concat(data, prev_state, dim=-1)
        z = nd.sigmoid(self.dense_z(feature, data_and_state))
        r = nd.sigmoid(self.dense_r(feature, data_and_state))

        state = z * prev_state + (1 - z) * nd.tanh(self.dense_i2h(feature, data) + self.dense_h2h(feature, r * prev_state))
        return state, [state]
Example #6
    def nodeforward(self, x, cs, hs, ctx):
        x = nd.reshape(x, (self.dim_h, ))
        _Ui = nd.zeros((self.dim_h, ), ctx=ctx)
        _Uo = nd.zeros((self.dim_h, ), ctx=ctx)
        _Uu = nd.zeros((self.dim_h, ), ctx=ctx)
        _Uf = [nd.zeros((self.dim_h, ), ctx=ctx) for i in range(len(cs))]

        for idx in range(len(cs)):
            _Ui = nd.add(_Ui, nd.dot(self.Uis[idx].data(), hs[idx]))
            _Uo = nd.add(_Uo, nd.dot(self.Uos[idx].data(), hs[idx]))
            _Uu = nd.add(_Uu, nd.dot(self.Uus[idx].data(), hs[idx]))
            for j in range(len(cs)):
                _Uf[idx] = nd.add(_Uf[idx],
                                  nd.dot(self.Ufs[idx][j].data(), hs[j]))

        i = nd.sigmoid(
            nd.add(nd.add(nd.dot(self.Wi.data(), x), _Ui), self.bi.data()))
        o = nd.sigmoid(
            nd.add(nd.add(nd.dot(self.Wo.data(), x), _Uo), self.bo.data()))
        f = [
            nd.sigmoid(
                nd.add(nd.add(nd.dot(self.Wf.data(), x), _Uf[idx]),
                       self.bf.data())) for idx in range(len(cs))
        ]
        u = nd.tanh(
            nd.add(nd.add(nd.dot(self.Wu.data(), x), _Uu), self.bu.data()))

        c = nd.zeros((self.dim_h, ), ctx=ctx)
        for idx in range(len(cs)):
            c = nd.add(c, nd.multiply(f[idx], cs[idx]))
        c = nd.add(nd.multiply(i, u), c)

        h = nd.multiply(o, nd.tanh(c))
        return c, h
Example #7
def lstm(_inputs, initial_state_h, initial_state_c, *parameters):
    # _inputs: a list with length num_steps,
    # corresponding element: batch_size * input_dim matrix

    H = initial_state_h  # hidden state
    C = initial_state_c  # memory cell

    [W_xi, W_hi, b_i,
     W_xf, W_hf, b_f,
     W_xo, W_ho, b_o,
     W_xc, W_hc, b_c,
     W_hy, b_y] = parameters

    _outputs = []

    for X in _inputs:
        # compute INPUT gate from input and last/initial hidden state
        input_gate = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)
        # compute FORGET gate from input and last/initial hidden state
        forget_gate = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)
        # compute OUTPUT gate from input and last/initial hidden state
        output_gate = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)
        # compute memory cell candidate from input and last/initial hidden state
        memory_cell_candidate = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) + b_c)
        # compute memory cell from last memory cell and memory cell candidate
        C = forget_gate * C + input_gate * memory_cell_candidate
        # compute hidden state from output gate and memory cell
        H = output_gate * nd.tanh(C)
        # compute output from hidden state
        Y = nd.dot(H, W_hy) + b_y
        _outputs.append(Y)

    return _outputs, H, C
Example #8
File: rbm.py Project: yixuan/cdtau
    def accumulate_grad2_ucd(self, dat, min_mcmc=1, max_mcmc=100):
        # Initial value for Gibbs sampling
        N = dat.shape[0]
        ind = np.random.choice(N, 1)[0]
        v0 = dat[ind, :]

        # Gibbs samples
        samp = UnbiasedRBMSampler(self.w, self.b, self.c, ctx=self.ctx)
        vhist, vchist, disc = samp.sample(v0,
                                          min_steps=min_mcmc,
                                          max_steps=max_mcmc)

        burnin = min_mcmc - 1
        tau = vchist.shape[0]
        remain = tau - burnin

        vk = vhist[burnin, :]
        hk_mean = nd.sigmoid(nd.dot(self.w.T, vk) + self.c)

        hhist_mean = nd.sigmoid(nd.dot(vhist[-remain:, :], self.w) + self.c)
        hchist_mean = nd.sigmoid(nd.dot(vchist[-remain:, :], self.w) + self.c)

        # Second term
        self.db2 += vk + nd.sum(vhist[-remain:, :], axis=0) -\
                    nd.sum(vchist[-remain:, :], axis=0)
        self.dc2 += hk_mean + nd.sum(hhist_mean, axis=0) -\
                    nd.sum(hchist_mean, axis=0)
        self.dw2 += nd.dot(vk.reshape(-1, 1), hk_mean.reshape(1, -1)) +\
                    nd.dot(vhist[-remain:, :].T, hhist_mean) -\
                    nd.dot(vchist[-remain:, :].T, hchist_mean)

        return tau, disc
Example #9
def train(train_data,
          test_data,
          net,
          loss,
          trainer,
          ctx,
          num_epochs,
          best_score=0.78):
    print("Start training on ", ctx)
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    best_ths = 0.5
    for epoch in range(num_epochs):
        train_loss, train_acc, n, m = 0.0, 0.0, 0.0, 0.0
        train_data.reset()
        start = time()

        for i, batch in enumerate(train_data):
            data, label, batch_size = get_batch(batch, ctx)
            losses = []
            with autograd.record():
                outputs = [net(X).reshape(-1) for X in data]
                losses = [loss(yhat, y) for yhat, y in zip(outputs, label)]
            for l in losses:
                l.backward()
            train_acc += sum([((nd.sigmoid(yhat) > 0.5) == y).sum().asscalar()
                              for yhat, y in zip(outputs, label)])
            train_loss += sum([l.sum().asscalar() for l in losses])
            n += batch_size
            m += sum([y.size for y in label])
            trainer.step(batch_size)

        print("Epoch %d. Loss: %.5f, Train acc %.5f, Time %.1f sec" %
              (epoch, train_loss / n, train_acc / m, time() - start))
        test_data.reset()
        for i, batch in enumerate(test_data):
            data = batch.data[0]
            label = batch.label[0]
            data = data.as_in_context(ctx[0])
            label = label.as_in_context(ctx[0])
            yhat = net(data).reshape(-1)
            pred = nd.sigmoid(yhat)
            nd.waitall()
            if i == 0:
                cpl_pred = pred
                cpl_label = label
            else:
                cpl_pred = nd.concat(cpl_pred, pred, dim=0)
                cpl_label = nd.concat(cpl_label, label, dim=0)

        pred = cpl_pred.asnumpy()
        label = cpl_label.asnumpy()
        scores, ths = score_plt(pred, label)
        print('score=', max(scores))
        print('threshold=', ths[scores.index(max(scores))])
        plt.plot(ths, scores, color='green')
        plt.show()
Example #10
def gru_rnn(inputs, h, temperature=1.0):
    outputs = []
    for X in inputs:
        z = nd.sigmoid(nd.dot(X, Wxz) + nd.dot(h, Whz) + bz)
        r = nd.sigmoid(nd.dot(X, Wxr) + nd.dot(h, Whr) + br)
        g = nd.tanh(nd.dot(X, Wxh) + nd.dot(r * h, Whh) + bh)
        h = z * h + (1 - z) * g

        yhat_linear = nd.dot(h, Why) + by
        yhat = softmax(yhat_linear, temperature=temperature)
        outputs.append(yhat)
    return (outputs, h)
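The gru_rnn example above calls a softmax helper that is not shown; a minimal sketch of a temperature-scaled softmax consistent with how it is called here (the original implementation may differ):

from mxnet import nd

def softmax(y_linear, temperature=1.0):
    # temperature-scaled softmax over the last axis; a higher temperature flattens the distribution
    lin = (y_linear - nd.max(y_linear, axis=-1, keepdims=True)) / temperature  # shift for numerical stability
    exp = nd.exp(lin)
    return exp / nd.sum(exp, axis=-1, keepdims=True)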
Example #12
def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H = state
    outputs = []
    for X in inputs:
        Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)
        R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)
        H_ = nd.tanh(nd.dot(X, W_xh) + R * nd.dot(H, W_hh) + b_h)
        H = Z * H + (1 - Z) * H_
        Y = nd.dot(H, W_hq) + b_q
        outputs.append(Y)
    return outputs, H
Example #13
    def forward(self, x):
        # learned gate that mixes an additive path and a multiplicative (log-space) path
        if self.dependent_G:
            g = nd.sigmoid(nd.dot(x, self.G.data()))
        else:
            g = nd.sigmoid(self.G.data())

        # weight matrices softly constrained towards {-1, 0, 1} via tanh * sigmoid
        W0 = nd.tanh(self.W0_hat.data()) * nd.sigmoid(self.M0_hat.data())
        W1 = nd.tanh(self.W1_hat.data()) * nd.sigmoid(self.M1_hat.data())
        a = nd.dot(x, W0)                                   # additive path
        m = nd.exp(nd.dot(nd.log(nd.abs(x) + 1e-10), W1))   # multiplicative path computed in log space
        y = g * a + (1 - g) * m

        return y
Example #14
def lstm(inputs, state, params):
    W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params
    H, C = state
    outputs = []
    for X in inputs:
        I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)
        F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)
        O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)
        C_ = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) + b_c)
        C = F * C + I * C_
        H = O * nd.tanh(C)
        Y = nd.dot(H, W_hq) + b_q
        outputs.append(Y)
    return outputs, (H, C)
Example #15
    def predict_LP(self, batch_out):
        batch_out = self.slice_out(batch_out)
        out = nd.concat(nd.sigmoid(batch_out[0]), *batch_out[1:], dim=-1)[0]
        # (10L, 16L, 10L)
        best_index = out[:, :, 0].reshape(-1).argmax(axis=0)
        out = out.reshape((-1, 10))
        pred = out[best_index].reshape(-1)  # best out

        pred[1:4] *= 1000

        for i in range(3):
            p = (nd.sigmoid(pred[i + 4]) - 0.5) * 2 * self.LP_r_max[i]
            pred[i + 4] = p * math.pi / 180.
        return pred.asnumpy()
Example #16
def gru_rnn(inputs, H, *params):
    # inputs: num_steps matrices of shape batch_size * vocab_size
    # H: matrix of shape batch_size * hidden_dim
    # outputs: num_steps matrices of shape batch_size * vocab_size
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hy, b_y = params
    outputs = []
    for X in inputs:
        Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)
        R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)
        H_tilda = nd.tanh(nd.dot(X, W_xh) + R * nd.dot(H, W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        Y = nd.dot(H, W_hy) + b_y
        outputs.append(Y)
    return (outputs, H)
Example #17
    def gru(self, inputs, state):
        W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = self.params
        H = state
        outputs = []

        for X in inputs:
            Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)
            R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)
            H_tilda = nd.tanh(nd.dot(R * H, W_hh) + nd.dot(X, W_xh) + b_h)
            H = Z * H + (1 - Z) * H_tilda
            Y = nd.dot(H, W_hq) + b_q
            outputs.append(Y)

        return outputs, H
Example #18
    def forward(self, x=0):
        if mx.autograd.is_training():
            # sample a concrete (logistic-noise) gate during training
            u = nd.random.uniform(0, 1)
            s = nd.log(u) - nd.log(1 - u) + self._qz_loga.data()
            if self._temperature == 0:
                s = nd.sign(s)
            else:
                s = nd.sigmoid(s / self._temperature)
        else:
            # deterministic gate at inference time
            s = nd.sigmoid(self._qz_loga.data())

        # stretch to (limit_lo, limit_hi), then clip to [0, 1] (hard-concrete gate)
        s = s * (self._limit_hi - self._limit_lo) + self._limit_lo

        return nd.minimum(1, nd.maximum(s, 0))
Example #19
def lstm(inputs, state, params):
    # inputs and outputs are both lists of num_steps matrices of shape (batch_size, vocab_size)
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params
    (H, C) = state
    outputs = []
    for X in inputs:
        I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)
        F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)
        O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)
        C_tilda = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) + b_c)
        C = F * C + I * C_tilda
        H = C.tanh() * O
        Y = nd.dot(H, W_hq) + b_q
        outputs.append(Y)
    return outputs, (H, C)
Example #20
def volume_render_radiance_field(radiance_field, depth_values, ray_directions, radiance_field_noise_std=0.0,
                                 white_background=False):
    # TESTED
    one_e_10 = nd.array([1e10], dtype=ray_directions.dtype, ctx=ray_directions.context).broadcast_to(depth_values[..., :1].shape)
    dists = nd.concat(*[depth_values[..., 1:] - depth_values[..., :-1], one_e_10], dim=-1)
    dists = dists * ray_directions[..., None, :].norm(ord=2, axis=-1)

    rgb = nd.sigmoid(radiance_field[..., :3])
    noise = 0.0
    if radiance_field_noise_std > 0.0:
        noise = nd.random.normal(0.0, 1.0, shape=radiance_field[..., 3].shape,
                                 dtype=radiance_field.dtype, ctx=radiance_field.context)
        noise = noise * radiance_field_noise_std
    sigma_a = nd.relu(radiance_field[..., 3] + noise)
    alpha = 1.0 - nd.exp(-sigma_a * dists)
    weights = alpha * cumprod_exclusive_gluon(1.0 - alpha + 1e-10)

    rgb_map = weights[..., None] * rgb
    rgb_map = rgb_map.sum(axis=-2)
    depth_map = weights * depth_values
    depth_map = depth_map.sum(axis=-1)
    # depth_map = (weights * depth_values).sum(dim=-1)
    acc_map = weights.sum(axis=-1)
    disp_map = 1.0 / nd.maximum(1e-10 * nd.ones_like(depth_map), depth_map / acc_map)

    if white_background:
        rgb_map = rgb_map + (1.0 - acc_map[..., None])

    return rgb_map, disp_map, acc_map, weights, depth_map
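volume_render_radiance_field relies on a cumprod_exclusive_gluon helper that is not shown; a minimal sketch of an exclusive cumulative product along the last axis, matching how it is used above (an assumption, not the original helper):

from mxnet import nd

def cumprod_exclusive_gluon(tensor):
    # out[..., i] = prod(tensor[..., :i]), with out[..., 0] = 1 (exclusive cumulative product)
    slices = [nd.slice_axis(tensor, axis=-1, begin=i, end=i + 1)
              for i in range(tensor.shape[-1])]
    parts = [nd.ones_like(slices[0])]
    for s in slices[:-1]:
        parts.append(parts[-1] * s)
    return nd.concat(*parts, dim=-1)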
Example #21
def plot_attention(net, n_samples=10, mean=False):
    from matplotlib import pyplot as plt
    import seaborn as sns
    sns.set()
    idx = np.random.choice(np.arange(len(va_x)), size=n_samples, replace=False)
    _dat = [va_x[i] for i in idx]

    w_idx = []
    word = [[idx2word[x] for x in y] for y in _dat]
    original_txt = [va_origin[i] for i in idx]
    out, att = net(nd.array(_dat, ctx=context))
    print('attention shape = {}'.format(att.shape))
    _a = []
    _w = []
    for x, y, z in zip(word, att, original_txt):
        _idx = [i for i, _x in enumerate(x) if _x != 'PAD']
        _w.append(np.array([x[i] for i in _idx]))
        _a.append(np.array([y[i].asnumpy() for i in _idx]))

    _label = [va_y[i] for i in idx]
    _pred = (nd.sigmoid(out) > .5).asnumpy()

    fig, axes = plt.subplots(np.int(np.ceil(n_samples / 4)),
                             4,
                             sharex=False,
                             sharey=True)
    plt.subplots_adjust(hspace=1)
    if mean:
        fig.set_size_inches(20, 4)
        plt.subplots_adjust(hspace=5)
    else:
        fig.set_size_inches(20, 20)
        plt.subplots_adjust(hspace=1)
    cbar_ax = fig.add_axes([.91, .3, .04, .4])
Example #22
def cal_normalized_tp_pos_fp_neg(output, target, nclass, score_thresh):
    """mIoU"""
    # inputs are NDarray, output 4D, target 3D
    # the category 0 is ignored class, typically for background / boundary
    mini = 1
    maxi = 1  # nclass
    nbins = 1  # nclass

    predict = (nd.sigmoid(output).asnumpy() > score_thresh).astype(
        'int64')  # P
    # predict = (output.asnumpy() > 0).astype('int64')  # P
    if len(target.shape) == 3:
        target = nd.expand_dims(target, axis=1).asnumpy().astype('int64')  # T
    elif len(target.shape) == 4:
        target = target.asnumpy().astype('int64')  # T
    else:
        raise ValueError("Unknown target dimension")
    intersection = predict * (predict == target)  # TP
    tp = intersection.sum()
    fp = (predict * (predict != target)).sum()  # FP
    tn = ((1 - predict) * (predict == target)).sum()  # TN
    fn = ((predict != target) * (1 - predict)).sum()  # FN
    pos = tp + fn
    neg = fp + tn

    return tp, pos, fp, neg
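For reference, the counts returned above can be combined into a foreground IoU as sketched below; the aggregation and the dummy inputs are assumptions for illustration, not the original project's evaluation code.

from mxnet import nd

output = nd.random.normal(shape=(2, 1, 8, 8))        # raw logits, 4D
target = (nd.random.uniform(shape=(2, 8, 8)) > 0.5)  # binary mask, 3D
tp, pos, fp, neg = cal_normalized_tp_pos_fp_neg(output, target, nclass=1, score_thresh=0.5)
iou = tp / (pos + fp + 1e-10)  # fn = pos - tp, so union = tp + fp + fn = pos + fp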
Example #23
 def forward(self, S_a, S_b):
     M_a = self.maxpooling(S_a).flatten()
     M_b = self.maxpooling(S_b).flatten()
     gate_rate = nd.sigmoid(nd.dot(M_a, self.W_a.data()) + \
                         nd.dot(M_b, self.W_b.data()) + self.b.data())
     gated_output = gate_rate * M_a + (1 - gate_rate) * M_b
     return gated_output
Example #24
    def predict_LP(self, LP_batch_out):
        # LP_batch_out = self.fp16_2_fp32(LP_batch_out)
        LP_batch_out = self.merge_and_slice(LP_batch_out, self.LP_slice_point)

        LP_score = nd.sigmoid(LP_batch_out[0])
        LP_pose_xy = LP_batch_out[1]
        LP_pose_z = LP_batch_out[2]
        LP_pose_r = LP_batch_out[3]
        LP_batch_out = nd.concat(
            LP_score, LP_pose_xy, LP_pose_z, LP_pose_r, dim=-1)

        LP_batch_out = nd.split(LP_batch_out, axis=0, num_outputs=len(LP_batch_out))

        LP_batch_pred = []
        for i, out in enumerate(LP_batch_out):
            best_index = LP_score[i].reshape(-1).argmax(axis=0)
            out = out.reshape((-1, 7))

            pred = out[best_index][0]  # best out
            pred[1:7] = self.LP_pose_activation(pred[1:7])
            LP_batch_pred.append(nd.expand_dims(pred, axis=0))

        LP_batch_pred = nd.concat(*LP_batch_pred, dim=0)

        return LP_batch_pred.asnumpy()
Example #25
    def forward(self, x, x_mask=None):
        bs, sl, vec = tuple(x.shape)  # batch size, sequence length, vector dim
        direct_mask = get_direct_mask(bs, sl, self.direction)
        #x_mask_tile = x_mask.expand_dims(1)
        #mask = np.logical_and(direct_mask, x_mask_tile).astype(float)
        mask = direct_mask.astype('float32')
        x_map = self.linear1(x)  # bs, sl, vec
        #x_map_tile = x_map.expand_dims(1) #
        x_map_tile = nd.tile(x_map.expand_dims(1),
                             (1, sl, 1, 1))  # bs, sl, sl, vec
        x_map_drop = self.dropout(x_map)

        dependent = self.linear2(x_map_drop)
        dependent_etd = dependent.expand_dims(1)
        head = self.linear3(x_map_drop)
        head_etd = head.expand_dims(2)
        logits = scaled_tanh(dependent_etd + head_etd + self.f_bias, 5.0)

        logits_masked = exp_mask_for_tensor(logits, mask)
        attn_score = nd.softmax(logits_masked, 2)
        attn_score = mask_for_tensor(attn_score, mask)

        attn_result = (attn_score * x_map_tile).nansum(2)
        fusion_gate = nd.sigmoid(
            self.linear4(x_map) + self.linear5(attn_result) + self.o_bias)
        output = fusion_gate * x_map + (1 - fusion_gate) * attn_result
        return output
Example #26
def get_pred(net, loss, idx2word, iterator, context):
    pred_sa = []
    label_sa = []
    va_text = []
    iterator.reset()
    for i, batch in enumerate(iterator):
        if i % 100 == 0:
            print('i = {}'.format(i))
        data = batch.data[0].as_in_context(context)
        label = batch.data[1].as_in_context(context)
        output, _ = net(data)
        L = loss(output, label)
        pred = (nd.sigmoid(output) > 0.5).reshape((-1, ))
        pred_sa.extend(pred.asnumpy())
        label_sa.extend(label.asnumpy())
        va_text.extend([
            ' '.join([
                idx2word[np.int(x)] for x in y.asnumpy()
                if idx2word[np.int(x)] != 'PAD'
            ]) for y in data
        ])
    pred_sa_pd = pd.DataFrame(pred_sa, columns=['pred_sa'])
    label_pd = pd.DataFrame(label_sa, columns=['label'])
    text_pd = pd.DataFrame(va_text, columns=['text'])
    res = pd.concat([text_pd, pred_sa_pd, label_pd], axis=1)
    return res
Example #27
    def compute(self, input):
        """Compute the output of the neural net given the input.
        The input has to be a ndarray of shape input_size or (input_size, N) where N is the batch_size."""
        if len(input.shape) == 1:
            input = input.reshape((input.shape[0], 1))

        X = input[0]
        Y = input[1]
        for i in range(self.n):
            _X = nd.cos(self.thetas[i]) * X + nd.sin(
                self.thetas[i]) * Y  # Sym / (cos(O)u_x + sin(O)u_y)
            Y = -nd.sin(self.thetas[i]) * X + nd.cos(self.thetas[i]) * Y

            X = nd.abs(_X + self.biases[i]) - self.biases[i]

            if (False):  # optimize
                _X = nd.cos(self.thetas[i]) * X - nd.sin(
                    self.thetas[i]) * Y  # Sym / (cos(O)u_x + sin(O)u_y)
                Y = nd.sin(self.thetas[i]) * X + nd.cos(self.thetas[i]) * Y
                X = _X

        _X = nd.cos(self.thetas[self.n]) * X + nd.sin(self.thetas[self.n]) * Y
        Y = -nd.sin(self.thetas[self.n]) * X + nd.cos(self.thetas[self.n]) * Y

        return nd.sigmoid(Y - _X)
Example #28
def check_tbox(image, label):
    plt.clf()
    rgb_mean = RGB_MEAN.as_in_context(image.context)
    rgb_std = RGB_STD.as_in_context(image.context)
    assert label.shape == (1, 5), \
        "shape of label expected [1, 5], but given {}".format(label.shape)
    assert image.shape == (3, 256, 256), \
        "shape of image expected [3, 256, 256], given {}".format(image.shape)
    scores_tmp = nd.zeros((1, 16, 16, 3, 1))
    label = label.expand_dims(axis=0)
    tid, tscore, tbox, _ = yolo2_target(scores_tmp, label, anchor_scales)
    t_xy = tbox.slice_axis(begin=0, end=2, axis=-1)
    t_wh = tbox.slice_axis(begin=2, end=4, axis=-1)
    xy = nd.sigmoid(t_xy)
    x, y = transform_center(xy)
    w, h = transform_size(t_wh, anchor_scales)

    left = nd.clip(x - w / 2, 0, 1)
    top = nd.clip(y - h / 2, 0, 1)
    right = nd.clip(x + w / 2, 0, 1)
    bottom = nd.clip(y + h / 2, 0, 1)

    output = nd.concat(*[tid, tscore, left, top, right, bottom], dim=-1)
    out = nd.contrib.box_nms(output.reshape((0, -1, 6)))
    out = out.asnumpy()
    box = out[0][0][2:6] * np.array([image.shape[1], image.shape[2]] * 2)
    rect = box_to_rect(nd.array(box), 'green', 2)
    image = image.transpose((1, 2, 0))
    i0 = (image * rgb_std + rgb_mean).asnumpy()
    i0 = i0.clip(0, 255) / 255.
    plt.imshow(i0)
    plt.gca().add_patch(rect)
    plt.show()
    #plt.savefig('check_tbox.jpg')
    return box
Example #29
 def Route(self, x):
     # print x.context
     # b_mat = nd.repeat(self.b_mat.data(), repeats=x.shape[0], axis=0)#nd.stop_gradient(nd.repeat(self.b_mat.data(), repeats=x.shape[0], axis=0))
     b_mat = nd.zeros((x.shape[0], 1, self.num_cap, self.num_locations),
                      ctx=x.context)
     x_expand = nd.expand_dims(nd.expand_dims(x, axis=2), 2)
     w_expand = nd.repeat(nd.expand_dims(self.w_ij.data(x.context), axis=0),
                          repeats=x.shape[0],
                          axis=0)
     u_ = w_expand * x_expand
     u = nd.sum(u_, axis=1)
     # u_ = nd.square(w_expand - x_expand)
     # u = -nd.sum(u_, axis = 1)
     u_no_gradient = nd.stop_gradient(u)
     for i in range(self.route_num):
         # c_mat = nd.softmax(b_mat, axis=2)
         c_mat = nd.sigmoid(b_mat)
         if i == self.route_num - 1:
             s = nd.sum(u * c_mat, axis=-1)
         else:
             s = nd.sum(u_no_gradient * c_mat, axis=-1)
         v = squash(s, 1)
         if i != self.route_num - 1:
             v1 = nd.expand_dims(v, axis=-1)
             update_term = nd.sum(u_no_gradient * v1, axis=1, keepdims=True)
             b_mat = b_mat + update_term
             # b_mat = update_term
         # else:
         #    v = s
     return v
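The routing loop above uses a squash helper that is not shown; a minimal sketch of the standard capsule squashing non-linearity along a given axis, assumed to match the call squash(s, 1):

from mxnet import nd

def squash(s, axis):
    # v = (||s||^2 / (1 + ||s||^2)) * (s / ||s||), with the norm taken along `axis`
    squared_norm = nd.sum(nd.square(s), axis=axis, keepdims=True)
    scale = squared_norm / (1.0 + squared_norm) / nd.sqrt(squared_norm + 1e-9)
    return scale * s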
Example #30
 def mask_loss(self, mask_pred, mask_eoc, mask_target, matches, bt_target):
     samples = matches >= 0
     pos_num = samples.sum(axis=-1).asnumpy().astype('int')
     rank = (-matches).argsort(axis=-1)
     # pos_bboxes = []
     # pos_masks = []
     # mask_preds = []
     losses = []
     for i in range(mask_pred.shape[0]):
         if pos_num[i] == 0:
             losses.append(nd.zeros(shape=(1, ), ctx=mask_pred.context))
             continue
         idx = rank[i, :pos_num[i]]
         pos_bboxe = nd.take(bt_target[i], idx)
         area = (pos_bboxe[:, 3] - pos_bboxe[:, 1]) * (pos_bboxe[:, 2] -
                                                       pos_bboxe[:, 0])
         weight = self.gt_weidth * self.gt_height / area
         mask_gt = mask_target[i, matches[i, idx], :, :]
         mask_preds = nd.sigmoid(
             nd.dot(nd.take(mask_eoc[i], idx), mask_pred[i]))
         _, h, w = mask_preds.shape
         mask_preds = self.crop(pos_bboxe, h, w, mask_preds)
         loss = self.SBCELoss(mask_preds, mask_gt) * weight
         # loss = 0.5 * nd.square(mask_gt - mask_preds) / (mask_gt.shape[0]*mask_gt.shape[1]*mask_gt.shape[2])
         losses.append(nd.mean(loss))
     return nd.concat(*losses, dim=0)
Example #31
 def forward(self, inputs, state):
     """ forward function """
     h, = state
     outputs = []
     for x in inputs:
         z = nd.sigmoid(
             nd.dot(x, self.w_xz) + nd.dot(h, self.w_hz) + self.b_z)
         r = nd.sigmoid(
             nd.dot(x, self.w_xr) + nd.dot(h, self.w_hr) + self.b_r)
         h_tilda = nd.tanh(
             nd.dot(x, self.w_xh) + nd.dot(h, self.w_hh) + self.b_h)
         h = z * h + (1 - z) * h_tilda
         y = nd.dot(h, self.w_hq) + self.b_q
         outputs.append(y)
     y_hat = nd.concat(*outputs, dim=0)
     return y_hat, (h, )
Example #32
    def forward(self, current, previous, doc_encode):
        """[summary]

        Args:
            current ([type]): h_j (batch_size, sentence_hidden_size * 2)
            previous ([type]): s_j (batch_size, sentence_hidden_size * 2)
            doc_encode ([type]): d (batch_size, ndoc_dims)
        """
        # content: (batch_size, 1)
        content = self.content_encoder(current)
        # salience: (batch_size, sentence_hidden_size * 2)
        salience = self.salience_encoder(doc_encode)
        salience = current * salience
        # salience: (batch_size,)
        salience = nd.sum_axis(salience, -1)
        # salience: (batch_size, 1)
        salience = nd.expand_dims(salience, -1)

        # novelty: (batch_size, sentence_hidden_size * 2)
        novelty = self.novelty_encoder(nd.tanh(previous))
        novelty = current * novelty
        # novelty: (batch_size,)
        novelty = nd.sum_axis(novelty, -1)
        # novelty: (batch_size, 1)
        novelty = nd.expand_dims(novelty, -1)

        # P: (batch_size, 1)
        P = nd.sigmoid(content + salience - novelty)

        return P
Example #33
 def forward(self, x):
     x = self.layer(x)
     # Note that the loss function has the sigmoid operation for better numerical stability. When
     # doing inference, we need to add the sigmoid function to the model.
     if not autograd.is_training():
         x = nd.sigmoid(x)
     return x
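A hedged sketch of how such a layer is typically paired with gluon's SigmoidBinaryCrossEntropyLoss, which applies the sigmoid internally during training so the forward pass only adds it at inference time; the tiny model and batch below are placeholders for illustration, not the original project's training code.

from mxnet import autograd, gluon, nd

# Placeholder model and batch, only to show the pairing.
net = gluon.nn.Dense(1)
net.initialize()
data = nd.random.normal(shape=(4, 8))
label = nd.random.uniform(shape=(4, 1)) > 0.5

loss_fn = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)  # expects raw logits
with autograd.record():
    logits = net(data)             # no sigmoid while training, as noted in the forward above
    loss = loss_fn(logits, label)
loss.backward()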
Example #34
def lstm_rnn(inputs, h, c, temperature=1.0):
    outputs = []
    for X in inputs:
        g = nd.tanh(nd.dot(X, Wxg) + nd.dot(h, Whg) + bg)
        i = nd.sigmoid(nd.dot(X, Wxi) + nd.dot(h, Whi) + bi)
        f = nd.sigmoid(nd.dot(X, Wxf) + nd.dot(h, Whf) + bf)
        o = nd.sigmoid(nd.dot(X, Wxo) + nd.dot(h, Who) + bo)
        # update the memory cell and the hidden state
        c = f * c + i * g
        h = o * nd.tanh(c)
        # map the hidden state to the output distribution
        yhat_linear = nd.dot(h, Why) + by
        yhat = softmax(yhat_linear, temperature=temperature)
        outputs.append(yhat)
    return (outputs, h, c)