Code Example #1
File: flows.py  Project: rikrd/torchkit
    def __init__(self, in_dim, hidden_dim, out_dim):
        super(DenseSigmoidFlow, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim

        self.act_a = lambda x: nn_.softplus(x)
        self.act_b = lambda x: x
        self.act_w = lambda x: nn_.softmax(x, dim=3)
        self.act_u = lambda x: nn_.softmax(x, dim=3)

        self.u_ = Parameter(torch.Tensor(hidden_dim, in_dim))
        self.w_ = Parameter(torch.Tensor(out_dim, hidden_dim))

        self.reset_parameters()
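
A note on the activations above: in a (dense) sigmoid flow the per-unit slopes have to stay positive and the mixture weights have to lie on the simplex, which is presumably why act_a is a softplus and act_w / act_u are softmaxes. As a rough illustration only, here is a minimal NumPy sketch of the scalar sigmoid-flow transform (logit of a convex combination of sigmoids); it is an assumption-laden stand-in, not the torchkit implementation:

import numpy as np

np.random.seed(0)

def softplus(x):
    # numerically stable log(1 + exp(x))
    return np.maximum(x, 0.0) + np.log1p(np.exp(-np.abs(x)))

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_flow(x, pre_a, pre_b, pre_w, eps=1e-7):
    # monotonic scalar transform: logit of a convex combination of sigmoids
    a = softplus(pre_a)   # slopes constrained to be positive
    b = pre_b             # unconstrained shifts
    w = softmax(pre_w)    # mixture weights on the simplex
    y = np.clip(np.sum(w * sigmoid(a * x + b)), eps, 1 - eps)
    return np.log(y) - np.log(1 - y)

# toy check: the transform is strictly increasing in x
pre_a, pre_b, pre_w = (np.random.randn(4) for _ in range(3))
ys = [sigmoid_flow(x, pre_a, pre_b, pre_w) for x in np.linspace(-3, 3, 7)]
assert all(y1 < y2 for y1, y2 in zip(ys, ys[1:]))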
Code Example #2
    def position_aware_attn(self, hidden_mat, last_h, start1, ent1, start2,
                            end2, seq_len):
        tri_pos_list = []
        ent_pos_list = []

        for i in range(seq_len):
            tri_pos_list.append(io_utils.relative_position(start1, ent1, i))
            ent_pos_list.append(io_utils.relative_position(start2, end2, i))

        tri_pos_emb = self.position_embed(tri_pos_list)
        tri_pos_mat = ops.cat(tri_pos_emb, 1)
        ent_pos_emb = self.position_embed(ent_pos_list)
        ent_pos_mat = ops.cat(ent_pos_emb, 1)

        #expand_last_h = nn.cat([last_h] * seq_len, 1)
        # (birnn * 2 + pos_emb*2, seq_len)
        att_input = ops.cat([hidden_mat, tri_pos_mat, ent_pos_mat], 0)
        hidden = self.attn_hidden(att_input)
        attn_out = self.attn_out(hidden)
        # (1, seq_len)
        attn_prob = nn.softmax(attn_out, dim=1)
        # (rnn_dim * 2, 1)
        rep = hidden_mat * dy.transpose(attn_prob)

        return rep
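
The tail of this snippet is the usual attention pattern: score every time step, softmax the scores into attn_prob, then take the probability-weighted sum of the hidden states. A minimal NumPy sketch of just that pooling step, assuming hidden_mat is (rnn_dim, seq_len) and the per-step scores form a (seq_len,) vector (shapes and names here are illustrative, not the project's API):

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def attention_pool(hidden_mat, scores):
    # hidden_mat: (rnn_dim, seq_len), scores: (seq_len,)
    # softmax the scores over time steps, then take the weighted sum of
    # hidden states, matching hidden_mat * transpose(attn_prob) above
    attn_prob = softmax(scores)      # (seq_len,), sums to 1
    return hidden_mat @ attn_prob    # (rnn_dim,)

# toy usage with made-up sizes
rnn_dim, seq_len = 6, 4
hidden_mat = np.random.randn(rnn_dim, seq_len)
scores = np.random.randn(seq_len)
rep = attention_pool(hidden_mat, scores)
assert rep.shape == (rnn_dim,)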
Code Example #3
File: flows.py  Project: rikrd/torchkit
    def __init__(self, num_ds_dim=4):
        super(SigmoidFlow, self).__init__()
        self.num_ds_dim = num_ds_dim

        self.act_a = lambda x: nn_.softplus(x)
        self.act_b = lambda x: x
        self.act_w = lambda x: nn_.softmax(x, dim=2)
Code Example #4
    def forward(self, x_t, pz_tm1):

        #print "z_tm1", z_tm1.ndim, type(z_tm1)
        #print "pz_tm1", pz_tm1.ndim, type(pz_tm1)
        pz_t = softmax(T.dot(x_t, self.w_s) + self.bias_s)
        # batch
        pz_t = pz_t[:, 1]  #only consider probability for 1

        return pz_t
Code Example #5
    def sample2(self, x_t, z_tm1, pz_tm1):

        print "z_tm1", z_tm1.ndim, type(z_tm1)
        print "pz_tm1", pz_tm1.ndim, type(pz_tm1)
        pz_t = softmax(T.dot(x_t, self.w_s) + self.bias_s)

        print "pz_t", pz_t.ndim, type(pz_t)
        pz_tm2 = T.max(pz_t, axis=1)
        z_t = T.cast(
            T.argmax(pz_t, axis=1), theano.config.floatX
        )  #T.cast(self.MRG_rng.binomial(size=pz_t.shape, p=pz_t), theano.config.floatX)

        return z_t, pz_tm2
Code Example #6
    def sample(self, x_t, z_tm1, pz_tm1):

        print "z_tm1", z_tm1.ndim, type(z_tm1)
        print "pz_tm1", pz_tm1.ndim, type(pz_tm1)
        pz_t = softmax(T.dot(x_t, self.w_s) + self.bias_s)
        # batch
        pz_t = pz_t[:, 1]  #only consider probability for 1
        print "pz_t", pz_t.ndim, type(pz_t)
        pz_tm2 = pz_t
        pz_t = pz_t.ravel()
        z_t = T.cast(self.MRG_rng.binomial(size=pz_t.shape, p=pz_t),
                     theano.config.floatX)

        return z_t, pz_tm2
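
sample() draws a binary z_t by taking the class-1 column of a two-way softmax and sampling a Bernoulli from it, while sample2() above takes the argmax instead. A minimal NumPy sketch of the same two choices, with placeholder shapes and weight names (this is not the Theano code):

import numpy as np

rng = np.random.default_rng(0)

def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

def sample_bernoulli(x_t, w_s, bias_s):
    # stochastic version, like sample(): draw z_t ~ Bernoulli(p(z=1 | x_t))
    pz_t = softmax(x_t @ w_s + bias_s)   # (batch, 2)
    p1 = pz_t[:, 1]                      # probability of class 1
    z_t = (rng.random(p1.shape) < p1).astype(np.float32)
    return z_t, p1

def sample_argmax(x_t, w_s, bias_s):
    # deterministic version, like sample2(): pick the most probable class
    pz_t = softmax(x_t @ w_s + bias_s)
    return np.argmax(pz_t, axis=1).astype(np.float32), np.max(pz_t, axis=1)

# made-up shapes: batch of 5 inputs with 3 features, 2 classes
x_t = rng.standard_normal((5, 3))
w_s = rng.standard_normal((3, 2))
bias_s = rng.standard_normal(2)
z_stoch, p1 = sample_bernoulli(x_t, w_s, bias_s)
z_det, p_max = sample_argmax(x_t, w_s, bias_s)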
Code Example #7
File: detection.py  Project: guochaorong/paddle_v3
def detection_output(loc,
                     scores,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
                     nms_eta=1.0):
    """
    **Detection Output Layer for Single Shot Multibox Detector (SSD).**

    This operation gets the detection results by performing the following
    two steps:

    1. Decode input bounding box predictions according to the prior boxes.
    2. Get the final detection results by applying multi-class non-maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] that represents the
            predicted locations of M bounding boxes. N is the batch size, and
            each bounding box has four coordinate values in the layout
            [xmin, ymin, xmax, ymax].
        scores(Variable): A 3-D Tensor with shape [N, M, C] that represents the
            predicted confidence scores. N is the batch size, C is the number
            of classes, and M is the number of bounding boxes. For each
            category there are M scores, one per bounding box.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] that holds M boxes,
            each represented as [xmin, ymin, xmax, ymax]. [xmin, ymin] is the
            top-left coordinate of the anchor box; if the input is an image
            feature map, these coordinates are close to the origin of the
            coordinate system. [xmax, ymax] is the bottom-right coordinate of
            the anchor box.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] that holds M
            groups of variances.
        background_label(int): The index of the background label; detections
            with this label are ignored. If set to -1, all categories are
            considered.
        nms_threshold(float): The threshold to be used in NMS.
        nms_top_k(int): Maximum number of detections to keep, based on
            confidence, after filtering detections with score_threshold.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
        nms_eta(float): The parameter for adaptive NMS.

    Returns:
        Variable: The detection output is a LoDTensor with shape [No, 6].
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            `No` is the total number of detections in this mini-batch. The
            offsets in the first dimension are the LoD; there are N + 1
            offsets, where N is the batch size. The i-th image has
            `LoD[i + 1] - LoD[i]` detected results; if this is 0, the i-th
            image has no detections. If no image has any detections, all
            elements in LoD are 0 and the output tensor contains the single
            value -1.

    Examples:
        .. code-block:: python

            pb = layers.data(name='prior_box', shape=[10, 4],
                             append_batch_size=False, dtype='float32')
            pbv = layers.data(name='prior_box_var', shape=[10, 4],
                              append_batch_size=False, dtype='float32')
            loc = layers.data(name='target_box', shape=[2, 21, 4],
                              append_batch_size=False, dtype='float32')
            scores = layers.data(name='scores', shape=[2, 21, 10],
                                 append_batch_size=False, dtype='float32')
            nmsed_outs = fluid.layers.detection_output(
                scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv)
    """
    helper = LayerHelper("detection_output", **locals())
    decoded_box = box_coder(prior_box=prior_box,
                            prior_box_var=prior_box_var,
                            target_box=loc,
                            code_type='decode_center_size')
    old_shape = scores.shape
    scores = nn.reshape(x=scores, shape=(-1, old_shape[-1]))
    scores = nn.softmax(input=scores)
    scores = nn.reshape(x=scores, shape=old_shape)
    scores = nn.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
    helper.append_op(type="multiclass_nms",
                     inputs={
                         'Scores': scores,
                         'BBoxes': decoded_box
                     },
                     outputs={'Out': nmsed_outs},
                     attrs={
                         'background_label': background_label,
                         'nms_threshold': nms_threshold,
                         'nms_top_k': nms_top_k,
                         'keep_top_k': keep_top_k,
                         'score_threshold': score_threshold,
                         'nms_eta': nms_eta
                     })
    nmsed_outs.stop_gradient = True
    return nmsed_outs
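
The reshape -> softmax -> reshape -> transpose sequence in the body turns the raw [N, M, C] class scores into per-box probabilities laid out as [N, C, M] for the NMS op. A minimal NumPy sketch of just that normalization step, with shapes taken from the docstring (this is illustrative, not Paddle code):

import numpy as np

def per_box_class_probs(scores):
    # scores: (N, M, C) raw logits -> (N, C, M) softmax probabilities,
    # mirroring the reshape -> softmax -> reshape -> transpose above
    n, m, c = scores.shape
    flat = scores.reshape(-1, c)                      # (N*M, C)
    flat = flat - flat.max(axis=1, keepdims=True)     # numerical stability
    probs = np.exp(flat)
    probs /= probs.sum(axis=1, keepdims=True)         # each row sums to 1
    return probs.reshape(n, m, c).transpose(0, 2, 1)  # (N, C, M)

probs = per_box_class_probs(np.random.randn(2, 21, 10))
assert probs.shape == (2, 10, 21)
assert np.allclose(probs.sum(axis=1), 1.0)            # per box, over classes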
Code Example #8
import nn
import tools as tl
import pm
import numpy as np
import pickle
import os
import matplotlib.pyplot as plt

S = tl.load_Q('state')
Q = tl.load_Q('data/Q0')
X = [np.array(list(map(int, s))) for s in S]
V = [10 * Q[s] for s in S]
Y = [nn.softmax().forward(v) for v in V]

# Z = [np.where(x >0,0,1) for x in X ]
# Y = [z*y for z,y in zip(Z,Y)]
# Y = [y/np.sum(y) for y in Y]
if os.path.exists('index_nn.npy'):
    index_nn = np.loadtxt('index_nn.npy')
    index_nn = int(index_nn) + 1
else:
    index_nn = 10
np.savetxt('index_nn.npy', [index_nn])

#W = tl.load_Q('data/W'+str(index_nn-1))
# input = X
# output = Y

l1 = nn.linear(9, 9)
l1.grad_zero()
l1.init_param()
Code Example #9
X = np.random.rand(10000, 2) * 3 - 1.5
Y = np.array([np.where(np.sum(x * x) > 1, [0, 1], [1, 0]) for x in X])

# input = X
# output = Y

l1 = nn.linear(2, 9)
l1.grad_zero()
l1.init_param()
r1 = nn.relu()

l2 = nn.linear(9, 2)
l2.grad_zero()
l2.init_param()

s = nn.softmax()
loss = nn.cre()


def model(x, y):
    x = np.array(x)
    y = np.array(y)

    x = l1.forward(x)
    x = r1.forward(x)
    x = l2.forward(x)

    x = s.forward(x)
    dx = loss.forward(x, y)
    dx = s.backward(dx)
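
model() is cut off after the first backward call, and the custom nn module's API is not shown here, so as a self-contained stand-in here is a minimal NumPy sketch of the softmax + cross-entropy forward/backward that the snippet appears to be composing (all names below are illustrative, not the author's nn API):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

def cross_entropy(p, y):
    # p: predicted probabilities, y: one-hot targets, both (batch, classes)
    return -np.sum(y * np.log(p + 1e-12)) / len(p)

def grad_logits(p, y):
    # gradient of the cross-entropy w.r.t. the pre-softmax logits: (p - y) / N,
    # the quantity a fused softmax + cross-entropy backward pass hands back
    return (p - y) / len(p)

# toy batch: 4 samples, 2 classes
logits = np.random.randn(4, 2)
targets = np.eye(2)[np.array([0, 1, 1, 0])]
p = softmax(logits)
loss = cross_entropy(p, targets)
dlogits = grad_logits(p, targets)
assert dlogits.shape == logits.shape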
Code Example #10
File: mnist.py  Project: navdeepkumar12/Tic
    tti.append(b)
tni = np.array(tni) / 256  # normalizing input
tti = np.array(tti) / 256
# one-hot label vectors
tnl = np.array([np.eye(s + 1, 10)[s] for s in train[1]])
ttl = np.array([np.eye(s + 1, 10)[s] for s in test[1]])
print('Data preprocessed')

#  Architecture
conv1 = nn.convolve3d(shape=(5, 1, 9, 9), mode='valid')
add1 = nn.add()
relu1 = nn.relu()
conv2 = nn.convolve3d(shape=(10, 5, 20, 20), mode='valid')
add2 = nn.add()
lin = nn.linear()
softmax = nn.softmax()
cre = nn.cre()
print('Architecture loaded')

# random parameter initialization
if pm.initial_Q == False:
    conv1.set_param(np.random.randn(5, 1, 9, 9) / (np.sqrt(81 * 2)))
    conv2.set_param(
        np.random.randn(10, 5, 20, 20) / (np.sqrt(5 * 19 * 19 * 2)))
    lin.init_param((10, 10))
    print('mnist:- Weights initialized randomly')
else:
    if pm.initial_Q == True:
        ind = index - 1
        W = tl.load_Q('data/mnist/param' + str(ind))
    else:
Code Example #11
def task4():
    # softmax binary classification
    net = NeuralNetwork([2, 4, 2], activation='tanh', softmax_=True)

    train_N = 200
    test_N = 100

    x = np.random.normal(loc=0.0, scale=2.0, size=(train_N, 2))

    a = 1.0
    b = 0.15
    f = lambda x: a * x + b

    plt.figure(1)
    plt.plot(x, f(x), 'g', label='true decision boundary')

    # linearly separate the points above
    y = np.zeros([train_N, 2])

    for i in range(train_N):
        if f(x[i, 0]) >= x[i, 1]:
            # point is below the line
            y[i] = np.array([1., 0.])
            plt.plot(x[i, 0], x[i, 1], 'bo', markersize=8, label='class 1')
        else:
            # point is above the line
            y[i] = np.array([0., 1.])
            plt.plot(x[i, 0], x[i, 1], 'ro', markersize=8, label='class 2')

    plt.legend(labels=['true decision boundary'], loc=1)
    plt.title('Generated random data')
    plt.show()
    wb = net.train(x,
                   y,
                   loss='cross_entropy',
                   epochs=100,
                   lr=0.001,
                   batchsize=8)

    # wb = net.train(x, y, softmax_=True, loss='cross_entropy', epochs=200, lr=0.001, batchsize=8)

    newx = np.random.normal(loc=0.0, scale=2.0, size=(test_N, 2))
    y_preds = np.array(
        list(map(net.forward, newx, (wb for _ in range(len(newx))))))
    # y_preds = softmax(np.squeeze(y_preds))
    y_preds = np.array([softmax(a) for a in np.squeeze(y_preds)])

    plt.figure(2)
    plt.plot(x, f(x), 'g', label='true decision boundary')
    # print(y_preds.shape)
    for i in range(test_N):
        if y_preds[i][0][0] > 0.5:
            plt.plot(newx[i, 0],
                     newx[i, 1],
                     'b^',
                     markersize=8,
                     label='class 1 (predicted)')
        else:
            plt.plot(newx[i, 0],
                     newx[i, 1],
                     'r^',
                     markersize=8,
                     label='class 2 (predicted)')

    plt.legend(labels=['true decision boundary'], loc=1)
    # plt.plot(x, f(x), 'y')
    # plt.legend()
    plt.show()
Code Example #12
File: mnist.py  Project: gtoubassi/jax-playground
 def forward(self, params, x):
   W1, b1, W2, b2 = params
   #h1 = nn.sigmoid(np.matmul(W1, x) + b1)
   #h1 = np.maximum(np.matmul(W1, x) + b1, 0) #relu
   h1 = np.tanh(np.matmul(W1, x) + b1)
   return nn.softmax(np.matmul(W2, h1) + b2)
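
This forward pass is a two-layer MLP: a tanh hidden layer followed by a softmax output. A minimal NumPy sketch of how it might be exercised, with assumed parameter shapes and a local softmax standing in for nn.softmax:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def forward(params, x):
    # same structure as above: tanh hidden layer, softmax output
    W1, b1, W2, b2 = params
    h1 = np.tanh(W1 @ x + b1)
    return softmax(W2 @ h1 + b2)

# assumed MNIST-like sizes: 784 inputs, 64 hidden units, 10 classes
rng = np.random.default_rng(0)
params = (0.01 * rng.standard_normal((64, 784)), np.zeros(64),
          0.01 * rng.standard_normal((10, 64)), np.zeros(10))
probs = forward(params, rng.standard_normal(784))
assert probs.shape == (10,) and np.isclose(probs.sum(), 1.0)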
Code Example #13
File: detection.py  Project: absorbguo/Paddle
def detection_output(loc,
                     scores,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
                     nms_eta=1.0):
    """
    **Detection Output Layer for Single Shot Multibox Detector (SSD).**

    This operation gets the detection results by performing the following
    two steps:

    1. Decode input bounding box predictions according to the prior boxes.
    2. Get the final detection results by applying multi-class non-maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] that represents the
            predicted locations of M bounding boxes. N is the batch size, and
            each bounding box has four coordinate values in the layout
            [xmin, ymin, xmax, ymax].
        scores(Variable): A 3-D Tensor with shape [N, M, C] that represents the
            predicted confidence scores. N is the batch size, C is the number
            of classes, and M is the number of bounding boxes. For each
            category there are M scores, one per bounding box.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] that holds M boxes,
            each represented as [xmin, ymin, xmax, ymax]. [xmin, ymin] is the
            top-left coordinate of the anchor box; if the input is an image
            feature map, these coordinates are close to the origin of the
            coordinate system. [xmax, ymax] is the bottom-right coordinate of
            the anchor box.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] that holds M
            groups of variances.
        background_label(int): The index of the background label; detections
            with this label are ignored. If set to -1, all categories are
            considered.
        nms_threshold(float): The threshold to be used in NMS.
        nms_top_k(int): Maximum number of detections to keep, based on
            confidence, after filtering detections with score_threshold.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
        nms_eta(float): The parameter for adaptive NMS.

    Returns:
        Variable: The detection output is a LoDTensor with shape [No, 6].
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            `No` is the total number of detections in this mini-batch. The
            offsets in the first dimension are the LoD; there are N + 1
            offsets, where N is the batch size. The i-th image has
            `LoD[i + 1] - LoD[i]` detected results; if this is 0, the i-th
            image has no detections. If no image has any detections, all
            elements in LoD are 0 and the output tensor contains the single
            value -1.

    Examples:
        .. code-block:: python

            pb = layers.data(name='prior_box', shape=[10, 4],
                             append_batch_size=False, dtype='float32')
            pbv = layers.data(name='prior_box_var', shape=[10, 4],
                              append_batch_size=False, dtype='float32')
            loc = layers.data(name='target_box', shape=[2, 21, 4],
                              append_batch_size=False, dtype='float32')
            scores = layers.data(name='scores', shape=[2, 21, 10],
                                 append_batch_size=False, dtype='float32')
            nmsed_outs = fluid.layers.detection_output(
                scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv)
    """
    helper = LayerHelper("detection_output", **locals())
    decoded_box = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=loc,
        code_type='decode_center_size')
    old_shape = scores.shape
    scores = nn.reshape(x=scores, shape=(-1, old_shape[-1]))
    scores = nn.softmax(input=scores)
    scores = nn.reshape(x=scores, shape=old_shape)
    scores = nn.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
    helper.append_op(
        type="multiclass_nms",
        inputs={'Scores': scores,
                'BBoxes': decoded_box},
        outputs={'Out': nmsed_outs},
        attrs={
            'background_label': background_label,
            'nms_threshold': nms_threshold,
            'nms_top_k': nms_top_k,
            'keep_top_k': keep_top_k,
            'score_threshold': score_threshold,
            'nms_eta': nms_eta
        })
    nmsed_outs.stop_gradient = True
    return nmsed_outs