Exemple #1
0
    def __backward(self, dout):
        """Batch-normalization backward pass (computational-graph style).

        Propagates dout back through the normalization computed in the
        forward pass, using cached intermediates (self.xhat, self.xmu,
        self.ivar, self.sqrtvar, self.var, self.gamma).

        NOTE(review): dbeta and dgamma are computed but neither stored nor
        returned — confirm whether the parameter gradients should be kept.
        """
        # Gradient w.r.t. the shift parameter beta: sum over axis 0.
        dbeta = dout.sum(axis=0)
        dgammax = dout

        # Gradient w.r.t. the scale gamma, and w.r.t. the normalized input.
        dgamma = ve.sum(ve.mul(self.xhat, dgammax), axis=0)
        dxhat = dgammax * self.gamma

        # Split the xhat = xmu * ivar node into its two input gradients.
        divar = ve.sum(ve.mul(dxhat, self.xmu), axis=0)
        dxmu1 = dxhat * self.ivar

        # ivar = 1/sqrtvar, so d(sqrtvar) = -divar / sqrtvar^2.
        dsqrtvar = divar * (1 / (self.sqrtvar**2)) * (-1)

        # sqrtvar = sqrt(var + eps); 10e-7 (= 1e-6) is presumably the same
        # eps used in the forward pass — TODO confirm.
        dvar = (1 / ve.sqrt(self.var + 10e-7)) * dsqrtvar * 0.5

        # Broadcast dvar back to dout's shape via an all-ones tensor.
        # NOTE(review): divides by get_demension()[1], but the sums above run
        # over axis 0 — confirm which axis holds the batch.
        dsq = ve.arange([
            1 for i in range(reduce(lambda x, y: x * y, dout.get_demension()))
        ]).reshape(dout.get_demension()) * (dvar / dout.get_demension()[1])

        # Second path into xmu: d(xmu^2)/dxmu = 2 * xmu.
        dxmu2 = ve.mul(self.xmu, dsq) * 2

        dx1 = dxmu1 + dxmu2
        # Gradient w.r.t. the mean: negative sum of both xmu paths.
        dmu = ve.sum(dxmu1 + dxmu2, axis=0) * (-1)

        # Broadcast dmu back to dout's shape, same divisor caveat as dsq.
        dx2 = ve.arange([
            1 for i in range(reduce(lambda x, y: x * y, dout.get_demension()))
        ]).reshape(dout.get_demension()) * (dmu / dout.get_demension()[1])

        dx = dx1 + dx2

        return dx
Exemple #2
0
def AND(x1, x2, b=-0.7):
    """Perceptron AND gate: returns 1 when the weighted sum exceeds -b.

    x1, x2 are the two binary inputs; b is the bias (default -0.7).
    """
    inputs = np.arange([x1, x2])
    weights = np.arange([0.5, 0.5]).reshape(2, 1)
    # x * w presumably performs a dot product on the project vector type;
    # the comparison below relies on the result supporting <=.
    weighted_sum = inputs * weights + b
    return 0 if weighted_sum <= 0 else 1
Exemple #3
0
def _f(func, x):
    """Apply func elementwise to a 1xN project vector, or directly to a scalar.

    Raises ValueError for any other argument type.
    """
    vector_type = type(np.arange([1]))
    is_row_vector = (type(x) == vector_type
                     and len(x.get_demension()) >= 2
                     and x.get_demension()[0] == 1)
    if is_row_vector:
        #TODO
        mapped = [func(item) for item in x.get_origin_list()]
        return np.arange(mapped).reshape(1, len(mapped))
    if type(x) in (type(1), type(1.0)):
        return func(x)
    raise ValueError
Exemple #4
0
def _classification(x):
    """Softmax-style normalization of the 1xN vector x.

    Exponentiates every element, then scales by the reciprocal of the total.
    """
    n = x.get_demension()[1]
    exps = [e._e**v for v in x.get_origin_list()]
    ones = [1 for _ in range(n)]

    row = ve.arange(exps).reshape(1, n)
    column_of_ones = ve.arange(ones).reshape(n, 1)

    # row * column presumably reduces to the total of the exponentials
    # on the project vector type — confirm against ve's * semantics.
    total = row * column_of_ones

    return row * (1 / total.get_origin_list())
Exemple #5
0
def create_contexts_target(corpus, window_size=1):
    """Build (contexts, target) training pairs for CBOW-style word2vec.

    target: word ids with window_size stripped from each end.
    contexts: for each target position, the ids of the window_size
    neighbors on each side (the target itself is skipped).
    """
    corpus_lst = corpus.get_origin_list()
    target = corpus_lst[window_size:-window_size]
    contexts = []

    for idx in range(window_size, len(corpus_lst) - window_size):
        cs = []
        for t in range(-window_size, window_size + 1):
            if t == 0:
                continue
            # BUG FIX: was corpus_lst[idx + 1], which ignored the window
            # offset t and appended the same right neighbor every time.
            cs.append(corpus_lst[idx + t])
        contexts.append(cs)

    return np.arange(contexts), np.arange(target)
Exemple #6
0
    def backward(self, dout):
        """ReLU backward: pass the gradient through where the value is
        non-negative, zero elsewhere.

        NOTE(review): both branches mask by dout itself; a textbook ReLU
        backward masks by the *forward input* — confirm intended behavior.

        Raises ValueError for unsupported dout types.
        """
        if (type(dout) == type(1)) or (type(dout) == type(1.0)):
            # BUG FIX: was `if x >= 0` with `x` undefined (NameError);
            # use dout, mirroring the vector branch below.
            if dout >= 0:
                dx = dout
            else:
                dx = 0
        elif type(dout) == type(ve.arange([1])):
            # Flatten, clamp negatives to zero, then restore the shape.
            dmen = dout.get_demension()
            dout.reshape(1, reduce(lambda x, y: x * y, dmen))
            dout_lst = dout.get_origin_list()
            dx = ve.arange(list(map(lambda v: v
                                    if v >= 0 else 0, dout_lst))).reshape(dmen)
        else:
            # Explicit failure instead of UnboundLocalError on dx.
            raise ValueError

        return dx
Exemple #7
0
    def forward(self, x):
        """ReLU activation: max(0, x) for scalars, elementwise for vectors."""
        if type(x) in (type(1), type(1.0)):
            # Scalar path: pass non-negative values through, clamp negatives.
            out = x if x >= 0 else 0
        elif type(x) == type(ve.arange([1])):
            # Vector path: flatten, clamp each entry at zero, restore shape.
            dmen = x.get_demension()
            x.reshape(1, reduce(lambda a, b: a * b, dmen))
            flat = x.get_origin_list()
            clamped = [v if v >= 0 else 0 for v in flat]
            out = ve.arange(clamped).reshape(dmen)

        return out
Exemple #8
0
    def forward(self, x):
        """Sigmoid forward: out = 1 / (1 + exp(-x)); caches out in self.out
        for backward().

        Accepts a plain int/float or the project vector type (ve.arange).
        """
        if (type(x) == type(1)) or (type(x) == type(1.0)):
            # Scalar path; e._e is presumably Euler's number — confirm.
            out = 1 / (1 + e._e**(x * (-1)))
        elif type(x) == type(ve.arange([1])):
            # FIX: single-argument print() replaces the Python-2-only print
            # statements; output is identical under Py2 and valid under Py3.
            print("%% Sigmoid forward %%")
            print(x)
            # Flatten, apply the sigmoid elementwise, restore the shape.
            dmen = x.get_demension()
            x.reshape(1, reduce(lambda x, y: x * y, dmen))
            x_lst = x.get_origin_list()
            out = ve.arange(
                list(map(lambda x: (1 / (1 + e._e**(x * (-1)))),
                         x_lst))).reshape(dmen)
            print(out)

        self.out = out
        return out
Exemple #9
0
def _numerical_gradient(f, x):
    """Central-difference numerical gradient of f at x.

    For each element, evaluates (f(x+h) - f(x-h)) / (2h) with h = 1e-4,
    restoring the element before advancing. Returns a vector shaped like x.
    """
    h = 1e-4
    dmen = x.get_demension()
    grad_lst = []

    it = np.nditer(x)
    while not it.finished:
        idx = it.multi_index
        tmp_val = it.iterget(idx)
        # Evaluate f at x + h for this element.
        it.iterset(idx, ((tmp_val * 1.0) + h))
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        print(x)
        fxh1 = f(x)

        # Evaluate f at x - h for this element.
        it.iterset(idx, ((tmp_val * 1.0) - h))
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        print(x)
        fxh2 = f(x)
        grad_lst.append((fxh1 - fxh2) / (2 * h))
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print(it._get_cur_ptr(list(idx)))
        print(grad_lst)
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        # Restore the original value before moving to the next element.
        it.iterset(idx, tmp_val)
        it.iternext()

    grad = np.arange(grad_lst).reshape(dmen)

    print(grad)
    print("!>>>>>>>>>>>>>>>>>>>>>")

    return grad
Exemple #10
0
    def backward(self, dout=1):
        """Loss-layer backward pass: returns dx = y - t.

        NOTE(review): batch_size is computed but the 1/batch_size scaling is
        still disabled (see the commented line) — confirm the intended scale.
        """
        batch_size = self.t.get_demension()[1]
        #TODO
        print("*********xxx**********************")
        print(self.y)
        print(self.t)

        #dx = (self.y - self.t) * (1.0/batch_size)
        # Rebuild y from its flattened element list — presumably to obtain an
        # independent copy so the subtraction does not alias self.y; confirm.
        dmen = self.y.get_demension()
        self.y.reshape(1, reduce(lambda x, y: x * y, dmen))

        dx_lst = self.y.get_origin_list()
        dx = ve.arange(dx_lst).reshape(dmen)
        # Restore self.y's original shape (reshape mutates in place).
        self.y.reshape(dmen)

        dx = (dx - self.t)

        print("**********yyy**********************")
        print(self.y)
        print(self.t)
        print(self.loss)
        print(dx)
        print("**********zzz*********************")

        return dx
Exemple #11
0
    def update(self, params, grads):
        """Momentum SGD step: v = momentum*v + lr*grad, then param -= v."""
        if self.v is None:
            # First call: allocate a zero velocity tensor per parameter.
            self.v = {}
            for name, param in params.items():
                shape = param.get_demension()
                count = reduce(lambda a, b: a * b, shape)
                self.v[name] = np.arange([0] * count).reshape(shape)

        for name in params.keys():
            velocity = (self.v[name] * self.momentum) + (grads[name] * self.lr)
            self.v[name] = velocity
            params[name] = params[name] - velocity
Exemple #12
0
    def backward(self, dout):
        """Sigmoid backward: dx = dout * (1 - out) * out, using the output
        cached in self.out by forward().

        Accepts a plain int/float or the project vector type (ve.arange).
        """
        if (type(dout) == type(1)) or (type(dout) == type(1.0)):
            # Scalar path.
            dx = dout * (1.0 - self.out) * self.out
        elif type(dout) == type(ve.arange([1])):
            # FIX: single-argument print() replaces the Python-2-only print
            # statements; output is identical under Py2 and valid under Py3.
            print("%% Sigmoid backward %%")
            print(dout)
            print(self.out)
            # Flatten both tensors so they can be combined elementwise.
            dout_dmen = dout.get_demension()
            out_dmen = self.out.get_demension()
            dout.reshape(1, reduce(lambda x, y: x * y, dout_dmen))
            self.out.reshape(1, reduce(lambda x, y: x * y, out_dmen))

            dout_lst = dout.get_origin_list()
            out_lst = self.out.get_origin_list()

            dx = ve.arange(
                list(map(lambda x, y: (x * (1.0 - y) * y), dout_lst,
                         out_lst))).reshape(dout_dmen)
            # Restore self.out's original shape (reshape mutates in place).
            self.out.reshape(out_dmen)

        print(dx)

        return dx
Exemple #13
0
    def update(self, params, grads):
        """Nesterov-style momentum update.

        v(t) = momentum * v(t-1) - lr * dL/dw
        w(t) = w(t-1) + momentum^2 * v(t) - (1 + momentum) * lr * dL/dw
        """
        if self.v is None:
            self.v = {}
            for key, val in params.items():
                # BUG FIX: dmen was referenced without ever being assigned
                # (NameError on first call); derive it from the parameter,
                # matching the sibling momentum optimizer.
                dmen = val.get_demension()
                self.v[key] = np.arange([
                    0 for _ in range(reduce(lambda x, y: x * y, dmen))
                ]).reshape(dmen)

        for key in params.keys():
            # v(t) = momentum * v(t-1) - learning_rate * dL/dw
            self.v[key] = (self.v[key] * self.momentum) - grads[key] * self.lr
            # w(t) = w(t-1) + momentum * v(t+1) - learning_rate * dL/dw
            params[key] = params[key] + self.v[
                key] * self.momentum * self.momentum - grads[key] * (
                    1 + self.momentum) * self.lr
Exemple #14
0
def init_network():
    """Build the fixed weights and biases of the 3-layer demo network."""
    w1 = ve.arange([0.1, 0.3, 0.5, 0.2, 0.4, 0.6]).reshape(2, 3)
    b1 = ve.arange([0.1, 0.2, 0.3])
    w2 = ve.arange([0.1, 0.4, 0.2, 0.5, 0.3, 0.6]).reshape(3, 2)
    b2 = ve.arange([0.1, 0.2])
    w3 = ve.arange([0.1, 0.3, 0.2, 0.4]).reshape(2, 2)
    b3 = ve.arange([0.1, 0.2])

    return {
        'W1': w1, 'b1': b1,
        'W2': w2, 'b2': b2,
        'W3': w3, 'b3': b3,
    }
Exemple #15
0
def softmax(a):
    """Numerically-stabilized softmax of the project vector a.

    Subtracts the max element before exponentiating so the exponentials
    cannot overflow, then normalizes by the total.
    """
    # FIX: single-argument print() replaces the Python-2-only print
    # statements; output is identical under Py2 and valid under Py3.
    print("==========================================")
    dmen = a.get_demension()
    # Max element, subtracted for numerical stability.
    c = ve.max(a)
    print(c)
    b = a - c
    print(b)
    exp_a = ve.exp(b)
    print(exp_a)
    y = ve.arange(exp_a).reshape(dmen)
    print(y)
    sum_exp_a = sum(exp_a)
    print(sum_exp_a)
    y = y * (1 / sum_exp_a)
    print(y)
    print("=========================================")

    return y
Exemple #16
0
def create_co_matrix(corpus, vocab_size, window_size=1):
    """Build a vocab_size x vocab_size co-occurrence matrix.

    For every word id in the corpus, counts each neighbor within
    window_size positions on either side.
    """
    corpus_lst = corpus.get_origin_list()
    corpus_size = len(corpus_lst)
    co_matrix = np.arange([0 for _ in range(vocab_size * vocab_size)
                           ]).reshape(vocab_size, vocab_size)

    for idx, word_id in enumerate(corpus_lst):
        for i in range(1, window_size + 1):
            # BUG FIX: offsets were hard-coded to 1 (idx - 1 / idx + 1),
            # silently ignoring window_size > 1; use the loop offset i.
            left_idx = idx - i
            right_idx = idx + i

            if left_idx >= 0:
                left_word_id = corpus_lst[left_idx]
                co_matrix.get_origin_list()[word_id][left_word_id] += 1

            if right_idx < corpus_size:
                right_word_id = corpus_lst[right_idx]
                co_matrix.get_origin_list()[word_id][right_word_id] += 1

    return co_matrix
Exemple #17
0
def _check_vector(a):
    """Return True when a is the project's vector type (as built by np.arange)."""
    vector_type = type(np.arange([2]))
    return type(a) == vector_type