Example #1
def fully_connected(inp, inp_size, out_size, center, drop=None):
    w = net.variable(guass(0., .1, (inp_size, out_size)))
    regularizer = net.l2_regularize(w, center)
    b = net.variable(np.ones((out_size, )) * .1)
    mul = net.matmul(inp, w)
    biased = net.plus_b(mul, b)
    if drop is not None:
        biased = net.dropout(biased, drop)
    return biased, regularizer
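For context, a hypothetical call site for the helper above (the layer sizes, the keep probability, and the `image` tensor are assumptions, not taken from this excerpt):

# Hypothetical usage: a 784 -> 256 layer. `center` is forwarded to
# net.l2_regularize and `drop` to net.dropout; their exact semantics
# are not shown in this excerpt.
hidden, l2_term = fully_connected(image, 784, 256, center=0., drop=0.5)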
Example #2
    def check_against_output(cls, module, args, outs, out_idx):
        target = outs[out_idx]
        simulated_target = guass(target.mean(),
                                 target.std() + 1., target.shape)
        out_grads = [np.zeros(o.shape) for o in outs]
        out_grads[out_idx] = 2 * (target - simulated_target)
        inp_grads = module.backward(*out_grads)
        if inp_grads is None:
            return

        print('\tChecking w.r.t output #', out_idx)
        inp_grads = cls.tolist(inp_grads)
        for inp_idx, inp_grad in enumerate(inp_grads):
            if type(inp_grad) is np.ndarray:
                cls.check_against_input(module, args, inp_idx, inp_grad,
                                        out_idx, simulated_target)
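The companion check_against_input is not shown here. A minimal sketch of the central-difference check it presumably performs, assuming module.forward(*args) returns the same outs list and the implied loss is the squared error against simulated_target (whose analytic output gradient is exactly the 2 * (target - simulated_target) used above):

import numpy as np

def numeric_input_grad(module, args, inp_idx, out_idx, simulated_target, eps=1e-5):
    # Central-difference gradient of the implied loss
    #   loss = ((outs[out_idx] - simulated_target) ** 2).sum()
    # with respect to each entry of args[inp_idx].
    args = [np.asarray(a, dtype=float).copy() for a in args]
    grad = np.zeros_like(args[inp_idx])
    it = np.nditer(grad, flags=['multi_index'])
    for _ in it:
        idx = it.multi_index
        saved = args[inp_idx][idx]
        args[inp_idx][idx] = saved + eps
        hi = ((module.forward(*args)[out_idx] - simulated_target) ** 2).sum()
        args[inp_idx][idx] = saved - eps
        lo = ((module.forward(*args)[out_idx] - simulated_target) ** 2).sum()
        args[inp_idx][idx] = saved
        grad[idx] = (hi - lo) / (2 * eps)
    return grad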
Example #3
    def _setup(self,
               server,
               w_shape,
               bias=None,
               act_class=sigmoid,
               transfer=None):
        if transfer is None:
            b_shape = (w_shape[-1], )
            w_init = xavier(w_shape)
            if bias is not None:
                b_init = np.ones(b_shape) * bias
            else:
                b_init = guass(0., 1e-1, b_shape)
        else:
            w_init, b_init = transfer

        self._act_class = act_class
        self._w = server.issue_var_slot(w_init, True)
        self._b = server.issue_var_slot(b_init, True)
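The xavier initializer used for w_init is imported from elsewhere; a minimal sketch of the standard Glorot-style initializer it presumably resembles (the fan computation here is an assumption, not the library's actual code):

import numpy as np

def xavier_sketch(shape):
    # Glorot/Xavier uniform: bound = sqrt(6 / (fan_in + fan_out)),
    # treating the last axis as fan_out and the rest as fan_in.
    fan_out = shape[-1]
    fan_in = int(np.prod(shape[:-1]))
    bound = np.sqrt(6. / (fan_in + fan_out))
    return np.random.uniform(-bound, bound, shape)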
Example #4
from src.utils import randn, uniform, guass, read_mnist
from src.net import Net  # assumed path; the Net module is not shown in these excerpts
import numpy as np
import time

inp_dim = 28
out_dim = 10
std = 1.
lr = 1e-3
inp_shape = (inp_dim, inp_dim, 1)

net = Net()
image = net.portal(inp_shape)
label = net.portal((out_dim, ))
is_training = net.portal()

k1 = net.variable(guass(0., std, (5, 5, 1, 32)))
b1 = net.variable(np.ones((32, )) * .1)
k2 = net.variable(guass(0., std, (5, 5, 32, 64)))
b2 = net.variable(np.ones((64, )) * .1)
w3 = net.variable(guass(0., std, (7 * 7 * 64, 1024)))
b3 = net.variable(np.ones((1024, )) * .1)
w4 = net.variable(guass(0., std, (1024, 10)))
b4 = net.variable(np.ones((10, )) * .1)

conv1 = net.conv2d(image, k1, pad=(2, 2), stride=(1, 1))
conv1 = net.batch_norm(conv1, net.variable(guass(0., std, (32, ))),
                       is_training)
conv1 = net.plus_b(conv1, b1)
conv1 = net.relu(conv1)
pool1 = net.maxpool2(conv1)
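For reference, the (7 * 7 * 64, 1024) shape of w3 anticipates the flattened feature map after two rounds of 2x2 pooling; the second conv/pool stage is truncated from this excerpt, so that it mirrors the first is an assumption:

side = 28
side //= 2                # after pool1: 14 x 14
side //= 2                # after the assumed second pool: 7 x 7
flat = side * side * 64   # 3136 == 7 * 7 * 64, w3's input dimension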
Example #5
from src.utils import guass, read_mnist  # same import path as in Example #4
from src.net import Net  # assumed path; the Net module is not shown in these excerpts
import numpy as np
import time

inp_dim = 784
hid_dim = 64
out_dim = 10
std = 1e-3
lr = 1e-2
batch = 128

net = Net()
image = net.portal((784, ))
keep_prob = net.portal()
target = net.portal((10, ))

w1 = net.variable(guass(0., std, (inp_dim, hid_dim)))
b1 = net.variable(np.ones(hid_dim) * .1)
w2 = net.variable(guass(0., std, (hid_dim, out_dim)))
b2 = net.variable(np.ones(out_dim) * .1)

fc1 = net.matmul(image, w1)
bias = net.plus_b(fc1, b1)
relu = net.relu(bias)
dropped = net.dropout(relu, keep_prob)
fc2 = net.matmul(dropped, w2)
bias = net.plus_b(fc2, b2)
loss = net.softmax_crossent(bias, target)

net.optimize(loss, 'sgd', lr)

mnist_data = read_mnist()
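guass (sic) is imported from src.utils throughout these examples; a minimal sketch of what it presumably wraps, matching the (mean, std, shape) call sites above (the actual implementation is not shown):

import numpy as np

def guass(mean, std, shape):
    # Presumed thin wrapper over NumPy's Gaussian sampler.
    return np.random.normal(mean, std, shape)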