Example #1
def __init__(self, param):
    self.c1 = ConvLayer((1, 28, 28), (4, 5, 5), (1, 0), Relu(), param)
    # 4x24x24
    self.p1 = PoolingLayer(self.c1.output_shape, (2, 2), 2, PoolingTypes.MAX)
    # 4x12x12
    self.f1 = FcLayer(self.p1.output_size, 32, Sigmoid(), param)
    self.f2 = FcLayer(self.f1.output_size, 10, Softmax(), param)
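The shape comments (# 4x24x24, # 4x12x12) follow from the standard convolution and pooling output-size formula. A minimal sketch of that arithmetic (the helper names below are illustrative, not part of the library):

def conv_out_size(in_size, kernel, stride, padding):
    # floor((H + 2*P - K) / S) + 1
    return (in_size + 2 * padding - kernel) // stride + 1

def pool_out_size(in_size, window, stride):
    return (in_size - window) // stride + 1

h = conv_out_size(28, 5, 1, 0)   # 28x28 input, 5x5 kernel, stride 1, no padding -> 24
h = pool_out_size(h, 2, 2)       # 2x2 max pooling, stride 2 -> 12
print(4 * h * h)                 # 576 = 4x12x12 values flattened into f1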
Example #2
def __init__(self, param):
    self.c1 = ConvLayer((1, 28, 28), (4, 5, 5), (1, 0), Relu(), param)
    # 4x24x24
    self.p1 = PoolingLayer(self.c1.output_shape, (2, 2), 2, PoolingTypes.MAX)
    # 4x12x12
    #self.c2 = ConvLayer(self.p1.output_shape, (8,3,3), (1,0), Relu(), param)
    # 8x10x10
    #self.p2 = PoolingLayer(self.c2.output_shape, (2,2), 2, PoolingTypes.MAX)
    # 8x5x5
    #self.f1 = FcLayer(self.p2.output_size, 32, Relu(), param)
    self.f1 = FcLayer(self.p1.output_size, 32, Relu(), param)
    self.f2 = FcLayer(self.f1.output_size, 10, Softmax(), param)
Example #3
def net():
    num_output = 10
    dataReader = LoadData(num_output)

    max_epoch = 1
    batch_size = 50
    eta = 0.01
    eps = 0.01
    params = CParameters(eta, max_epoch, batch_size, eps,
                         LossFunctionName.CrossEntropy3, InitialMethod.Xavier,
                         OptimizerName.Adam)

    loss_history = CLossHistory()

    net = NeuralNet(params)

    c1 = ConvLayer((1, 28, 28), (8, 3, 3), (1, 1), Relu(), params)
    net.add_layer(c1)

    c2 = ConvLayer(c1.output_shape, (8, 3, 3), (1, 1), Relu(), params)
    net.add_layer(c2)

    p1 = PoolingLayer(c2.output_shape, (2, 2), 2, PoolingTypes.MAX)
    net.add_layer(p1)

    c3 = ConvLayer(p1.output_shape, (16, 3, 3), (1, 1), Relu(), params)
    net.add_layer(c3)

    c4 = ConvLayer(c3.output_shape, (16, 3, 3), (1, 1), Relu(), params)
    net.add_layer(c4)

    p2 = PoolingLayer(c4.output_shape, (2, 2), 2, PoolingTypes.MAX)
    net.add_layer(p2)

    f1 = FcLayer(p2.output_size, 32, Relu(), params)
    net.add_layer(f1)

    f2 = FcLayer(f1.output_size, 10, Softmax(), params)
    net.add_layer(f2)

    net.train(dataReader, loss_history)

    loss_history.ShowLossHistory(params)
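With padding 1, the 3x3 convolutions in this stack preserve the spatial size, so only the pooling layers shrink it. A short, self-contained trace of the shapes (the formula is the standard one; the helper name is illustrative):

def out_size(in_size, kernel, stride, padding):
    return (in_size + 2 * padding - kernel) // stride + 1

size = 28
size = out_size(size, 3, 1, 1)   # c1: stays 28
size = out_size(size, 3, 1, 1)   # c2: stays 28
size = out_size(size, 2, 2, 0)   # p1: 28 -> 14
size = out_size(size, 3, 1, 1)   # c3: stays 14
size = out_size(size, 3, 1, 1)   # c4: stays 14
size = out_size(size, 2, 2, 0)   # p2: 14 -> 7
print(16 * size * size)          # 784 inputs reaching f1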
Example #4
class Model(object):
    def __init__(self, param):
        self.c1 = ConvLayer((1, 28, 28), (4, 3, 3), (2, 2), Relu(), param)
        # 4x15x15
        self.p1 = PoolingLayer(self.c1.output_shape, (2, 2), 2, PoolingTypes.MAX)
        # 4x7x7
        #self.c2 = ConvLayer(self.p1.output_shape, (8,3,3), (1,0), Relu(), param)
        # 8x5x5
        #self.p2 = PoolingLayer(self.c2.output_shape, (2,2), 2, PoolingTypes.MAX)
        # 8x2x2
        #self.f1 = FcLayer(self.p2.output_size, 32, Relu(), param)
        self.f1 = FcLayer(self.p1.output_size, 32, Relu(), param)
        self.f2 = FcLayer(self.f1.output_size, 10, Softmax(), param)

    def forward(self, x):
        net = self.c1.forward(x)
        net = self.p1.forward(net)
        #net = self.c2.forward(net)
        #net = self.p2.forward(net)
        net = self.f1.forward(net)
        net = self.f2.forward(net)
        self.output = net
        return self.output

    def backward(self, y):
        delta = self.output - y
        delta = self.f2.backward(delta, LayerIndexFlags.LastLayer)
        delta = self.f1.backward(delta, LayerIndexFlags.MiddleLayer)
        #delta = self.p2.backward(delta, LayerIndexFlags.MiddleLayer)
        #delta = self.c2.backward(delta, LayerIndexFlags.MiddleLayer)
        delta = self.p1.backward(delta, LayerIndexFlags.MiddleLayer)
        delta = self.c1.backward(delta, LayerIndexFlags.FirstLayer)

    def update(self):
        self.c1.update()
        #self.c2.update()
        self.f1.update()
        self.f2.update()

    def save(self):
        self.c1.save_parameters("c1")
        self.p1.save_parameters("p1")
        #self.c2.save_parameters("c2")
        #self.p2.save_parameters("p2")
        self.f1.save_parameters("f1")
        self.f2.save_parameters("f2")

    def load(self):
        self.c1.load_parameters("c1")
        self.p1.load_parameters("p1")
        #self.c2.load_parameters("c2")
        #self.p2.load_parameters("p2")
        self.f1.load_parameters("f1")
        self.f2.load_parameters("f2")
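A hedged sketch of how this Model is typically driven for one mini-batch, using random data in place of a real reader. The `param` object is assumed to be built as in the other examples (e.g. via CParameters), and depending on the library version the layers may also need an explicit initialize call; nothing below is part of the library itself:

import numpy as np

model = Model(param)                           # param: hyper-parameter object as above
x = np.random.randn(50, 1, 28, 28)             # one mini-batch of 50 fake 28x28 images
y = np.eye(10)[np.random.randint(0, 10, 50)]   # one-hot labels
output = model.forward(x)                      # forward pass through c1-p1-f1-f2
model.backward(y)                              # backprop, starting from output - y
model.update()                                 # apply the optimizer step per layer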
Example #5
def net():
    num_output = 10
    dr = ReadData()

    max_epoch = 1
    batch_size = 50
    eta = 0.001
    eps = 0.01
    params = CParameters(eta, max_epoch, batch_size, eps,
                         LossFunctionName.CrossEntropy3, InitialMethod.Xavier,
                         OptimizerName.Adam)

    loss_history = CLossHistory()

    net = NeuralNet(params)

    c1 = ConvLayer((3, 32, 32), (32, 3, 3), (1, 1), Relu(), params)
    net.add_layer(c1, "c1")

    p1 = PoolingLayer(c1.output_shape, (2, 2), 2, PoolingTypes.MAX)
    net.add_layer(p1, "p1")

    c2 = ConvLayer(p1.output_shape, (64, 3, 3), (1, 1), Relu(), params)
    net.add_layer(c2, "c2")

    p2 = PoolingLayer(c2.output_shape, (2, 2), 2, PoolingTypes.MAX)
    net.add_layer(p2, "p2")

    f1 = FcLayer(p2.output_size, 512, Relu(), params)
    net.add_layer(f1, "f1")

    f2 = FcLayer(f1.output_size, 10, Softmax(), params)
    net.add_layer(f2, "f2")

    net.train(dr, loss_history)

    loss_history.ShowLossHistory(params)
Example #6
def try_filters(file_name):
    img = cv2.imread(file_name)
    # cv2 loads images in B G R channel order; reorder to R G B
    img1 = img[:, :, [2, 1, 0]]
    #plt.imshow(img2)
    #plt.show()
    img2 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    batch_size = 1
    input_channel = 1
    (height, width) = img2.shape
    FH = 3
    FW = 3
    print(img2.shape)
    data = img2.reshape((1, 1, height, width))
    hp = HyperParameters_4_2(0.1,
                             10,
                             batch_size,
                             net_type=NetType.MultipleClassifier,
                             init_method=InitialMethod.Xavier,
                             optimizer_name=OptimizerName.Momentum)
    conv = ConvLayer((1, height, width), (1, FH, FW), (1, 1), hp)
    conv.initialize("know_cnn", "name")

    filters = [
        np.array([0, -1, 0, -1, 5, -1, 0, -1, 0]),  # sharpness filter
        np.array([0, 0, 0, -1, 2, -1, 0, 0, 0]),  # vertical edge
        np.array([1, 1, 1, 1, -9, 1, 1, 1, 1]),  # surround
        np.array([-1, -2, -1, 0, 0, 0, 1, 2, 1]),  # sobel y
        np.array([0, 0, 0, 0, 1, 0, 0, 0, 0]),  # nothing
        np.array([0, -1, 0, 0, 2, 0, 0, -1, 0]),  # horizontal edge
        np.array([0.11, 0.11, 0.11, 0.11, 0.11, 0.11, 0.11, 0.11, 0.11]),  # blur
        np.array([-1, 0, 1, -2, 0, 2, -1, 0, 1]),  # sobel x
        np.array([2, 0, 0, 0, -1, 0, 0, 0, -1]),  # embossing
    ]

    filters_name = [
        "sharpness", "vertical edge", "surround", "sobel y", "nothing",
        "horizontal edge", "blur", "sobel x", "embossing"
    ]

    fig, ax = plt.subplots(nrows=3, ncols=3, figsize=(9, 9))
    for i in range(len(filters)):
        filter = np.repeat(filters[i],
                           input_channel).reshape(batch_size, input_channel,
                                                  FH, FW)
        conv.set_filter(filter, None)
        z = conv.forward(data)
        #z = normalize(z, 255)
        ax[i // 3, i % 3].imshow(z[0, 0])
        ax[i // 3, i % 3].set_title(filters_name[i])
        ax[i // 3, i % 3].axis("off")
    plt.suptitle("filters")
    plt.show()
    return z
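What conv.forward computes for each 3x3 kernel above is plain 2D cross-correlation. A self-contained NumPy sketch of the same operation on a single grayscale channel (valid mode, no padding, for simplicity; none of this comes from the library):

import numpy as np

def cross_correlate2d(image, kernel):
    # valid-mode 2D cross-correlation: slide the kernel, sum elementwise products
    kh, kw = kernel.shape
    h, w = image.shape
    out = np.zeros((h - kh + 1, w - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(image[i:i + kh, j:j + kw] * kernel)
    return out

gray = np.random.rand(28, 28)                             # stand-in for img2 above
sharpen = np.array([0, -1, 0, -1, 5, -1, 0, -1, 0]).reshape(3, 3)
print(cross_correlate2d(gray, sharpen).shape)             # (26, 26)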
Example #7
def test_performance():
    batch_size = 64
    params = HyperParameters_4_2(0.1,
                                 1,
                                 batch_size,
                                 net_type=NetType.MultipleClassifier,
                                 init_method=InitialMethod.Xavier)
    stride = 1
    padding = 1
    fh = 3
    fw = 3
    input_channel = 3
    output_channel = 4
    iw = 28
    ih = 28
    # a batch of 64 images of shape 3 x 28 x 28 (simulating mnist-sized input)
    x = np.random.randn(batch_size, input_channel, iw, ih)

    c1 = ConvLayer((input_channel, iw, ih), (output_channel, fh, fw),
                   (stride, padding), params)
    c1.initialize("test", "test", False)

    # dry run
    for i in range(5):
        f1 = c1.forward_numba(x)
        delta_in = np.ones(f1.shape)
        #b1, dw1, db1 = c1.backward_numba(delta_in, 1)
    # run
    s1 = time.time()
    for i in range(1000):
        f1 = c1.forward_numba(x)
        #b1, dw1, db1 = c1.backward_numba(delta_in, 1)
    e1 = time.time()
    print("method numba:", e1 - s1)

    # dry run
    for i in range(5):
        f2 = c1.forward_img2col(x)
        #b2, dw2, db2 = c1.backward_col2img(delta_in, 1)
    # run
    s2 = time.time()
    for i in range(1000):
        f2 = c1.forward_img2col(x)
        #b2, dw2, db2 = c1.backward_col2img(delta_in, 1)
    e2 = time.time()
    print("method im2col:", e2 - s2)

    print("compare correctness of method 1 and method 2:")
    print("forward:", np.allclose(f1, f2, atol=1e-7))
Example #8
class Model(object):
    def __init__(self, param):
        self.c1 = ConvLayer((1, 28, 28), (4, 5, 5), (1, 0), Relu(), param)
        # 4x24x24
        self.p1 = PoolingLayer(self.c1.output_shape, (2, 2), 2, PoolingTypes.MAX)
        # 4x12x12
        self.f1 = FcLayer(self.p1.output_size, 32, Sigmoid(), param)
        self.f2 = FcLayer(self.f1.output_size, 10, Softmax(), param)

    def forward(self, x):
        a_c1 = self.c1.forward(x)
        a_p1 = self.p1.forward(a_c1)
        a_f1 = self.f1.forward(a_p1)
        a_f2 = self.f2.forward(a_f1)
        self.output = a_f2
        return self.output

    def backward(self, y):
        delta_in = self.output - y
        d_f2 = self.f2.backward(delta_in, LayerIndexFlags.LastLayer)
        d_f1 = self.f1.backward(d_f2, LayerIndexFlags.MiddleLayer)
        d_p1 = self.p1.backward(d_f1, LayerIndexFlags.MiddleLayer)
        d_c1 = self.c1.backward(d_p1, LayerIndexFlags.FirstLayer)

    def update(self, learning_rate):
        self.c1.update()
        self.f1.update()
        self.f2.update()

    def save(self):
        self.c1.save_parameters("c1")
        self.p1.save_parameters("p1")
        self.f1.save_parameters("f1")
        self.f2.save_parameters("f2")

    def load(self):
        self.c1.load_parameters("c1")
        self.p1.load_parameters("p1")
        self.f1.load_parameters("f1")
        self.f2.load_parameters("f2")
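backward starts from delta_in = self.output - y, which is the combined gradient of softmax followed by cross-entropy loss with respect to the logits. A quick self-contained numeric check of that identity (pure NumPy, nothing from the library):

import numpy as np

def softmax(z):
    a = np.exp(z - z.max())
    return a / a.sum()

def cross_entropy(z, y):
    return -np.sum(y * np.log(softmax(z)))

np.random.seed(0)
z = np.random.randn(10)          # logits for one sample
y = np.zeros(10)
y[3] = 1.0                       # one-hot label

analytic = softmax(z) - y        # the gradient used as delta_in above
eps = 1e-6
numeric = np.array([(cross_entropy(z + eps * np.eye(10)[k], y)
                     - cross_entropy(z - eps * np.eye(10)[k], y)) / (2 * eps)
                    for k in range(10)])
print(np.allclose(analytic, numeric, atol=1e-5))   # True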
Example #9
def conv_relu_pool():
    img = cv2.imread(circle_pic)
    #img2 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    batch_size = 1
    (height, width, input_channel) = img.shape
    FH = 3
    FW = 3
    data = np.transpose(img, axes=(2, 1, 0)).reshape(
        (batch_size, input_channel, width, height))
    hp = HyperParameters_4_2(0.1,
                             10,
                             batch_size,
                             net_type=NetType.MultipleClassifier,
                             init_method=InitialMethod.Xavier,
                             optimizer_name=OptimizerName.Momentum)
    conv = ConvLayer((input_channel, width, height), (1, FH, FW), (1, 0), hp)
    conv.initialize("know_cnn", "conv")
    kernel = np.array([-1, 0, 1, -2, 0, 2, -1, 0, 1])
    filter = np.repeat(kernel, input_channel).reshape(batch_size,
                                                      input_channel, FH, FW)
    conv.set_filter(filter, None)
    z1 = conv.forward(data)
    z2 = Relu().forward(z1)
    pool = PoolingLayer(z2[0].shape, (2, 2), 2, PoolingTypes.MAX)
    pool.initialize("know_cnn", "pool")
    z3 = pool.forward(z2)

    fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(8, 6))
    ax[0, 0].imshow(img[:, :, [2, 1, 0]])
    ax[0, 0].axis("off")
    ax[0, 0].set_title("source:" + str(img.shape))
    ax[0, 1].imshow(z1[0, 0].T)
    ax[0, 1].axis("off")
    ax[0, 1].set_title("conv:" + str(z1.shape))
    ax[1, 0].imshow(z2[0, 0].T)
    ax[1, 0].axis("off")
    ax[1, 0].set_title("relu:" + str(z2.shape))
    ax[1, 1].imshow(z3[0, 0].T)
    ax[1, 1].axis("off")
    ax[1, 1].set_title("pooling:" + str(z3.shape))

    plt.suptitle("conv-relu-pool")
    plt.show()
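The 2x2, stride-2 max pooling at the end of this pipeline can be reproduced in a couple of NumPy lines by splitting the spatial axes into 2x2 blocks; a minimal sketch for even input sizes (not the library's implementation):

import numpy as np

def max_pool_2x2(x):
    # x: (N, C, H, W) with even H and W; 2x2 window, stride 2
    n, c, h, w = x.shape
    return x.reshape(n, c, h // 2, 2, w // 2, 2).max(axis=(3, 5))

x = np.random.randn(1, 1, 24, 24)
print(max_pool_2x2(x).shape)   # (1, 1, 12, 12)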
Example #10
def test_4d_im2col():
    batch_size = 2
    stride = 1
    padding = 0
    fh = 2
    fw = 2
    input_channel = 3
    output_channel = 2
    iw = 3
    ih = 3

    x = np.random.randn(batch_size, input_channel, iw, ih)
    params = HyperParameters_4_2(
        0.1, 1, batch_size,
        net_type=NetType.MultipleClassifier,
        init_method=InitialMethod.Xavier)
    c1 = ConvLayer((input_channel,iw,ih), (output_channel,fh,fw), (stride, padding), params)
    c1.initialize("test", "test", False)
    f1 = c1.forward_numba(x)
    f2 = c1.forward_img2col(x)
    print("correctness:", np.allclose(f1, f2, atol=1e-7))