Example #1
    def test_learn_with_depth_and_multiple_filter(self):
        net = fcnetwork.FCLayer(arch=[784, 100, 10], load_path=None)

        enhancedModel = model.Model(learning_rate=0.1,
                                    dataset=None,
                                    layerContainer=[net])

        net = enhancedModel.getLayerContainer()[0]

        before_w, before_b = net.getWeightsAndBiases()

        enhancedModel.test_learn_mnist(epoch=1, batch_size=30)

        net = enhancedModel.getLayerContainer()[0]

        after_w, after_b = net.getWeightsAndBiases()

        # every weight and bias matrix should have changed after training
        for b, a in zip(before_w, after_w):
            self.assertFalse((b == a).all())
        for b, a in zip(before_b, after_b):
            self.assertFalse((b == a).all())
Example #2
    def test_online_learning1(self):
        pathdir = "./tensorfiles"
        filename = "online_learning1"
        fullypath = pathdir + "/" + filename
        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            # print("la2")
            ws1 = numpy.array([[0.2, 0.3], [0.4, 0.5], [1.1, 0.1]])
            # print(f"w = {ws1.shape}")
            ws2 = numpy.array([[0.9, 0.3, 0.1], [0.3, 0.4, 0.1]])
            ws  = [ws1, ws2]
            bs1 = [0, 0, 0]
            bs2 = [0, 0]

            # return
            TensorFileManager("./tensorfiles").save(filename + ".bs1", bs1)
            TensorFileManager("./tensorfiles").save(filename + ".bs2", bs2)
            TensorFileManager("./tensorfiles").save(filename + ".ws1", ws1)
            TensorFileManager("./tensorfiles").save(filename + ".ws2", ws2)
        net = fcn.FCLayer(arch=[2, 3, 2], load_path=filename)

        input = numpy.array([0.1, 0.2])
        expected_res = numpy.array([1,  0])
        
        previous_delta = numpy.array([-0.00601069,  0.00878729,  0.00173201])
        ws1 = numpy.array([[0.2, 0.3], [0.4, 0.5], [1.1, 0.1]])

        expected_delta = numpy.dot(previous_delta, ws1)
        
        net.compute(input, learn=True)
        net.learn(expected_res)
        returnedDelta = net.getLastDelta()

        res = numpy.isclose(returnedDelta, expected_delta, atol=1e-5)

        self.assertTrue(res.all())
Example #3
def model_for_mnist():

    layerContainer = [fcnetwork.FCLayer(arch=[784, 100, 10])]
    learning_rate = 0.01

    modelMnist = model.Model(learning_rate=learning_rate,
                             dataset=None,
                             layerContainer=layerContainer)

    modelMnist.test_learn_mnist(epoch=50, batch_size=10)
Example #4
    def test_perceptron1(self):
        pathdir = "./tensorfiles"
        filename = "perceptest1"
        fullypath = pathdir + "/" + filename
        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            TensorFileManager("./tensorfiles").save(filename + ".bs1", numpy.array([[0.4]]))
            TensorFileManager("./tensorfiles").save(filename + ".ws1", numpy.array([[1.1]]))
        net = fcn.FCLayer(arch=[1, 1], load_path=filename)
        expected_res = fcn.sigmoid(0.4 + 1.1)
        res = net.compute(numpy.array([1]))

        self.assertEqual(res, expected_res)
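
The expected value here is just the logistic activation of w*x + b = 1.1*1 + 0.4 = 1.5. A minimal stand-alone check of that arithmetic, assuming fcn.sigmoid is the standard logistic function:

import numpy

# sigmoid(1.5) = 1 / (1 + e**-1.5) ~ 0.8176
w, b = 1.1, 0.4
x = numpy.array([1.0])
expected = 1.0 / (1.0 + numpy.exp(-(w * x + b)))
print(expected)  # [0.81757448]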
Example #5
def test_learn():
    layerContainer = [
        #3, 150, 150
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c1",
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c2",
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c3",
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                          load_path="ff2fcn1",
                          arch=[36992, 512, 128],
                          activation_func="relu",
                          is_classifier=False),
        softmax.SoftmaxLayer(optimizer=adam.AdamFC(),
                             load_path="ff2softm",
                             arch=[128, 5])
    ]

    # the learning rate passed here is unused: each layer carries its own Adam optimizer
    model_FAndF = model.Model(learning_rate=0.001,
                              dataset=None,
                              layerContainer=layerContainer)

    pic = iml.ImageLoader.getOutputNpArray(example1,
                                           crop=True,
                                           crop_size=(0, 0, 150, 150))

    y = model_FAndF.compute(pic)
    print(y)
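
The shape comments in the layer list can be verified by hand: each 3x3 convolution with stride 1 and no padding shrinks a spatial dimension by 2, and each 2x2 pool with stride 2 halves it, so 150 -> 148 -> 74, 74 -> 72 -> 36, 36 -> 34 -> 17, and the flattened input to the fully connected layer is 128 * 17 * 17 = 36992. A small sketch of that bookkeeping (plain Python, no project imports):

def conv_pool_out(size, kernel=3, pool=2):
    # valid convolution with stride 1, followed by a 2x2 pool with stride 2
    return (size - kernel + 1) // pool

s = 150
for _ in range(3):        # the three conv + pool stages above
    s = conv_pool_out(s)  # 150 -> 74 -> 36 -> 17
print(s, 128 * s * s)     # 17 36992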
Example #6
def flowerAndFun(path=example1):
    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 150, 150))

    layerContainer = [
        #3, 150, 150
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(), arch=[36992, 512, 128, 5])
    ]
    learning_rate = 0.0001

    model_FAndF = model.Model(learning_rate=learning_rate,
                              dataset=None,
                              layerContainer=layerContainer)

    model_FAndF.test_learn(epoch=50)
Example #7
    def test_shallow_network1(self):
        pathdir = "./tensorfiles"
        filename = "shallow_network1"
        fullypath = pathdir + "/" + filename
        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            ws1 = numpy.array([[1, 0.7], [0.1, 0.8], [0.4, 0.9]])
            ws2 = numpy.array([[0.9, 0.7, 0.1]])
            ws  = [ws1, ws2]
            bs1 = [[0, 0, 0]]
            bs2 = [[0]]
            TensorFileManager("./tensorfiles").save(filename + ".bs1", bs1)
            TensorFileManager("./tensorfiles").save(filename + ".bs2", bs2)
            TensorFileManager("./tensorfiles").save(filename + ".ws1", ws1)
            TensorFileManager("./tensorfiles").save(filename + ".ws2", ws2)
        net = fcn.FCLayer(arch=[2, 3, 1], load_path=filename)

        input = numpy.array([0.3, 0.4])
        expected_res = 0.7406534729647368
        res = net.compute(input)

        self.assertEqual(res, expected_res)
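
The hard-coded expected_res can be reproduced with plain NumPy, assuming the layer applies the standard logistic sigmoid at every node and uses the zero biases saved above:

import numpy

def sigmoid(z):
    return 1.0 / (1.0 + numpy.exp(-z))

ws1 = numpy.array([[1, 0.7], [0.1, 0.8], [0.4, 0.9]])
ws2 = numpy.array([[0.9, 0.7, 0.1]])
x = numpy.array([0.3, 0.4])

hidden = sigmoid(ws1 @ x)    # three hidden activations
out = sigmoid(ws2 @ hidden)  # single output neuron
print(out)                   # [0.74065347]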
Example #8
    def test_learn_with_depth_and_multiple_filter(self):
        # input 3, 5, 5
        # filter 3, 3, 2, 2 => output 3, 4, 4
        input = numpy.array([[[0.1, 2, 0.11, 0.3, 1], [0, 0.4, 0.4, 0.36, 1],
                              [0, 0.12, 0.27, 0.34, -3],
                              [0.62, 0.12, 0.11, 10, 1], [0, 56, 11, 23, 44]],
                             [[0.11, 0.58, -1, 2, 0.35],
                              [0.1, 0.36, 0.12, 0.8, 0.27],
                              [0.27, 0, 0.64, 1, 0.12], [1, -1, 0.4, 3, 11],
                              [0, 0.56, 0.11, 0.23, 0.44]],
                             [[1, 3, 0.24, 5, -1], [0.12, 2, 0, 1, 11],
                              [-0.1, 0.2, 0.3, 0.11, 0.22],
                              [12, 0.27, 0.18, 0.2, 0.34],
                              [0, 0.56, 0.11, 0.23, 0.44]]])

        pathdir = "./tensorfiles"
        filename = "conv_test_depth_multiple_filter"
        fullypath = pathdir + "/" + filename

        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            tm = TensorFileManager("./tensorfiles")

            containerfilter = numpy.ndarray((0, 3, 2, 2))

            f3 = numpy.array([[[0.3, 0.3], [0.3, 0.3]],
                              [[0.3, 0.3], [0.3, 0.3]],
                              [[0.3, 0.3], [0.3, 0.3]]])
            containerfilter = numpy.insert(containerfilter, 0, f3, 0)
            f2 = numpy.array([[[0.2, 0.2], [0.2, 0.2]],
                              [[0.2, 0.2], [0.2, 0.2]],
                              [[0.2, 0.2], [0.2, 0.2]]])
            containerfilter = numpy.insert(containerfilter, 0, f2, 0)
            f1 = numpy.array([[[0.1, 0.1], [0.1, 0.1]],
                              [[0.1, 0.1], [0.1, 0.1]],
                              [[0.1, 0.1], [0.1, 0.1]]])
            containerfilter = numpy.insert(containerfilter, 0, f1, 0)

            biases = numpy.zeros((3, ))

            tm.save("conv_test_depth_multiple_filter.bs1", biases)
            tm.save("conv_test_depth_multiple_filter.ws1", containerfilter)

        l1 = conv.ConvLayer(load_path=filename,
                            filtershape=(3, 3, 2, 2),
                            pool=pool.PoolLayer(),
                            activation_function="relu")
        output = l1.compute(input)

        filename = "fcn_test_depth_multiple_filter"

        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            tm = TensorFileManager("./tensorfiles")

            ws = numpy.array(
                [[0.1, 0.3, 0.5, 0.12, 0.9, 0.12, 0.9, 0.10, 0.1, 0.11, 0.12, 0.13],
                 [0.34, 0.3, 0.64, 0.12, 1, 0.12, 0.1, 0.1, 0.12, 0.13, 0.15, 0.11]])
            biases = numpy.zeros((2, ))

            tm.save("fcn_test_depth_multiple_filter.bs1", biases)
            tm.save("fcn_test_depth_multiple_filter.ws1", ws)

        l2 = fcnetwork.FCLayer(arch=[12, 2],
                               load_path="fcn_test_depth_multiple_filter")

        expected_res = numpy.array([1, 0])

        l2.compute(input=output, learn=True)
        l2.learn(expected_res)

        delta = l2.getLastDelta()

        l1.learn(delta)

        nabla_w = l1.getNablaW()

        self.assertTrue(numpy.isclose(nabla_w[0][0][0][0], 4.1609570961e-09))
        self.assertTrue(numpy.isclose(nabla_w[1][1][0][0], 1.8135273233e-09))
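
Shape bookkeeping for this test, following the comments above and assuming the default PoolLayer is a 2x2 window with stride 2 as in the other examples: the (3, 3, 2, 2) filter bank turns the 3x5x5 input into three 4x4 feature maps, pooling halves them to 2x2, and the fully connected layer therefore sees 3 * 2 * 2 = 12 flattened values, matching arch=[12, 2].

conv_side = 5 - 2 + 1        # 2x2 filter, stride 1, no padding -> 4
pool_side = conv_side // 2   # assumed 2x2 pool with stride 2 -> 2
print(3 * pool_side ** 2)    # 12, the FC input size in arch=[12, 2]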
Example #9
def flowerAndFun2(path=example1):
    def signal_handler(sig, frame):
        # saveModel.saveLayers(["ff2c1", "ff2c2", "ff2c3", "ff2fcn1", "ff2softm"])
        pic = iml.ImageLoader.getOutputNpArray(example1,
                                               crop=True,
                                               crop_size=(0, 0, 150, 150))

        y = saveModel.compute(pic)
        saveModel.saveLayers([
            "ff2c1", "d1", "ff2c2", "d2", "ff2c3", "d3", "ff2fcn1", "d4",
            "ff2softm"
        ])
        print(y)
        sys.exit(0)

    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 150, 150))

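    # fresh-initialization variant of the same network (no load_path), kept commented out for reference: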
    # layerContainer = [
    #     #3, 150, 150
    #     conv.ConvLayer(optimizer=adam.AdamConv(), filtershape=(32, 3, 3, 3), stride_length=1, pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2), ishape=(3, 150, 150)),
    #     dropout.DropoutLayer(p=0.2, ishape=(32, 74, 74)),

    #     #32, 74, 74
    #     conv.ConvLayer(optimizer=adam.AdamConv(), filtershape=(64, 32, 3, 3), stride_length=1, pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2), ishape=(32, 74, 74)),
    #     dropout.DropoutLayer(p=0.2, ishape=(64, 36, 36)),

    #     #64, 36, 36
    #     conv.ConvLayer(optimizer=adam.AdamConv(), filtershape=(128, 64, 3, 3), stride_length=1, pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2), ishape=(64, 36, 36)),
    #     dropout.DropoutLayer(p=0.2, ishape=(128, 17, 17)),

    #     #128, 17, 17
    #     fcnetwork.FCLayer(optimizer=adam.AdamFC(), arch=[36992, 512, 128], activation_func="relu", is_classifier=False),
    #     dropout.DropoutLayer(p=0.2, ishape=(128,)),

    #     softmax.SoftmaxLayer(optimizer=adam.AdamFC(), arch=[128, 5])
    # ]
    # load the previously saved layers
    layerContainer = [
        #3, 150, 150
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c1",
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),
        dropout.DropoutLayer(p=0.2, ishape=(32, 74, 74)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c2",
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),
        dropout.DropoutLayer(p=0.2, ishape=(64, 36, 36)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c3",
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),
        dropout.DropoutLayer(p=0.2, ishape=(128, 17, 17)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                          load_path="ff2fcn1",
                          arch=[36992, 512, 128],
                          activation_func="relu",
                          is_classifier=False),
        dropout.DropoutLayer(p=0.2, ishape=(128, )),
        softmax.SoftmaxLayer(optimizer=adam.AdamFC(),
                             load_path="ff2softm",
                             arch=[128, 5])
    ]
    signal.signal(signal.SIGINT, signal_handler)

    # the learning rate passed here is unused: each layer carries its own Adam optimizer
    model_FAndF = model.Model(learning_rate=0.001,
                              dataset=None,
                              layerContainer=layerContainer)

    saveModel = model_FAndF
    # saveModel.saveLayers(["ff2c1", "ff2c2", "ff2c3", "ff2fcn1", "ff2softm"])

    model_FAndF.test_learn(epoch=50)
Example #10
def flowerAndFunModel(path=example1):

    # ImageLoader crops to 224x224 by default; the crop is overridden to 150x150 here
    print(f"Model loads this picture as input: {path}")
    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 150, 150))
    dir = "./tensorfiles"

    if not os.path.exists(dir + "/" + "ff2c1" + ".ws1.npy"):
        layerContainer = [
            #3, 150, 150
            conv.ConvLayer(optimizer=adam.AdamConv(),
                           filtershape=(32, 3, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(2, 2),
                                               stride_length=2),
                           ishape=(3, 150, 150)),

            #32, 74, 74
            conv.ConvLayer(optimizer=adam.AdamConv(),
                           filtershape=(64, 32, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(2, 2),
                                               stride_length=2),
                           ishape=(32, 74, 74)),

            #64, 36, 36
            conv.ConvLayer(optimizer=adam.AdamConv(),
                           filtershape=(128, 64, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(2, 2),
                                               stride_length=2),
                           ishape=(64, 36, 36)),

            #128, 17, 17
            fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                              arch=[36992, 512, 128],
                              activation_func="relu",
                              is_classifier=False),
            softmax.SoftmaxLayer(optimizer=adam.AdamFC(), arch=[128, 5])
        ]
        ffM = model.Model(learning_rate=None,
                          dataset=None,
                          layerContainer=layerContainer)
        ffM.saveLayers(["ff2c1", "ff2c2", "ff2c3", "ff2fcn1", "ff2softm"])

    layerContainer = [
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c1",
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c2",
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c3",
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                          load_path="ff2fcn1",
                          arch=[36992, 512, 128],
                          activation_func="relu",
                          is_classifier=False),
        softmax.SoftmaxLayer(optimizer=adam.AdamFC(),
                             load_path="ff2softm",
                             arch=[128, 5])
    ]

    ffM = model.Model(learning_rate=None,
                      dataset=None,
                      layerContainer=layerContainer)

    try:
        output = ffM.compute(input)
    except Exception:
        print("error occurred in the flowerAndFun model")
        return "error"
    return return_response(output)


Example #11
def zf5model(path=example1):
    # from this architecture:
    # https://www.researchgate.net/figure/Architecture-of-ZF-model-An-3-channels-image-with-224224-is-as-the-input-It-is_fig5_318577329

    # ImageLoader crops to 224x224 by default
    print(f"Model loads this picture as input: {path}")
    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 224, 224))
    dir = "./tensorfiles"

    if not os.path.exists(dir + "/" + "conv1" + ".ws1.npy"):
        layerContainer = [
            #3, 224, 224
            conv.ConvLayer(padding=1,
                           filtershape=(96, 3, 7, 7),
                           stride_length=2,
                           pool=pool.PoolLayer(pad=1,
                                               pool_size=(3, 3),
                                               stride_length=2),
                           ishape=(3, 224, 224)),
            #96, 55, 55
            conv.ConvLayer(filtershape=(256, 96, 5, 5),
                           stride_length=2,
                           pool=pool.PoolLayer(pad=1,
                                               pool_size=(3, 3),
                                               stride_length=2),
                           ishape=(96, 55, 55)),
            #256, 26, 26
            conv.ConvLayer(padding=1,
                           filtershape=(384, 256, 3, 3),
                           stride_length=1,
                           ishape=(256, 26, 26)),

            #384, 13, 13
            conv.ConvLayer(padding=1,
                           filtershape=(384, 384, 3, 3),
                           stride_length=1,
                           ishape=(384, 13, 13)),

            #384, 13, 13
            conv.ConvLayer(padding=1,
                           filtershape=(256, 384, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(3, 3),
                                               stride_length=2),
                           ishape=(384, 13, 13)),

            #do use he initialization here
            fcnetwork.FCLayer(arch=[9216, 4096, 4096, 5])
        ]

        zf5 = model.Model(learning_rate=None,
                          dataset=None,
                          layerContainer=layerContainer)

        zf5.saveLayers(
            ["conv1", "conv2", "conv3", "conv4", "conv5", "classifier"])

    layerContainer = [
        #3, 224, 224
        conv.ConvLayer(load_path="conv1"),
        #96, 55, 55
        conv.ConvLayer(load_path="conv2"),
        #256, 26, 26
        conv.ConvLayer(load_path="conv3"),

        #384, 13, 13
        conv.ConvLayer(load_path="conv4"),

        #384, 13, 13
        conv.ConvLayer(load_path="conv5"),

        #do use he initialization here
        fcnetwork.FCLayer(arch=[9216, 4096, 4096, 5], load_path="classifier")
    ]

    zf5 = model.Model(learning_rate=None,
                      dataset=None,
                      layerContainer=layerContainer)

    try:
        output = zf5.compute(input)
    except Exception:
        print("error occurred in the zf5 model")
        return "error"
    return return_response(output)
Example #12
# from this architecture:
# https://www.researchgate.net/figure/Architecture-of-ZF-model-An-3-channels-image-with-224224-is-as-the-input-It-is_fig5_318577329

layerContainer = [
    conv.ConvLayer(padding=1,
                   filtershape=(96, 3, 7, 7),
                   stride_length=2,
                   pool=pool.PoolLayer(pad=1,
                                       pool_size=(3, 3),
                                       stride_length=2)),
    conv.ConvLayer(filtershape=(256, 96, 5, 5),
                   stride_length=2,
                   pool=pool.PoolLayer(pad=1,
                                       pool_size=(3, 3),
                                       stride_length=2)),
    conv.ConvLayer(padding=1, filtershape=(384, 256, 3, 3), stride_length=1),
    conv.ConvLayer(padding=1, filtershape=(384, 384, 3, 3), stride_length=1),
    conv.ConvLayer(padding=1,
                   filtershape=(256, 384, 3, 3),
                   stride_length=1,
                   pool=pool.PoolLayer(pool_size=(3, 3), stride_length=2)),
    fcnetwork.FCLayer(arch=[9216, 4096, 5])
]

zf5 = model.Model(learning_rate=None,
                  dataset=None,
                  layerContainer=layerContainer)

output = zf5.compute(input)

print(output.shape)
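
This snippet assumes `input` is already a 3x224x224 image array; in the examples above it is produced by the image loader, e.g.:

# `example1` stands for any image path; loader usage mirrors the zf5model example
input = iml.ImageLoader.getOutputNpArray(example1,
                                         crop=True,
                                         crop_size=(0, 0, 224, 224))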