Example #1
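# Builds the flower classifier from saved weights (ff2c1 .. ff2softm) and runs a single
# forward pass on a 150x150 crop of example1, printing the raw output.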
def test_learn():
    layerContainer = [
        #3, 150, 150
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c1",
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c2",
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c3",
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                          load_path="ff2fcn1",
                          arch=[36992, 512, 128],
                          activation_func="relu",
                          is_classifier=False),
        softmax.SoftmaxLayer(optimizer=adam.AdamFC(),
                             load_path="ff2softm",
                             arch=[128, 5])
    ]
    # signal.signal(signal.SIGINT, signal_handler)

    ## the learning_rate passed here is not actually used
    model_FAndF = model.Model(learning_rate=0.001,
                              dataset=None,
                              layerContainer=layerContainer)

    pic = iml.ImageLoader.getOutputNpArray(example1,
                                           crop=True,
                                           crop_size=(0, 0, 150, 150))

    y = model_FAndF.compute(pic)
    print(y)
Example #2
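    # Unit test for ConvLayer.slidingWindow: three constant 3x2x2 filters (values 1, 2, 3)
    # over an all-ones (3, 4, 3) input should yield feature maps filled with 12, 24 and 36.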
    def test_sliding_window_2(self):
        pathdir = "./tensorfiles"
        filename = "conv_sliding_window2"
        fullypath = pathdir + "/" + filename
        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            tm = TensorFileManager("./tensorfiles")

            containerfilter = numpy.ndarray((0, 3, 2, 2))

            f3 = numpy.array([[[3, 3], [3, 3]], [[3, 3], [3, 3]],
                              [[3, 3], [3, 3]]])
            containerfilter = numpy.insert(containerfilter, 0, f3, 0)
            f2 = numpy.array([[[2, 2], [2, 2]], [[2, 2], [2, 2]],
                              [[2, 2], [2, 2]]])
            containerfilter = numpy.insert(containerfilter, 0, f2, 0)
            f1 = numpy.array([[[1, 1], [1, 1]], [[1, 1], [1, 1]],
                              [[1, 1], [1, 1]]])
            containerfilter = numpy.insert(containerfilter, 0, f1, 0)

            biases = numpy.zeros((3, ))

            tm.save("conv_sliding_window2.bs1", biases)
            tm.save("conv_sliding_window2.ws1", containerfilter)

        conv1 = conv.ConvLayer(load_path="conv_sliding_window2")
        input = numpy.ones((3, 4, 3))
        output = conv1.slidingWindow(input)
        self.assertEqual(output.shape, (3, 3, 2))

        expected_o1 = numpy.ones((3, 2)) * 12
        self.assertEqual((output[0] == expected_o1).all(), True)
        expected_o2 = numpy.ones((3, 2)) * 24
        self.assertEqual((output[1] == expected_o2).all(), True)
        expected_o3 = numpy.ones((3, 2)) * 36
        self.assertEqual((output[2] == expected_o3).all(), True)
Example #3
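    # Sanity check for AdamConv: after one forward pass, a backward pass with a random
    # delta, and modify_weights, the filters and biases must differ from their initial values.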
    def simple_adam_optimizer_test(self):
        train, test_data = dataloader.load_some_flowers(5,
                                                        0,
                                                        crop_size=(0, 0, 150,
                                                                   150))

        x = list(train)[0][0]

        c1 = conv.ConvLayer(optimizer=adam.AdamConv(),
                            filtershape=(32, 3, 3, 3),
                            stride_length=1,
                            pool=pool.PoolLayer(pool_size=(2, 2),
                                                stride_length=2),
                            ishape=(3, 150, 150))

        initFilters, initBiases = c1.getFiltersAndBiases()

        out = c1.compute(x, learn=True)

        false_delta = numpy.random.randn(32, 74, 74)

        c1.learn(false_delta)

        c1.modify_weights(learning_rate=0.1, batch_size=1)

        finalFilters, finalBiases = c1.getFiltersAndBiases()

        # print(f"init f {initFilters}")
        # print(f"final f {finalFilters}")

        self.assertFalse((initFilters == finalFilters).all())
        self.assertFalse((initBiases == finalBiases).all())
Example #4
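    # Checks the bias gradient of a conv layer: after backpropagating an all-ones
    # (32, 74, 74) delta, nabla_b should have shape (32,).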
    def test_bias_gradient1(self):
        train, test_data = dataloader.load_some_flowers(5,
                                                        0,
                                                        crop_size=(0, 0, 150,
                                                                   150))

        x = list(train)[0][0]

        c1 = conv.ConvLayer(filtershape=(32, 3, 3, 3),
                            stride_length=1,
                            pool=pool.PoolLayer(pool_size=(2, 2),
                                                stride_length=2),
                            ishape=(3, 150, 150))

        out = c1.compute(x, learn=True)

        self.assertEqual(out.shape, (32, 74, 74))

        false_delta = numpy.ones((32, 74, 74))

        c1.learn(false_delta)

        nabla_b = c1.getNablaB()

        self.assertEqual(nabla_b.shape, (32, ))
Example #5
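# Builds the flower classifier from freshly initialised layers (three conv layers plus a
# fully connected head) and trains it for 50 epochs via model.test_learn.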
def flowerAndFun(path=example1):
    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 150, 150))

    layerContainer = [
        #3, 150, 150
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(), arch=[36992, 512, 128, 5])
    ]
    learning_rate = 0.0001

    model_FAndF = model.Model(learning_rate=learning_rate,
                              dataset=None,
                              layerContainer=layerContainer)

    # output = model_FAndF.compute(input, learn=True)

    # model_FAndF.soft_learn()
    model_FAndF.test_learn(epoch=50)
Example #6
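# OverFeat-style stack of seven conv layers on a 3x224x224 input: builds the model,
# runs a forward pass and then a learning step.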
def overfeat(path=example1):
    # load the input (the default crop is 224x224, matching ishape of the first layer)
    input = iml.ImageLoader.getOutputNpArray(path, crop=True)

    layerContainer = [
        #3, 224, 224
        conv.ConvLayer(padding=1,
                       filtershape=(96, 3, 7, 7),
                       stride_length=2,
                       pool=pool.PoolLayer(pool_size=(3, 3), stride_length=3),
                       ishape=(3, 224, 224)),
        #96, 36, 36
        conv.ConvLayer(padding=1,
                       filtershape=(256, 96, 5, 5),
                       stride_length=1,
                       pool=pool.PoolLayer(pad=1,
                                           pool_size=(2, 2),
                                           stride_length=2),
                       ishape=(96, 36, 36)),
        #256, 18, 18
        conv.ConvLayer(padding=1,
                       filtershape=(512, 256, 3, 3),
                       stride_length=1,
                       ishape=(256, 18, 18)),
        #512, 18, 18
        conv.ConvLayer(padding=1,
                       filtershape=(512, 512, 3, 3),
                       stride_length=1,
                       ishape=(512, 18, 18)),
        #512, 18, 18
        conv.ConvLayer(padding=1,
                       filtershape=(512, 512, 3, 3),
                       stride_length=1,
                       ishape=(512, 18, 18)),
        #512, 18, 18
        conv.ConvLayer(padding=1,
                       filtershape=(512, 512, 3, 3),
                       stride_length=1,
                       ishape=(512, 18, 18)),
        #512, 18, 18
        conv.ConvLayer(padding=1,
                       filtershape=(512, 512, 3, 3),
                       stride_length=1,
                       ishape=(512, 18, 18)),
    ]

    # model.learn()

    overfeat = model.Model(learning_rate=None,
                           dataset=None,
                           layerContainer=layerContainer)
    output = overfeat.compute(input)

    #     overfeat.saveLayers(["OVconv1", "OVconv2", "OVconv3", "OVconv4", "OVconv5", "OVconv6", "OVconv7", "OVclassifier"])

    overfeat.learn()
Example #7
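    # Shape-only test for slidingWindow: with nb_filters=64 and the default filter size,
    # a (3, 4, 3) input should produce a (64, 3, 2) output.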
    def test_sliding_window3d_1(self):
        input = numpy.ndarray((3, 4, 3))
        conv1 = conv.ConvLayer(nb_filters=64)

        output = conv1.slidingWindow(input)
        self.assertEqual(output.shape, (64, 3, 2))
Example #8
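    # Backpropagation test through a conv layer with depth 3 and four 2x2 filters:
    # a delta is projected back from a two-neuron output via a fixed weight matrix,
    # then getLastDelta is checked for shape (3, 5, 5) and one reference value.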
    def test_get_next_delta2(self):
        # with depth and filter
        # input  = (3, 5, 5)
        # filter = (4, 3, 2, 2)

        pathdir = "./tensorfiles"
        filename1 = "conv_next_delta_test1"

        input = numpy.array([[[0.1, 2, 0.11, 0.3, 1], [0, 0.4, 0.4, 0.36, 1],
                              [0, 0.12, 0.27, 0.34, -3],
                              [0.62, 0.12, 0.11, 10, 1],
                              [0, 0.56, 0.11, 0.44, 0.23]],
                             [[0.1, 2, 0.11, 0.3, 1], [0, 0.4, 0.4, 0.36, 1],
                              [0, 0.12, 0.27, 0.34, -3],
                              [0.62, 0.12, 0.11, 10, 1],
                              [0, 0.56, 0.11, 0.23, 0.44]],
                             [[0.1, 2, 0.11, 0.3, 1], [0, 0.4, 0.4, 0.36, 1],
                              [0, 0.12, 0.27, 0.34, -3],
                              [0.62, 0.12, 0.11, 10, 1],
                              [0, 0.56, 0.11, 0.23, 0.44]]])
        # print(f"input shape = {input.shape}")

        if not os.path.exists(os.path.join(pathdir, filename1 + ".bs1.npy")):
            f = numpy.array([[[-0.13, 0.15], [-0.51, 0.62]],
                             [[-0.13, 0.15], [-0.51, 0.62]],
                             [[-0.13, 0.15], [-0.51, 0.62]]])

            containerfilter = numpy.ndarray((0, 3, 2, 2))

            containerfilter = numpy.insert(containerfilter, 0, f, axis=0)
            containerfilter = numpy.insert(containerfilter, 0, f, axis=0)
            containerfilter = numpy.insert(containerfilter, 0, f, axis=0)
            containerfilter = numpy.insert(containerfilter, 0, f, axis=0)

            #print(f"container filter shape = {containerfilter.shape}")

            biases = numpy.zeros((4, ))

            tm = TensorFileManager("./tensorfiles")
            # print(f"shape = {f.shape}")

            tm.save(filename1 + ".bs1", biases)
            tm.save(filename1 + ".ws1", containerfilter)

        l1 = conv.ConvLayer(load_path=filename1,
                            filtershape=(4, 3, 2, 2),
                            pool=pool.PoolLayer(),
                            activation_function="sigmoid")

        res1 = l1.compute(input)

        # print(f"result conv shape {res1.shape}")
        ws = numpy.array([[
            0.61, 0.82, 0.96, -1, 0.9, 0.71, 0.3, 0.276, 0.11, 0.12, 0.17, 0.5,
            0.1, 0.2, 0.11, 0.6
        ],
                          [
                              0.02, -0.5, 0.23, 0.17, 0.9, 0.1, 0.4, 0.9, 0.2,
                              0.12, 0.11, 0.3, 0.1, 0.2, 0.7, 0.8
                          ]])

        prev_delta = numpy.array([0.25, -0.15])  #delta from the link

        delta = numpy.dot(prev_delta, ws)

        # print(f"delta = {delta}")

        l1.learn(delta)

        dx = l1.getLastDelta()

        self.assertEqual(dx.shape, (3, 5, 5))

        # print(dx)

        self.assertEqual(numpy.isclose(dx[0][2][2], 2.837872562200001e-10),
                         True)
Example #9
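    # End-to-end gradient test on a (3, 5, 5) input: a conv layer with three 3x2x2 filters
    # feeds a small FC layer; after backpropagating the FC delta into the conv layer,
    # two entries of nabla_w are compared against reference values.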
    def test_learn_with_depth_and_multiple_filter(self):
        # input 3, 5, 5
        # filter 3, 3, 2, 2 => output 3, 4, 4
        input = numpy.array([[[0.1, 2, 0.11, 0.3, 1], [0, 0.4, 0.4, 0.36, 1],
                              [0, 0.12, 0.27, 0.34, -3],
                              [0.62, 0.12, 0.11, 10, 1], [0, 56, 11, 23, 44]],
                             [[0.11, 0.58, -1, 2, 0.35],
                              [0.1, 0.36, 0.12, 0.8, 0.27],
                              [0.27, 0, 0.64, 1, 0.12], [1, -1, 0.4, 3, 11],
                              [0, 0.56, 0.11, 0.23, 0.44]],
                             [[1, 3, 0.24, 5, -1], [0.12, 2, 0, 1, 11],
                              [-0.1, 0.2, 0.3, 0.11, 0.22],
                              [12, 0.27, 0.18, 0.2, 0.34],
                              [0, 0.56, 0.11, 0.23, 0.44]]])

        pathdir = "./tensorfiles"
        filename = "conv_test_depth_multiple_filter"
        fullypath = pathdir + "/" + filename

        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            tm = TensorFileManager("./tensorfiles")

            containerfilter = numpy.ndarray((0, 3, 2, 2))

            f3 = numpy.array([[[0.3, 0.3], [0.3, 0.3]], [[0.3, 0.3],
                                                         [0.3, 0.3]],
                              [[0.3, 0.3], [0.3, 0.3]]])
            containerfilter = numpy.insert(containerfilter, 0, f3, 0)
            f2 = numpy.array([[[0.2, 0.2], [0.2, 0.2]], [[0.2, 0.2],
                                                         [0.2, 0.2]],
                              [[0.2, 0.2], [0.2, 0.2]]])
            containerfilter = numpy.insert(containerfilter, 0, f2, 0)
            f1 = numpy.array([[[0.1, 0.1], [0.1, 0.1]], [[0.1, 0.1],
                                                         [0.1, 0.1]],
                              [[0.1, 0.1], [0.1, 0.1]]])
            containerfilter = numpy.insert(containerfilter, 0, f1, 0)

            biases = numpy.zeros((3, ))

            tm.save("conv_test_depth_multiple_filter.bs1", biases)
            tm.save("conv_test_depth_multiple_filter.ws1", containerfilter)

        l1 = conv.ConvLayer(load_path=filename,
                            filtershape=(3, 3, 2, 2),
                            pool=pool.PoolLayer(),
                            activation_function="relu")
        output = l1.compute(input)

        filename = "fcn_test_depth_multiple_filter"

        if not os.path.exists(os.path.join(pathdir, filename + ".bs1.npy")):
            tm = TensorFileManager("./tensorfiles")

            ws = numpy.array([[
                0.1, 0.3, 0.5, 0.12, 0.9, 0.12, 0.9, 0.10, 0.1, 0.11, 0.12,
                0.13
            ],
                              [
                                  0.34, 0.3, 0.64, 0.12, 1, 0.12, 0.1, 0.1,
                                  0.12, 0.13, 0.15, 0.11
                              ]])
            biases = numpy.zeros((2, ))

            tm.save("fcn_test_depth_multiple_filter.bs1", biases)
            tm.save("fcn_test_depth_multiple_filter.ws1", ws)

        l2 = fcnetwork.FCLayer(arch=[12, 2],
                               load_path="fcn_test_depth_multiple_filter")

        expected_res = numpy.array([1, 0])

        l2.compute(input=output, learn=True)
        l2.learn(expected_res)

        delta = l2.getLastDelta()

        l1.learn(delta)

        nabla_w = l1.getNablaW()

        self.assertEqual(numpy.isclose(nabla_w[0][0][0][0], 4.1609570961e-09),
                         True)
        self.assertEqual(numpy.isclose(nabla_w[1][1][0][0], 1.8135273233e-09),
                         True)
Example #10
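# Script-level smoke test: a single 2x2 filter and a zero bias are saved to ./tensorfiles
# (as convtest.*), then a conv layer loads them and is computed on a 5x5 input.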
input = numpy.array([(0.51, 0.9, 0.88, 0.84, 0.05),
                     (0.4, 0.62, 0.22, 0.59, 0.1),
                     (0.11, 0.2, 0.74, 0.33, 0.14),
                     (0.47, 0.01, 0.85, 0.7, 0.09),
                     (0.76, 0.19, 0.72, 0.17, 0.57)])

filter = numpy.array([[-0.13, 0.15], [-0.51, 0.62]])
biases = numpy.zeros((1, ))

pathdir = "./tensorfiles"
filename1 = "convtest"  # matches the convtest.* tensors saved below

if not os.path.exists(os.path.join(pathdir, filename1 + ".bs1.npy")):
    tm = TensorFileManager("./tensorfiles")
    tm.save("convtest.bs1", biases)
    tm.save("convtest.ws1", filter)

l1 = conv.ConvLayer(load_path=filename1, pool=pool.PoolLayer())

res1 = l1.compute(input)

print(res1)

filename2 = "networktest"

# no need to actually build an FCN here
ws = numpy.array([[0.61, 0.82, 0.96, -1], [0.02, -0.5, 0.23, 0.17]])
# biases = numpy.zeros((2,))

# if not os.path.exists(os.path.join(pathdir, filename2 + ".bs1.npy")):
#     tm = TensorFileManager("./tensorfiles")
#     tm.save("networktest.bs1", biases)
#     tm.save("networktest.ws1", ws)
Example #11
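# Training script for the flower classifier with dropout: loads the saved layers, installs
# a SIGINT handler that saves the layers and prints the current prediction before exiting,
# then trains for 50 epochs.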
def flowerAndFun2(path=example1):
    def signal_handler(sig, frame):
        # saveModel.saveLayers(["ff2c1", "ff2c2", "ff2c3", "ff2fcn1", "ff2softm"])
        pic = iml.ImageLoader.getOutputNpArray(example1,
                                               crop=True,
                                               crop_size=(0, 0, 150, 150))

        y = saveModel.compute(pic)
        saveModel.saveLayers([
            "ff2c1", "d1", "ff2c2", "d2", "ff2c3", "d3", "ff2fcn1", "d4",
            "ff2softm"
        ])
        print(y)
        sys.exit(0)

    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 150, 150))

    # layerContainer = [
    #     #3, 150, 150
    #     conv.ConvLayer(optimizer=adam.AdamConv(), filtershape=(32, 3, 3, 3), stride_length=1, pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2), ishape=(3, 150, 150)),
    #     dropout.DropoutLayer(p=0.2, ishape=(32, 74, 74)),

    #     #32, 74, 74
    #     conv.ConvLayer(optimizer=adam.AdamConv(), filtershape=(64, 32, 3, 3), stride_length=1, pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2), ishape=(32, 74, 74)),
    #     dropout.DropoutLayer(p=0.2, ishape=(64, 36, 36)),

    #     #64, 36, 36
    #     conv.ConvLayer(optimizer=adam.AdamConv(), filtershape=(128, 64, 3, 3), stride_length=1, pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2), ishape=(64, 36, 36)),
    #     dropout.DropoutLayer(p=0.2, ishape=(128, 17, 17)),

    #     #128, 17, 17
    #     fcnetwork.FCLayer(optimizer=adam.AdamFC(), arch=[36992, 512, 128], activation_func="relu", is_classifier=False),
    #     dropout.DropoutLayer(p=0.2, ishape=(128,)),

    #     softmax.SoftmaxLayer(optimizer=adam.AdamFC(), arch=[128, 5])
    # ]
    # load the saved net
    layerContainer = [
        #3, 150, 150
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c1",
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),
        dropout.DropoutLayer(p=0.2, ishape=(32, 74, 74)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c2",
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),
        dropout.DropoutLayer(p=0.2, ishape=(64, 36, 36)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c3",
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),
        dropout.DropoutLayer(p=0.2, ishape=(128, 17, 17)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                          load_path="ff2fcn1",
                          arch=[36992, 512, 128],
                          activation_func="relu",
                          is_classifier=False),
        dropout.DropoutLayer(p=0.2, ishape=(128, )),
        softmax.SoftmaxLayer(optimizer=adam.AdamFC(),
                             load_path="ff2softm",
                             arch=[128, 5])
    ]
    signal.signal(signal.SIGINT, signal_handler)

    ## the learning_rate passed here is not actually used
    model_FAndF = model.Model(learning_rate=0.001,
                              dataset=None,
                              layerContainer=layerContainer)

    saveModel = model_FAndF
    # saveModel.saveLayers(["ff2c1", "ff2c2", "ff2c3", "ff2fcn1", "ff2softm"])

    model_FAndF.test_learn(epoch=50)
Example #12
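# Inference entry point for the flower model: if no saved weights exist under ./tensorfiles,
# a fresh network is built and saved as ff2c1 .. ff2softm; the layers are then reloaded and
# the prediction for the given image is returned via return_response.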
def flowerAndFunModel(path=example1):

    # the image loader crops to 224x224 by default; a 150x150 crop is requested here
    print(f"Model loads this picture as input: {path}")
    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 150, 150))
    dir = "./tensorfiles"

    if not os.path.exists(dir + "/" + "ff2c1" + ".ws1.npy"):
        layerContainer = [
            #3, 150, 150
            conv.ConvLayer(optimizer=adam.AdamConv(),
                           filtershape=(32, 3, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(2, 2),
                                               stride_length=2),
                           ishape=(3, 150, 150)),

            #32, 74, 74
            conv.ConvLayer(optimizer=adam.AdamConv(),
                           filtershape=(64, 32, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(2, 2),
                                               stride_length=2),
                           ishape=(32, 74, 74)),

            #64, 36, 36
            conv.ConvLayer(optimizer=adam.AdamConv(),
                           filtershape=(128, 64, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(2, 2),
                                               stride_length=2),
                           ishape=(64, 36, 36)),

            #128, 17, 17
            fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                              arch=[36992, 512, 128],
                              activation_func="relu",
                              is_classifier=False),
            softmax.SoftmaxLayer(optimizer=adam.AdamFC(), arch=[128, 5])
        ]
        ffM = model.Model(learning_rate=None,
                          dataset=None,
                          layerContainer=layerContainer)
        ffM.saveLayers(["ff2c1", "ff2c2", "ff2c3", "ff2fcn1", "ff2softm"])

    layerContainer = [
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c1",
                       filtershape=(32, 3, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(3, 150, 150)),

        #32, 74, 74
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c2",
                       filtershape=(64, 32, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(32, 74, 74)),

        #64, 36, 36
        conv.ConvLayer(optimizer=adam.AdamConv(),
                       load_path="ff2c3",
                       filtershape=(128, 64, 3, 3),
                       stride_length=1,
                       pool=pool.PoolLayer(pool_size=(2, 2), stride_length=2),
                       ishape=(64, 36, 36)),

        #128, 17, 17
        fcnetwork.FCLayer(optimizer=adam.AdamFC(),
                          load_path="ff2fcn1",
                          arch=[36992, 512, 128],
                          activation_func="relu",
                          is_classifier=False),
        softmax.SoftmaxLayer(optimizer=adam.AdamFC(),
                             load_path="ff2softm",
                             arch=[128, 5])
    ]

    ffM = model.Model(learning_rate=None,
                      dataset=None,
                      layerContainer=layerContainer)

    try:
        output = ffM.compute(input)
    except Exception:
        print("error occurred in flowerAndFun model")
        return "error"
    return return_response(output)


# print(zf5model())
# overfeat()
# a = numpy.array(
#         [[1, 2, 3, 4, 5, 6],
#         [1, 2, 3, 4, 5, 6],
#         [1, 2, 3, 4, 5, 6],
#         [1, 2, 3, 4, 5, 6],
#         [1, 2, 3, 4, 5, 6],
#         [1, 2, 3, 4, 5, 6]])
Example #13
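# ZF-Net-style model (per the linked figure): creates and saves the layers on first run,
# reloads them from ./tensorfiles, and returns the prediction for a 224x224 crop of the input.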
def zf5model(path=example1):
    # from this architecture:
    # https://www.researchgate.net/figure/Architecture-of-ZF-model-An-3-channels-image-with-224224-is-as-the-input-It-is_fig5_318577329

    # image = iml.ImageLoader.getOutputNpArray(rp_dataset + "daisy/" + "5547758_eea9edfd54_n.jpg", gray=True)
    # image = iml.ImageLoader.getCropedImage(rp_dataset + "daisy/" + "5547758_eea9edfd54_n.jpg")#.getOutputNpArray(rp_dataset + "daisy/" + "5547758_eea9edfd54_n.jpg")

    # the image loader crops to 224x224 by default
    print(f"Model loads this picture as input: {path}")
    input = iml.ImageLoader.getOutputNpArray(path,
                                             crop=True,
                                             crop_size=(0, 0, 224, 224))
    dir = "./tensorfiles"

    if not os.path.exists(dir + "/" + "conv1" + ".ws1.npy"):
        layerContainer = [
            #3, 224, 224
            conv.ConvLayer(padding=1,
                           filtershape=(96, 3, 7, 7),
                           stride_length=2,
                           pool=pool.PoolLayer(pad=1,
                                               pool_size=(3, 3),
                                               stride_length=2),
                           ishape=(3, 224, 224)),
            #96, 55, 55
            conv.ConvLayer(filtershape=(256, 96, 5, 5),
                           stride_length=2,
                           pool=pool.PoolLayer(pad=1,
                                               pool_size=(3, 3),
                                               stride_length=2),
                           ishape=(96, 55, 55)),
            #256, 26, 26
            conv.ConvLayer(padding=1,
                           filtershape=(384, 256, 3, 3),
                           stride_length=1,
                           ishape=(256, 26, 26)),

            #384, 13, 13
            conv.ConvLayer(padding=1,
                           filtershape=(384, 384, 3, 3),
                           stride_length=1,
                           ishape=(384, 13, 13)),

            #384, 13, 13
            conv.ConvLayer(padding=1,
                           filtershape=(256, 384, 3, 3),
                           stride_length=1,
                           pool=pool.PoolLayer(pool_size=(3, 3),
                                               stride_length=2),
                           ishape=(384, 13, 13)),

            # be sure to use He initialization here
            fcnetwork.FCLayer(arch=[9216, 4096, 4096, 5])
        ]

        zf5 = model.Model(learning_rate=None,
                          dataset=None,
                          layerContainer=layerContainer)

        # output = zf5.compute(input)

        # print(f"first output = {output}")

        zf5.saveLayers(
            ["conv1", "conv2", "conv3", "conv4", "conv5", "classifier"])

    layerContainer = [
        #3, 224, 224
        conv.ConvLayer(load_path="conv1"),
        #96, 55, 55
        conv.ConvLayer(load_path="conv2"),
        #256, 26, 26
        conv.ConvLayer(load_path="conv3"),

        #384, 13, 13
        conv.ConvLayer(load_path="conv4"),

        #384, 13, 13
        conv.ConvLayer(load_path="conv5"),

        # be sure to use He initialization here
        fcnetwork.FCLayer(arch=[9216, 4096, 4096, 5], load_path="classifier")
    ]

    zf5 = model.Model(learning_rate=None,
                      dataset=None,
                      layerContainer=layerContainer)

    # output = zf5.compute(input)
    # res = zf5.learn()
    # print(f"snd output = {res}")

    try:
        output = zf5.compute(input)
    except Exception:
        print("error occurred in zf5 model")
        return "error"
    # print(return_response(output))
    return return_response(output)
Example #14
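    # save/load methods of a Network class that pickle its layers (import pickle is assumed
    # at module level), followed by a __main__ demo that stacks conv, ReLU, pool and dense
    # layers and prepares a random input/target pair.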
    def save(self, file):
        with open(file, 'wb') as f:
            pickle.dump(self.__layers, f)

    def load(self, file):
        with open(file, 'rb') as f:
            self.__layers = pickle.load(f)


if __name__ == '__main__':
    import numpy as np
    import conv
    import act
    import pool
    import dense
    # build
    net = Network()
    net.add_layer(conv.ConvLayer(28, 28, 1, 3, 10))
    net.add_layer(act.ReLULayer())
    net.add_layer(pool.PoolLayer(f=2, stride=2))
    net.add_layer(act.ReLULayer())
    net.add_layer(conv.ConvLayer(13, 13, 10, 4, 16))
    net.add_layer(act.ReLULayer())
    net.add_layer(pool.PoolLayer(f=2, stride=2))
    net.add_layer(act.ReLULayer())
    net.add_layer(conv.ConvLayer(5, 5, 16, 2, 10))
    net.add_layer(act.ReLULayer())
    net.add_layer(pool.PoolLayer(f=4, stride=1))
    net.add_layer(dense.DenseLayer(10, 10))
    net.add_layer(act.SigmoidLayer())
    # train
    i = np.random.randn(1, 28, 28, 1)
    o = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0]).reshape(10, 1)
Example #15
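# Module-level setup: loads and crops a daisy image from ./dataset and defines a ZF-style
# layerContainer of five conv layers plus a fully connected classifier.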
rp_dataset = "./dataset/"

# image = iml.ImageLoader.getOutputNpArray(rp_dataset + "daisy/" + "5547758_eea9edfd54_n.jpg", gray=True)
# image = iml.ImageLoader.getCropedImage(rp_dataset + "daisy/" + "5547758_eea9edfd54_n.jpg")#.getOutputNpArray(rp_dataset + "daisy/" + "5547758_eea9edfd54_n.jpg")

input = iml.ImageLoader.getOutputNpArray(image_path=rp_dataset + "daisy/" +
                                         "5547758_eea9edfd54_n.jpg",
                                         crop=True)

# from this architecture:
# https://www.researchgate.net/figure/Architecture-of-ZF-model-An-3-channels-image-with-224224-is-as-the-input-It-is_fig5_318577329

layerContainer = [
    conv.ConvLayer(padding=1,
                   filtershape=(96, 3, 7, 7),
                   stride_length=2,
                   pool=pool.PoolLayer(pad=1,
                                       pool_size=(3, 3),
                                       stride_length=2)),
    conv.ConvLayer(filtershape=(256, 96, 5, 5),
                   stride_length=2,
                   pool=pool.PoolLayer(pad=1,
                                       pool_size=(3, 3),
                                       stride_length=2)),
    conv.ConvLayer(padding=1, filtershape=(384, 256, 3, 3), stride_length=1),
    conv.ConvLayer(padding=1, filtershape=(384, 384, 3, 3), stride_length=1),
    conv.ConvLayer(padding=1,
                   filtershape=(256, 384, 3, 3),
                   stride_length=1,
                   pool=pool.PoolLayer(pool_size=(3, 3), stride_length=2)),
    fcnetwork.FCLayer(arch=[9216, 4096, 5])
]