Example #1
    def fcNet(self, margin=1):
        net = MyCaffeNet({"debug": True})
        d_dim = 1000
        data, label = net.dataLayer("", "mem", 20, memdim=[1, 1, d_dim + 1])
        fc = net.fcLayer(data, 512, replace="relu")
        fc = net.fcLayer(fc, 512, replace="relu")
        fc = net.fcLayer(fc, 512, replace="relu")

        out = net.fcLayer(fc, 20, t="xavier", isout=True)
        loss = net.lossLayer(out, label, "softmax", 1)
        acc = net.accLayer(out, label)

        net.netStr(self.netname)
        return
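Each snippet on this page is a builder method that constructs a MyCaffeNet, chains its layer helpers, and serializes the result with netStr (the full wrapper classes appear in Examples #9, #10 and #23). A minimal, hypothetical driver for fcNet above, assuming the function is available at module level and bound to a class supplying self.netname:

    # Hypothetical driver mirroring the Net classes below; names are invented.
    class FcDemo(object):
        def __init__(self, netname):
            self.netname = netname   # consumed by net.netStr(self.netname)
        fcNet = fcNet                # bind the builder defined above as a method

    FcDemo("fc_net.prototxt").fcNet()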
Example #2
    def fcNet(self, netname, is_test):
        net = MyCaffeNet({"debug": True})
        data, label = net.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
        )
        fc = net.fcLayer(data, 512, replace="relu")
        fc = net.fcLayer(fc, 512, replace="relu")
        fc = net.fcLayer(fc, 512, replace="relu")

        out = net.fcLayer(fc, 20, t="xavier", isout=True)
        loss = net.lossLayer(out, label, "softmax", 1)
        acc = net.accLayer(out, label)

        net.netStr(netname)
        return
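Every data layer on this page repeats the same train/test phase toggle. A small refactoring sketch (the helper name is invented); the call would then read net.pyDataLayer(*phase_args(is_test)):

    def phase_args(is_test):
        # the three phase-dependent arguments repeated in each pyDataLayer call
        name = "DataTest" if is_test else "DataTrain"
        return name, name, 1 if is_test else 0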
Example #3
    def inceptionNet(self, single_dim, stock_dim, num, margin=1):
        net = MyCaffeNet({"debug": True})
        d_dim = 36768
        data, label = net.dataLayer("",
                                    "mem",
                                    16,
                                    tops=["data", "label"],
                                    memdim=[1, 1, d_dim])
        points = [single_dim - stock_dim, single_dim]
        slices = net.sliceLayer(data, points)

        conv01 = net.convLayer(slices[1], [3, 1], [1, 1], 128, pad_wh=[1, 0])

        conv1 = net.convLayer(slices[2], [stock_dim, 1], [stock_dim, 1], 64)
        pool1 = net.poolLayer(conv1, [34, 1], [2, 1], "ave")

        concat = net.concatLayer(*[conv01, pool1])
        inception1 = net.inceptionLayerV1(concat)
        pool = net.poolLayer(inception1, [7, 1], [2, 1], "max", pad_wh=[3, 0])
        conv_last = net.convLayer(pool, [3, 1], [1, 1], 64, pad_wh=[1, 0])

        helper1 = net.fcLayer(conv_last,
                              2048,
                              replace='relu',
                              dropout=self.dropout)
        helper1_2 = net.fcLayer(helper1,
                                2048,
                                replace='relu',
                                dropout=self.dropout)
        helper1_out = net.fcLayer(helper1_2, 20, t="xavier", isout=True)
        helper1_loss = net.lossLayer(helper1_out, label, "softmax", 1)
        helper1_acc = net.accLayer(helper1_out, label)

        classify = net.fcLayer(concat, 20, t="xavier", isout=True)
        loss = net.lossLayer(classify, label, "softmax", 0.2)
        acc = net.accLayer(classify, label)

        ignore1 = net.fcLayer(slices[0], 1, t="xavier", isout=True)
        loss = net.lossLayer(ignore1, label, "softmax", 0)

        net.netStr(self.netname)
        return
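Note the slicing convention used throughout: sliceLayer with N cut points yields N + 1 tops, so points = [single_dim - stock_dim, single_dim] produces slices[0] (width single_dim - stock_dim), slices[1] (width stock_dim) and slices[2] (the remaining stock block). A NumPy equivalent of the split, with illustrative values assumed:

    import numpy as np

    d_dim, single_dim, stock_dim = 36768, 48, 36    # illustrative values only
    data = np.zeros((16, 1, 1, d_dim))              # batch 16, memdim=[1, 1, d_dim]
    slices = np.split(data, [single_dim - stock_dim, single_dim], axis=3)
    assert [s.shape[3] for s in slices] == [12, 36, 36720]  # 3 slices from 2 cuts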
Example #4
    def inceptionNetTest(self, single_dim, stock_dim, num, margin=1):
        net = MyCaffeNet({"debug": True})
        d_dim = 36768
        data, label = net.dataLayer("",
                                    "mem",
                                    16,
                                    tops=["data", "label"],
                                    memdim=[1, 1, d_dim])
        points = [single_dim - stock_dim, single_dim]
        slices = net.sliceLayer(data, points)

        #fc = net.fcLayer(slices[1], 2048, replace='relu')
        #reshape = net.flattenLayer(fc, True)
        #conv01 = net.convLayer(reshape, [80, 1], [16, 1], 128, pad_wh=[0, 0])
        conv01 = net.convLayer(slices[1], [3, 1], [1, 1], 128, pad_wh=[1, 0])
        """
        points = map(lambda x: (x+1) * stock_dim, range(num-1))
        slices_all = net.sliceLayer(slices[2], points)
        tops = []
        one_dim = 64
        for one in slices_all:
            tmp = net.fcLayer(one, one_dim, replace="relu", wname=["d_w", "d_b"])
            tops.append(tmp)
        concat = net.concatLayer(*tops)
        reshape = net.flattenLayer(concat, True)
        conv1 = net.convLayer(reshape, [one_dim, 1], [one_dim, 1], 32)
        pool1 = net.poolLayer(conv1, [34, 1], [2, 1], "ave")
        conv2 = net.convLayer(pool1, [5, 1], [1, 1], 128, pad_wh=[2, 0])

        concat = net.concatLayer(*[conv01, conv2])
        conv_last = net.convLayer(concat, [3, 1], [2, 1], 64, pad_wh=[1, 0])
        reshape = net.flattenLayer(conv1)
        """

        conv1 = net.convLayer(slices[2], [stock_dim, 1], [stock_dim, 1], 64)
        pool1 = net.poolLayer(conv1, [34, 1], [2, 1], "ave")
        concat = net.concatLayer(*[conv01, pool1])
        inception1 = net.inceptionLayerV1(concat)
        pool = net.poolLayer(inception1, [7, 1], [2, 1], "max", pad_wh=[3, 0])
        conv_last = net.convLayer(pool, [3, 1], [1, 1], 64, pad_wh=[1, 0])

        helper1 = net.fcLayer(conv_last, 2048, replace='relu', dropout=0.5)
        helper1_2 = net.fcLayer(helper1, 2048, replace='relu', dropout=0.5)
        helper1_out = net.fcLayer(helper1_2, 20, t="xavier", isout=True)
        helper1_loss = net.lossLayer(helper1_out, label, "softmax", 1)
        helper1_acc = net.accLayer(helper1_out, label)

        classify = net.fcLayer(concat, 20, t="xavier", isout=True)
        loss = net.lossLayer(classify, label, "softmax", 0.3)
        acc = net.accLayer(classify, label)

        ignore1 = net.fcLayer(slices[0], 1, t="xavier", isout=True)
        loss = net.lossLayer(ignore1, label, "softmax", 0)
        #acc = net.accLayer(classify_1, label)

        net.netStr(self.netname)
        return
Example #5
    def testNet(self, margin=1):
        net = MyCaffeNet({"debug": True})
        one_len = 5
        num = 2862
        d_dim = one_len + 7 + num * one_len - 1
        data, label = net.dataLayer("",
                                    "mem",
                                    64,
                                    tops=["data", "label"],
                                    memdim=[1, 1, d_dim])
        #slices = net.sliceLayer(data, [d_dim])
        #data = slices[0]
        #prop = slices[1]

        points = [one_len + 7 - 1]
        slices = net.sliceLayer(data, points)

        fc = net.fcLayer(slices[0], 256, replace="relu")
        #fc = net.batchNormLayer(fc)

        data_drop = net.groupDropLayer(slices[1], num, int(num * 0.8), False)
        #data_drop = slices[1]
        conv1 = net.convLayer(data_drop, [one_len, 1], [one_len, 1],
                              32,
                              replace="relu")
        layer = net.normLayer(conv1)
        #layer = net.batchNormLayer(conv1)
        pool1 = net.poolLayer(layer, [6, 1], [2, 1], "max", pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1],
                              16,
                              pad_wh=[1, 0],
                              replace="relu")
        conv3 = net.convLayer(conv2, [4, 1], [2, 1],
                              8,
                              pad_wh=[1, 0],
                              replace="relu")
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        helper1 = net.fcLayer(concat,
                              512,
                              replace='relu',
                              dropout=self.dropout)
        helper1_2 = net.fcLayer(helper1,
                                512,
                                replace='relu',
                                dropout=self.dropout)

        helper1_out = net.fcLayer(helper1_2, 20, t="xavier", isout=True)
        helper1_loss = net.lossLayer(helper1_out, label, "softmax", 1)
        helper1_acc = net.accLayer(helper1_out, label)

        net.netStr(self.netname)
        return
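The groupDropLayer calls here look like structured dropout over whole per-stock feature groups; reading the signature as (bottom, group_count, keep_count, in_place) is an assumption, not documented API. A NumPy sketch of that interpretation:

    import numpy as np

    def group_drop(x, groups, keep):
        # assumed semantics: split the last axis into `groups` equal chunks
        # and zero every chunk except `keep` randomly chosen ones
        glen = x.shape[-1] // groups
        mask = np.zeros(groups)
        mask[np.random.choice(groups, keep, replace=False)] = 1.0
        return x * np.repeat(mask, glen)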
Example #6
File: Net.py Project: xiaojingyi/finTest2
    def net(self, netname, is_test):
        net = MyCaffeNet({"debug": True})
        data, label_muti, label, label_ = net.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=["data", "label_muti", "label", "label_"])

        points = [12]
        slices = net.sliceLayer(data, points)
        net.silenceLayer(label_)

        fc = net.fcLayer(slices[0], 1024, replace="relu")

        drop = net.groupDropLayer(slices[1], 800, 400, False)
        conv1 = net.convLayer(drop, [5, 1], [5, 1], 64)
        layer = net.normLayer(conv1)
        #pool1 = net.poolLayer(layer, [6, 1], [2, 1],
        #        "ave", pad_wh=[2, 0])
        pool1 = net.convLayer(layer, [6, 1], [2, 1], 64, pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1], 32, pad_wh=[1, 0])
        conv3 = net.convLayer(conv2, [4, 1], [2, 1], 16, pad_wh=[1, 0])
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        fc = net.fcLayer(concat,
                         2048,
                         t="xavier",
                         replace='relu',
                         dropout=self.dropout)
        fc = net.fcLayer(fc,
                         2048,
                         t="xavier",
                         replace='relu',
                         dropout=self.dropout)

        out = net.fcLayer(fc, 20, t="xavier", isout=True)

        loss = net.lossLayer(out, label_muti, "sigcross", 1)
        #net.silenceLayer(label_muti)
        #loss = net.lossLayer(out, label, "softmax", 1)
        acc = net.accLayer(out, label)

        net.netStr(netname)
        return
Example #7
File: Net.py Project: xiaojingyi/finTest2
 def trainNet(self, fname):
     self.nt = MyCaffeNet({"debug": True})
     #self.convNet(fname, False)
     self.net(fname, False)
Example #8
File: Net.py Project: xiaojingyi/finTest2
 def testNet(self, fname):
     self.net_model = MyCaffeNet({"debug": True})
     self.net(fname, True)
Example #9
File: Net.py Project: xiaojingyi/finTest2
class Net(object):
    def __init__(self, config):
        if not config:
            self.bail(-1, "no config: Net init")
        self.config = config
        self.debug = config["debug"]
        self.dropout = config['dropout']
        #super(Net, self).__init__(config)

    def trainNet(self, fname):
        self.net_model = MyCaffeNet({"debug": True})
        self.net(fname, False)

    def testNet(self, fname):
        self.net_model = MyCaffeNet({"debug": True})
        self.net(fname, True)

    def net(self, netname, is_test):
        data, label_muti, label, label_, sim = self.net_model.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=["data", "label_muti", "label", "label_", "sim"])

        onelen = 4
        self.net_model.silenceLayer(label_)
        self.net_model.silenceLayer(label_muti)
        drop = self.net_model.groupDropLayer(data, 60, 50, False)
        conv1 = self.net_model.convLayer(drop, [onelen, 1], [onelen, 1], 64)
        layer = self.net_model.normLayer(conv1)
        conv2 = self.net_model.convLayer(layer, [3, 1], [1, 1], 64)
        conv3 = self.net_model.convLayer(conv2, [3, 1], [2, 1], 64)
        """
        """

        fc_1 = self.net_model.fcLayer(conv3, 512, t="xavier", replace='relu')
        ex_drop = 0.5
        fc_drop1 = self.net_model.dropLayer(fc_1, ex_drop, False)
        fc_drop2 = self.net_model.dropLayer(fc_1, ex_drop, False)
        fc_drop3 = self.net_model.dropLayer(fc_1, ex_drop, False)
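        # Three independent dropout views of fc_1 feed fc heads that share
        # parameters (identical wname=["s_w", "s_b"]); the pairwise
        # contrastive losses below push the three embeddings toward agreement,
        # with margin/margin_sim setting the contrastive thresholds.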

        s1 = self.net_model.fcLayer(fc_drop1,
                                    512,
                                    t="xavier",
                                    replace='sigmoid',
                                    wname=["s_w", "s_b"])
        s2 = self.net_model.fcLayer(fc_drop2,
                                    512,
                                    t="xavier",
                                    replace='sigmoid',
                                    wname=["s_w", "s_b"])
        s3 = self.net_model.fcLayer(fc_drop3,
                                    512,
                                    t="xavier",
                                    replace='sigmoid',
                                    wname=["s_w", "s_b"])
        margin_min = 50
        margin_max = 100
        loss_s1 = self.net_model.lossLayer(s1,
                                           s2,
                                           third_bottom=sim,
                                           t="contrastive",
                                           param={
                                               "margin": margin_min,
                                               "margin_sim": margin_max
                                           },
                                           weight=1)
        loss_s2 = self.net_model.lossLayer(s1,
                                           s3,
                                           third_bottom=sim,
                                           t="contrastive",
                                           param={
                                               "margin": margin_min,
                                               "margin_sim": margin_max
                                           },
                                           weight=1)
        loss_s3 = self.net_model.lossLayer(s2,
                                           s3,
                                           third_bottom=sim,
                                           t="contrastive",
                                           param={
                                               "margin": margin_min,
                                               "margin_sim": margin_max
                                           },
                                           weight=1)

        out = self.net_model.fcLayer(s1, 20, t="xavier", isout=True)
        out_seg = self.net_model.sigmoidLayer(out)
        self.net_model.silenceLayer(out_seg)

        loss = self.net_model.lossLayer(out, label_muti, "sigcross", 1)
        #loss = self.net_model.lossLayer(out, label, "softmax", 1)
        acc = self.net_model.accLayer(out, label)

        self.net_model.netStr(netname)
        return

    def testPrint(self):
        print "Hello World!"

    def bail(self, sig, msg):
        print sig, ": ", msg
        exit()
Example #10
File: Net.py Project: xiaojingyi/finTest2
class Net(object):
    def __init__(self, config):
        if not config:
            self.bail(-1, "no config: Net init")
        self.config = config
        self.debug = config["debug"]
        self.dropout = config['dropout']
        #super(Net, self).__init__(config)

    def trainNet(self, fname):
        self.nt = MyCaffeNet({"debug": True})
        #self.convNet(fname, False)
        self.net(fname, False)

    def testNet(self, fname):
        self.nt = MyCaffeNet({"debug": True})
        #self.convNet(fname, True)
        self.net(fname, True)

    def convNet(self, netname, is_test):
        data, target = self.nt.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=["data", "target"])
        drop, pos_mask, neg_mask = self.nt.drop2Layer(data, 0.05)
        self.nt.silenceLayer(pos_mask)

        net_dim = 1024
        drop = self.nt.reshapeLayer(drop, [0, 1, 1, net_dim])

        target = self.nt.sameDropLayer(target, neg_mask, False)
        target = self.nt.reshapeLayer(target, [0, 1, 1, net_dim])

        conv1 = self.nt.convLayer(drop,
                                  ksize_wh=[129, 1],
                                  stride_wh=[1, 1],
                                  nout=32,
                                  pad_wh=[64, 0],
                                  t="xavier",
                                  std=0.1,
                                  replace="")
        conv1 = self.nt.batchNormLayer(conv1, gs=is_test)
        conv1 = self.nt.scaleLayer(conv1)
        conv1 = self.nt.reluLayer(conv1)

        conv2 = self.nt.convLayer(conv1,
                                  ksize_wh=[129, 1],
                                  stride_wh=[1, 1],
                                  nout=32,
                                  pad_wh=[64, 0],
                                  t="xavier",
                                  std=0.1,
                                  replace="")
        conv2 = self.nt.batchNormLayer(conv2, gs=is_test)
        conv2 = self.nt.scaleLayer(conv2)
        conv2 = self.nt.reluLayer(conv2)

        conv3 = self.nt.convLayer(conv2,
                                  ksize_wh=[129, 1],
                                  stride_wh=[1, 1],
                                  nout=32,
                                  pad_wh=[64, 0],
                                  t="xavier",
                                  std=0.1,
                                  replace="")
        conv3 = self.nt.batchNormLayer(conv3, gs=is_test)
        conv3 = self.nt.scaleLayer(conv3)
        conv3 = self.nt.reluLayer(conv3)

        conv4 = self.nt.convLayer(conv3,
                                  ksize_wh=[129, 1],
                                  stride_wh=[1, 1],
                                  nout=32,
                                  pad_wh=[64, 0],
                                  t="xavier",
                                  std=0.1,
                                  replace="")
        conv4 = self.nt.batchNormLayer(conv4, gs=is_test)
        conv4 = self.nt.scaleLayer(conv4)
        conv4 = self.nt.reluLayer(conv4)

        conv5 = self.nt.convLayer(conv4,
                                  ksize_wh=[129, 1],
                                  stride_wh=[1, 1],
                                  nout=32,
                                  pad_wh=[64, 0],
                                  t="xavier",
                                  std=0.1,
                                  replace="")
        conv5 = self.nt.batchNormLayer(conv5, gs=is_test)
        conv5 = self.nt.scaleLayer(conv5)
        conv5 = self.nt.reluLayer(conv5)

        out = self.nt.deConvLayer(
            conv5,
            ksize_wh=[17, 1],
            stride_wh=[1, 1],
            pad_wh=[8, 0],
            nout=1,
        )

        loss = self.nt.lossLayer(out,
                                 target,
                                 "sigcross",
                                 1,
                                 third_bottom=neg_mask)

        sig = self.nt.sigmoidLayer(out)
        sim_target = self.nt.sameDropLayer(sig, neg_mask)
        loss = self.nt.lossLayer(sim_target, target, "eloss", 0)

        self.nt.netStr(netname)
        """
        deconv = self.nt.sigmoidLayer(deconv)

        conv = self.nt.batchNormLayer(conv, gs=is_test)
        conv = self.nt.scaleLayer(conv)
        conv = self.nt.reluLayer(conv)
        deconv = self.nt.batchNormLayer(deconv, gs=is_test)
        deconv = self.nt.scaleLayer(deconv)
        """
        return

    def net(self, netname, is_test):
        data, target, zeros, ones = self.nt.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=["data", "target", "zeros", "ones"])
        drop, pos_mask, neg_mask = self.nt.drop2Layer(data, 0.3)
        target = self.nt.sameDropLayer(target, neg_mask, False)
        self.nt.silenceLayer(pos_mask)

        n_num = 2048
        fc = self.nt.fcLayer(drop, n_num, t="xavier", replace='relu')

        fc = self.nt.fcLayer(fc, n_num, t="xavier", replace='relu')

        fc_drop = self.nt.sameDropLayer(ones, fc, False)
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.nt.sameDropLayer(fc_drop, fc, False)
        loss = self.nt.lossLayer(out_for_loss, zeros, "eloss", 0.1)

        fc = self.nt.sameDropLayer(fc, fc_drop, False)
        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        fc = self.nt.fcLayer(fc, n_num, t="xavier", replace='relu')

        fc_drop = self.nt.sameDropLayer(ones, fc, False)
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.nt.sameDropLayer(fc_drop, fc, False)
        loss = self.nt.lossLayer(out_for_loss, zeros, "eloss", 0.1)

        fc = self.nt.sameDropLayer(fc, fc_drop, False)
        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        fc = self.nt.fcLayer(fc, n_num, t="xavier", replace='relu')

        fc_drop = self.nt.sameDropLayer(ones, fc, False)
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.nt.sameDropLayer(fc_drop, fc, False)
        loss = self.nt.lossLayer(out_for_loss, zeros, "eloss", 0.1)

        fc = self.nt.sameDropLayer(fc, fc_drop, False)
        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        fc = self.nt.fcLayer(fc, n_num, t="xavier", replace='relu')

        fc_drop = self.nt.sameDropLayer(ones, fc, False)
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.nt.sameDropLayer(fc_drop, fc, False)
        loss = self.nt.lossLayer(out_for_loss, zeros, "eloss", 0.1)

        fc = self.nt.sameDropLayer(fc, fc_drop, False)
        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        fc = self.nt.fcLayer(fc, n_num, t="xavier", replace='relu')

        fc_drop = self.nt.sameDropLayer(ones, fc, False)
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.nt.sameDropLayer(fc_drop, fc, False)
        loss = self.nt.lossLayer(out_for_loss, zeros, "eloss", 0.1)

        fc = self.nt.sameDropLayer(fc, fc_drop, False)
        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        out = self.nt.fcLayer(fc, 1024, t="xavier", isout=True)

        self.nt.silenceLayer(zeros)
        self.nt.silenceLayer(ones)
        """
        """
        loss = self.nt.lossLayer(out,
                                 target,
                                 "sigcross",
                                 10,
                                 third_bottom=neg_mask)
        sig = self.nt.sigmoidLayer(out)

        sim_target = self.nt.sameDropLayer(sig, neg_mask, False)
        loss = self.nt.lossLayer(sim_target, target, "eloss", 0)
        """
        fc_drop = self.nt.sameDropLayer(ones, fc, False)
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        out_for_loss = self.nt.sameDropLayer(fc_drop, fc, False)
        loss = self.nt.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.nt.sameDropLayer(fc, fc_drop, False)
        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        target = self.nt.sameDropLayer(target, fc_drop, False)

        fc_drop = self.nt.fcLayer(fc, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc = self.nt.eltwiseLayer([fc, fc_drop], opt=0)
        loss = self.nt.lossLayer(fc, zeros, "eloss", 0.1)
        fc = self.nt.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.nt.scaleLayer(fc, in_place=False)

        fc = self.nt.fcLayer(fc, n_num, t="xavier", 
                replace='relu')

        fc_drop = self.nt.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.nt.dropLayer(fc_drop, 0.3)
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="relu")
        loss = self.nt.lossLayer(fc_drop, fc, "eloss", 0.1)
        fc_drop = self.nt.fcLayer(fc, n_num/2, replace="relu")
        fc_drop = self.nt.fcLayer(fc_drop, n_num, replace="relu")
        loss = self.nt.lossLayer(fc_drop, fc, "eloss", 0.1)

        fc = self.nt.dropLayer(fc, 0.1)
        fc = self.nt.normLayer(fc)

        """

        self.nt.netStr(netname)
        return

    def testPrint(self):
        print "Hello World!"

    def bail(self, sig, msg):
        print sig, ": ", msg
        exit()
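The net() above repeats one learned-gate block five times verbatim (sigmoid gate, an auxiliary eloss against zeros with weight 0.1, re-masking, batch norm, scale). A sketch of the block factored into a helper, using only calls that already appear above; Example #15 does the analogous factoring with atomLayers:

    def gate_block(nt, fc, ones, zeros, n_num, is_test):
        # one of the five identical gate blocks in net() above
        fc_drop = nt.sameDropLayer(ones, fc, False)
        fc_drop = nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = nt.fcLayer(fc_drop, n_num, replace="sigmoid")
        fc_drop = nt.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = nt.sameDropLayer(fc_drop, fc, False)
        nt.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = nt.sameDropLayer(fc, fc_drop, False)
        fc = nt.batchNormLayer(fc, gs=is_test, in_place=False)
        return nt.scaleLayer(fc, in_place=False)

net() would then interleave fc = self.nt.fcLayer(fc, n_num, t="xavier", replace='relu') with fc = gate_block(self.nt, fc, ones, zeros, n_num, is_test) for each of the five rounds.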
Example #11
File: Net.py Project: xiaojingyi/finTest2
    def net(self, netname, is_test):
        net = MyCaffeNet({"debug": True})
        data, label_multi, label, label_, clips = net.pyDataLayer(
                "DataTrain" if not is_test else "DataTest", 
                "DataTrain" if not is_test else "DataTest", 
                0 if not is_test else 1,
                tops = [
                    "data", 
                    "label_multi", 
                    "label", 
                    "label_",
                    "clips",
                    ]
                )

        points = [12]
        slices = net.sliceLayer(data, points)
        net.silenceLayer(label_)

        fc = net.fcLayer(slices[0], 1024, replace="relu")

        drop = net.groupDropLayer(slices[1], 800, 400, False)
        conv1 = net.convLayer(drop, [5, 1], [5, 1], 64)
        layer = net.normLayer(conv1)
        pool1 = net.poolLayer(layer, [6, 1], [2, 1], 
                "ave", pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1], 
                32, pad_wh=[1, 0])
        conv3 = net.convLayer(conv2, [4, 1], [2, 1], 
                16, pad_wh=[1, 0])
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        concat_same = net.fcLayer(concat, 1024, t="xavier", 
                replace='relu', dropout=self.dropout)
        fc = net.fcLayer(concat_same, 1024, t="xavier", 
                replace='relu', dropout=self.dropout)

        fc_resize = net.reshapeLayer(fc, [
            common.tlen, common.stream_len, 
            -1, 1])
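        # Caffe recurrent layers consume time-major blobs (T, N, ...) plus a
        # "clips" blob marking sequence boundaries, which is why fc is
        # reshaped to (tlen, stream_len, ...) above and flattened back
        # after the LSTM for the output fc layer.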
        lstm = net.rnnLayer(fc_resize, clips, 128, is_lstm=True )
        drop = net.dropLayer(lstm, 0.3, False)

        reshape = net.reshapeLayer(drop, [
            common.tlen * common.stream_len,
            -1, 1, 1
            ])
        out = net.fcLayer(reshape, 20, t="xavier", isout=True )

        loss = net.lossLayer(out, label_multi, "sigcross", 1)
        acc = net.accLayer(out, label)

        net.netStr(netname)
        return
Example #12
    def net(self, margin=1):
        net = MyCaffeNet({"debug": True})
        d_dim = 4012
        data, label = net.dataLayer("",
                                    "mem",
                                    128,
                                    tops=["data", "label"],
                                    memdim=[1, 1, d_dim + 20])
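        # The first 20 entries of each row are a packed multi-hot target;
        # slicing at [20] recovers it as y for the "sigcross" loss below,
        # while the label blob only drives the accuracy layer (plus a
        # zero-weight eloss so the top is still consumed).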
        xy = net.sliceLayer(data, [20])
        y = xy[0]

        data = xy[1]
        points = [12]
        slices = net.sliceLayer(data, points)

        fc = net.fcLayer(slices[0], 1024, replace="relu")

        drop = net.groupDropLayer(slices[1], 800, 400, False)
        conv1 = net.convLayer(drop, [5, 1], [5, 1], 64)
        layer = net.normLayer(conv1)
        pool1 = net.poolLayer(layer, [6, 1], [2, 1], "ave", pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1], 32, pad_wh=[1, 0])
        conv3 = net.convLayer(conv2, [4, 1], [2, 1], 16, pad_wh=[1, 0])
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        helper1 = net.fcLayer(concat,
                              2048,
                              t="xavier",
                              replace='relu',
                              dropout=self.dropout)
        helper1_2 = net.fcLayer(helper1,
                                2048,
                                t="xavier",
                                replace='relu',
                                dropout=self.dropout)

        helper1_out = net.fcLayer(helper1_2, 20, t="xavier", isout=True)

        helper1_loss = net.lossLayer(helper1_out, y, "sigcross", 1)
        loss_empty = net.lossLayer(label, label, "eloss", 0)
        helper1_acc = net.accLayer(helper1_out, label)

        net.netStr(self.netname)
        return
Example #13
    def nn_sauron(self):
        net = MyCaffeNet({"debug": True})
        data, label = net.dataLayer("",
                                    "mem",
                                    10,
                                    tops=["data", "label"],
                                    memdim=[1, 1, 6001])
        slices = net.sliceLayer(data, [3000, 6000])
        data1 = slices[0]
        data2 = slices[1]
        sim = slices[2]

        drop = 0.7
        lr = [1, 2]
        ip1 = net.fcLayer(data1, 2048, replace="relu", \
                dropout=drop, wname=["w1", "b1"], lr=lr)
        ip2 = net.fcLayer(ip1, 2048, replace="relu", \
                dropout=drop, wname=["w2", "b2"], lr=lr)
        ip3 = net.fcLayer(ip2, 128, replace="sigmoid", \
                dropout=0, wname=["w3", "b3"], lr=lr)
        decoder = net.fcLayer(ip3,
                              128,
                              replace="sigmoid",
                              wname=["dw", "db"],
                              lr=lr)

        ip1_ = net.fcLayer(data2, 2048, replace="relu", \
                dropout=drop, wname=["w1", "b1"], lr=lr)
        ip2_ = net.fcLayer(ip1_, 2048, replace="relu", \
                dropout=drop, wname=["w2", "b2"], lr=lr)
        ip3_ = net.fcLayer(ip2_, 128, replace="sigmoid", \
                dropout=0, wname=["w3", "b3"], lr=lr)
        decoder_ = net.fcLayer(ip3_,
                               128,
                               replace="sigmoid",
                               wname=["dw", "db"],
                               lr=lr)

        top = net.fcLayer(ip3, 10, isout=True)

        loss = net.lossLayer(top, label, "softmax", 0.1)
        loss_sim = net.lossLayer(decoder,
                                 decoder_,
                                 "contrastive",
                                 1,
                                 third_bottom=sim)

        acc = net.accLayer(top, label)
        net.netStr("nn_sauron.prototxt")
        return
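The memory blob here packs both siamese inputs and the pair indicator into a single row of width 3000 + 3000 + 1 = 6001, and the two towers share weights because they pass identical wname pairs. A NumPy sketch of the packing side, with invented toy data:

    import numpy as np

    x1 = np.random.rand(10, 3000)                   # tower-1 inputs
    x2 = np.random.rand(10, 3000)                   # tower-2 inputs
    sim = np.random.randint(0, 2, (10, 1)).astype(float)  # pair indicator
    rows = np.concatenate([x1, x2, sim], axis=1)    # width 6001 = memdim above
    a, b, s = np.split(rows, [3000, 6000], axis=1)  # what sliceLayer recovers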
Example #14
File: Net.py Project: xiaojingyi/finTest2
    def nn_sauron(self, fname, param):
        net = MyCaffeNet({"debug": True})
        data, label = net.dataLayer(
                "", "mem", 32,
                tops=["data", "label"],
                memdim=[1,1,6001]
                )
        slices = net.sliceLayer(data, [3000, 6000])
        data1 = slices[0]
        data2 = slices[1]
        sim = slices[2]

        drop = 0.7
        lr = [1, 2]
        ip1 = net.fcLayer(data1, 2048, replace="relu", \
                dropout=drop, wname=["w1", "b1"], lr=lr)
        ip2 = net.fcLayer(ip1, 2048, replace="relu", \
                dropout=drop, wname=["w2", "b2"], lr=lr)
        ip3 = net.fcLayer(ip2, 128, replace="sigmoid", \
                dropout=0, wname=["w3", "b3"], lr=lr)
        decoder = net.fcLayer(ip3, 4, replace="sigmoid", wname=["dw", "db"], lr=lr)

        ip1_ = net.fcLayer(data2, 2048, replace="relu", \
                dropout=drop, wname=["w1", "b1"], lr=lr)
        ip2_ = net.fcLayer(ip1_, 2048, replace="relu", \
                dropout=drop, wname=["w2", "b2"], lr=lr)
        ip3_ = net.fcLayer(ip2_, 128, replace="sigmoid", \
                dropout=0, wname=["w3", "b3"], lr=lr)
        decoder_ = net.fcLayer(ip3_, 4, replace="sigmoid", wname=["dw", "db"], lr=lr)

        #concat = net.concatLayer(*(ip3, ip3_))
        #top = net.fcLayer(concat, 20, isout=True)
        top = net.fcLayer(ip3, 10, isout=True)

        loss = net.lossLayer(top, label, "softmax", 1)
        loss_sim = net.lossLayer(decoder, decoder_, "contrastive", 0, third_bottom=sim, param={"margin": param["margin"]})

        acc = net.accLayer(top, label)
        net.netStr(fname)
        return
Example #15
File: Net.py Project: xiaojingyi/finTest2
class Net(object):
    def __init__(self, config):
        if not config:
            self.bail(-1, "no config: Net init")
        self.config = config
        self.debug = config["debug"]
        self.dropout = config['dropout']
        #super(Net, self).__init__(config)

    def trainNet(self, fname):
        self.net_model = MyCaffeNet({"debug": True})
        self.net(fname, False)

    def testNet(self, fname):
        self.net_model = MyCaffeNet({"debug": True})
        self.net(fname, True)

    def atomLayers(self, bottom, label, label_muti, onelen, loss_num, is_test,
                   ones, zeros):
        points = [7 + onelen]
        slices = self.net_model.sliceLayer(bottom, points)

        fc = self.net_model.fcLayer(slices[0], 1024, replace="relu")

        drop = self.net_model.groupDropLayer(slices[1], 800, 400, False)
        conv1 = self.net_model.convLayer(drop, [onelen, 1], [onelen, 1], 64)
        layer = self.net_model.normLayer(conv1)
        pool1 = self.net_model.convLayer(layer, [6, 1], [2, 1],
                                         64,
                                         pad_wh=[2, 0])
        conv2 = self.net_model.convLayer(pool1, [4, 1], [2, 1],
                                         32,
                                         pad_wh=[1, 0])
        conv3 = self.net_model.convLayer(conv2, [4, 1], [2, 1],
                                         16,
                                         pad_wh=[1, 0])
        flat = self.net_model.flattenLayer(conv3)

        concat = self.net_model.concatLayer(*[fc, flat])

        n_num = 2048
        fc = self.net_model.fcLayer(concat, n_num, t="xavier", replace='relu')
        fc_drop = self.net_model.sameDropLayer(ones, fc, False)
        fc_drop = self.net_model.fcLayer(fc_drop,
                                         n_num,
                                         replace="sigmoid",
                                         decay=[1, 2])
        fc_drop = self.net_model.fcLayer(fc_drop,
                                         n_num,
                                         replace="sigmoid",
                                         decay=[1, 2])
        #fc_drop = self.net_model.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net_model.sameDropLayer(fc_drop, fc, False)
        loss = self.net_model.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net_model.sameDropLayer(fc, fc_drop, False)
        fc = self.net_model.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net_model.scaleLayer(fc, in_place=False)

        fc = self.net_model.fcLayer(fc, n_num, t="xavier", replace='relu')
        fc_drop = self.net_model.sameDropLayer(ones, fc, False)
        fc_drop = self.net_model.fcLayer(fc_drop,
                                         n_num,
                                         replace="sigmoid",
                                         decay=[1, 2])
        fc_drop = self.net_model.fcLayer(fc_drop,
                                         n_num,
                                         replace="sigmoid",
                                         decay=[1, 2])
        #fc_drop = self.net_model.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net_model.sameDropLayer(fc_drop, fc, False)
        loss = self.net_model.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net_model.sameDropLayer(fc, fc_drop, False)
        fc = self.net_model.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net_model.scaleLayer(fc, in_place=False)

        out = self.net_model.fcLayer(fc, 20, t="xavier", isout=True)
        out_seg = self.net_model.sigmoidLayer(out)
        self.net_model.silenceLayer(out_seg)
        loss = self.net_model.lossLayer(out, label_muti, "sigcross", loss_num)
        acc = self.net_model.accLayer(out, label)
        return fc

    def net(self, netname, is_test):
        data, label_muti, label, label_, sim, zeros, ones = self.net_model.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=[
                "data", "label_muti", "label", "label_", "sim", "zeros", "ones"
            ])

        onelen = 10
        stocklen = 800
        weeklen = 7
        alllen = onelen + weeklen + onelen * stocklen
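        # alllen = 10 + 7 + 10 * 800 = 8017; the two cut points below split
        # data into three equal windows of that width, one per atomLayers call.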
        points = [alllen, alllen * 2]
        slices = self.net_model.sliceLayer(data, points)
        self.net_model.silenceLayer(label_)
        fc1 = self.atomLayers(slices[0], label, label_muti, onelen, 1.0,
                              is_test, ones, zeros)
        fc2 = self.atomLayers(slices[1], label, label_muti, onelen, 1.0,
                              is_test, ones, zeros)
        fc3 = self.atomLayers(slices[2], label, label_muti, onelen, 1.0,
                              is_test, ones, zeros)
        concat = self.net_model.concatLayer(*[fc1, fc2, fc3])
        concat = self.net_model.groupDropLayer(concat, 3, 2, False)

        fc = self.net_model.fcLayer(concat, 1024, t="xavier", replace='relu')
        fc_drop1 = self.net_model.dropLayer(fc, self.dropout, False)
        """
        fc_drop2 = self.net_model.dropLayer(fc, self.dropout, False)
        fc_drop3 = self.net_model.dropLayer(fc, self.dropout, False)
        """

        out = self.net_model.fcLayer(fc_drop1, 20, t="xavier", isout=True)
        out_seg = self.net_model.sigmoidLayer(out)
        self.net_model.silenceLayer(out_seg)
        self.net_model.silenceLayer(label_muti)

        #loss = self.net_model.lossLayer(out, label_muti, "sigcross", 1)
        loss = self.net_model.lossLayer(out,
                                        label,
                                        "softmax",
                                        1,
                                        param={"ignore_label": 0})
        acc = self.net_model.accLayer(out, label)
        """
        s1 = self.net_model.fcLayer(fc_drop1, 8, t="xavier", replace='sigmoid', wname=["s_w", "s_b"])
        s2 = self.net_model.fcLayer(fc_drop2, 8, t="xavier", replace='sigmoid', wname=["s_w", "s_b"])
        s3 = self.net_model.fcLayer(fc_drop3, 8, t="xavier", replace='sigmoid', wname=["s_w", "s_b"])
        loss_s1 = self.net_model.lossLayer(s1, s2, third_bottom=sim, t="contrastive", param={"margin": 2, "margin_sim": 2.5}, weight=0)
        loss_s2 = self.net_model.lossLayer(s1, s3, third_bottom=sim, t="contrastive", param={"margin": 2, "margin_sim": 2.5}, weight=0)
        loss_s3 = self.net_model.lossLayer(s2, s3, third_bottom=sim, t="contrastive", param={"margin": 2, "margin_sim": 2.5}, weight=0)
        """
        """
        out2 = self.net_model.fcLayer(fc, 20, t="xavier", isout=True)
        loss = self.net_model.lossLayer(out, label, "softmax", 1)

        out2_seg = self.net_model.softmaxLayer(out2)
        out_final = self.net_model.eltwiseLayer([out_seg, out2_seg], 0)

        acc = self.net_model.accLayer(out_final, label)
        """

        self.net_model.silenceLayer(sim)
        self.net_model.netStr(netname)
        return

    def testPrint(self):
        print "Hello World!"

    def bail(self, sig, msg):
        print sig, ": ", msg
        exit()
Example #16
File: Net.py Project: xiaojingyi/finTest2
 def trainNet(self, fname):
     self.net_model = MyCaffeNet({"debug": True})
     self.net(fname, False)
Example #17
    def alexNet(self, single_dim, stock_dim, num, margin=1):
        net = MyCaffeNet({"debug": True})
        d_dim = 36768
        data, label = net.dataLayer(
                "", "mem", 32,
                tops=["data", "label"],
                memdim=[1,1,d_dim*2+1]
                )
        datas = net.sliceLayer(data, [d_dim, d_dim*2])
        sim = datas[2]
        points = [single_dim]
        dim_nn = 2048
        drop_out = 0.7

        " the normal side"
        slices = net.sliceLayer(datas[0], points)

        fc = net.fcLayer(slices[0], dim_nn, replace="relu", wname=["fc1_w", "fc1_b"], lr=[1, 2])
        fc = net.ippLayer(fc, dim_nn, wname=["ipp_w", "ipp_b"], lr=[1, 2])

        conv1 = net.convLayer( slices[1], [stock_dim, 1], [stock_dim, 1], 64, wname=["conv1_w", "conv1_b"], lr=[1, 2])
        pool1 = net.poolLayer(conv1, [5, 1], [1, 1], "ave")
        norm1 = net.normLayer(pool1)
        conv2 = net.convLayer(norm1, [5, 1], [2, 1], 64, wname=["conv2_w", "conv2_b"], lr=[1, 2])
        pool2 = net.poolLayer(conv2, [5, 1], [1, 1], "ave")
        conv3 = net.convLayer(pool2, [5, 1], [1, 1], 32, pad_wh=[4,0], wname=["conv3_w", "conv3_b"], lr=[1, 2])
        pool3 = net.poolLayer(conv3, [5, 1], [2, 1], "max")

        flat1 = net.flattenLayer(pool3)
        concat = net.concatLayer(*[fc, flat1])

        top1 = net.fcLayer(concat, dim_nn, replace="relu", dropout=drop_out, wname=["fc2_w", "fc2_b"], lr=[1, 2])
        top2 = net.fcLayer(top1, dim_nn, replace="relu", dropout=drop_out, wname=["fc3_w", "fc3_b"], lr=[1, 2])
        decoder = net.fcLayer(top2, 128, replace="sigmoid", wname=["dw", "db"], lr=[1, 2])
        " the normal side start"

        " the pair side start"
        slices = net.sliceLayer(datas[1], points)

        fc = net.fcLayer(slices[0], dim_nn, replace="relu", wname=["fc1_w", "fc1_b"], lr=[1, 2])
        fc = net.ippLayer(fc, dim_nn, wname=["ipp_w", "ipp_b"], lr=[1, 2])

        conv1 = net.convLayer( slices[1], [stock_dim, 1], [stock_dim, 1], 64, wname=["conv1_w", "conv1_b"], lr=[1, 2])
        pool1 = net.poolLayer(conv1, [5, 1], [1, 1], "ave")
        norm1 = net.normLayer(pool1)
        conv2 = net.convLayer(norm1, [5, 1], [2, 1], 64, wname=["conv2_w", "conv2_b"], lr=[1, 2])
        pool2 = net.poolLayer(conv2, [5, 1], [1, 1], "ave")
        conv3 = net.convLayer(pool2, [5, 1], [1, 1], 32, pad_wh=[4,0], wname=["conv3_w", "conv3_b"], lr=[1, 2])
        pool3 = net.poolLayer(conv3, [5, 1], [2, 1], "max")

        flat1 = net.flattenLayer(pool3)
        concat = net.concatLayer(*[fc, flat1])

        top1 = net.fcLayer(concat, dim_nn, replace="relu", dropout=drop_out, wname=["fc2_w", "fc2_b"], lr=[1, 2])
        top2_ = net.fcLayer(top1, dim_nn, replace="relu", dropout=drop_out, wname=["fc3_w", "fc3_b"], lr=[1, 2])
        decoder_ = net.fcLayer(top2_, 128, replace="sigmoid", wname=["dw", "db"], lr=[1, 2])
        " the pair side end"

        top3 = net.fcLayer(top2, 20, isout=True, lr=[1, 2])
        loss = net.lossLayer(top3, label, "softmax", 1)
        loss_sim = net.lossLayer(decoder, decoder_, "contrastive", 0, third_bottom=sim, param={"margin": margin})
        acc = net.accLayer(top3, label)
        net.netStr(self.netname)
        return
Example #18
File: Net.py Project: xiaojingyi/finTest2
class Net(object):
    def __init__(self, config):
        if not config:
            self.bail(-1, "no config: Net init")
        self.config = config
        self.debug = config["debug"]
        self.dropout = config['dropout']
        #super(Net, self).__init__(config)

    def trainNet(self, fname):
        self.nt = MyCaffeNet({"debug": True})
        #self.convNet(fname, False)
        self.net(fname, False)

    def testNet(self, fname):
        self.nt = MyCaffeNet({"debug": True})
        #self.convNet(fname, True)
        self.net(fname, True)

    def convNet(self, netname, is_test):
        data, pridata = self.nt.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=["data", "pridata"])
        self.nt.silenceLayer(pridata)
        data_use, pos_mask, neg_mask = self.nt.drop2Layer(data, 0.1)
        target = self.nt.sameDropLayer(data, neg_mask)
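        # Presumed denoising setup: drop2Layer zeroes a fraction (0.1) of the
        # input and emits keep/drop masks, and sameDropLayer(data, neg_mask)
        # builds the reconstruction target from the dropped positions.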

        data_use = self.nt.reshapeLayer(data_use, [0, 1, 2, 1024])
        target = self.nt.reshapeLayer(target, [0, 1, 2, 1024])

        conv = self.nt.convLayer(data_use,
                                 ksize_wh=[32, 2],
                                 stride_wh=[4, 1],
                                 nout=64,
                                 pad_wh=[2, 1],
                                 t="xavier",
                                 std=0.1,
                                 replace="relu")
        pool = self.nt.poolLayer(
            conv,
            ksize_wh=[3, 3],
            stride_wh=[1, 1],
            t="max",
        )
        pool = self.nt.dropLayer(pool, 0.5)
        """
        conv = self.nt.convLayer(pool,
                ksize_wh = [5, 1], 
                stride_wh = [1, 1], 
                nout = 64, 
                pad_wh = [2, 0],
                t="xavier", replace="sigmoid")
        deconv = self.nt.deConvLayer(conv,
                ksize_wh = [5, 1], 
                stride_wh = [1, 1], 
                pad_wh = [2, 0],
                nout = 128, 
                )
        deconv = self.nt.sigmoidLayer(deconv)

        conv = self.nt.batchNormLayer(conv, gs=is_test)
        conv = self.nt.scaleLayer(conv)
        conv = self.nt.reluLayer(conv)
        deconv = self.nt.batchNormLayer(deconv, gs=is_test)
        deconv = self.nt.scaleLayer(deconv)
        """
        unpool = self.nt.unpoolLayer(pool,
                                     ksize_wh=[3, 3],
                                     stride_wh=[1, 1],
                                     nout=64)
        deconv = self.nt.deConvLayer(
            unpool,
            ksize_wh=[32, 2],
            stride_wh=[4, 1],
            pad_wh=[2, 1],
            nout=1,
        )
        deconv = self.nt.reluLayer(deconv)
        out = self.nt.deConvLayer(
            deconv,
            ksize_wh=[17, 1],
            stride_wh=[1, 1],
            pad_wh=[8, 0],
            nout=1,
        )

        loss = self.nt.lossLayer(out,
                                 target,
                                 "sigcross",
                                 1,
                                 third_bottom=neg_mask)

        sig = self.nt.sigmoidLayer(out)
        sim_target = self.nt.sameDropLayer(sig, neg_mask)
        loss = self.nt.lossLayer(sim_target, target, "eloss", 0)

        self.nt.netStr(netname)

    def net(self, netname, is_test):
        data, pridata = self.nt.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=["data", "pridata"])
        self.nt.silenceLayer(pridata)
        data_use, pos_mask, neg_mask = self.nt.drop2Layer(data, 0.1)
        self.nt.silenceLayer(pos_mask)
        target = self.nt.sameDropLayer(data, neg_mask)

        fc = self.nt.fcLayer(data_use, 2048, t="xavier", replace='relu')
        fc = self.nt.fcLayer(fc, 2048, t="xavier", replace='relu')
        fc = self.nt.fcLayer(fc, 2048, t="xavier", replace='relu')
        out = self.nt.fcLayer(fc, 1024, t="xavier", isout=True)

        loss = self.nt.lossLayer(out,
                                 target,
                                 "sigcross",
                                 1,
                                 third_bottom=neg_mask)
        sig = self.nt.sigmoidLayer(out)
        sim_target = self.nt.sameDropLayer(sig, neg_mask)
        loss = self.nt.lossLayer(sim_target, target, "eloss", 0)
        """
        fc = self.nt.normLayer(fc)

        """

        self.nt.netStr(netname)
        return

    def testPrint(self):
        print "Hello World!"

    def bail(self, sig, msg):
        print sig, ": ", msg
        exit()
Example #19
    def testNet(self, single_dim, stock_dim, num, margin=1):
        net = MyCaffeNet({"debug": True})
        d_dim = 4012
        data, label = net.dataLayer(
                "", "mem", 128,
                tops=["data", "label"],
                memdim=[1,1,d_dim]
                )
        """
        """
        points = [12]
        slices = net.sliceLayer(data, points)

        fc = net.fcLayer(slices[0], 1024, replace="relu")

        #drop = slices[1]
        drop = net.groupDropLayer(slices[1], 800, 400, False)
        conv1 = net.convLayer(drop, [5, 1], [5, 1], 64)
        layer = net.normLayer(conv1)
        pool1 = net.poolLayer(layer, [6, 1], [2, 1], "max", pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1], 32, pad_wh=[1, 0])
        #conv_tmp = net.convLayer(layer, [6, 1], [2, 1], 64, pad_wh=[2, 0])
        #conv2 = net.convLayer(conv_tmp, [4, 1], [2, 1], 32, pad_wh=[1, 0])
        conv3 = net.convLayer(conv2, [4, 1], [2, 1], 16, pad_wh=[1, 0])
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        helper1 = net.fcLayer(concat, 2048, t="xavier", 
                replace='relu', dropout=self.dropout)
        helper1_2 = net.fcLayer(helper1, 2048, t="xavier",
                replace='relu', dropout=self.dropout)

        helper1_out = net.fcLayer(helper1_2, 20, t="xavier", isout=True)
        helper1_loss = net.lossLayer(helper1_out, label, "softmax", 1)
        helper1_acc = net.accLayer(helper1_out, label)

        net.netStr(self.netname)
        return
Example #20
File: Net.py Project: xiaojingyi/finTest2
 def testNet(self, fname):
     self.nt = MyCaffeNet({"debug": True})
     #self.convNet(fname, True)
     self.net(fname, True)
Example #21
    def testNetSim(self, single_dim, stock_dim, num, margin=1):
        net = MyCaffeNet({"debug": True})
        d_dim = 4012
        data, label = net.dataLayer(
                "", "mem", 64,
                tops=["data", "label"],
                memdim=[1,1,d_dim*2+1]
                )
        datas = net.sliceLayer(data, [d_dim, d_dim*2])
        sim = datas[2]
        points = [12]

        " the normal side"
        slices = net.sliceLayer(datas[0], points)
        fc = net.fcLayer(slices[0], 1024, replace="relu", wname=["fc1_w", "fc1_b"], lr=[1, 2])

        drop = net.groupDropLayer(slices[1], 800, 600, False)
        conv1 = net.convLayer(drop, [5, 1], [5, 1], 64, wname=["conv1_w", "conv1_b"], lr=[1, 2])
        layer = net.normLayer(conv1)
        pool1 = net.poolLayer(layer, [6, 1], [2, 1], "ave", pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1], 32, pad_wh=[1, 0], wname=["conv2_w", "conv2_b"], lr=[1, 2])
        conv3 = net.convLayer(conv2, [4, 1], [2, 1], 16, pad_wh=[1, 0], wname=["conv3_w", "conv3_b"], lr=[1, 2])
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        fc = net.fcLayer(concat, 2048, replace='relu', dropout=0, wname=["fc2_w", "fc2_b"], lr=[1, 2])
        drop = net.dropLayer(fc, self.dropout)
        fc = net.fcLayer(drop, 2048, replace='relu', dropout=0, wname=["fc3_w", "fc3_b"], lr=[1, 2])
        drop1 = net.dropLayer(fc, self.dropout)
        decoder = net.fcLayer(drop1, 128, replace="sigmoid", wname=["dw", "db"], lr=[1, 2])
        " the normal side start"

        out = net.fcLayer(drop1, 20, t="xavier", isout=True)

        " the pair side start"
        slices = net.sliceLayer(datas[1], points)
        fc = net.fcLayer(slices[0], 1024, replace="relu", wname=["fc1_w", "fc1_b"], lr=[1, 2])

        drop = net.groupDropLayer(slices[1], 800, 600, False)
        conv1 = net.convLayer(drop, [5, 1], [5, 1], 64, wname=["conv1_w", "conv1_b"], lr=[1, 2])
        layer = net.normLayer(conv1)
        pool1 = net.poolLayer(layer, [6, 1], [2, 1], "ave", pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1], 32, pad_wh=[1, 0], wname=["conv2_w", "conv2_b"], lr=[1, 2])
        conv3 = net.convLayer(conv2, [4, 1], [2, 1], 16, pad_wh=[1, 0], wname=["conv3_w", "conv3_b"], lr=[1, 2])
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        fc = net.fcLayer(concat, 2048, replace='relu', dropout=0, wname=["fc2_w", "fc2_b"], lr=[1, 2])
        drop = net.dropLayer(fc, self.dropout)
        fc = net.fcLayer(drop, 2048, replace='relu', dropout=0, wname=["fc3_w", "fc3_b"], lr=[1, 2])
        drop1_ = net.dropLayer(fc, self.dropout)
        decoder_ = net.fcLayer(drop1_, 128, replace="sigmoid", wname=["dw", "db"], lr=[1, 2])
        " the pair side end"

        loss = net.lossLayer(out, label, "softmax", 0.01)
        acc = net.accLayer(out, label)

        loss_sim = net.lossLayer(decoder, decoder_, "contrastive", 1, third_bottom=sim, param={"margin": margin})
        net.netStr(self.netname)
        return
Example #22
    def alexNet(self, margin=1):
        net = MyCaffeNet({"debug": True})
        onelen = 24
        d_dim = onelen + onelen * 800
        data, label = net.dataLayer(
                "", "mem", 32,
                tops=["data", "label"],
                memdim=[1,1,d_dim]
                )
        points = [onelen]
        slices = net.sliceLayer(data, points)

        fc = net.fcLayer(slices[0], 1024, replace="relu")

        conv1 = net.convLayer(slices[1], [onelen, 1], [onelen, 1], 64)
        layer = net.normLayer(conv1)
        pool1 = net.poolLayer(layer, [6, 1], [2, 1], "ave", pad_wh=[2, 0])
        conv2 = net.convLayer(pool1, [4, 1], [2, 1], 32, pad_wh=[1, 0])
        conv3 = net.convLayer(conv2, [4, 1], [2, 1], 16, pad_wh=[1, 0])
        flat = net.flattenLayer(conv3)

        concat = net.concatLayer(*[fc, flat])

        helper1 = net.fcLayer(concat, 2048, replace='relu', dropout=self.dropout)
        helper1_2 = net.fcLayer(helper1, 2048, replace='relu', dropout=self.dropout)
        helper1_out = net.fcLayer(helper1_2, 20, t="xavier", isout=True)
        helper1_loss = net.lossLayer(helper1_out, label, "softmax", 1)
        helper1_acc = net.accLayer(helper1_out, label)

        net.netStr(self.netname)
        return
Example #23
class NetTest(object):
    def __init__(self, config):
        if not config:
            self.bail(-1, "no config: NetTest init")
        self.config = config
        self.debug = config["debug"]
        #super(NetTest, self).__init__(config)
        self.dropout = 0.6

    def trainNet(self, fname):
        self.net = MyCaffeNet({"debug": True})
        self.fcNet(fname, False)

    def testNet(self, fname):
        self.net = MyCaffeNet({"debug": True})
        self.fcNet(fname, True)

    def kakalaBlock(self, bottom, per=0.5):
        fc = self.net.fcLayer(bottom, 512, replace="relu")
        fc = self.net.fcLayer(fc, 512, replace="relu")
        out = self.net.binGateLayer(fc, bottom, per)
        return out

    def treeBlock(self, bottom, num=64):
        fc = self.net.fcLayer(bottom, num, replace="relu", dropout=0.5)
        out = self.net.fcLayer(fc, 20, isout=True)
        return fc, out

    def resNetMy(self, netname, is_test):
        data, label = self.net.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
        )
        outs = []
        n_num = 128
        data = self.net.dropLayer(data, 0.5, in_place=False)
        fc = self.net.fcLayer(data, n_num, replace="relu", dropout=0.5)
        for i in range(10):
            fc, out = self.treeBlock(fc, n_num)
            outs.append(out)
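        # eltwiseLayer(outs, 1) sums the ten 20-way logit heads, so the
        # softmax loss below trains an additive ensemble of treeBlocks.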
        out = self.net.eltwiseLayer(outs, 1)  # sum
        loss = self.net.lossLayer(out, label, "softmax", 1)
        acc = self.net.accLayer(out, label)

        self.net.netStr(netname)
        return

    def fcNet(self, netname, is_test):
        data, label, zeros, ones, ones_ = self.net.pyDataLayer(
            "DataTrain" if not is_test else "DataTest",
            "DataTrain" if not is_test else "DataTest",
            0 if not is_test else 1,
            tops=["data", "label", "zeros", "ones", "ones_"])

        n_num = 128

        fc = self.net.fcLayer(data, n_num, replace="relu")
        fc_drop = fc

        # drop begin
        #fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        #fc_drop = self.net.sameDropLayer(ones, fc, False)
        fc_drop = self.net.fcLayer(fc_drop,
                                   n_num,
                                   replace="sigmoid",
                                   decay=[1, 0])
        fc_drop = self.net.fcLayer(fc_drop,
                                   n_num,
                                   replace="sigmoid",
                                   decay=[1, 0])
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        # drop end

        fc_final = self.net.sameDropLayer(fc, fc_drop, False)
        fc_final = self.net.batchNormLayer(fc_final,
                                           gs=is_test,
                                           in_place=False)
        fc_final = self.net.scaleLayer(fc_final, in_place=False)
        fc = fc_final
        """
        fc_drop = self.net.sameDropLayer(ones_, fc_drop, False)
        fc_drop = self.net.eltwiseLayer([fc_drop, ones], opt=1)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        fc_drop = self.net.fcLayer(
                fc_drop, n_num, replace="sigmoid",
                decay=[1, 2])
        #fc_drop = self.net.eltwiseLayer([fc_drop, zeros], opt=1)
        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)
        fc = self.net.sameDropLayer(fc, fc_drop, False)
        fc = self.net.batchNormLayer(fc, gs=is_test, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        """
        """
        # earlier experiments, kept for reference
        fc = self.net.fcLayer(fc, n_num, replace="relu")

        out_for_loss = self.net.sameDropLayer(fc_drop, fc, False)
        loss = self.net.lossLayer(out_for_loss, zeros, "eloss", 0.1)

        # reach 56.3%
        fc_drop = self.net.fcLayer(fc, n_num // 2, replace="relu")
        fc_drop = self.net.fcLayer(fc_drop, n_num, replace="relu")
        loss = self.net.lossLayer(fc_drop, fc, "eloss", 0.1)
        # reach 56.3%

        fc_drop = self.net.fcLayer(fc, n_num, replace="sigmoid")
        fc = self.net.eltwiseLayer([fc, fc_drop], opt=0)
        loss = self.net.lossLayer(fc, zeros, "eloss", 0.1)
        fc = self.net.batchNormLayer(fc, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc = self.net.dropLayer(fc, 0.5)


        fc = fc1
        fc_drop = self.net.fcLayer(fc, n_num // 2, replace="relu")
        fc_drop = self.net.dropLayer(fc_drop, 0.5)
        fc_drop = self.net.fcLayer(fc_drop, n_num, replace="relu")
        loss = self.net.lossLayer(fc_drop, fc, "eloss", 0.1)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(fc, n_num // 2, replace="relu")
        fc_drop = self.net.dropLayer(fc_drop, 0.5)
        fc_drop = self.net.fcLayer(fc_drop, n_num, replace="relu")
        loss = self.net.lossLayer(fc_drop, fc, "eloss", 0.1)

        fc_plus = self.net.eltwiseLayer([fc, fc_drop], opt=1)
        loss = self.net.lossLayer(fc_plus, zeros, "eloss", 0.1)

        fc = self.net.dropLayer(fc, 0.5, in_place=False)
        fc = fc2

        fc = self.net.fcLayer(fc1_d, n_num, replace="relu")
        fc = self.net.dropLayer(fc, 0.5, in_place=False)

        fc = self.net.fcLayer(fc, n_num, replace="relu")
        fc_drop = self.net.fcLayer(fc, n_num, replace="sigmoid")
        fc = self.net.eltwiseLayer([fc, fc_drop], opt=0)
        loss = self.net.lossLayer(fc, zeros, "eloss", 0.1)
        fc = self.net.batchNormLayer(fc, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        fc_in = self.net.fcLayer(fc, n_num, replace="sigmoid")
        fc_drop = self.net.fcLayer(fc_in, n_num, replace="sigmoid")
        fc = self.net.eltwiseLayer([fc_in, fc_drop], opt=0)
        loss = self.net.lossLayer(fc, zeros, "eloss", 0.3)
        fc = self.net.batchNormLayer(fc, in_place=False)
        fc = self.net.scaleLayer(fc, in_place=False)

        """

        # consume the auxiliary data tops so no blob is left dangling
        self.net.silenceLayer(zeros)
        self.net.silenceLayer(ones)
        self.net.silenceLayer(ones_)
        fc = self.net.fcLayer(fc, n_num, replace="sigmoid")
        out = self.net.fcLayer(fc, 2, isout=True)   # binary classification head
        loss = self.net.lossLayer(out, label, "softmax", 1)
        acc = self.net.accLayer(out, label)

        self.net.netStr(netname)
        return
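
    @staticmethod
    def _learnedDropSketch(h, w1, w2, lam=0.1):
        """NumPy sketch of fcNet's learned-drop block (illustrative only).

        One plausible reading of the custom sameDropLayer/eloss combo, an
        assumption rather than the layers' confirmed semantics: two sigmoid
        FC layers predict a soft mask g from the features, the penalized
        path is h * g, the kept path h * (1 - g), and eloss pulls the
        penalized path toward the zeros blob with weight `lam`.
        """
        import numpy as np
        def sig(z):
            return 1.0 / (1.0 + np.exp(-z))
        g = sig(sig(h @ w1) @ w2)                        # the two sigmoid fcLayers
        penalized = h * g                                # sameDropLayer(fc_drop, fc)
        aux_loss = lam * float(np.mean(penalized ** 2))  # eloss vs. zeros
        kept = h * (1.0 - g)                             # sameDropLayer(fc, fc_drop)
        return kept, aux_loss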

    def testPrint(self):
        print("Hello World!")

    def bail(self, sig, msg):
        print(sig, ":", msg)
        exit()
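
Finally, a hypothetical driver for NetTest; {"debug": True} is the one config key the class reads, but the prototxt filenames below are illustrative, not taken from the source:

if __name__ == "__main__":
    runner = NetTest({"debug": True})
    runner.trainNet("fc_train.prototxt")   # writes the train-phase net via netStr
    runner.testNet("fc_test.prototxt")     # writes the test-phase net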