Code Example #1
File: Vis.py Project: bitores/MachineLearning
def main():
    nn = NNDist()
    save = False
    load = False

    lr = 0.001
    lb = 0.001
    epoch = 1000
    record_period = 4

    x, y = DataUtil.gen_spiral(50, 3, 3, 2.5)

    if not load:
        nn.build([x.shape[1], 6, 6, 6, y.shape[1]])
        nn.optimizer = "Adam"
        nn.preview()
        nn.fit(x, y, lr=lr, lb=lb, verbose=1, record_period=record_period,
               epoch=epoch, batch_size=128, train_only=True,
               animation_params={"show": True, "mp4": False, "period": record_period})
        if save:
            nn.save()
        nn.visualize2d(x, y)
        nn.draw_results()
    else:
        nn.load()
        nn.preview()
        nn.evaluate(x, y)

    nn.show_timing_log()
Code Example #2
File: Mnist.py Project: oldtimestj/MachineLearning
def main():
    nn = NNDist()
    verbose = 2

    lr = 0.001
    epoch = 50
    record_period = 5

    timing = Timing(enabled=True)
    timing_level = 1
    nn.feed_timing(timing)

    x, y = DataUtil.get_dataset("mnist",
                                "../../_Data/mnist.txt",
                                quantized=True,
                                one_hot=True)

    nn.add(ReLU((x.shape[1], 400)))
    nn.add(CrossEntropy((y.shape[1], )))

    nn.fit(x,
           y,
           lr=lr,
           epoch=epoch,
           record_period=record_period,
           verbose=verbose,
           train_rate=0.8)
    nn.draw_logs()

    timing.show_timing_log(timing_level)
Code Example #3
def visualize_nn():
    x, y = DataUtil.gen_xor()
    nn = NNDist()
    nn.add("ReLU", (x.shape[1], 6))
    nn.add("ReLU", (6,))
    nn.add("Softmax", (y.shape[1],))
    nn.fit(x, y, epoch=1000, draw_detailed_network=True)
Code Example #4
File: MergedNB.py Project: zxsted/MachineLearning-1
    def feed_data(self, x, y, sample_weight=None):
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
        x, y, wc, features, feat_dics, label_dic = DataUtil.quantize_data(
            x, y, wc=self._whether_continuous, separate=True)
        if self._whether_continuous is None:
            self._whether_continuous = wc
            self._whether_discrete = ~self._whether_continuous
        self.label_dic = label_dic
        discrete_x, continuous_x = x
        cat_counter = np.bincount(y)
        self._cat_counter = cat_counter
        labels = [y == value for value in range(len(cat_counter))]

        labelled_x = [discrete_x[ci].T for ci in labels]
        self._multinomial._x, self._multinomial._y = x, y
        self._multinomial._labelled_x, self._multinomial._label_zip = labelled_x, list(zip(labels, labelled_x))
        self._multinomial._cat_counter = cat_counter
        self._multinomial._feat_dics = [dic for i, dic in enumerate(feat_dics) if self._whether_discrete[i]]
        self._multinomial._n_possibilities = [len(feats) for i, feats in enumerate(features)
                                              if self._whether_discrete[i]]
        self._multinomial.label_dic = label_dic

        labelled_x = [continuous_x[label].T for label in labels]
        self._gaussian._x, self._gaussian._y = continuous_x.T, y
        self._gaussian._labelled_x, self._gaussian._label_zip = labelled_x, labels
        self._gaussian._cat_counter, self._gaussian.label_dic = cat_counter, label_dic

        self.feed_sample_weight(sample_weight)
Code Example #5
    def feed_data(self, x, y, sample_weight=None):
        #
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
        x, y, _, features, feat_dicts, label_dict = DataUtil.quantize_data(x, y, wc=np.array([False] * len(x[0])))

        # Use NumPy's bincount to count the number of samples in each class
        cat_counter = np.bincount(y)
        # Record the number of distinct values each feature dimension can take
        n_possibilities = [len(feats) for feats in features]

        # Get the (boolean) indices of each class's samples
        labels = [y == value for value in range(len(cat_counter))]

        # Use those indices to split the input data by class
        labelled_x = [x[ci].T for ci in labels]

        # Update the model's attributes
        self._x, self._y = x, y
        self._labelled_x, self._label_zip = labelled_x, list(zip(labels, labelled_x))
        self._cat_counter, self._feat_dicts, self._n_possibilities = cat_counter, feat_dicts, n_possibilities
        self.label_dict = label_dict

        # Call the sample-weight handler to update the arrays recording the conditional probabilities
        self.feed_sample_weight(sample_weight)
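The comments above walk through feed_data's bookkeeping: bincount for per-class counts, boolean masks for per-class indices, and a transpose so each class's data is stored as (feature, sample). A minimal sketch of those intermediates on made-up toy data (plain NumPy only; DataUtil and the surrounding class are not needed for the illustration):

import numpy as np

# Toy quantized data: 5 samples, 2 discrete features, 2 classes
x = np.array([[0, 1], [1, 0], [0, 0], [1, 1], [0, 1]])
y = np.array([0, 1, 0, 1, 1])

cat_counter = np.bincount(y)                                # array([2, 3]): 2 samples of class 0, 3 of class 1
labels = [y == value for value in range(len(cat_counter))]  # one boolean mask per class
labelled_x = [x[ci].T for ci in labels]                     # per-class data, transposed to (feature, sample)

print(labelled_x[0])  # class-0 samples as columns: [[0 0], [1 0]]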
Code Example #6
def main():
    nn = NNDist()
    save = False
    load = False
    show_loss = True
    train_only = False
    verbose = 2

    lr = 0.001
    lb = 0.001
    epoch = 5
    record_period = 1

    x, y = DataUtil.get_dataset("mnist", "../../../../_Data/mnist.txt", quantized=True, one_hot=True)
    batch_size = 128

    if not load:
        nn.add("ReLU", (x.shape[1], 1024))
        nn.add("ReLU", (1024,))
        nn.add("CrossEntropy", (y.shape[1],))
        nn.optimizer = "Adam"
        nn.preview()
        nn.fit(x, y, lr=lr, lb=lb,
               epoch=epoch, batch_size=batch_size, record_period=record_period,
               show_loss=show_loss, train_only=train_only, do_log=True, verbose=verbose)
        if save:
            nn.save()
        nn.draw_results()
    else:
        nn.load()
        nn.preview()
        print(nn.evaluate(x, y)[0])

    nn.show_timing_log()
Code Example #7
File: Vis.py Project: reder66/MachineLearning
def main():
    nn = NNDist()
    save = False
    load = False

    lr = 0.001
    lb = 0.001
    epoch = 1000
    record_period = 4

    x, y = DataUtil.gen_spiral(50, 3, 3, 2.5)

    if not load:
        nn.build([x.shape[1], 6, 6, 6, y.shape[1]])
        nn.optimizer = "Adam"
        nn.preview()
        nn.fit(x, y, lr=lr, lb=lb, verbose=1, record_period=record_period,
               epoch=epoch, batch_size=128, train_only=True,
               animation_params={"show": True, "mp4": True, "period": record_period})
        if save:
            nn.save()
        nn.visualize2d(x, y)
        nn.draw_results()
    else:
        nn.load()
        nn.preview()
        nn.evaluate(x, y)

    nn.show_timing_log()
Code Example #8
def main():

    nn = NN()
    epoch = 50

    x, y = DataUtil.get_dataset("cifar10", "../_Data/cifar10.txt", quantized=True, one_hot=True)

    x = x.reshape(len(x), 3, 32, 32)
    nn.add("ConvReLU", (x.shape[1:], (32, 3, 3)))
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("MaxPool", ((3, 3),), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((64, 3, 3),))
    nn.add("ConvReLU", ((64, 3, 3),))
    nn.add("AvgPool", ((3, 3),), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("AvgPool", ((3, 3),), 2)
    nn.add("ReLU", (512,))
    nn.add("Identical", (64,), apply_bias=False)
    nn.add("Normalize", activation="ReLU")
    nn.add("Dropout")
    nn.add("CrossEntropy", (y.shape[1],))

    # nn.disable_timing()
    nn.fit(x, y, lr=0.001, epoch=epoch, train_rate=0.8,
           metrics=["acc"], record_period=1, verbose=4)
    nn.evaluate(x, y)
    nn.show_timing_log()
    nn.draw_logs()
Code Example #9
def main():

    nn = NNDist()
    epoch = 1000

    timing = Timing(enabled=True)
    timing_level = 1
    nn.feed_timing(timing)

    x, y = DataUtil.gen_spiral(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    nn.add(Softmax((y.shape[1], )))

    nn.fit(x,
           y,
           epoch=epoch,
           verbose=2,
           metrics=["acc", "f1_score"],
           train_rate=0.8)
    nn.draw_logs()
    nn.visualize_2d(x, y)

    timing.show_timing_log(timing_level)
Code Example #10
File: Mnist.py Project: bitores/MachineLearning
    def __init__(self, im=None, om=None):
        self._im, self._om = im, om
        self._cursor = self._indices = None
        self._x, self._y = DataUtil.get_dataset("mnist", "../../_Data/mnist.txt", quantized=True, one_hot=True)
        self._x = self._x.reshape(-1, 28, 28)
        self._x_train, self._x_test = self._x[:1800], self._x[1800:]
        self._y_train, self._y_test = self._y[:1800], self._y[1800:]
Code Example #11
File: TestLinear.py Project: bitores/MachineLearning
def main():

    x, y = DataUtil.gen_two_clusters(n_dim=2, dis=2.5, center=5, one_hot=False)
    y[y == 0] = -1

    animation_params = {
        "show": False, "period": 50, "mp4": False,
        "dense": 400, "draw_background": True
    }

    svm = LinearSVM(animation_params=animation_params)
    svm.fit(x, y)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400)

    svm = TFLinearSVM(animation_params=animation_params)
    svm.fit(x, y)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400)

    if TorchLinearSVM is not None:
        svm = TorchLinearSVM(animation_params=animation_params)
        svm.fit(x, y)
        svm.evaluate(x, y)
        svm.visualize2d(x, y, padding=0.1, dense=400)

    perceptron = Perceptron()
    perceptron.fit(x, y)
    perceptron.evaluate(x, y)
    perceptron.visualize2d(x, y, padding=0.1, dense=400)

    perceptron.show_timing_log()
Code Example #12
def visualize_nn():
    x, y = DataUtil.gen_xor()
    nn = NNDist()
    nn.add("ReLU", (x.shape[1], 6))
    nn.add("ReLU", (6, ))
    nn.add("Softmax", (y.shape[1], ))
    nn.fit(x, y, epoch=1000, draw_detailed_network=True)
Code Example #13
def main():

    x, y = DataUtil.gen_two_clusters(n_dim=2, dis=2.5, center=5, one_hot=False)
    y[y == 0] = -1

    animation_params = {
        "show": False, "period": 50, "mp4": False,
        "dense": 400, "draw_background": True
    }

    svm = LinearSVM(animation_params=animation_params)
    svm.fit(x, y)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400)

    svm = TFLinearSVM(animation_params=animation_params)
    svm.fit(x, y)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400)

    if TorchLinearSVM is not None:
        svm = TorchLinearSVM(animation_params=animation_params)
        svm.fit(x, y)
        svm.evaluate(x, y)
        svm.visualize2d(x, y, padding=0.1, dense=400)

    perceptron = Perceptron()
    perceptron.fit(x, y)
    perceptron.evaluate(x, y)
    perceptron.visualize2d(x, y, padding=0.1, dense=400)

    perceptron.show_timing_log()
Code Example #14
File: MergedNB.py Project: bitores/MachineLearning
    def feed_data(self, x, y, sample_weight=None):
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
        x, y, wc, features, feat_dicts, label_dict = DataUtil.quantize_data(
            x, y, wc=self._whether_continuous, separate=True)
        if self._whether_continuous is None:
            self._whether_continuous = wc
            self._whether_discrete = ~self._whether_continuous
        self.label_dict = label_dict

        discrete_x, continuous_x = x

        cat_counter = np.bincount(y)
        self._cat_counter = cat_counter

        labels = [y == value for value in range(len(cat_counter))]
        labelled_x = [discrete_x[ci].T for ci in labels]

        self._multinomial._x, self._multinomial._y = x, y
        self._multinomial._labelled_x, self._multinomial._label_zip = labelled_x, list(zip(labels, labelled_x))
        self._multinomial._cat_counter = cat_counter
        self._multinomial._feat_dicts = [dic for i, dic in enumerate(feat_dicts) if self._whether_discrete[i]]
        self._multinomial._n_possibilities = [len(feats) for i, feats in enumerate(features)
                                              if self._whether_discrete[i]]
        self._multinomial.label_dict = label_dict

        labelled_x = [continuous_x[label].T for label in labels]

        self._gaussian._x, self._gaussian._y = continuous_x.T, y
        self._gaussian._labelled_x, self._gaussian._label_zip = labelled_x, labels
        self._gaussian._cat_counter, self._gaussian.label_dict = cat_counter, label_dict

        self.feed_sample_weight(sample_weight)
Code Example #15
File: CIFAR10.py Project: bitores/MachineLearning
def main():

    nn = NN()
    epoch = 50

    x, y = DataUtil.get_dataset("cifar10", "../_Data/cifar10.txt", quantized=True, one_hot=True)

    x = x.reshape(len(x), 3, 32, 32)
    nn.add("ConvReLU", (x.shape[1:], (32, 3, 3)))
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("MaxPool", ((3, 3),), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((64, 3, 3),))
    nn.add("ConvReLU", ((64, 3, 3),))
    nn.add("AvgPool", ((3, 3),), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("AvgPool", ((3, 3),), 2)
    nn.add("ReLU", (512,))
    nn.add("Identical", (64,), apply_bias=False)
    nn.add("Normalize", activation="ReLU")
    nn.add("Dropout")
    nn.add("CrossEntropy", (y.shape[1],))

    # nn.disable_timing()
    nn.fit(x, y, lr=0.001, epoch=epoch, train_rate=0.8,
           metrics=["acc"], record_period=1, verbose=4)
    nn.evaluate(x, y)
    nn.show_timing_log()
    nn.draw_logs()
Code Example #16
File: Mnist.py Project: bitores/MachineLearning
def main():
    x, y = DataUtil.get_dataset("mnist", "../../../_Data/mnist.txt", quantized=True, one_hot=True)
    x = x.reshape(len(x), 1, 28, 28)

    nn = NNDist()

    # nn.add("ReLU", (x.shape[1], 24))
    # nn.add("ReLU", (24, ))
    # nn.add("CrossEntropy", (y.shape[1], ))

    nn.add("ConvReLU", (x.shape[1:], (32, 3, 3)))
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("MaxPool", ((3, 3),), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((64, 3, 3),), std=0.01)
    nn.add("ConvReLU", ((64, 3, 3),), std=0.01)
    nn.add("AvgPool", ((3, 3),), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("ConvReLU", ((32, 3, 3),))
    nn.add("AvgPool", ((3, 3),), 2)
    nn.add("ReLU", (512,))
    nn.add("Identical", (64,))
    nn.add("Normalize", activation="ReLU")
    nn.add("Dropout")
    nn.add("CrossEntropy", (y.shape[1],))

    nn.optimizer = "Adam"
    nn.preview()
    nn.fit(x, y, verbose=2, do_log=True)
    nn.evaluate(x, y)
    nn.draw_results()
    nn.show_timing_log()
Code Example #17
    def __init__(self, im=None, om=None, one_hot=True):
        super(MnistGenerator, self).__init__(im, om)
        self._x, self._y = DataUtil.get_dataset("mnist",
                                                "../_Data/mnist.txt",
                                                quantized=True,
                                                one_hot=one_hot)
        self._x = self._x.reshape(-1, 28, 28)
        self._x_train, self._x_test = self._x[:1800], self._x[1800:]
        self._y_train, self._y_test = self._y[:1800], self._y[1800:]
Code Example #18
    def __init__(self, im=None, om=None):
        self._im, self._om = im, om
        self._cursor = self._indices = None
        self._x, self._y = DataUtil.get_dataset("mnist",
                                                "../../_Data/mnist.txt",
                                                quantized=True,
                                                one_hot=True)
        self._x = self._x.reshape(-1, 28, 28)
        self._x_train, self._x_test = self._x[:1800], self._x[1800:]
        self._y_train, self._y_test = self._y[:1800], self._y[1800:]
Code Example #19
File: Test.py Project: JiangGuanying/MachineLearning
def main():
    nn = NNDist()
    save = False
    load = False
    show_loss = True
    train_only = False
    visualize = False
    verbose = 2

    lr = 0.001
    lb = 0.001
    epoch = 10
    record_period = 1

    timing = Timing(enabled=True)
    timing_level = 1

    x, y = DataUtil.get_dataset("mnist", "../../_Data/mnist.txt", quantized=True, one_hot=True)
    x = x.reshape(len(x), 1, 28, 28)

    if not load:
        nn.add("ConvReLU", (x.shape[1:], (32, 3, 3)))
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("MaxPool", ((3, 3),), 1)
        nn.add("ConvNorm")
        nn.add("ConvDrop")
        nn.add("ConvReLU", ((64, 3, 3),))
        nn.add("ConvReLU", ((64, 3, 3),))
        nn.add("MaxPool", ((3, 3),), 1)
        nn.add("ConvNorm")
        nn.add("ConvDrop")
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("MaxPool", ((3, 3),), 1)
        nn.add("ReLU", (512,))
        nn.add("Identical", (64,))
        nn.add("Normalize")
        nn.add("Dropout")
        nn.add("CrossEntropy", (y.shape[1],))
        nn.optimizer = "Adam"
        nn.preview()
        nn.feed_timing(timing)
        nn.fit(x, y, lr=lr, lb=lb,
               epoch=epoch, batch_size=32, record_period=record_period,
               show_loss=show_loss, train_only=train_only,
               do_log=True, verbose=verbose, visualize=visualize)
        if save:
            nn.save()
        nn.draw_results()
    else:
        nn.load()
        nn.preview()
        nn.evaluate(x, y)

    timing.show_timing_log(timing_level)
Code Example #20
def main():
    save = False
    load = False
    show_loss = True
    train_only = False
    verbose = 2

    lr = 0.001
    lb = 0.001
    epoch = 10
    record_period = 1

    x, y = DataUtil.get_dataset("mnist", "../../../_Data/mnist.txt", quantized=True, one_hot=True)
    x = x.reshape(len(x), 1, 28, 28)

    if not load:
        nn = NNDist()

        # nn.add("ReLU", (x.shape[1], 24))
        # nn.add("ReLU", (24, ))
        # nn.add("CrossEntropy", (y.shape[1], ))

        nn.add("ConvReLU", (x.shape[1:], (32, 3, 3)))
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("MaxPool", ((3, 3),), 2)
        nn.add("ConvNorm")
        nn.add("ConvDrop")
        nn.add("ConvReLU", ((64, 3, 3),), std=0.01)
        nn.add("ConvReLU", ((64, 3, 3),), std=0.01)
        nn.add("AvgPool", ((3, 3),), 2)
        nn.add("ConvNorm")
        nn.add("ConvDrop")
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("AvgPool", ((3, 3),), 2)
        nn.add("ReLU", (512,))
        nn.add("Identical", (64,))
        nn.add("Normalize", activation="ReLU")
        nn.add("Dropout")
        nn.add("CrossEntropy", (y.shape[1],))
        nn.optimizer = "Adam"
        nn.preview(verbose=verbose)
        nn.fit(x, y, lr=lr, lb=lb,
               epoch=epoch, batch_size=256, record_period=record_period,
               show_loss=show_loss, train_only=train_only, do_log=True, tensorboard_verbose=1, verbose=verbose)
        if save:
            nn.save()
    else:
        nn = NNFrozen()
        nn.load()
        nn.preview()
        nn.evaluate(x, y)

    nn.show_timing_log()
Code Example #21
def main():

    nn = NNDist()
    epoch = 1000

    x, y = DataUtil.gen_xor(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(CrossEntropy((y.shape[1], )))

    nn.fit(x, y, epoch=epoch)
    nn.evaluate(x, y)
Code Example #22
File: Test.py Project: bitores/MachineLearning
def main():

    nn = NNDist()
    epoch = 1000

    x, y = DataUtil.gen_xor(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(CrossEntropy((y.shape[1],)))

    nn.fit(x, y, epoch=epoch)
    nn.evaluate(x, y)
Code Example #23
File: MultinomialNB.py Project: huchangchun/ML
    def feed_data(self, x, y, sample_weight=None):
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
        x, y, _, features, feat_dicts, label_dict = DataUtil.quantize_data(x, y, wc=np.array([False] * len(x[0])))
        cat_counter = np.bincount(y)  # count the number of samples in each of the two classes
        n_possibilities = [len(feats) for feats in features]  # record the number of distinct values each feature dimension takes
        labels = [y == value for value in range(len(cat_counter))]  # get the indices of each class's samples
        labelled_x = [x[ci].T for ci in labels]
        self._x, self._y = x, y
        self._labelled_x, self._label_zip = labelled_x, list(zip(labels, labelled_x))
        self._cat_counter, self._feat_dicts, self._n_possibilities = cat_counter, feat_dicts, n_possibilities
        self.label_dict = label_dict
        self.feed_sample_weight(sample_weight)
Code Example #24
    def feed_data(self, x, y, sample_weight=None):
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
        x, y, _, features, feat_dicts, label_dict = DataUtil.quantize_data(x, y, wc=np.array([False] * len(x[0])))
        cat_counter = np.bincount(y)
        n_possibilities = [len(feats) for feats in features]
        labels = [y == value for value in range(len(cat_counter))]
        labelled_x = [x[ci].T for ci in labels]

        self._x, self._y = x, y
        self._labelled_x, self._label_zip = labelled_x, list(zip(labels, labelled_x))
        self._cat_counter, self._feat_dicts, self._n_possibilities = cat_counter, feat_dicts, n_possibilities
        self.label_dict = label_dict
        self.feed_sample_weight(sample_weight)
Code Example #25
File: Test.py Project: bitores/MachineLearning
def main():

    nn = NNDist()
    epoch = 1000

    x, y = DataUtil.gen_spiral(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24,)))
    nn.add(CrossEntropy((y.shape[1],)))

    nn.fit(x, y, epoch=epoch, verbose=2, metrics=["acc", "f1_score"], train_rate=0.8)
    nn.draw_logs()
    nn.visualize_2d(x, y)
Code Example #26
def main():

    # # x, y = DataUtil.gen_xor(100, one_hot=False)
    # x, y = DataUtil.gen_spin(20, 4, 2, 2, one_hot=False)
    # # x, y = DataUtil.gen_two_clusters(n_dim=2, one_hot=False)
    # y[y == 0] = -1
    #
    # svm = SKSVM()
    # # svm = SKSVM(kernel="poly", degree=12)
    # svm.fit(x, y)
    # svm.estimate(x, y)
    # svm.visualize2d(x, y, padding=0.1, dense=400, emphasize=svm.support_)
    #
    # svm = SVM()
    # _logs = [_log[0] for _log in svm.fit(x, y, metrics=["acc"])]
    # svm.estimate(x, y)
    # svm.visualize2d(x, y, padding=0.1, dense=400, emphasize=svm["alpha"] > 0)

    (x_train,
     y_train), (x_test,
                y_test), *_ = DataUtil.get_dataset("mushroom",
                                                   "../_Data/mushroom.txt",
                                                   train_num=100,
                                                   quantize=True,
                                                   tar_idx=0)
    y_train[y_train == 0] = -1
    y_test[y_test == 0] = -1

    svm = SKSVM()
    svm.fit(x_train, y_train)
    svm.estimate(x_train, y_train)
    svm.estimate(x_test, y_test)

    svm = SVM()
    _logs = [
        _log[0] for _log in svm.fit(
            x_train, y_train, metrics=["acc"], x_test=x_test, y_test=y_test)
    ]
    # svm.fit(x_train, y_train, p=12)
    svm.estimate(x_train, y_train)
    svm.estimate(x_test, y_test)

    plt.figure()
    plt.title(svm.title)
    plt.plot(range(len(_logs)), _logs)
    plt.show()

    svm.show_timing_log()
Code Example #27
def main():

    x, y = DataUtil.gen_two_clusters(n_dim=2, dis=2.5, center=5, one_hot=False)
    y[y == 0] = -1

    svm = LinearSVM()
    svm.fit(x, y, epoch=10 ** 5, lr=1e-3)
    svm.estimate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400)

    perceptron = Perceptron()
    perceptron.fit(x, y)
    perceptron.estimate(x, y)
    perceptron.visualize2d(x, y)

    perceptron.show_timing_log()
Code Example #28
File: Test.py Project: powerL/MachineLearning
def main():

    nn = NN()
    epoch = 1000

    x, y = DataUtil.gen_spin(120, 4, 2, 6)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    nn.add(Softmax((y.shape[1], )))

    # nn.disable_timing()
    nn.fit(x, y, epoch=epoch, train_rate=0.8, metrics=["acc"])
    nn.evaluate(x, y)
    nn.visualize2d(x, y)
    nn.show_timing_log()
    nn.draw_logs()
Code Example #29
File: Test.py Project: bitores/MachineLearning
def main():

    nn = NN()
    epoch = 1000

    x, y = DataUtil.gen_spiral(120, 7, 7, 4)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    nn.add(CostLayer((y.shape[1],), "CrossEntropy"))

    # nn.disable_timing()
    nn.fit(x, y, epoch=epoch, train_rate=0.8, metrics=["acc"])
    nn.evaluate(x, y)
    nn.visualize2d(x, y)
    nn.show_timing_log()
    nn.draw_logs()
Code Example #30
File: Test.py Project: zxsted/MachineLearning-1
def main():

    nn = NN()
    epoch = 1000

    x, y = DataUtil.gen_spiral(120, 7, 7, 4)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    nn.add(CostLayer((y.shape[1], ), "CrossEntropy"))

    # nn.disable_timing()
    nn.fit(x, y, epoch=epoch, train_rate=0.8, metrics=["acc"])
    nn.evaluate(x, y)
    nn.visualize2d(x, y)
    nn.show_timing_log()
    nn.draw_logs()
Code Example #31
File: Test.py Project: CourAgeZ/MachineLearning
def main():

    nn = NN()
    epoch = 1000

    x, y = DataUtil.gen_spin(100, 4, 2, 6)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    if backend == "Basic":
        nn.add(Softmax((y.shape[1], )))
    else:
        nn.add(CrossEntropy((y.shape[1], )))

    nn.fit(x, y, epoch=epoch, verbose=2)
    nn.estimate(x, y)
    nn.visualize2d(x, y)
    nn.show_timing_log()
Code Example #32
def main():

    nn = NNDist()
    epoch = 1000

    x, y = DataUtil.gen_spiral(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    nn.add(CrossEntropy((y.shape[1], )))

    nn.fit(x,
           y,
           epoch=epoch,
           verbose=2,
           metrics=["acc", "f1_score"],
           train_rate=0.8)
    nn.draw_logs()
    nn.visualize_2d(x, y)
Code Example #33
File: Mnist.py Project: huchangchun/ML
def main():
    nn = NN()
    epoch = 10
    x, y = DataUtil.get_dataset("mnist",
                                "../Data/mnist.txt",
                                quantized=True,
                                one_hot=True)
    nn.add("ReLU", (x.shape[1], 24))
    nn.add("ReLU", (24, ))
    nn.add("CrossEntropy", (y.shape[1], ))

    nn.fit(x,
           y,
           lr=0.001,
           epoch=epoch,
           train_rate=0.8,
           metrics=["acc"],
           record_period=1,
           verbose=2)
Code Example #34
File: CIFAR10.py Project: oldtimestj/MachineLearning
def main():

    nn = NNDist()
    verbose = 2

    lr = 0.001
    epoch = 50
    record_period = 5

    timing = Timing(enabled=True)
    timing_level = 1
    nn.feed_timing(timing)

    x, y = DataUtil.get_dataset("cifar10",
                                "../../_Data/cifar10.txt",
                                quantized=True,
                                one_hot=True)
    x = x.reshape(len(x), 3, 32, 32)

    nn.add(ConvReLU((x.shape[1:], (32, 3, 3))))
    nn.add(ConvReLU(((32, 3, 3), )))
    nn.add(MaxPool(((3, 3), ), 2))
    nn.add(ConvReLU(((64, 3, 3), )))
    nn.add(ConvReLU(((64, 3, 3), )))
    nn.add(AvgPool(((3, 3), ), 2))
    nn.add(ConvReLU(((32, 3, 3), )))
    nn.add(ConvReLU(((32, 3, 3), )))
    nn.add(AvgPool(((3, 3), ), 2))
    nn.add(ReLU((512, )))
    nn.add(ReLU((64, )))
    nn.add(CrossEntropy((y.shape[1], )))

    nn.fit(x,
           y,
           lr=lr,
           epoch=epoch,
           record_period=record_period,
           verbose=verbose,
           train_rate=0.8)
    nn.draw_logs()

    timing.show_timing_log(timing_level)
Code Example #35
def main():

    nn = NNDist()
    epoch = 1000

    timing = Timing(enabled=True)
    timing_level = 1
    nn.feed_timing(timing)

    x, y = DataUtil.gen_spiral(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    nn.add(Softmax((y.shape[1], )))

    nn.fit(x, y, epoch=epoch)
    nn.visualize_2d(x, y)
    nn.evaluate(x, y)

    timing.show_timing_log(timing_level)
Code Example #36
File: Test.py Project: bitores/MachineLearning
def main():

    nn = NNDist()
    epoch = 1000

    timing = Timing(enabled=True)
    timing_level = 1
    nn.feed_timing(timing)

    x, y = DataUtil.gen_spiral(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24,)))
    nn.add(Softmax((y.shape[1],)))

    nn.fit(x, y, epoch=epoch, verbose=2, metrics=["acc", "f1_score"], train_rate=0.8)
    nn.draw_logs()
    nn.visualize_2d(x, y)

    timing.show_timing_log(timing_level)
Code Example #37
File: Test.py Project: bitores/MachineLearning
def main():

    nn = NNDist()
    epoch = 1000

    timing = Timing(enabled=True)
    timing_level = 1
    nn.feed_timing(timing)

    x, y = DataUtil.gen_spiral(100)

    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24,)))
    nn.add(Softmax((y.shape[1],)))

    nn.fit(x, y, epoch=epoch)
    nn.visualize_2d(x, y)
    nn.evaluate(x, y)

    timing.show_timing_log(timing_level)
Code Example #38
def main():
    nn = NNDist()
    save = False
    load = False

    lr = 0.001
    lb = 0.001
    epoch = 1000

    timing = Timing(enabled=True)
    timing_level = 1

    x, y = DataUtil.gen_spiral(50, 3, 3, 2.5)

    if not load:
        nn.build([x.shape[1], 6, 6, 6, y.shape[1]])
        nn.optimizer = "Adam"
        nn.preview()
        nn.feed_timing(timing)
        nn.fit(x,
               y,
               lr=lr,
               lb=lb,
               verbose=1,
               record_period=4,
               epoch=epoch,
               batch_size=128,
               train_only=True,
               draw_detailed_network=True,
               make_mp4=True,
               show_animation=True)
        if save:
            nn.save()
        nn.draw_results()
        nn.visualize2d()
    else:
        nn.load()
        nn.preview()
        nn.evaluate(x, y)

    timing.show_timing_log(timing_level)
Code Example #39
File: Vis.py Project: oldtimestj/MachineLearning
def main():
    nn = NNDist()
    save = False
    load = False

    lr = 0.001
    lb = 0.001
    epoch = 1000

    timing = Timing(enabled=True)
    timing_level = 1

    x, y = DataUtil.gen_xor()

    if not load:
        nn.add("ReLU", (x.shape[1], 2))
        nn.add("ReLU", (3, ))
        nn.add("ReLU", (3, ))
        nn.add("CrossEntropy", (y.shape[1], ))
        nn.optimizer = "Adam"
        nn.preview()
        nn.feed_timing(timing)
        nn.fit(x,
               y,
               lr=lr,
               lb=lb,
               verbose=1,
               epoch=epoch,
               batch_size=128,
               train_only=True,
               draw_detailed_network=True)
        if save:
            nn.save()
        nn.draw_results()
        nn.visualize2d()
    else:
        nn.load()
        nn.preview()
        nn.evaluate(x, y)

    timing.show_timing_log(timing_level)
Code Example #40
def main():
    timing = Timing(enabled=True)
    timing_level = 1

    x, y = DataUtil.get_dataset("mnist",
                                "../../_Data/mnist.txt",
                                quantized=True,
                                one_hot=True)
    x = x.reshape(len(x), 1, 28, 28)

    nn = NNDist()

    # nn.add("ReLU", (x.shape[1], 24))
    # nn.add("ReLU", (24, ))
    # nn.add("CrossEntropy", (y.shape[1], ))

    nn.add("ConvReLU", (x.shape[1:], (32, 3, 3)))
    nn.add("ConvReLU", ((32, 3, 3), ))
    nn.add("MaxPool", ((3, 3), ), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((64, 3, 3), ), std=0.01)
    nn.add("ConvReLU", ((64, 3, 3), ), std=0.01)
    nn.add("AvgPool", ((3, 3), ), 2)
    nn.add("ConvNorm")
    nn.add("ConvDrop")
    nn.add("ConvReLU", ((32, 3, 3), ))
    nn.add("ConvReLU", ((32, 3, 3), ))
    nn.add("AvgPool", ((3, 3), ), 2)
    nn.add("ReLU", (512, ))
    nn.add("Identical", (64, ))
    nn.add("Normalize", activation="ReLU")
    nn.add("Dropout")
    nn.add("CrossEntropy", (y.shape[1], ))

    nn.optimizer = "Adam"
    nn.preview()
    nn.fit(x, y, verbose=2, do_log=True, show_loss=True)
    nn.draw_results()

    timing.show_timing_log(timing_level)
Code Example #41
def main():
    """
    模型的重点:
    1.构建网络时w,b的初始化:输入层,隐层..,输出层(损失层)
    w,b是层与层之间连接的参数,我们把参数的分层比较桥,
    第一个桥的w的维度是数据的(特征维度,第一个隐层的神经元个数),b的维度就是(第一个隐层神经元个数)
    第二个桥的w的维度是(第一个隐层神经元的个数,第二个隐层的神经元的个数)
    ...
    最后一个桥是连接隐层和输出的,其维度是(最后一个隐层的神经元个数,输出层的类别)
    2.前向传导把所有激活值保存下来
    
    3.反向传播的梯度求解
    第一步有别与其他步,
    第一步调用CostLayers的bp_first进行bp算法的第一步得到损失层的输出对输入的梯度delta[-1]
    
    4.w,b的更新
    最后一步有别与前面的步骤,在更新w0的时候, W_(i-1)' = v^T_(i-1) * delta_(i)中,v用的是输入x_batch
   
    5.模型的优化
    采用了Adam的优化器,效果最稳定高效,在tf中也可以用这个
    
    6.模型的预测
    实际上是通过训练好的w,b一层层计算 X * W + b,并取最后一层输出作为预测值,然后取预测值中的最大值下标得到预测标签y^,
    最后通过y,y^求准确率
    """
    nn = NN()
    epoch = 1000
    x, y = DataUtil.gen_spiral(
        120, 7, 7, 4
    )  # x: (840, 2); y: (840, 7), 7 being the number of one-hot classes: [1,0,0,0,0,0,0] denotes class 0, [0,1,0,0,0,0,0] denotes class 1
    nn.add(ReLU((x.shape[1], 24)))
    nn.add(ReLU((24, )))
    nn.add(CostLayer((y.shape[1], ), "CrossEntropy"))

    nn.fit(x, y, epoch=epoch, train_rate=0.8, metrics=["acc", "f1_score"])
    nn.evaluate(x, y)
    nn.visualize2d(x, y)
    nn.show_timing_log()
    nn.draw_logs()
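A minimal NumPy sketch of points 1, 2 and 6 from the docstring above: the shape bookkeeping of the layer-to-layer "bridges", a forward pass that keeps every activation, and arg-max prediction. The layer sizes and the random batch are illustrative assumptions, not the repository's NN implementation:

import numpy as np

layer_sizes = [2, 24, 7]  # input dim, hidden width, n_classes, mirroring (x.shape[1], 24, y.shape[1])
weights = [np.random.randn(n_in, n_out) * 0.01           # bridge i: (units in layer i, units in layer i+1)
           for n_in, n_out in zip(layer_sizes, layer_sizes[1:])]
biases = [np.zeros(n_out) for n_out in layer_sizes[1:]]  # one bias vector per bridge

# Forward pass (point 2): keep all activations for back-propagation
activations = [np.random.randn(8, layer_sizes[0])]       # a batch of 8 inputs stands in for x_batch
for w, b in zip(weights, biases):
    activations.append(np.maximum(activations[-1].dot(w) + b, 0))  # ReLU(X * W + b)

# Prediction (point 6): take the arg-max of the last layer's outputs
y_pred = activations[-1].argmax(axis=1)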
Code Example #42
def vis_test():
    nn = NNDist()
    epoch = 1000
    record_period = 4
    make_mp4 = True

    timing = Timing(enabled=True)
    timing_level = 1
    x, y = DataUtil.gen_spiral(50, 3, 3, 2.5)
    nn.build([x.shape[1], 6, 6, 6, y.shape[1]])
    nn.optimizer = "Adam"
    nn.preview()
    nn.feed_timing(timing)
    nn.fit(x,
           y,
           verbose=1,
           record_period=record_period,
           epoch=epoch,
           train_only=True,
           draw_detailed_network=True,
           make_mp4=make_mp4,
           show_animation=True)
    nn.draw_results()
    timing.show_timing_log(timing_level)
Code Example #43
File: TestSVM.py Project: jiayanliu/MachineLearning
import os
import sys
root_path = os.path.abspath("../../../../")
if root_path not in sys.path:
    sys.path.append(root_path)

from copy import deepcopy

from Util.Util import DataUtil
from _Dist.NeuralNetworks._Tests.TestUtil import draw_acc
from _Dist.NeuralNetworks.b_TraditionalML.SVM import SVM
from _Dist.NeuralNetworks.f_AutoNN.DistNN import AutoAdvanced

base_params = {"model_param_settings": {"n_epoch": 30, "metric": "acc"}}
(x, y), (x_test, y_test) = DataUtil.gen_noisy_linear(n_dim=2, n_valid=2, test_ratio=0.01, one_hot=False)
svm = SVM(**deepcopy(base_params)).fit(
    x, y, x_test, y_test, snapshot_ratio=0).visualize2d(x_test, y_test)
nn = AutoAdvanced("NoisyLinear", **deepcopy(base_params)).fit(
    x, y, x_test, y_test, snapshot_ratio=0).visualize2d(x_test, y_test)
draw_acc(svm, nn)
Code Example #44
File: Test.py Project: bitores/MachineLearning
        self._opt = opt

    def fit(self, x, y):
        self._func_cache = self._func(x.shape[1], x, y)
        line_search = None if self._line_search is None else self._line_search(self._func_cache)
        self._opt_cache = self._opt(self._func_cache, line_search)
        self._opt_cache.opt()
        self._beta = self._func_cache._beta

    def predict(self, x, get_raw_results=False, **kwargs):
        pi = 1 / (1 + np.exp(-np.atleast_2d(x).dot(self._beta)))
        if get_raw_results:
            return pi
        return (pi >= 0.5).astype(np.double)

data, labels = DataUtil.gen_two_clusters(one_hot=False)
lr = LR(LM)
lr.fit(data, labels)
lr.evaluate(data, labels)
lr["func_cache"].refresh_cache(lr["beta"])
print("Loss:", lr["func_cache"].loss(lr["beta"]))
lr.visualize2d(data, labels, dense=400)
# Draw Training Curve
plt.figure()
plt.plot(np.arange(len(lr["opt_cache"].log))+1, lr["opt_cache"].log)
plt.show()


# Example3: RBFN Regression with BFGS & Armijo using "Automatic Differentiation"
# "RBFN" represents "Radial Basis Function Network". Typically, we use Gaussian Function for this example
class RBFN(Function):
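As a hedged illustration of the Gaussian basis function the comment above refers to, a minimal sketch in plain NumPy (not the repository's Function API; gaussian_rbf and its parameters are hypothetical names):

import numpy as np

def gaussian_rbf(x, centers, gamma=1.0):
    # phi_j(x) = exp(-gamma * ||x - c_j||^2) for each center c_j
    diff = x[:, None, :] - centers[None, :, :]           # (n_samples, n_centers, n_dim)
    return np.exp(-gamma * np.sum(diff ** 2, axis=-1))   # (n_samples, n_centers)

# An RBFN regression output is then a linear combination of the basis values:
# y_hat = gaussian_rbf(x, centers).dot(w)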
Code Example #45
if __name__ == '__main__':
    # _x, _y = gen_random()
    # test(_x, _y, algorithm="RF", epoch=1)
    # test(_x, _y, algorithm="RF", epoch=10)
    # test(_x, _y, algorithm="RF", epoch=50)
    # test(_x, _y, algorithm="SKRandomForest")
    # test(_x, _y, epoch=1)
    # test(_x, _y, epoch=1)
    # test(_x, _y, epoch=10)
    # _x, _y = gen_xor()
    # test(_x, _y, algorithm="RF", epoch=1)
    # test(_x, _y, algorithm="RF", epoch=10)
    # test(_x, _y, algorithm="RF", epoch=1000)
    # test(_x, _y, algorithm="SKAdaBoost")
    _x, _y = DataUtil.gen_spiral(size=20, n=4, n_class=2, one_hot=False)
    _y[_y == 0] = -1
    # test(_x, _y, clf="SKTree", epoch=10)
    # test(_x, _y, clf="SKTree", epoch=1000)
    # test(_x, _y, algorithm="RF", epoch=10)
    test(_x, _y, algorithm="RF", epoch=30, n_cores=4)
    test(_x, _y, algorithm="SKAdaBoost")

    train_num = 6000
    (x_train, y_train), (x_test, y_test), *_ = DataUtil.get_dataset(
        "mushroom", "../_Data/mushroom.txt", n_train=train_num, quantize=True, tar_idx=0)
    y_train[y_train == 0] = -1
    y_test[y_test == 0] = -1

    cv_test(x_train, y_train, x_test, y_test, clf="MNB", epoch=1)
    cv_test(x_train, y_train, x_test, y_test, clf="MNB", epoch=5)
Code Example #46
File: Basic.py Project: jiayanliu/MachineLearning
        for i, n_unit in enumerate(self.hidden_units):
            net = self._fully_connected_linear(net, [current_dimension, n_unit], i)
            net = self._build_layer(i, net)
            current_dimension = n_unit
        appendix = "_final_projection"
        fc_shape = self.hidden_units[-1] if self.hidden_units else current_dimension
        self._output = self._fully_connected_linear(net, [fc_shape, self.n_class], appendix)


if __name__ == '__main__':
    from Util.Util import DataUtil

    for generator in (DataUtil.gen_xor, DataUtil.gen_spiral, DataUtil.gen_nine_grid):
        x_train, y_train = generator(size=1000, one_hot=False)
        x_test, y_test = generator(size=100, one_hot=False)
        nn = Basic(model_param_settings={"n_epoch": 200}).scatter2d(x_train, y_train).fit(
            x_train, y_train, x_test, y_test, snapshot_ratio=0
        ).draw_losses().visualize2d(
            x_train, y_train, title="Train"
        ).visualize2d(
            x_test, y_test, padding=2, title="Test"
        )

    for size in (256, 1000, 10000):
        (x_train, y_train), (x_test, y_test) = DataUtil.gen_noisy_linear(
            size=size, n_dim=2, n_valid=2, test_ratio=100 / size, one_hot=False
        )
        nn = Basic(model_param_settings={"n_epoch": 200}).scatter2d(x_train, y_train).fit(
            x_train, y_train, x_test, y_test, snapshot_ratio=0
        ).draw_losses().visualize2d(x_train, y_train, title="Train").visualize2d(x_test, y_test, title="Test")
Code Example #47
File: CIFAR10.py Project: bitores/MachineLearning
def main():
    log = ""

    nn = NNDist()
    save = False
    load = False
    show_loss = True
    train_only = False
    do_log = True
    verbose = 4

    lr = 0.001
    lb = 0.001
    epoch = 10
    record_period = 1
    weight_scale = 0.001
    optimizer = "Adam"
    nn.optimizer = optimizer

    x, y = DataUtil.get_dataset("cifar10", "../../../_Data/cifar10.txt", quantized=True, one_hot=True)

    draw = True
    img_shape = (3, 32, 32)
    x = x.reshape(len(x), *img_shape)

    if not load:
        nn.add("ConvReLU", (x.shape[1:], (32, 3, 3)))
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("MaxPool", ((3, 3),), 2)
        nn.add("ConvNorm")
        nn.add("ConvDrop")
        nn.add("ConvReLU", ((64, 3, 3),), std=0.01)
        nn.add("ConvReLU", ((64, 3, 3),), std=0.01)
        nn.add("AvgPool", ((3, 3),), 2)
        nn.add("ConvNorm")
        nn.add("ConvDrop")
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("ConvReLU", ((32, 3, 3),))
        nn.add("AvgPool", ((3, 3),), 2)
        nn.add("ReLU", (512, ))
        nn.add("Identical", (64, ), apply_bias=False)
        nn.add("Normalize", activation="ReLU")
        nn.add("Dropout")
        nn.add("CrossEntropy", (y.shape[1], ))

        nn.preview()
        nn.fit(x, y,
               lr=lr, lb=0, epoch=epoch, weight_scale=weight_scale,
               record_period=record_period, show_loss=show_loss, train_only=train_only,
               do_log=do_log, verbose=verbose)
        nn.draw_results()

        if save:
            nn.save()
        if draw:
            # nn.draw_conv_weights()
            nn.draw_conv_series(x[:3], img_shape)
    else:
        nn.load()
        print("Optimizer: " + nn.optimizer)
        nn.preview()
        nn.fit(x, y, epoch=1, lr=lr, lb=lb, verbose=verbose)
        # nn.fit(x, y, x_cv, y_cv, x_test, y_test, epoch=1, lr=lr, lb=lb, verbose=verbose)
        if draw:
            # nn.draw_conv_weights()
            nn.draw_conv_series(x[:3], img_shape)
        nn.draw_results()

        acc = nn.evaluate(x, y)[0]
        log += "Test set Accuracy  : {:12.6} %".format(100 * acc) + "\n"
        print("=" * 30 + "\n" + "Results\n" + "-" * 30)
        print(log)

    nn.show_timing_log()
Code Example #48
File: a_Basic.py Project: jiayanliu/MachineLearning
import copy
import unittest

import numpy as np

from Util.Util import DataUtil
from _Dist.NeuralNetworks.c_BasicNN.NN import Basic
from _Dist.NeuralNetworks.b_TraditionalML.SVM import LinearSVM, SVM
from _Dist.NeuralNetworks._Tests._UnitTests.UnitTestUtil import clear_cache


base_params = {
    "name": "UnitTest",
    "model_param_settings": {"n_epoch": 3, "max_epoch": 5}
}
svm = SVM(**copy.deepcopy(base_params))
nn = Basic(**copy.deepcopy(base_params))
linear_svm = LinearSVM(**copy.deepcopy(base_params))
train_set, cv_set, test_set = DataUtil.gen_special_linear(1000, 2, 2, 2, one_hot=False)


class TestSVM(unittest.TestCase):
    def test_00_train(self):
        self.assertIsInstance(
            svm.fit(*train_set, *cv_set, verbose=0), SVM,
            msg="Train failed"
        )

    def test_01_predict(self):
        self.assertIs(svm.predict(train_set[0]).dtype, np.dtype("float32"), "Predict failed")
        self.assertIs(svm.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Predict classes failed")

    def test_02_evaluate(self):
        self.assertEqual(len(svm.evaluate(*train_set, *cv_set)), 3, "Evaluation failed")
Code Example #49
File: Test.py Project: jiayanliu/MachineLearning
import os
import sys
root_path = os.path.abspath("../../../../")
if root_path not in sys.path:
    sys.path.append(root_path)

from Util.Util import DataUtil
from _Dist.NeuralNetworks.c_BasicNN.NN import Basic
from _Dist.NeuralNetworks.e_AdvancedNN.NN import Advanced
from _Dist.NeuralNetworks._Tests.TestUtil import draw_acc

x_cv = y_cv = None
(x, y), (x_test, y_test) = DataUtil.gen_noisy_linear(one_hot=False)


def block_test(generator, **kwargs):
    (x_, y_), (x_test_, y_test_) = generator(**kwargs, one_hot=False)

    basic_ = Basic(**base_params).fit(x_, y_, x_test_, y_test_, snapshot_ratio=0)
    advanced_params["model_structure_settings"]["use_pruner"] = False
    wnd_dndf_ = Advanced(**advanced_params).fit(x_, y_, x_test_, y_test_, snapshot_ratio=0)
    advanced_params["model_structure_settings"]["use_pruner"] = True
    wnd_dndf_pruned_ = Advanced(**advanced_params).fit(x_, y_, x_test_, y_test_, snapshot_ratio=0)

    print("BasicNN              ", end="")
    basic_.evaluate(x_, y_, x_cv, y_cv, x_test_, y_test_)
    print("WnD & DNDF           ", end="")
    wnd_dndf_.evaluate(x_, y_, x_cv, y_cv, x_test_, y_test_)
    print("WnD & DNDF & Pruner  ", end="")
    wnd_dndf_pruned_.evaluate(x_, y_, x_cv, y_cv, x_test_, y_test_)
Code Example #50
File: KP.py Project: bitores/MachineLearning
        err = -y_batch * (x_batch.dot(self._alpha) + self._b) * sample_weight_batch
        mask = err >= 0  # type: np.ndarray
        if not np.any(mask):
            self._model_grads = [None, None]
        else:
            delta = -y_batch[mask] * sample_weight_batch[mask]
            self._model_grads = [
                np.sum(delta[..., None] * x_batch[mask], axis=0),
                np.sum(delta)
            ]
        return np.sum(err[mask])


if __name__ == '__main__':
    # xs, ys = DataUtil.gen_two_clusters(center=5, dis=1, scale=2, one_hot=False)
    xs, ys = DataUtil.gen_spiral(20, 4, 2, 2, one_hot=False)
    # xs, ys = DataUtil.gen_xor(one_hot=False)
    ys[ys == 0] = -1

    animation_params = {
        "show": False, "mp4": False, "period": 50,
        "dense": 400, "draw_background": True
    }

    kp = KP(animation_params=animation_params)
    kp.fit(xs, ys, kernel="poly", p=12, epoch=200)
    kp.evaluate(xs, ys)
    kp.visualize2d(xs, ys, dense=400)

    kp = GDKP(animation_params=animation_params)
    kp.fit(xs, ys, kernel="poly", p=12, epoch=10000)
Code Example #51
File: Test.py Project: jiayanliu/MachineLearning
import os
import sys
root_path = os.path.abspath("../../../../")
if root_path not in sys.path:
    sys.path.append(root_path)

import numpy as np
import matplotlib.pyplot as plt

from Util.Util import DataUtil
from _Dist.NeuralNetworks._Tests.Pruner.Advanced import Advanced

# (x, y), (x_test, y_test), *_ = DataUtil.get_dataset("mnist", "_Data/mnist.txt", n_train=1600, quantized=True)
(x, y), (x_test, y_test) = DataUtil.gen_noisy_linear(n_dim=100, n_valid=5, one_hot=False)

data_info = {
    "numerical_idx": [True] * 100 + [False],
    "categorical_columns": []
}

# nn = Advanced(
#     "NoisyLinear",
#     data_info=data_info,
#     model_param_settings={
#         "n_epoch": 40
#     },
#     model_structure_settings={
#         "use_wide_network": False,
#         "use_pruner": True,
#         "pruner_params": {
#             "prune_method": "surgery",
Code Example #52
File: Mnist.py Project: bitores/MachineLearning
    def __init__(self, im=None, om=None, one_hot=True):
        super(MnistGenerator, self).__init__(im, om)
        self._x, self._y = DataUtil.get_dataset("mnist", "../_Data/mnist.txt", quantized=True, one_hot=one_hot)
        self._x = self._x.reshape(-1, 28, 28)
        self._x_train, self._x_test = self._x[:1800], self._x[1800:]
        self._y_train, self._y_test = self._y[:1800], self._y[1800:]
Code Example #53
                plt.bar(tmp_x - 0.35 * c, self._data[j][c, :], width=0.35,
                        facecolor=colors[self.label_dict[c]], edgecolor="white",
                        label=u"class: {}".format(self.label_dict[c]))
            plt.xticks([i for i in range(sj + 2)], [""] + [rev_dict[i] for i in range(sj)] + [""])
            plt.ylim(0, 1.0)
            plt.legend()
            if not save:
                plt.show()
            else:
                plt.savefig("d{}".format(j + 1))

if __name__ == '__main__':
    import time

    train_num = 6000
    (x_train, y_train), (x_test, y_test) = DataUtil.get_dataset(
        "mushroom", "../../_Data/mushroom.txt", n_train=train_num, tar_idx=0)

    learning_time = time.time()
    nb = MultinomialNB()
    nb.fit(x_train, y_train)
    learning_time = time.time() - learning_time
    estimation_time = time.time()
    nb.evaluate(x_train, y_train)
    nb.evaluate(x_test, y_test)
    estimation_time = time.time() - estimation_time
    print(
        "Model building  : {:12.6} s\n"
        "Estimation      : {:12.6} s\n"
        "Total           : {:12.6} s".format(
            learning_time, estimation_time,
            learning_time + estimation_time
Code Example #54
def main(visualize=True):
    # x, y = DataUtil.get_dataset("balloon1.0(en)", "../_Data/balloon1.0(en).txt")
    x, y = DataUtil.get_dataset(
        "test", "/Users/lily/Documents/MachineLearning-master/_Data/test.txt")
    fit_time = time.time()
    tree = CartTree(whether_continuous=[False] * 4)
    tree.fit(x, y, train_only=True)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x, y)
    estimate_time = time.time() - estimate_time
    print("Model building  : {:12.6} s\n"
          "Estimation      : {:12.6} s\n"
          "Total           : {:12.6} s".format(fit_time, estimate_time,
                                               fit_time + estimate_time))
    if visualize:
        tree.visualize()

    train_num = 6000
    (x_train, y_train), (x_test, y_test), *_ = DataUtil.get_dataset(
        "mushroom",
        "/Users/lily/Documents/MachineLearning-master/_Data/mushroom.txt",
        tar_idx=0,
        train_num=train_num)
    fit_time = time.time()
    tree = C45Tree()
    tree.fit(x_train, y_train)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x_train, y_train)
    tree.evaluate(x_test, y_test)
    estimate_time = time.time() - estimate_time
    print("Model building  : {:12.6} s\n"
          "Estimation      : {:12.6} s\n"
          "Total           : {:12.6} s".format(fit_time, estimate_time,
                                               fit_time + estimate_time))
    if visualize:
        tree.visualize()

    x, y = DataUtil.gen_xor(one_hot=False)
    fit_time = time.time()
    tree = CartTree()
    tree.fit(x, y, train_only=True)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x, y, n_cores=1)
    estimate_time = time.time() - estimate_time
    print("Model building  : {:12.6} s\n"
          "Estimation      : {:12.6} s\n"
          "Total           : {:12.6} s".format(fit_time, estimate_time,
                                               fit_time + estimate_time))
    if visualize:
        tree.visualize2d(x, y, dense=1000)
        tree.visualize()

    wc = [False] * 16
    continuous_lst = [0, 5, 9, 11, 12, 13, 14]
    for _cl in continuous_lst:
        wc[_cl] = True

    train_num = 2000
    (x_train, y_train), (x_test, y_test), *_ = DataUtil.get_dataset(
        "bank1.0",
        "/Users/lily/Documents/MachineLearning-master/_Data/bank1.0.txt",
        train_num=train_num,
        quantize=True)
    fit_time = time.time()
    tree = CartTree()
    tree.fit(x_train, y_train)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x_test, y_test)
    estimate_time = time.time() - estimate_time
    print("Model building  : {:12.6} s\n"
          "Estimation      : {:12.6} s\n"
          "Total           : {:12.6} s".format(fit_time, estimate_time,
                                               fit_time + estimate_time))
    if visualize:
        tree.visualize()

    tree.show_timing_log()
Code Example #55
File: MergedNB.py Project: bitores/MachineLearning
            if discrete:
                idx += 1
        return x

if __name__ == '__main__':
    import time

    whether_continuous = [False] * 16
    continuous_lst = [0, 5, 9, 11, 12, 13, 14]
    for cl in continuous_lst:
        whether_continuous[cl] = True

    train_num = 40000

    data_time = time.time()
    (x_train, y_train), (x_test, y_test) = DataUtil.get_dataset(
        "bank1.0", "../../_Data/bank1.0.txt", n_train=train_num)
    data_time = time.time() - data_time

    learning_time = time.time()
    nb = MergedNB(whether_continuous=whether_continuous)
    nb.fit(x_train, y_train)
    learning_time = time.time() - learning_time

    estimation_time = time.time()
    nb.evaluate(x_train, y_train)
    nb.evaluate(x_test, y_test)
    estimation_time = time.time() - estimation_time

    print(
        "Data cleaning   : {:12.6} s\n"
        "Model building  : {:12.6} s\n"
Code Example #56
File: TestSVM.py Project: bitores/MachineLearning
def main():

    # x, y = DataUtil.gen_xor(100, one_hot=False)
    x, y = DataUtil.gen_spiral(20, 4, 2, 2, one_hot=False)
    # x, y = DataUtil.gen_two_clusters(n_dim=2, one_hot=False)
    y[y == 0] = -1

    animation_params = {
        "show": False, "mp4": False, "period": 50,
        "dense": 400, "draw_background": True
    }

    svm = SVM(animation_params=animation_params)
    svm.fit(x, y, kernel="poly", p=12, epoch=600)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400, emphasize=svm["alpha"] > 0)

    svm = GDSVM(animation_params=animation_params)
    svm.fit(x, y, kernel="poly", p=12, epoch=10000)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400, emphasize=svm["alpha"] > 0)

    if TorchSVM is not None:
        svm = TorchSVM(animation_params=animation_params)
        svm.fit(x, y, kernel="poly", p=12)
        svm.evaluate(x, y)
        svm.visualize2d(x, y, padding=0.1, dense=400, emphasize=svm["alpha"] > 0)

    svm = TFSVM()
    svm.fit(x, y)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400)

    svm = SKSVM()
    # svm = SKSVM(kernel="poly", degree=12)
    svm.fit(x, y)
    svm.evaluate(x, y)
    svm.visualize2d(x, y, padding=0.1, dense=400, emphasize=svm.support_)

    (x_train, y_train), (x_test, y_test), *_ = DataUtil.get_dataset(
        "mushroom", "../_Data/mushroom.txt", n_train=100, quantize=True, tar_idx=0)
    y_train[y_train == 0] = -1
    y_test[y_test == 0] = -1

    svm = SKSVM()
    svm.fit(x_train, y_train)
    svm.evaluate(x_train, y_train)
    svm.evaluate(x_test, y_test)

    svm = TFSVM()
    svm.fit(x_train, y_train)
    svm.evaluate(x_train, y_train)
    svm.evaluate(x_test, y_test)

    if TorchSVM is not None:
        svm = TorchSVM()
        svm.fit(x_train, y_train)
        svm.evaluate(x_train, y_train)
        svm.evaluate(x_test, y_test)

        svm = TorchSVM()
        logs = [log[0] for log in svm.fit(
            x_train, y_train, metrics=["acc"], x_test=x_test, y_test=y_test
        )]
        svm.evaluate(x_train, y_train)
        svm.evaluate(x_test, y_test)

        plt.figure()
        plt.title(svm.title)
        plt.plot(range(len(logs)), logs)
        plt.show()

    svm = SVM()
    logs = [log[0] for log in svm.fit(
        x_train, y_train, metrics=["acc"], x_test=x_test, y_test=y_test
    )]
    svm.evaluate(x_train, y_train)
    svm.evaluate(x_test, y_test)

    plt.figure()
    plt.title(svm.title)
    plt.plot(range(len(logs)), logs)
    plt.show()

    svm = GDSVM()
    logs = [log[0] for log in svm.fit(
        x_train, y_train, metrics=["acc"], x_test=x_test, y_test=y_test
    )]
    svm.evaluate(x_train, y_train)
    svm.evaluate(x_test, y_test)

    plt.figure()
    plt.title(svm.title)
    plt.plot(range(len(logs)), logs)
    plt.show()

    svm.show_timing_log()
Code Example #57
File: TestTree.py Project: bitores/MachineLearning
def main(visualize=True):
    # x, y = DataUtil.get_dataset("balloon1.0(en)", "../_Data/balloon1.0(en).txt")
    x, y = DataUtil.get_dataset("test", "../_Data/test.txt")
    fit_time = time.time()
    tree = CartTree(whether_continuous=[False] * 4)
    tree.fit(x, y, train_only=True)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x, y)
    estimate_time = time.time() - estimate_time
    print(
        "Model building  : {:12.6} s\n"
        "Estimation      : {:12.6} s\n"
        "Total           : {:12.6} s".format(
            fit_time, estimate_time,
            fit_time + estimate_time
        )
    )
    if visualize:
        tree.visualize()

    train_num = 6000
    (x_train, y_train), (x_test, y_test), *_ = DataUtil.get_dataset(
        "mushroom", "../_Data/mushroom.txt", tar_idx=0, n_train=train_num)
    fit_time = time.time()
    tree = C45Tree()
    tree.fit(x_train, y_train)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x_train, y_train)
    tree.evaluate(x_test, y_test)
    estimate_time = time.time() - estimate_time
    print(
        "Model building  : {:12.6} s\n"
        "Estimation      : {:12.6} s\n"
        "Total           : {:12.6} s".format(
            fit_time, estimate_time,
            fit_time + estimate_time
        )
    )
    if visualize:
        tree.visualize()

    x, y = DataUtil.gen_xor(one_hot=False)
    fit_time = time.time()
    tree = CartTree()
    tree.fit(x, y, train_only=True)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x, y, n_cores=1)
    estimate_time = time.time() - estimate_time
    print(
        "Model building  : {:12.6} s\n"
        "Estimation      : {:12.6} s\n"
        "Total           : {:12.6} s".format(
            fit_time, estimate_time,
            fit_time + estimate_time
        )
    )
    if visualize:
        tree.visualize2d(x, y, dense=1000)
        tree.visualize()

    wc = [False] * 16
    continuous_lst = [0, 5, 9, 11, 12, 13, 14]
    for _cl in continuous_lst:
        wc[_cl] = True

    train_num = 2000
    (x_train, y_train), (x_test, y_test), *_ = DataUtil.get_dataset(
        "bank1.0", "../_Data/bank1.0.txt", n_train=train_num, quantize=True)
    fit_time = time.time()
    tree = CartTree()
    tree.fit(x_train, y_train)
    fit_time = time.time() - fit_time
    if visualize:
        tree.view()
    estimate_time = time.time()
    tree.evaluate(x_test, y_test)
    estimate_time = time.time() - estimate_time
    print(
        "Model building  : {:12.6} s\n"
        "Estimation      : {:12.6} s\n"
        "Total           : {:12.6} s".format(
            fit_time, estimate_time,
            fit_time + estimate_time
        )
    )
    if visualize:
        tree.visualize()

    tree.show_timing_log()
Code Example #58
    def predict(self, x, bound=None):
        if bound is None:
            _matrix = np.array([_tree.predict(x) for _tree in self._trees]).T
        else:
            _matrix = np.array(
                [_tree.predict(x) for _tree in self._trees[:bound]]).T
        return np.array([RandomForest.most_appearance(rs) for rs in _matrix])


if __name__ == '__main__':
    import time

    train_num = 100
    (x_train,
     y_train), (x_test, y_test) = DataUtil.get_dataset("mushroom",
                                                       "../_Data/mushroom.txt",
                                                       train_num=train_num,
                                                       tar_idx=0)

    learning_time = time.time()
    forest = RandomForest()
    forest.fit(x_train, y_train)
    learning_time = time.time() - learning_time
    estimation_time = time.time()
    forest.estimate(x_train, y_train)
    forest.estimate(x_test, y_test)
    estimation_time = time.time() - estimation_time
    print("Model building  : {:12.6} s\n"
          "Estimation      : {:12.6} s\n"
          "Total           : {:12.6} s".format(
              learning_time, estimation_time, learning_time + estimation_time))
    forest.show_timing_log()
Code Example #59
                plt.bar(tmp_x - 0.35 * c, self._data[j][c, :], width=0.35,
                        facecolor=colors[self.label_dict[c]], edgecolor="white",
                        label=u"class: {}".format(self.label_dict[c]))
            plt.xticks([i for i in range(sj + 2)], [""] + [rev_dict[i] for i in range(sj)] + [""])
            plt.ylim(0, 1.0)
            plt.legend()
            if not save:
                plt.show()
            else:
                plt.savefig("d{}".format(j + 1))

if __name__ == '__main__':
    import time

    for dataset in ("balloon1.0", "balloon1.5"):
        _x, _y = DataUtil.get_dataset(dataset, "../../_Data/{}.txt".format(dataset))
        learning_time = time.time()
        nb = MultinomialNB()
        nb.fit(_x, _y)
        learning_time = time.time() - learning_time
        print("=" * 30)
        print(dataset)
        print("-" * 30)
        estimation_time = time.time()
        nb.evaluate(_x, _y)
        estimation_time = time.time() - estimation_time
        print(
            "Model building  : {:12.6} s\n"
            "Estimation      : {:12.6} s\n"
            "Total           : {:12.6} s".format(
                learning_time, estimation_time,