Example #1
def reproducible_network_train(seed=0, epochs=500, **additional_params):
    """
    Train a Gradient Descent based neural network on the XOR problem in a
    reproducible way and return the trained network.

    Parameters
    ----------
    seed : int
        Random State seed number for reproducibility. Defaults to ``0``.
    epochs : int
        Number of epochs for training. Defaults to ``500``.
    **additional_params
        Additional parameters for the neural network.

    Returns
    -------
    GradientDescent instance
        Returns trained network.
    """
    environment.reproducible(seed)

    xavier_normal = init.XavierNormal()
    tanh_weight1 = xavier_normal.sample((2, 5), return_array=True)
    tanh_weight2 = xavier_normal.sample((5, 1), return_array=True)

    network = algorithms.GradientDescent(
        connection=[
            layers.Input(2),
            layers.Tanh(5, weight=tanh_weight1),
            layers.Tanh(1, weight=tanh_weight2),
        ],
        batch_size='all',
        **additional_params
    )
    network.train(xor_input_train, xor_target_train, epochs=epochs)
    return network
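The function above references xor_input_train and xor_target_train that are defined elsewhere in the original module. A minimal sketch, assuming a standard XOR dataset (the exact names and values are an assumption), of how it could be called:

import numpy as np

# Assumed XOR dataset; the original module defines xor_input_train and
# xor_target_train elsewhere, so these exact values are a guess.
xor_input_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
xor_target_train = np.array([[0], [1], [1], [0]])

network = reproducible_network_train(seed=0, epochs=500)
print(network.predict(xor_input_train).round(2))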
Example #2
    def fit(self, data, target):
        data_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        data = data_scaler.fit_transform(data)
        target = target_scaler.fit_transform(target.reshape(-1, 1))

        environment.reproducible()
        x_train, x_test, y_train, y_test = train_test_split(data, target,
                                                            train_size=0.85)
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test
        print(x_test)

        cgnet = algorithms.ConjugateGradient(
            connection=[
                layers.Input(2),
                layers.Sigmoid(10),
                layers.Sigmoid(1),
            ],
            search_method='golden',
            show_epoch=25,
            verbose=True,
            addons=[algorithms.LinearSearch],
        )
        cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
        self._model = cgnet
        return self
Example #3
def ANN(X_train, X_test, y_train, y_test, X_dummy):
    environment.reproducible()
    target_scaler = OneHotEncoder()
    net = algorithms.Momentum(
        [
            layers.Input(17),
            layers.Relu(100),
            layers.Relu(70),
            layers.Softmax(32),
        ],
        error='categorical_crossentropy',
        step=0.01,
        verbose=True,
        shuffle_data=True,
        momentum=0.99,
        nesterov=True,
    )
    # converting vector to one hot encoding
    d1 = int(y_train.shape[0])
    d2 = int(y_test.shape[0])
    Y_train = np.zeros((d1, 32))
    Y_test = np.zeros((d2, 32))
    Y_train[np.arange(d1), y_train] = 1
    Y_test[np.arange(d2), y_test] = 1

    net.architecture()
    net.train(X_train, Y_train, X_test, Y_test, epochs=20)
    y_predicted = net.predict(X_test).argmax(axis=1)
    y_dummy = net.predict(X_dummy).argmax(axis=1)
    #print 'predicted values'
    #print y_predicted
    Y_test = np.asarray(Y_test.argmax(axis=1)).reshape(len(Y_test))
    #print(metrics.classification_report(Y_test, y_predicted))
    return y_dummy, y_predicted, metrics.accuracy_score(Y_test, y_predicted)
Example #4
    def test_reproducible_environment_math_library(self):
        environment.reproducible(seed=0)
        x1 = random.random()

        environment.reproducible(seed=0)
        x2 = random.random()

        self.assertAlmostEqual(x1, x2)
Example #5
def TrainANN(model, x, y, e=1000):
    environment.reproducible()
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        train_size=0.8,
                                                        test_size=0.2)
    model.train(x_train, y_train, x_test, y_test, epochs=e)
    return x_train, x_test, y_train, y_test
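TrainANN expects an already constructed NeuPy network as its first argument. A hedged sketch, assuming toy regression data and an arbitrary layer layout, of how a model could be built and passed in:

import numpy as np
from neupy import algorithms, layers

# Assumed toy data and network layout, used only to illustrate the call.
x = np.random.random((200, 4))
y = np.random.random((200, 1))

model = algorithms.GradientDescent(
    [layers.Input(4), layers.Sigmoid(10), layers.Linear(1)],
    verbose=False,
)
x_train, x_test, y_train, y_test = TrainANN(model, x, y, e=50)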
Example #6
    def test_reproducible_environment_numpy_library(self):
        environment.reproducible(seed=0)
        x1 = np.random.random((10, 10))

        environment.reproducible(seed=0)
        x2 = np.random.random((10, 10))

        np.testing.assert_array_almost_equal(x1, x2)
Example #7
    def test_reproducible_environment_numpy_library(self):
        environment.reproducible(seed=0)
        x1 = np.random.random((10, 10))

        environment.reproducible(seed=0)
        x2 = np.random.random((10, 10))

        np.testing.assert_array_almost_equal(x1, x2)
Example #8
    def test_reproducible_environment_math_library(self):
        environment.reproducible(seed=0)
        x1 = random.random()

        environment.reproducible(seed=0)
        x2 = random.random()

        self.assertAlmostEqual(x1, x2)
Example #9
def PNN(X_train, X_test, y_train, y_test, X_dummy):
    environment.reproducible()
    pnn = algorithms.PNN(std=0.1, verbose=False)
    pnn.train(X_train, y_train)
    #print 'done trainin'
    y_predicted = pnn.predict(X_test)
    y_dummy = pnn.predict(X_dummy)
    #print y_predicted
    return y_dummy, y_predicted, metrics.accuracy_score(y_test, y_predicted)
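The PNN example above relies on train/test splits prepared by its caller. A self-contained sketch of the same pattern, using the sklearn digits data and an std value chosen only as assumptions:

from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split
from neupy import algorithms, environment

environment.reproducible()
digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data,
                                                    digits.target,
                                                    test_size=0.3)

pnn = algorithms.PNN(std=10, verbose=False)  # std picked for 0-16 pixel values
pnn.train(X_train, y_train)
print(metrics.accuracy_score(y_test, pnn.predict(X_test)))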
Example #10
    def test_gain_relu_he_normal_scale(self):
        environment.reproducible()
        he_initializer = init.HeNormal(gain=1)
        sample_1 = he_initializer.sample((3, 2))

        environment.reproducible()
        he_initializer = init.HeNormal(gain='relu')
        sample_2 = he_initializer.sample((3, 2))

        self.assertAlmostEqual(np.mean(sample_2 / sample_1), math.sqrt(2))
Example #11
File: base.py  Project: EdwardBetts/neupy
    def setUp(self):
        environment.reproducible(seed=self.random_seed)

        if not self.verbose:
            logging.disable(logging.CRITICAL)

        if self.use_sandbox_mode:
            # Speed up unit tests. In general, all tasks are very
            # simple, so some Theano optimizations can be redundant.
            environment.sandbox()
Example #12
    def test_gd(self):
        environment.reproducible()
        x_train, _, y_train, _ = simple_classification()

        network = algorithms.BaseGradientDescent(
            layers.Input(10) > layers.Tanh(20) > layers.Tanh(1),
            step=0.1,
            verbose=False)
        network.train(x_train, y_train, epochs=100)
        self.assertLess(network.errors.last(), 0.05)
Example #13
    def test_gain_relu_he_normal_scale(self):
        environment.reproducible()
        he_initializer = init.HeNormal(gain=1)
        sample_1 = he_initializer.sample((3, 2))

        environment.reproducible()
        he_initializer = init.HeNormal(gain='relu')
        sample_2 = he_initializer.sample((3, 2))

        self.assertAlmostEqual(np.mean(sample_2 / sample_1), math.sqrt(2))
Example #14
    def setUp(self):
        environment.reproducible(seed=self.random_seed)

        if not self.verbose:
            logging.disable(logging.CRITICAL)

        if self.use_sandbox_mode:
            # Speed up unit tests. In general, all tasks are very
            # simple, so some Theano optimizations can be redundant.
            environment.sandbox()
Example #15
File: base.py  Project: Xnsam/neupy
    def setUp(self):
        environment.reproducible(seed=self.random_seed)

        if not self.verbose:
            logging.disable(logging.CRITICAL)

        if self.use_sandbox_mode:
            # Speed up unit tests. In general, all tasks are very
            # simple, so some Theano optimizations can be redundant.
            environment.sandbox()

        # Clean identifiers map for each test
        layers.BaseLayer.global_identifiers_map = {}
Example #16
File: base.py  Project: itdxer/neupy
    def setUp(self):
        environment.reproducible(seed=self.random_seed)

        if not self.verbose:
            logging.disable(logging.CRITICAL)

        if self.use_sandbox_mode:
            # Speed up unit tests. In general, all tasks are very
            # simple, so some Theano optimizations can be redundant.
            environment.sandbox()

        # Clean identifiers map for each test
        layers.BaseLayer.global_identifiers_map = {}
Example #17
def load_data_neupy(show=False, show_indexes=[0]):
    environment.reproducible()
    dataset = datasets.load_digits()
    x_train, x_test, y_train, y_test = train_test_split(dataset.data,
                                                        dataset.target,
                                                        test_size=0.3)

    if show:
        for i in show_indexes:
            plt.imshow(x_train[i].reshape(8, 8))
            plt.show()
            plt.close()
    return (x_train, y_train), (x_test, y_test)
Example #18
def load_data(path_to_file):

    environment.reproducible()

    # take a str arg; if None, load the default trial set
    if path_to_file is None:

        print("Loading sklearn dataset")
        dataset = datasets.load_digits()
        n_samples = dataset.target.size
        n_dimensionality = dataset.data.shape[1]  #gives input dimensions

        n_classes = []
        for counter in dataset.target:
            if counter not in n_classes:
                n_classes.append(counter)
        n_classes = max(n_classes) + 1  #gives output dimensions

        # One-hot encoder
        target = np.zeros((n_samples, n_classes))
        target[np.arange(n_samples), dataset.target] = 1

        x_train, x_test, y_train, y_test = train_test_split(dataset.data,
                                                            target,
                                                            train_size=0.7)

    # import data from a custom file
    else:

        dataset = pd.read_csv(path_to_file)
        data, target_raw = dataset.iloc[:, :-1], dataset.iloc[:, -1]
        n_samples = dataset.shape[0]
        n_dimensionality = data.shape[1]

        # integer encode
        label_encoder = LabelEncoder()
        integer_encoded = label_encoder.fit_transform(target_raw)
        # binary encode
        onehot_encoder = OneHotEncoder(sparse=False)
        integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
        target = onehot_encoder.fit_transform(integer_encoded)

        n_classes = target.shape[1]

        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.7)

    return x_train, x_test, y_train, y_test, n_classes, n_dimensionality
Example #19
    def go(self):
        dataset = datasets.load_boston()
        data, target = dataset.data, dataset.target

        data_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        data = data_scaler.fit_transform(data)
        target = target_scaler.fit_transform(target.reshape(-1, 1))

        environment.reproducible()

        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.85)
Example #20
def run_neural_net(connection, data):

    #import_modules()

    dataset = data

    data, target = dataset.data, dataset.target

    data_scalar = preprocessing.MinMaxScaler()
    target_scalar = preprocessing.MinMaxScaler()

    data = data_scalar.fit_transform(data)
    target = target_scalar.fit_transform(target.reshape(-1, 1))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection,
        search_method='golden',
        show_epoch=5,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    time_start = time.time()
    cgnet.train(x_train, y_train, x_test, y_test, epochs=50)
    time_end = time.time()

    #plots.error_plot(cgnet)

    y_predict = cgnet.predict(x_test).round(1)
    error = mae(target_scalar.inverse_transform(y_test), \
                  target_scalar.inverse_transform(y_predict))

    #print(time_end - time_start)

    #print(target_scalar.inverse_transform(y_test), \
    #              target_scalar.inverse_transform(y_predict))

    #print(error)

    return ([time_end - time_start, error])
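run_neural_net takes a NeuPy connection and an sklearn-style dataset object with data and target attributes. A hedged call sketch; the layer sizes and the Boston housing dataset are assumptions, not part of the original example:

from sklearn import datasets
from neupy import layers

connection = [
    layers.Input(13),   # Boston housing has 13 input features
    layers.Sigmoid(50),
    layers.Sigmoid(1),
]
elapsed_seconds, test_error = run_neural_net(connection, datasets.load_boston())
print(elapsed_seconds, test_error)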
Example #21
def train_model(g):
    """训练模型
    Parameters:
    ----------
    g: 待优化的光滑因子
    return: 返回的是预测值与真实值
    """
    environment.reproducible()
    df, norm_eigen, norm_target = read_csv()
    x_train = norm_eigen[:10]
    y_train = norm_target[:10]
    x_test = norm_eigen[10:]
    y_test = norm_target[10:]
    gn = algorithms.GRNN(std=g)
    gn.train(x_train, y_train)
    y_predicted = gn.predict(x_train)
    return y_predicted, y_train
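Since the docstring describes g as the smoothing factor to be optimized, a minimal sketch of a grid search over it follows; the candidate range and the rmse criterion are assumptions, since the original optimization code is not shown:

import numpy as np
from neupy import estimators

candidate_stds = np.linspace(0.05, 1.0, 20)
errors = []
for g in candidate_stds:
    y_predicted, y_true = train_model(g)
    errors.append(estimators.rmse(y_true, y_predicted))

best_std = candidate_stds[int(np.argmin(errors))]
print("best smoothing factor:", best_std)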
Example #22
def train(X, Y):

    environment.reproducible()
    img_size = X.shape[1]
    network = algorithms.Momentum(
        [
            layers.Input(img_size),
            layers.Relu(100),
            layers.Softmax(Y.shape[1]),
        ],
        error='categorical_crossentropy',
        step=0.01,
        verbose=True,
        shuffle_data=True,
        momentum=0.9,
        nesterov=True,
    )
    network.architecture()
    network.train(X, Y, epochs=20)
    return network
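train() expects a one-hot encoded target matrix Y whose column count sets the Softmax layer size. A hedged usage sketch; the digits dataset and the manual one-hot encoding are assumptions used only to illustrate the call:

import numpy as np
from sklearn import datasets

digits = datasets.load_digits()
X = digits.data                          # 64 pixel features per image
Y = np.zeros((digits.target.size, 10))   # one-hot encode the 10 classes
Y[np.arange(digits.target.size), digits.target] = 1

network = train(X, Y)
print(network.predict(X[:5]).argmax(axis=1))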
Example #23
    def setUp(self):
        tf.reset_default_graph()

        if self.single_thread:
            sess = tensorflow_session()
            sess.close()

            config = tf.ConfigProto(
                allow_soft_placement=True,
                intra_op_parallelism_threads=1,
                inter_op_parallelism_threads=1,
            )
            tensorflow_session.cache = tf.Session(config=config)

        if not self.verbose:
            logging.disable(logging.CRITICAL)

        # Clean identifiers map for each test
        layers.BaseLayer.global_identifiers_map = {}
        environment.reproducible(seed=self.random_seed)
Example #24
def run_neural_net():

    import_modules()

    dataset = datasets.load_boston()
    data, target = dataset.data, dataset.target

    data_scalar = preprocessing.MinMaxScaler()
    target_scalar = preprocessing.MinMaxScaler()

    data = data_scalar.fit_transform(data)
    target = target_scalar.fit_transform(target.reshape(-1, 1))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection=[
            layers.Input(13),
            layers.Sigmoid(75),
            layers.Sigmoid(25),
            layers.Sigmoid(1),
        ],
        search_method='golden',
        show_epoch=1,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    cgnet.train(x_train, y_train, x_test, y_test, epochs=30)

    plots.error_plot(cgnet)

    y_predict = cgnet.predict(x_test).round(1)
    error = rmsle(target_scalar.inverse_transform(y_test),
                  target_scalar.inverse_transform(y_predict))

    return (error)
Example #25
def main_neupy(args):
    import matplotlib.pyplot as plt

    from neupy import algorithms, environment


    environment.reproducible()
    plt.style.use('ggplot')

    # data
    EP = np.load("data/simplearm_n3000/EP.npy")
    input_data = EP[:, :2]

    sofmnet = algorithms.SOFM(
        n_inputs=2,
        n_outputs=20,

        step=0.01,
        show_epoch=10,
        shuffle_data=False,
        verbose=True,

        learning_radius=2,
        features_grid=(20, 1),
        )

    plt.plot(input_data.T[0:1, :], input_data.T[1:2, :], 'kx', alpha=0.5)
    sofmnet.train(input_data, epochs=100)

    print("> Start plotting")
    plt.xlim(-1, 1.2)
    plt.ylim(-1, 1.2)

    plt.plot(sofmnet.weight[0:1, :], sofmnet.weight[1:2, :], 'bo', markersize=8, linewidth=5)
    plt.show()

    for data in input_data:
        print(sofmnet.predict(np.reshape(data, (2, 1)).T))
Example #26
    def go(self):
        raw = self.datafile.read().splitlines()

        data = self._prepare_data(raw[::2])
        target = self._prepare_target(raw[1::2])
        print(len(data))
        print(len(target))

        environment.reproducible()

        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.85)

        print(x_train[0])
        connections = [
            layers.Input(100),
            layers.Linear(200),
            layers.Sigmoid(150),
            layers.Sigmoid(5),
        ]

        cgnet = algorithms.ConjugateGradient(
            connection=connections,
            search_method='golden',
            show_epoch=25,
            verbose=True,
            addons=[algorithms.LinearSearch],
        )

        cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
        plots.error_plot(cgnet)

        y_predict = cgnet.predict(x_test).round(1)
        error = rmsle(y_test, y_predict)
        print(error)

        with open('lib/net/base_searcher.pickle', 'wb') as f:
            pickle.dump(cgnet, f)
Example #27
def start():
    environment.reproducible()
    plt.style.use('ggplot')

    input_data = np.array([
        [0.1961, 0.9806],
        [-0.1961, 0.9806],
        [0.9806, 0.1961],
        [0.9806, -0.1961],
        [-0.5812, -0.8137],
        [-0.8137, -0.5812],
    ])

    sofmnet = algorithms.SOFM(
        n_inputs=2,
        n_outputs=3,

        step=0.5,
        show_epoch=20,
        shuffle_data=True,
        verbose=True,

        learning_radius=0,
        features_grid=(3, 1),
    )
    plt.plot(input_data[:, 0], input_data[:, 1], 'ko')
    sofmnet.train(input_data, epochs=100)

    print("> Start plotting")
    plt.xlim(-1, 1.2)
    plt.ylim(-1, 1.2)

    plt.plot(sofmnet.weight[0:1, :], sofmnet.weight[1:2, :], 'bx')
    plt.show()

    for data in input_data:
        print(sofmnet.predict(np.reshape(data, (2, 1)).T))
Example #28
def load_data(path_to_file):

    environment.reproducible()

    if path_to_file == "MNIST":

        mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

        def TRAIN_SIZE(num):
            print('Total Training Images in Dataset = ' +
                  str(mnist.train.images.shape))
            print('--------------------------------------------------')
            x_train = mnist.train.images[:num, :]
            print('x_train Examples Loaded = ' + str(x_train.shape))
            y_train = mnist.train.labels[:num, :]
            print('y_train Examples Loaded = ' + str(y_train.shape))
            print('')
            return x_train, y_train

        def TEST_SIZE(num):
            print('Total Test Examples in Dataset = ' +
                  str(mnist.test.images.shape))
            print('--------------------------------------------------')
            x_test = mnist.test.images[:num, :]
            print('x_test Examples Loaded = ' + str(x_test.shape))
            y_test = mnist.test.labels[:num, :]
            print('y_test Examples Loaded = ' + str(y_test.shape))
            return x_test, y_test

        x_train, y_train = TRAIN_SIZE(55000)
        x_test, y_test = TEST_SIZE(55000)

        return x_train, x_test, y_train, y_test

    # take a str arg; if None, load the default trial set
    if path_to_file is None:

        print("Loading sklearn dataset")
        dataset = datasets.load_digits()
        n_samples = dataset.target.size
        n_dimensionality = dataset.data.shape[1]  # gives input dimensions

        n_classes = []
        for counter in dataset.target:
            if counter not in n_classes:
                n_classes.append(counter)
        n_classes = max(n_classes) + 1  # gives output dimensions

        # One-hot encoder
        target = np.zeros((n_samples, n_classes))
        target[np.arange(n_samples), dataset.target] = 1

        x_train, x_test, y_train, y_test = train_test_split(dataset.data,
                                                            target,
                                                            train_size=0.5)

    # import data from a custom file
    if path_to_file == "./input_data/r.csv":
        print("Reading file: ", path_to_file)
        dataset = pd.read_csv(path_to_file)
        data, target_raw = dataset.iloc[:, :-1], dataset.iloc[:, -1]
        n_samples = dataset.shape[0]
        n_dimensionality = data.shape[1]
        print("File dimensions: ", n_samples, n_dimensionality)

        # integer encode
        label_encoder = LabelEncoder()
        integer_encoded = label_encoder.fit_transform(target_raw)
        # binary encode
        onehot_encoder = OneHotEncoder(sparse=False)
        integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
        target = onehot_encoder.fit_transform(integer_encoded)

        n_classes = target.shape[1]

        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.7)

    return x_train, x_test, y_train, y_test
Example #29
from sklearn import datasets, preprocessing
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
from neupy import algorithms, layers, estimators, environment

environment.reproducible()
plt.style.use('ggplot')

dataset = datasets.load_boston()
data = dataset.data
target = dataset.target.reshape((-1, 1))

data_scaler = preprocessing.MinMaxScaler((-3, 3))
target_scaler = preprocessing.MinMaxScaler()

data = data_scaler.fit_transform(data)
target = target_scaler.fit_transform(target)

x_train, x_test, y_train, y_test = train_test_split(data,
                                                    target,
                                                    train_size=0.85)

cgnet = algorithms.Hessian(
    connection=[
        layers.Sigmoid(13),
        layers.Sigmoid(50),
        layers.Sigmoid(10),
        layers.Output(1),
    ],
    verbose=True,
)
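The snippet stops after constructing the Hessian network. A possible continuation, shown only as a hedged sketch in the style of the other Boston housing examples (the epoch count is an assumption), would train it and report the error on the rescaled targets:

cgnet.train(x_train, y_train, x_test, y_test, epochs=10)

y_predict = cgnet.predict(x_test)
error = estimators.rmsle(target_scaler.inverse_transform(y_test),
                         target_scaler.inverse_transform(y_predict))
print(error)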
Example #30
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from neupy import algorithms, layers, environment


environment.reproducible()
environment.speedup()

mnist = datasets.fetch_mldata('MNIST original')

data = (mnist.data / 255.).astype(np.float32)

np.random.shuffle(data)
x_train, x_test = data[:60000], data[60000:]
x_train_4d = x_train.reshape((60000, 1, 28, 28))
x_test_4d = x_test.reshape((10000, 1, 28, 28))

conv_autoencoder = algorithms.Momentum(
    [
        layers.Input((1, 28, 28)),

        layers.Convolution((16, 3, 3)) > layers.Relu(),
        layers.Convolution((16, 3, 3)) > layers.Relu(),
        layers.MaxPooling((2, 2)),

        layers.Convolution((32, 3, 3)) > layers.Relu(),
        layers.MaxPooling((2, 2)),

        layers.Reshape(),