Example #1
0
def nn(x, t, batch_size, epochs, feature=None, validation=None):
    """
    Build and train a simple neural-network regression model, then plot
    its training / validation loss curves.

    Parameters
    ----------
    x : ndarray
        Training input data.
    t : ndarray
        Target (teacher) data.
    batch_size : int
        Mini-batch size.
    epochs : int
        Number of training epochs.
    feature : int, optional
        Feature-scaling mode, passed through to ``__feature__``.
    validation : tuple, optional
        Validation data forwarded unchanged to ``model.fit``.
    """
    data = {'x': x, 't': t}
    # Preprocess the inputs and the targets the same way.
    for key in data:
        data[key] = __feature__(data[key], feature)  # standardization / normalization
        # NOTE(review): x and t are shuffled by separate calls; the constant
        # second argument (0) presumably seeds the shuffle so the input/target
        # pairing is preserved — confirm against __shuffle__'s implementation.
        data[key] = __shuffle__(data[key], 0)

    model = Sequential()
    model.add(Input(input_shape=x.shape[1]))
    model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    # 'liner' is this framework's spelling for a linear output activation.
    model.add(Dense(t.shape[1], activation='liner'))
    model.compile(loss='mean_squared_error')

    history = model.fit(data['x'],
                        data['t'],
                        batch_size=batch_size,
                        epochs=epochs,
                        validation=validation)

    # Plot the loss curves recorded in the training history.
    loss = history['loss_ave']
    val_loss = history['val_loss']
    nb_epoch = len(loss)
    plt.plot(range(nb_epoch), loss, marker='.', label='loss')
    plt.plot(range(nb_epoch), val_loss, marker='.', label='val_loss')
    plt.legend(loc='best', fontsize=10)
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()
Example #2
0
def nn(x, t, batch_size, epochs, feature=None, validation=None):
    """
    Build and train a simple neural-network regression model, then plot
    its loss and training-accuracy curves.

    Parameters
    ----------
    x : ndarray
        Training input data.
    t : ndarray
        Target (teacher) data.
    batch_size : int
        Mini-batch size.
    epochs : int
        Number of training epochs.
    feature : int, optional
        Feature-scaling selector. NOTE(review): accepted but never used in
        this variant — remove or wire it up.
    validation : tuple, optional
        Validation data forwarded unchanged to ``model.fit``.
    """

    model = Sequential()
    model.add(Input(input_shape=x.shape[1]))
    model.add(Dense(50, activation='relu', weight_initializer='relu'))
    model.add(Dense(50, activation='relu', weight_initializer='relu'))
    # 'liner' is this framework's spelling for a linear output activation.
    model.add(Dense(t.shape[1], activation='liner'))
    model.compile(loss='mean_squared_error')

    history = model.fit(x,
                        t,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation=validation)

    # Plot training loss, validation loss and training accuracy.
    # NOTE(review): train_acc shares the 'loss'-labeled y-axis.
    loss = history['loss_ave']
    val_loss = history['val_loss']
    train_acc = history['train_acc']

    nb_epoch = len(loss)
    plt.plot(range(nb_epoch), loss, marker='.', label='loss')
    plt.plot(range(nb_epoch), val_loss, marker='.', label='val_loss')
    plt.plot(range(nb_epoch), train_acc, marker='.', label='train_acc')
    plt.legend(loc='best', fontsize=10)
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()
Example #3
0
def nn(x, t, batch_size, epochs, feature):
    """
    Build and train a simple neural-network regression model, then plot
    its training-loss curve.

    Parameters
    ----------
    x : ndarray
        Training input data.
    t : ndarray
        Target (teacher) data.
    batch_size : int
        Mini-batch size.
    epochs : int
        Number of training epochs.
    feature : int
        Feature-scaling selector: 0 = standardize, 1 = normalize,
        2 = standardize first, then normalize the standardized values.
    """
    # Feature scaling: mode 2 intentionally applies both transforms in order.
    if feature in (0, 2):
        x = data_std(x)
        t = data_std(t)
    if feature in (1, 2):
        x = data_nom(x)
        t = data_nom(t)

    model = Sequential()
    model.add(Input(input_shape=x.shape[1]))
    model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    # 'liner' is this framework's spelling for a linear output activation.
    model.add(Dense(t.shape[1], activation='liner'))
    model.compile(loss='mean_squared_error')

    history = model.fit(x, t, batch_size=batch_size, epochs=epochs)

    # Plot the training-loss curve recorded in the history.
    loss = history['loss_ave']
    nb_epoch = len(loss)
    plt.plot(range(nb_epoch), loss, marker='.', label='loss')
    plt.legend(loc='best', fontsize=10)
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()
def LayerAdd_click(layerName, Node, weight=None, bias=None, activation=None):
    """Append a layer to the global network model and print its parameters.

    ``layerName`` selects the layer type ('input' or 'Dense'); any other
    value adds nothing but still prints the model parameters. ``Node`` is
    coerced to int, so string input (e.g. from a UI field) is accepted.
    """
    node_count = int(Node)

    if layerName == 'input':
        model.add(Input(input_shape=node_count))
    elif layerName == 'Dense':
        model.add(Dense(node_count, activation, weight, bias))

    model.printParams()
Example #5
0
    def LayerAdd_click(self, layerName, Node, weight=None, bias=None, activation=None):
        """Append a layer to ``self.model`` and echo what was added.

        ``layerName`` selects the layer type ('input' or 'Dense'); any
        other value adds nothing. ``Node`` is coerced to int, so string
        input (e.g. from a UI field) is accepted. Returns ``layerName``.
        """
        node_count = int(Node)

        if layerName == 'input':
            self.model.add(Input(input_shape=node_count))
        elif layerName == 'Dense':
            self.model.add(Dense(node_count, activation, weight, bias))

        print('{}:{}'.format(layerName, node_count))
        return layerName
Example #6
0
def nn(x, t, batch_size, epochs, feature=None, validation=None):
    """
    Build and train a simple neural-network regression model with a
    validation split and a learning-curve visualization callback, then
    plot the training / validation loss curves.

    Parameters
    ----------
    x : ndarray
        Training input data.
    t : ndarray
        Target (teacher) data.
    batch_size : int
        Mini-batch size.
    epochs : int
        Number of training epochs.
    feature : int, optional
        Feature-scaling selector passed through to ``Datafeature``.
    validation : tuple, optional
        Pre-built ``(x_val, t_val)`` pair; when omitted, 100 samples are
        held out from the shuffled training data instead.
    """
    # -------------------------------
    # Feature scaling
    # -------------------------------
    if feature is not None:
        x = Datafeature(x, feature)
        t = Datafeature(t, feature)

    # -------------------------------
    # Validation split
    # -------------------------------
    if validation is not None:    # caller supplied validation data up front
        x_val = validation[0]
        t_val = validation[1]
    else:
        # NOTE(review): x and t are shuffled by two independent calls; the
        # input/target pairing survives only if __shuffle__ is deterministic
        # across calls — confirm against its implementation.
        x = __shuffle__(x)
        t = __shuffle__(t)
        # Hold out 100 samples for validation.
        x_val, x = __sorting__(x, 100)
        t_val, t = __sorting__(t, 100)

    # Callback that visualizes the learning curves during training.
    higher_better_metrics = ['r2']
    visualize_cb = LearningVisualizationCallback(higher_better_metrics)
    callbacks = [
        visualize_cb,
    ]

    model = Sequential()
    model.add(Input(input_shape=x.shape[1]))
    model.add(Dense(50, activation='relu', weight_initializer='relu'))
    model.add(Dense(50, activation='relu', weight_initializer='relu'))
    # 'liner' is this framework's spelling for a linear output activation;
    # 'rsme' likewise is the framework's metric key as-is.
    model.add(Dense(t.shape[1], activation='liner'))
    model.compile(loss='mean_squared_error', metrics=['r2', 'rsme'])

    history = model.fit(x, t, batch_size=batch_size,
                        epochs=epochs, validation=(x_val, t_val), callbacks=callbacks)

    # Plot the loss curves recorded in the training history.
    loss = history['loss_ave']
    val_loss = history['val_loss']

    nb_epoch = len(loss)
    plt.plot(range(nb_epoch), loss, marker='.', label='loss')
    plt.plot(range(nb_epoch), val_loss, marker='.', label='val_loss')
    plt.legend(loc='best', fontsize=10)
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()
Example #7
0
    #正解データの加工
    t_train = keras.utils.to_categorical(t_train, 10)  # one_hot_labelに変換
    t_test = keras.utils.to_categorical(t_test,  10)
    """

    # 学習曲線を可視化するコールバックを用意する
    higher_better_metrics = ['r2']
    visualize_cb = LearningVisualizationCallback(higher_better_metrics)
    callbacks = [
        visualize_cb,
    ]

    model = Sequential()
    model.add(Input(input_shape=x_train.shape[1]))
    model.add(Dense(50, activation='relu', weight_initializer='relu'))
    model.add(Dense(50, activation='relu', weight_initializer='relu'))
    #model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    #model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    #model.add(Dense(t.shape[1],  activation='softmax'))
    #model.compile(loss='cross_entropy_error')
    model.add(Dense(t_train.shape[1], activation='liner'))
    model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['r2'])

    epochs = 10
    batch_size = 128

    history = model.fit(x_train,
                        t_train,
                        batch_size=batch_size,
                        epochs=epochs,
Example #8
0
    #正解データの加工
    t_train = keras.utils.to_categorical(t_train, 10)  # one_hot_labelに変換
    t_test = keras.utils.to_categorical(t_test,  10)
    """

    # 学習曲線を可視化するコールバックを用意する
    higher_better_metrics = ["r2"]
    visualize_cb = LearningVisualizationCallback(higher_better_metrics)
    callbacks = [
        visualize_cb,
    ]

    model = Sequential()
    model.add(Input(input_shape=x_train.shape[1]))
    model.add(Dense(50, activation="relu", weight_initializer="relu"))
    model.add(Dense(50, activation="relu", weight_initializer="relu"))
    # model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    # model.add(Dense(50, activation='sigmoid', weight_initializer='sigmoid'))
    # model.add(Dense(t.shape[1],  activation='softmax'))
    # model.compile(loss='cross_entropy_error')
    model.add(Dense(t_train.shape[1], activation="liner"))
    model.compile(loss="mean_squared_error", optimizer="sgd", metrics=["r2"])

    epochs = 50
    batch_size = 128

    history = model.fit(
        x_train,
        t_train,
        batch_size=batch_size,