コード例 #1
0
def test_init_from_file(X, y):
    """Verify that a previously saved RBF model can be restored.

    First reloads the full model from rbf_InitFromFile.h5 and reports its
    MSE on (X, y); then rebuilds the same architecture from the separately
    saved centers/widths/weights .npy files and checks that both models
    produce identical responses.
    """

    print("-" * 20 + " test init from file " + "-" * 20)

    # Load the last model from file. (Plain string literal: the original
    # used an f-string with no placeholder.)
    filename = "rbf_InitFromFile.h5"
    # The progress message now shows the actual file name; the original
    # f-string had no placeholder and printed a literal "(unknown)".
    print(f"Load model from file {filename} ... ", end="")
    model = load_model(filename, custom_objects={'RBFLayer': RBFLayer})
    print("OK")

    # predict() returns shape (n, 1); squeeze to match y's (n,) shape.
    res = model.predict(X).squeeze()
    print(f"MSE: {MSE(y, res):.4f}")

    # Rebuild the same network, loading the weights saved separately.
    rbflayer = RBFLayer(10,
                        initializer=InitFromFile("centers.npy"),
                        betas=InitFromFile("widths.npy"),
                        input_shape=(1, ))
    print("rbf layer created")
    outputlayer = Dense(1,
                        kernel_initializer=InitFromFile("weights.npy"),
                        use_bias=False)
    print("output layer created")

    model2 = Sequential()
    model2.add(rbflayer)
    model2.add(outputlayer)

    res2 = model2.predict(X).squeeze()
    print(f"MSE: {MSE(y, res2):.4f}")
    # Exact float comparison is deliberate: both models hold the very
    # same weights, so the outputs should be bit-identical.
    print("Same responses: ", all(res == res2))
コード例 #2
0
def train(X_train, y_train, epochs = 50):
    """Build and fit a 500-unit RBF regressor on 1-D training data.

    X_train and y_train are reshaped to column vectors (n_samples, 1)
    before fitting; returns the fitted Sequential model.
    """
    n_samples = X_train.shape[0]

    # Keras expects 2-D inputs: (samples, features).
    X_train = np.reshape(X_train, (n_samples, 1))
    y_train = np.reshape(y_train, (n_samples, 1))

    # RBF layer with centers drawn at random from the training data,
    # followed by dropout for regularisation and a sigmoid output unit.
    regressor = Sequential()
    regressor.add(RBFLayer(500,
                           initializer=InitCentersRandom(X_train),
                           betas=2.0,
                           input_shape=(1,)))
    regressor.add(Dropout(.2))
    regressor.add(Dense(units=1,
                        kernel_initializer='uniform',
                        activation='sigmoid'))

    regressor.compile(optimizer='adam', loss='mean_squared_error')

    # Fit the RBF network to the training set.
    regressor.fit(X_train, y_train, batch_size=32, epochs=epochs)
    return regressor
コード例 #3
0
ファイル: DNN-GP-training.py プロジェクト: rkarmakar19/DNN-GP
def baseline_model():
    """Create, train and save the DNN-GP regression network.

    Uses the module-level Xtrain/ytrain/Xtest/ytest arrays; the fitted
    model is written to "model-DNNGP.h5" and returned.
    """
    # RBF front end: 30 Gaussian units over the 6-feature input space.
    rbf = RBFLayer(30,
                   initializer=InitCentersRandom(Xtrain),
                   betas=1.0,
                   input_shape=(6, ))

    net = Sequential()
    net.add(rbf)
    # Shrinking ReLU stack on top of the RBF features.
    net.add(Dense(50, input_dim=30, kernel_initializer='normal',
                  activation='relu'))
    net.add(Dense(25, kernel_initializer='normal', activation='relu'))
    net.add(Dense(12, kernel_initializer='normal', activation='relu'))
    # Single linear regression output.
    net.add(Dense(1, kernel_initializer='normal'))

    net.compile(loss='mean_squared_error',
                optimizer='adam',
                metrics=["mean_squared_error"])

    # Training (quiet).
    net.fit(Xtrain, ytrain, batch_size=10, epochs=10, verbose=0)

    # Evaluation is computed but not reported, matching the original.
    scores = net.evaluate(Xtest, ytest, verbose=0)
    net.save("model-DNNGP.h5")
    return net
コード例 #4
0
def test(X, y, initializer):
    """Train a small RBF network with the given center initializer,
    plot the fit, report MSE, and round-trip the model through an .h5
    file and separate .npy weight files.
    """

    title = f" test {type(initializer).__name__} "
    print("-" * 20 + title + "-" * 20)

    # Create the RBF network as a Keras sequential model.
    model = Sequential()
    rbflayer = RBFLayer(10,
                        initializer=initializer,
                        betas=2.0,
                        input_shape=(1, ))
    outputlayer = Dense(1, use_bias=False)

    model.add(rbflayer)
    model.add(outputlayer)

    model.compile(loss='mean_squared_error', optimizer=RMSprop())

    # Fit and predict.
    model.fit(X, y, batch_size=50, epochs=2000, verbose=0)

    y_pred = model.predict(X)

    # Show graph: prediction vs. data, plus a zero reference line.
    plt.plot(X, y_pred)  # prediction
    plt.plot(X, y)  # response from data
    plt.plot([-1, 1], [0, 0], color='black')  # zero line
    plt.xlim([-1, 1])

    # Plot centers on the x-axis, marker size scaled by the widths.
    centers = rbflayer.get_weights()[0]
    widths = rbflayer.get_weights()[1]
    plt.scatter(centers, np.zeros(len(centers)), s=20 * widths)

    plt.show()

    # Calculate and print MSE (squeeze: predict() returns (n, 1)).
    y_pred = y_pred.squeeze()
    print(f"MSE: {MSE(y, y_pred):.4f}")

    # Saving to and loading from file. The progress messages now show
    # the actual file name — the original f-strings had no placeholder
    # and printed a literal "(unknown)".
    filename = f"rbf_{type(initializer).__name__}.h5"
    print(f"Save model to file {filename} ... ", end="")
    model.save(filename)
    print("OK")

    print(f"Load model from file {filename} ... ", end="")
    newmodel = load_model(filename, custom_objects={'RBFLayer': RBFLayer})
    print("OK")

    # Check that the loaded model works the same as the original. Exact
    # float comparison is deliberate: identical weights, identical graph,
    # so results should be bit-identical.
    y_pred2 = newmodel.predict(X).squeeze()
    print("Same responses: ", all(y_pred == y_pred2))

    # Save centers, widths & output weights separately for later reuse
    # (consumed by test_init_from_file-style reconstruction).
    np.save("centers", centers)
    np.save("widths", widths)
    np.save("weights", outputlayer.get_weights()[0])
コード例 #5
0
def get_compiled_model(initializer):
    """Return a compiled 500-unit RBF network for 2-D inputs.

    The given initializer places the RBF centers; the head is a single
    bias-free linear unit trained with mean squared error.
    """
    net = tf.keras.Sequential()
    net.add(RBFLayer(500, initializer=initializer, betas=2.0,
                     input_shape=(2, )))
    net.add(Dense(1, use_bias=False))

    net.compile(optimizer='adam',
                loss=['MeanSquaredError'],
                metrics=['MeanSquaredError'])
    return net
def RBFNN(X_train, X_test, y_train, y_test):
    """Train a 10-unit RBF regressor and predict on one selected row
    plus the full test set.

    Returns (prediction for the row chosen by getRow, predictions for
    all of X_test).
    """
    n_features = len(X_train[0])

    net = Sequential()
    net.add(RBFLayer(10,
                     initializer=InitCentersRandom(X_train),
                     betas=2.0,
                     input_shape=(n_features, )))
    net.add(Dense(1))
    net.compile(loss='mean_squared_error', optimizer=RMSprop())
    net.fit(X_train, y_train, batch_size=50, epochs=2000, verbose=0)

    # Single-row prediction first, then the whole test set.
    sample = getRow(X_test, y_test)
    result1 = net.predict(sample)
    y_pred = net.predict(X_test)
    return result1, y_pred
コード例 #7
0
def model(loss, n_inputs):
    """Build and compile a 16-unit RBF regressor for n_inputs features.

    Centers are initialised uniformly in [-1, 1]; prints a summary and
    returns the compiled Sequential model.
    """
    layers = (
        RBFLayer(16,
                 input_shape=(n_inputs, ),
                 initializer=RandomUniform(-1.0, 1.0),
                 betas=1.0),
        Dense(1, use_bias=False),
    )

    rbf_net = Sequential()
    for layer in layers:
        rbf_net.add(layer)

    rbf_net.summary()

    rbf_net.compile(loss=loss, optimizer=RMSprop())
    return rbf_net
コード例 #8
0
def create_network(train_set, trainingConfig):
    """Build an RBF classifier for 784-dimensional inputs.

    The RBF layer's width and center initializer come from
    trainingConfig: k-means centers (k_num units) or random training
    samples (random_samples_num units). Optional Dropout+Dense hidden
    layers are stacked before a 10-way output layer.

    Returns (compiled model, the RBF layer instance).
    """

    model = Sequential()

    # Choose the center initializer; the RBF layer has one unit per
    # center in either case.
    if trainingConfig.use_kmeans:
        initializer = InitCentersKMeans(train_set, trainingConfig.k_num)
        layer_exit_num = trainingConfig.k_num
    else:
        initializer = InitCentersRandom(train_set,
                                        trainingConfig.random_samples_num)
        layer_exit_num = trainingConfig.random_samples_num

    rbflayer = RBFLayer(layer_exit_num,
                        initializer,
                        betas=trainingConfig.betas,
                        input_shape=(784, ))

    # First layer is the RBF layer.
    model.add(rbflayer)

    # Hidden Dense layers, each preceded by dropout. Their width equals
    # the RBF layer's width, so layer_exit_num is reused instead of
    # re-deriving the same value from the config (the original code
    # duplicated the use_kmeans branch here).
    for _ in range(trainingConfig.hidden_layers_num):
        model.add(Dropout(trainingConfig.dropoutRate))
        model.add(Dense(units=layer_exit_num,
                        activation=trainingConfig.hidden_layer_act_func))

    # Last classification layer: 10 possible classes.
    model.add(Dense(units=10, activation=trainingConfig.last_layer_act_func))

    model.summary()

    model.compile(
        loss='categorical_crossentropy',
        optimizer=RMSprop(),  # Used for multiclass problems
        metrics=['accuracy'])  # Classification task, so report accuracy

    return (model, rbflayer)
コード例 #9
0
def add_rbf_layer(model, betas, X_train, Y_train, X_test, Y_test):
    """Create a new model as a copy of `model` plus an RBF head.

    Appends a 300-unit RBF layer and a 10-way softmax classifier on top
    of all layers of `model`, trains the result on [X_train, Y_train],
    reports its test accuracy and returns the new model.
    """

    newmodel = Sequential()
    for layer in model.layers:
        newmodel.add(layer)

    rbflayer = RBFLayer(300, betas=betas)
    newmodel.add(rbflayer)
    newmodel.add(Dense(10, use_bias=False, name="dense_rbf"))
    newmodel.add(Activation('softmax', name="Activation_rbf"))

    newmodel.compile(loss='categorical_crossentropy',
                     optimizer=RMSprop(),
                     metrics=['acc'])

    newmodel.summary()

    newmodel.fit(X_train,
                 Y_train,
                 batch_size=128,
                 epochs=10,
                 verbose=1,
                 validation_data=(X_test, Y_test))

    Y_pred = newmodel.predict(X_test)
    # accuracy_score needs class labels: predict() yields softmax
    # probability rows and Y_test is one-hot (required by the
    # categorical_crossentropy loss above), so the original exact-row
    # comparison reported ~0. Compare argmax class indices instead,
    # with the true labels passed first per sklearn's convention.
    print("Test Accuracy: ",
          accuracy_score(Y_test.argmax(axis=1), Y_pred.argmax(axis=1)))

    return newmodel
コード例 #10
0
def add_rbf_layer(model, betas, X_train, Y_train, X_test, Y_test):
    """Copy all but the last layer of `model`, freeze the copy, append a
    k-means-initialised RBF layer plus a softmax output, train on
    [X_train, Y_train] and report test accuracy.

    Returns the new model.
    """

    newmodel = Sequential()
    for i in range(len(model.layers) - 1):
        newmodel.add(model.layers[i])

    # Freeze the copied feature extractor; only the new head trains.
    for layer in newmodel.layers:
        layer.trainable = False

    # Feature-space representation of the training set, used below to
    # place the RBF centers.
    obs = newmodel.predict(X_train)
    num_clusters = 50

    rbflayer = RBFLayer(num_clusters, betas=betas)
    newmodel.add(rbflayer)

    newmodel.add(
        Dense(10, use_bias=False, name="dense_rbf", activation='softmax'))

    newmodel.compile(loss='categorical_crossentropy',
                     optimizer=Adam(lr=0.0001),
                     metrics=['acc'])

    newmodel.summary()

    init_weights = True

    if (init_weights):
        # k-means centers in feature space. (The deprecated
        # `precompute_distances` argument was removed from sklearn's
        # KMeans; dropping it keeps the same clustering result.)
        kmeans = KMeans(n_clusters=num_clusters, n_init=10).fit(obs)
        centers = kmeans.cluster_centers_

        # Width heuristic — one shared beta derived from the maximum
        # feature-vector norm, scaled by 1.5. (The original also
        # pre-assigned betas = np.zeros(...), which was dead code since
        # it was unconditionally overwritten here.)
        norms = np.linalg.norm(obs, axis=1)
        max_dist = np.max(norms)
        scaled_dist = max_dist * 1.5
        print('max_dist:', max_dist)
        betas = np.full((num_clusters, ), 2. / scaled_dist, dtype='f')
        weights = [centers, betas]
        rbflayer.set_weights(weights)

    newmodel.fit(X_train, Y_train, batch_size=128, epochs=10, verbose=1)

    Y_pred = newmodel.predict(X_test)
    # Compare argmax class indices: predict() returns softmax rows and
    # Y_test is one-hot (categorical_crossentropy), so the original
    # exact-row comparison reported ~0 accuracy.
    print("Test Accuracy: ",
          accuracy_score(Y_test.argmax(axis=1), Y_pred.argmax(axis=1)))

    return newmodel
コード例 #11
0
# Inspect the transformed training data before building the network.
print("X_train after transform \n", X_train)
#print("reshaped shape 1 Xtrain ", X_train.shape, "X_trrain 941 element ", X_train[940])
print("y_train with size ", y_train.shape)

############ Building the RBF ############
# Initialising the RBF network as a Keras Sequential model.
regressor = Sequential()

# Adding the input layer / first RBF layer plus dropout regularisation.
# (Translated from the original Greeklish note: "instead of X_train[0]
# in InitCentersRandom, X_lookback could also be used".)
# betas = 2.0
# NOTE(review): InitCentersRandom receives X_train[0] — a single sample,
# not the whole X_train. Confirm this is intended.
regressor.add(
    RBFLayer(units,
             input_dim=lookback,
             initializer=InitCentersRandom(X_train[0]),
             betas=1.0,
             input_shape=(1, lookback)))
regressor.add(Dropout(.2))

# Adding the 2nd hidden layer (alternative architectures kept
# commented out by the original author).
#regressor.add(LSTM(10, input_shape=(1, lookback)))
#regressor.add( RBFLayer(50, initializer=InitCentersRandom(X_all_2nd_layer), betas=2.0, input_shape=(1, units)))
#regressor.add(Dropout(.2))
#regressor.add(Dense(units=50, kernel_initializer='uniform', activation='relu'))
#regressor.add(Dropout(.2))

# Adding the output layer: one linear unit per prediction step.
regressor.add(
    Dense(units=step_size, kernel_initializer='uniform', activation='linear'))
コード例 #12
0

def load_data():
    """Read "data/data.txt" and split it into features and target.

    Returns (X, y): X holds every column but the last; y is the last
    column kept as a 2-D (n, 1) array.
    """
    table = np.loadtxt("data/data.txt")
    features, target = table[:, :-1], table[:, -1:]
    return features, target

if __name__ == "__main__":

    X, y = load_data()

    # RBF network: 10 Gaussian units with centers drawn at random from
    # the data, followed by a single linear output unit.
    model = Sequential()
    rbflayer = RBFLayer(10,
                        initializer=InitCentersRandom(X),
                        betas=2.0,
                        input_shape=(1,))
    model.add(rbflayer)
    model.add(Dense(1))

    model.compile(loss='mean_squared_error',
                  optimizer=RMSprop())

    model.fit(X, y,
              batch_size=50,
              epochs=2000,
              verbose=1)

    y_pred = model.predict(X)

    # Show the learned centers and widths of the RBF layer.
    print(rbflayer.get_weights())
コード例 #13
0
ファイル: Dts_5_1.py プロジェクト: lukkascost/ICA_UFC
for j in range(10):
    slices = KFold(n_splits=K_FOLD, shuffle=True)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    oData.random_training_test_by_percent(
        np.unique(classes, return_counts=True)[1], 0.8)
    grid_result = np.zeros((len(GRID_NEURON), len(GRID_B), K_FOLD))
    for g1, g_param in enumerate(GRID_NEURON):
        for g2, g2_param in enumerate(GRID_B):
            k_slice = 0
            for train, test in slices.split(oData.Training_indexes):
                K.clear_session()

                model = Sequential()
                rbflayer = RBFLayer(
                    g_param,
                    initializer=InitCentersRandom(
                        oDataSet.attributes[oData.Training_indexes[train]]),
                    betas=g2_param,
                    input_shape=(base.shape[1], ))
                model.add(rbflayer)
                model.add(
                    Dense(len(oDataSet.labelsNames), activation='sigmoid'))
                model.compile(loss='categorical_crossentropy',
                              optimizer=_OPTIMIZER)
                model.fit(oDataSet.attributes[oData.Training_indexes[train]],
                          binarizer(
                              oDataSet.labels[oData.Training_indexes[train]]),
                          batch_size=50,
                          epochs=epochs,
                          verbose=0)

                y_pred = model.predict(
コード例 #14
0
ファイル: test.py プロジェクト: NTU-CCA/EE7207
    mean, std = np.mean(X, axis=0, keepdims=True), np.std(X,
                                                          axis=0,
                                                          keepdims=True)
    X = (X - mean) / std
    y = data[:, -1]
    y = (y + 1) / 2
    return X, y


if __name__ == "__main__":

    X, y = load_data()

    # Binary classifier: 20 RBF units over the full feature space,
    # followed by a single bias-free sigmoid output.
    model = Sequential()
    rbflayer = RBFLayer(20,
                        initializer=InitCentersRandom(X),
                        betas=1.0,
                        input_shape=(X.shape[1], ))
    model.add(rbflayer)
    model.add(Dense(1, activation='sigmoid', use_bias=False))

    model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001))

    model.fit(X, y, batch_size=50, epochs=2000, verbose=1)
    # model.save("some_fency_file_name.h5")

    y_pred = model.predict(X)

    # print(rbflayer.get_weights())

    # Flatten the (n, 1) predictions for element-wise comparison with y.
    total_num = X.shape[0]
    y_pred = np.squeeze(y_pred)
コード例 #15
0
ファイル: main_file.py プロジェクト: saifudnm/google_bangkit
    
    scaler, data_bm = load_data(comodity_numb)
    data_bm_lag = data_bm.iloc[:0-comodity_numb,:]
    
    X = data_bm_lag.iloc[:, :comodity_numb]
    y = data_bm_lag.iloc[:, comodity_numb:]

    X_train, X_test, y_train, y_test = split(X, y)
    
    # get time
    for i in range(2,3):
        start = time.time()
        
        model = Sequential()
        rbflayer = RBFLayer(2,
                            InitCentersKMeans(X_train),
                            input_shape=(1,))
        
        model.add(rbflayer)
        model.add(Dense(1)) # sesuai jumlah array target
        
        model.compile(loss='mean_squared_error',
                      optimizer=RMSprop(),
                      metrics=['accuracy', 'mse', 'mae'])
        
        history = model.fit(X_train, y_train,
                            batch_size=50,
                            epochs=200,
                            verbose=1)
        
        y_pred = model.predict(X_test)
def add_rbf_layer(model, betas, X_train, Y_train, X_test, Y_test):
    """Train a 300-unit RBF head on top of `model`, then build a reduced
    30-unit copy seeded from the most important trained RBF units.

    Both heads are trained on [X_train, Y_train] and evaluated on
    [X_test, Y_test]; the (large) new model is returned.

    NOTE(review): relies on the TF1 session API (tf.InteractiveSession /
    global_variables_initializer) — runs only under TF1-compatible
    TensorFlow. The original contained three live pdb.set_trace()
    breakpoints and a dead triple-quoted debug block; those debugging
    leftovers have been removed.
    """

    sess = tf.InteractiveSession()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Two copies of the base network: one receives the full 300-unit RBF
    # head, the other later receives the reduced 30-unit head.
    newmodel = Sequential()
    copymodel = Sequential()
    for i in range(len(model.layers)):
        newmodel.add(model.layers[i])
        copymodel.add(model.layers[i])

    rbflayer = RBFLayer(300, betas=betas)

    newmodel.add(rbflayer)
    newmodel.add(Dense(10, use_bias=False, name="dense_rbf"))
    newmodel.add(Activation('softmax', name="Activation_rbf"))

    newmodel.compile(loss='categorical_crossentropy',
                     optimizer=RMSprop(),
                     metrics=['acc'])

    newmodel.summary()
    # Handle to the RBF layer: third from the end (RBF, Dense, softmax).
    rbf = newmodel.get_layer(index=-3)

    newmodel.fit(X_train, Y_train, batch_size=128, epochs=3, verbose=1)

    print("Betas and centers after training:")
    new_betas = sess.run(rbf.betas)
    new_centers = sess.run(rbf.centers)

    print(sess.run(rbf.betas))
    print(sess.run(rbf.centers))

    # Rank RBF units by how often they appear among the 50 largest
    # output weights of each of the 10 classes.
    drbf = newmodel.get_layer(name="dense_rbf")
    trained_weights = sess.run(drbf.weights)[0]
    trained_weights = trained_weights.T
    from collections import Counter
    important_weights = Counter()

    for i in range(10):
        important = np.argpartition(trained_weights[i], -50)[-50:]
        for j in important:
            important_weights[j] += 1

    # Collect betas/centers of the 30 most frequently-important units.
    top_30_units = important_weights.most_common(30)
    tb = np.array(new_betas[top_30_units[0][0]])
    tc = np.array([new_centers[top_30_units[0][0]]])
    for unit, count in top_30_units[1:]:
        tb = np.append(tb, new_betas[unit])
        tc = np.append(tc, [new_centers[unit]], axis=0)

    Y_pred = newmodel.predict(X_test)
    # Compare argmax class indices: predict() returns softmax rows and
    # Y_test is one-hot (categorical_crossentropy), so the original
    # exact-row comparison reported ~0 accuracy.
    print("Test Accuracy: ",
          accuracy_score(Y_test.argmax(axis=1), Y_pred.argmax(axis=1)))

    # Reduced model: same frozen base, 30-unit RBF head seeded with the
    # betas/centers of the top units found above.
    rbflayer = RBFLayer(30, betas=betas)
    copymodel.add(rbflayer)
    copymodel.add(Dense(10, use_bias=False, name="dense_rbf"))
    copymodel.add(Activation('softmax', name="Activation_rbf"))

    op1 = copymodel.layers[-3].betas.assign(tb)
    op2 = copymodel.layers[-3].centers.assign(tc)
    sess.run(op1)
    sess.run(op2)

    copymodel.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(),
                      metrics=['acc'])

    copymodel.summary()
    copymodel.fit(X_train, Y_train, batch_size=128, epochs=3, verbose=1)
    Y_pred = copymodel.predict(X_test)
    print(accuracy_score(Y_test.argmax(axis=1), Y_pred.argmax(axis=1)))

    return newmodel
コード例 #17
0
ファイル: rbf.py プロジェクト: scorrea92/LSC
    y_train = scaler.transform(y_train)
    y_val = scaler.transform(y_val)

    return x_train, y_train, x_val, y_val, test, scaler, y_val_nostandard, y_train_nostandard


# Get Data
path_train = '../dataset_cajamar/Dataset_Salesforce_Predictive_Modelling_TRAIN.txt'
path_test = '../dataset_cajamar/Dataset_Salesforce_Predictive_Modelling_TEST.txt'

# data() (defined earlier in this file) loads, standardises and splits
# the Salesforce dataset.
x_train, y_train, x_val, y_val, test, scaler, y_val_nostandard, y_train_nostandard = data(
    path_train, path_test)

# RBF front end: 10 Gaussian units with centers sampled from the
# 76-feature training data.
model = Sequential()
rbflayer = RBFLayer(10,
                    initializer=InitCentersRandom(x_train),
                    betas=2.0,
                    input_shape=(76, ))
model.add(rbflayer)

# Dense head: 512 units with batch normalisation, Gaussian noise and
# a ReLU activation.
model.add(Dense(512))
model.add(BN())
model.add(GN(0.3))
model.add(Activation('relu'))

# Single-output regression head; the ReLU clamps predictions to >= 0.
model.add(Dense(1))
model.add(Activation('relu'))

# MAPE loss; MSE reported as an extra metric.
model.compile(loss='mape', optimizer=RMSprop(), metrics=['mse'])

model.fit(x_train,
          y_train,