コード例 #1
0
 def __init__(self,
              voca_size,
              input_size,
              bilstm_hidden_size,
              ffnn_hidden_size,
              ffnn_output_size,
              freeze=True):
     """Assemble an EmbeddingBag -> BiLSTM -> FFNN -> LogSoftmax model.

     Args:
         voca_size: number of rows in the embedding table.
         input_size: embedding dimension, also the BiLSTM input size.
         bilstm_hidden_size: hidden size of each LSTM direction.
         ffnn_hidden_size: hidden-layer size of the feed-forward head.
         ffnn_output_size: number of output classes.
         freeze: when True, the embedding weights receive no gradient.
     """
     super().__init__()
     # Keep every hyper-parameter on the instance for later inspection.
     self.voca_size = voca_size
     self.input_size = input_size
     self.bilstm_hidden_size = bilstm_hidden_size
     self.ffnn_hidden_size = ffnn_hidden_size
     self.ffnn_output_size = ffnn_output_size
     self.freeze = freeze
     # Randomly initialised bag-of-words layer with mean pooling.
     self.bow = nn.EmbeddingBag(self.voca_size, self.input_size, mode='mean')
     # Frozen embeddings are excluded from training.
     self.bow.weight.requires_grad = not self.freeze
     # Bidirectional LSTM encoder over the pooled embeddings.
     self.bilstm = nn.LSTM(input_size=self.input_size,
                           hidden_size=self.bilstm_hidden_size,
                           bidirectional=True)
     # Classifier head; its input concatenates both LSTM directions.
     self.ffnn = FFNN(self.bilstm_hidden_size * 2, self.ffnn_hidden_size,
                      self.ffnn_output_size)
     # Log-probabilities over classes.
     self.log_softmax = nn.LogSoftmax(dim=1)
コード例 #2
0
    def __init__(self,
                 voca_size,
                 input_size,
                 bilstm_hidden_size,
                 ffnn_hidden_size,
                 ffnn_output_size,
                 freeze=True):
        """Random-embedding BiLSTM classifier with a feed-forward head.

        Args:
            voca_size: vocabulary size of the embedding table.
            input_size: embedding dimension, also the BiLSTM input size.
            bilstm_hidden_size: hidden size of each LSTM direction.
            ffnn_hidden_size: hidden-layer size of the FFNN head.
            ffnn_output_size: number of output classes.
            freeze: when True, the embedding weights receive no gradient.
        """
        super().__init__()
        self.voca_size = voca_size
        self.input_size = input_size
        self.bilstm_hidden_size = bilstm_hidden_size
        self.ffnn_hidden_size = ffnn_hidden_size
        self.ffnn_output_size = ffnn_output_size
        self.freeze = freeze

        # Randomly initialised embedding table; frozen -> no gradient,
        # not frozen -> trained with the rest of the model.
        self.embeddingLayer = nn.Embedding(self.voca_size, self.input_size)
        self.embeddingLayer.weight.requires_grad = not self.freeze

        # Bidirectional LSTM sequence encoder.
        self.bilstm = nn.LSTM(input_size=self.input_size,
                              hidden_size=self.bilstm_hidden_size,
                              bidirectional=True)
        # One-hidden-layer classifier over the concatenated directions.
        self.ffnn = FFNN(self.bilstm_hidden_size * 2, self.ffnn_hidden_size,
                         self.ffnn_output_size)
        self.log_softmax = nn.LogSoftmax(dim=1)
コード例 #3
0
 def __init__(self,
              embeddings,
              bilstm_hidden_size,
              ffnn_hidden_size,
              ffnn_output_size,
              freeze=True):
     """Pretrained-embedding BOW -> BiLSTM -> FFNN classifier.

     Args:
         embeddings: pretrained embedding matrix (vocab x dim tensor).
         bilstm_hidden_size: hidden size of each LSTM direction.
         ffnn_hidden_size: hidden-layer size of the FFNN head.
         ffnn_output_size: number of output classes.
         freeze: when True, the pretrained embeddings stay fixed.
     """
     super().__init__()
     # The embedding dimension doubles as the BiLSTM input size.
     _, self.input_size = embeddings.size()
     self.bilstm_hidden_size = bilstm_hidden_size
     self.ffnn_hidden_size = ffnn_hidden_size
     self.ffnn_output_size = ffnn_output_size
     self.freeze = freeze
     # Mean-pooled bag of words backed by the pretrained vectors.
     self.bow = nn.EmbeddingBag.from_pretrained(embeddings,
                                                freeze=self.freeze,
                                                mode='mean')
     # Bidirectional LSTM encoder.
     self.bilstm = nn.LSTM(input_size=self.input_size,
                           hidden_size=self.bilstm_hidden_size,
                           bidirectional=True)
     # Feed-forward head over both LSTM directions concatenated.
     self.ffnn = FFNN(self.bilstm_hidden_size * 2, self.ffnn_hidden_size,
                      self.ffnn_output_size)
     self.log_softmax = nn.LogSoftmax(dim=1)
コード例 #4
0
ファイル: bow_ffnn.py プロジェクト: wzzMSc/TM_CW1
 def __init__(self, embeddings, hidden_size, output_size, freeze=True):
     """Bag-of-words classifier over a pretrained embedding table.

     Args:
         embeddings: pretrained embedding matrix (vocab x dim tensor).
         hidden_size: hidden-layer size of the FFNN head.
         output_size: number of output classes.
         freeze: when True, the pretrained embeddings stay fixed.
     """
     super().__init__()
     # Vocabulary size and embedding dimension come from the table itself.
     self.voca_size, self.input_size = embeddings.size()
     self.hidden_size = hidden_size
     self.output_size = output_size
     self.freeze = freeze
     # Mean-pooled embedding bag; `freeze` controls trainability.
     self.bow = nn.EmbeddingBag.from_pretrained(embeddings,
                                                freeze=self.freeze,
                                                mode='mean')
     self.ffnn = FFNN(self.input_size, self.hidden_size, self.output_size)
コード例 #5
0
 def __init__(self, embeddings, hidden_size, output_size, freeze=True):
     """BOW + single-hidden-layer FFNN over pretrained embeddings.

     Args:
         embeddings: pretrained embedding matrix (vocab x dim tensor).
         hidden_size: hidden-layer size of the FFNN.
         output_size: number of output classes.
         freeze: when True, the pretrained embeddings stay fixed.
     """
     super().__init__()
     # Derive vocabulary size / embedding dimension from the table.
     self.voca_size, self.input_size = embeddings.size()
     self.hidden_size = hidden_size
     self.output_size = output_size
     self.freeze = freeze
     # Mean-pooled bag-of-words layer built from the pretrained vectors.
     self.bow = nn.EmbeddingBag.from_pretrained(embeddings,
                                                freeze=self.freeze,
                                                mode='mean')
     # Single-hidden-layer feed-forward classifier.
     self.ffnn = FFNN(self.input_size, self.hidden_size, self.output_size)
     self.log_softmax = nn.LogSoftmax(dim=1)
コード例 #6
0
    def __init__(self, voca_size, input_size, bilstm_hidden_size,
                 ffnn_hidden_size, ffnn_output_size, freeze=True):
        """Random-embedding BiLSTM classifier with a feed-forward head.

        Args:
            voca_size: vocabulary size of the embedding table.
            input_size: embedding dimension, also the BiLSTM input size.
            bilstm_hidden_size: hidden size of each LSTM direction.
            ffnn_hidden_size: hidden-layer size of the FFNN head.
            ffnn_output_size: number of output classes.
            freeze: when True, the embedding weights receive no gradient.
        """
        super().__init__()
        # Record hyper-parameters on the instance.
        self.voca_size = voca_size
        self.input_size = input_size
        self.bilstm_hidden_size = bilstm_hidden_size
        self.ffnn_hidden_size = ffnn_hidden_size
        self.ffnn_output_size = ffnn_output_size
        self.freeze = freeze

        # Random embedding table; frozen weights get no gradient.
        self.embeddingLayer = nn.Embedding(self.voca_size, self.input_size)
        self.embeddingLayer.weight.requires_grad = not self.freeze

        # Bidirectional LSTM followed by a one-hidden-layer classifier
        # over both directions concatenated.
        self.bilstm = nn.LSTM(self.input_size, self.bilstm_hidden_size,
                              bidirectional=True)
        self.ffnn = FFNN(self.bilstm_hidden_size * 2, self.ffnn_hidden_size,
                         self.ffnn_output_size)
        self.log_softmax = nn.LogSoftmax(dim=1)
コード例 #7
0
    def __init__(self,
                 voca_size,
                 input_size,
                 hidden_size,
                 output_size,
                 freeze=True):
        """Randomly initialised bag-of-words classifier.

        Args:
            voca_size: vocabulary size of the embedding bag.
            input_size: embedding dimension.
            hidden_size: hidden-layer size of the FFNN head.
            output_size: number of output classes.
            freeze: when True, the embedding weights receive no gradient.
        """
        super(BOW_FFNN_RANDOM, self).__init__()
        # Store voca_size as well, for consistency with the sibling models
        # (BOW_BiLSTM_RANDOM / BiLSTM_FFNN_RANDOM keep it on the instance).
        self.voca_size = voca_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.freeze = freeze

        # Mean-pooled bag-of-words embedding, randomly initialised.
        self.bow = nn.EmbeddingBag(voca_size, input_size, mode='mean')
        # Frozen embeddings receive no gradient updates.
        self.bow.weight.requires_grad = not self.freeze

        # Single-hidden-layer feed-forward classifier.
        self.ffnn = FFNN(self.input_size, self.hidden_size, self.output_size)
        self.log_softmax = nn.LogSoftmax(dim=1)
コード例 #8
0
    def __init__(self,
                 embeddings,
                 bilstm_hidden_size,
                 ffnn_hidden_size,
                 ffnn_output_size,
                 freeze=True):
        """Pretrained-embedding BiLSTM classifier with a feed-forward head.

        Args:
            embeddings: pretrained embedding matrix (vocab x dim tensor).
            bilstm_hidden_size: hidden size of each LSTM direction.
            ffnn_hidden_size: hidden-layer size of the FFNN head.
            ffnn_output_size: number of output classes.
            freeze: when True, the pretrained embeddings stay fixed.
        """
        super().__init__()
        # The embedding dimension is read from the pretrained table and
        # doubles as the BiLSTM input size.
        _, self.input_size = embeddings.size()
        self.bilstm_hidden_size = bilstm_hidden_size
        self.ffnn_hidden_size = ffnn_hidden_size
        self.ffnn_output_size = ffnn_output_size
        self.freeze = freeze

        # Token lookup layer backed by the pretrained vectors.
        self.emb = nn.Embedding.from_pretrained(embeddings,
                                                freeze=self.freeze)

        # Bidirectional LSTM encoder plus one-hidden-layer classifier.
        self.bilstm = nn.LSTM(input_size=self.input_size,
                              hidden_size=self.bilstm_hidden_size,
                              bidirectional=True)
        self.ffnn = FFNN(self.bilstm_hidden_size * 2, self.ffnn_hidden_size,
                         self.ffnn_output_size)
        self.log_softmax = nn.LogSoftmax(dim=1)
コード例 #9
0
ファイル: main.py プロジェクト: eysteinn13/DT2119Project
def test_ffnn():
    """Train the FFNN baseline and print its test-set metrics."""
    # Configuration for a 4-layer network with 512 units per layer.
    params = {
        'n_layers': 4,
        'hidden_nodes': [512, 512, 512, 512],
        'epochs': 10,
        'use_dynamic_features': True,
        'use_mspec': False,
        'as_mat': False,
        'speaker_norm': False,
        'context_length': 17
    }
    net = FFNN(params)
    # Train, then install the trained model back on the wrapper.
    trained = net.train_model()
    net.set_model(trained)
    y_true, yp = net.predict_on_test()
    # Report F1, accuracy, and the full per-class breakdown.
    print("FFNN RESULTS")
    print(get_f1_score(y_true, yp))
    print(get_accuracy(y_true, yp))
    print(classification_report(y_true, yp))
コード例 #10
0
from ffnn import FFNN
import numpy as np
from sklearn.datasets import make_moons

# 2-input, one hidden layer of 5 units, 2-output network with noisy init.
nn = FFNN(2, [5], 2, noise_scale=.1)

I = np.identity(2)
# BUG FIX: make_moons' second positional parameter is `shuffle`, not
# `noise` (and it is keyword-only in recent scikit-learn), so the old
# call `make_moons(200, .17)` never applied the intended Gaussian noise.
X_data, y = make_moons(200, noise=.17)
# One-hot encode the integer class labels via identity-matrix rows.
y_data = np.array([I[value] for value in y])
fit_score = nn.fit(X_data, y_data)
for fit_line in fit_score:
    print(fit_line)
print(nn.accuracy(X_data, y_data))
コード例 #11
0
ファイル: test_ffnn.py プロジェクト: nzufelt/nnet_params
from ffnn import FFNN
import numpy as np
from sklearn.datasets import make_moons

# 2-input, one hidden layer of 5 units, 2-output network with noisy init.
nn = FFNN(2, [5], 2, noise_scale=.1)

I = np.identity(2)
# BUG FIX: make_moons' second positional parameter is `shuffle`, not
# `noise` (and it is keyword-only in recent scikit-learn), so the old
# call `make_moons(200, .17)` never applied the intended Gaussian noise.
X_data, y = make_moons(200, noise=.17)
# One-hot encode the integer class labels via identity-matrix rows.
y_data = np.array([I[value] for value in y])
fit_score = nn.fit(X_data, y_data)
for fit_line in fit_score:
    print(fit_line)
print(nn.accuracy(X_data, y_data))


コード例 #12
0
        # NOTE(review): this span begins inside an enclosing loop whose
        # header is outside the visible chunk; `test`, `X_train`, `y_train`,
        # `X_test`, `dataset`, `col` and `res` are defined there.
        y_test = test.pIC50.values

        # Gaussian-process baseline (Tanimoto kernel, per the class name).
        tan_gp = TanimotoGP()
        tan_gp.fit(X_train, y_train)
        gp_pred, gp_var = tan_gp.predict(X_test)
        gp_r2 = r2_score(y_test, gp_pred)
        gp_rmse = rmse(y_test, gp_pred)
        print(gp_r2, gp_rmse)

        # Gradient-boosted trees baseline with default hyper-parameters.
        xgb = XGBRegressor()
        xgb.fit(X_train, y_train)
        xgb_pred = xgb.predict(X_test)
        xgb_r2 = r2_score(y_test, xgb_pred)
        xgb_rmse = rmse(y_test, xgb_pred)

        # Feed-forward neural network baseline.
        ff_nn = FFNN()
        ff_nn.fit(X_train, y_train)
        ff_pred = ff_nn.predict(X_test)
        ff_r2 = r2_score(y_test, ff_pred)
        ff_rmse = rmse(y_test, ff_pred)
        print([dataset, col, gp_r2, xgb_r2, ff_r2, gp_rmse, xgb_rmse, ff_rmse])
        # Flush so progress is visible when stdout is redirected to a file.
        sys.stdout.flush()
        # One result row per (dataset, split) pair.
        res.append(
            [dataset, col, gp_r2, xgb_r2, ff_r2, gp_rmse, xgb_rmse, ff_rmse])

# Persist the collected comparison table.
res_df = pd.DataFrame(res,
                      columns=[
                          "dataset", "split", 'gp_r2', 'xgb_r2', 'ffnn_r2',
                          'gp_rmse', 'xgb_rmse', 'ffnn_rmse'
                      ])
res_df.to_csv("comparison.csv", index=False)
コード例 #13
0
ファイル: transfer_learning.py プロジェクト: pmantica1/NLP
if __name__ == "__main__":
    # Encoder/classifier dimensions.
    feature_vector_dimensions = 300
    questions_vector_dimensions = 500
    kernel_size = 3

    classifier_hidden_size_1 = 300
    classifier_hidden_size_2 = 150
    num_labels = 2

    # Optimisation hyper-parameters.
    learning_rate = 1e-4
    weight_decay = 1e-3
    n_epochs = 4
    batch_size = 16

    # CNN encoder maps feature vectors to question vectors; the FFNN
    # classifies those question vectors into `num_labels` classes.
    encoder = CNN(feature_vector_dimensions, questions_vector_dimensions, kernel_size)
    classifier = FFNN(questions_vector_dimensions, classifier_hidden_size_1, classifier_hidden_size_2, num_labels)

    # `lamb` values to sweep (currently a single candidate); presumably a
    # loss-weighting coefficient for transfer learning — confirm downstream.
    lamb_list = [1e-1] 
    best_lamb = 0 
    best_score = 0 

    database = TransferLearningDatabase()
    for lamb in lamb_list: 
        training_dataset = database.get_training_set()
        android_validation_dataset = database.get_validation_set()
        android_test_dataset = database.get_testing_set()

        # Separate Adam optimisers for the encoder and the classifier.
        optimizer_encoder = torch.optim.Adam(encoder.parameters(), lr=learning_rate, weight_decay=weight_decay)
        optimizer_domain = torch.optim.Adam(classifier.parameters(), lr=learning_rate, weight_decay=weight_decay)

        # NOTE(review): `xrange` implies Python 2. The epoch loop body
        # continues beyond this chunk.
        for epoch in xrange(n_epochs):
コード例 #14
0
# Checkpoint filename pairs: (RNN weights, FFNN weights) per configuration.
base = ('rnn_sgd_base.pt', 'ffnn_sgd_base.pt')
hx2 = ('rnn_sgd_hx2.pt', 'ffnn_sgd_hx2.pt')
lx2 = ('rnn_sgd_lx2.pt', 'ffnn_sgd_lx2.pt')

files = (base, hx2, lx2)
directory = 'models_part_a/'

# Rebuild each architecture and restore its saved state dict.
base_models = []
# RNN, SGD-trained, base configuration.
path = directory + base[0]
model = RNN(32, 1, 64, True)
model.load_state_dict(torch.load(path))
base_models.append(model)
# FFNN, SGD-trained, base configuration.
path = directory + base[1]
model = FFNN(97305, 32, 1)
model.load_state_dict(torch.load(path))
base_models.append(model)

# hx2 variant: second constructor argument doubled (32 -> 64) — presumably
# the hidden size, per the "hx2" name; confirm against the model classes.
hx2_models = []
# RNN, SGD-trained, hx2 configuration.
path = directory + hx2[0]
model = RNN(64, 1, 64, True)
model.load_state_dict(torch.load(path))
hx2_models.append(model)
# FFNN, SGD-trained, hx2 configuration.
path = directory + hx2[1]
model = FFNN(97305, 64, 1)
model.load_state_dict(torch.load(path))
hx2_models.append(model)
コード例 #15
0
ファイル: nn.py プロジェクト: luhongkai/fin-ca
def ffnn_instance():
    """Build and return a sigmoid FFNN wired to the RBM weight names."""
    # Layer widths and the (weight, hidden-bias) variable names of the
    # two pretrained RBMs the network loads its parameters from.
    layer_sizes = [40, 4]
    rbm_layer_names = [['rbmw1', 'rbmhb1'], ['rbmw2', 'rbmhb2']]
    return FFNN(RANDOM_INIT,
                ALL_WEIGHTS_TRAINABLE,
                17,
                layer_sizes,
                rbm_layer_names,
                transfer_function=tf.nn.sigmoid)
コード例 #16
0
# Destination file for the (optionally) pickled trained network.
pickle_file = "network.pickle"

# Pre-split arrays from an earlier preprocessing step (presumably MNIST,
# per the filename — confirm against the producer of this pickle).
with open("mnist_array_output.pickle", "rb") as f:
    X_train, X_test, y_train, y_test = pickle.load(f)

load_pickle = input("Load pickled networks (y/n)? ") == "y"
do_plot = input("Plot the predicted data? (y/n)? ") == "y"
if load_pickle:
    # Reuse a previously trained network instead of retraining.
    with open(pickle_file, "rb") as f:
        nn_relu = pickle.load(f)
else:
    do_pickle = input("Pickle the neural networks (y/n)? ") == "y"
    # One hidden layer of 200 units between input and output widths.
    layer_structure = [X_train.shape[1], 200, y_train.shape[1]]
    training_data = list(zip(X_train, y_train))

    # Train a ReLU network with SGD.
    nn_relu = FFNN(layer_structure, activation_function=ReLU)
    nn_relu.SGD_train(training_data)

    if do_pickle:
        with open(pickle_file, "wb") as f:
            pickle.dump(nn_relu, f)
            
if do_plot:
    y_predict_relu_vector_form = nn_relu.predict(X_test)

    # Reshape from vector y values into scalars
    y_predict_relu = np.zeros(y_predict_relu_vector_form.shape[1])
    for i, prediction in enumerate(y_predict_relu_vector_form.T):
        y_predict_relu[i] = np.argmax(prediction)
                
    # NOTE(review): chunk is truncated here; the body of this `with`
    # statement lies beyond the visible lines.
    with open("mnist_X_test_unraveled.pickle", "rb") as f:
コード例 #17
0
ファイル: main.py プロジェクト: willle3456/CircuitBuilder
    # NOTE(review): this span begins inside a loop whose header is outside
    # the visible chunk; `z2`, `x`, `train_labels`, `train_in`, `im_list`
    # come from there.
    z2[x][27] = 1.0

train_labels = np.concatenate((train_labels, z2), axis=0)

# Flatten each image to a vector, append the extra image list, then
# normalise: invert pixel values (255 - v) and scale into [0, 1].
train_in = train_in.reshape(
    (train_in.shape[0], train_in.shape[1] * train_in.shape[2]))
train_in = np.concatenate((train_in, im_list))
train_in = train_in.astype(float)
for x in range(len(train_in)):
    train_in[x] = abs(255 - train_in[x])
    train_in[x] = train_in[x] / 255.0

print(train_in.shape)
# Define the neural network structure
# Here we have just one hidden layer with 2048 nodes
network = FFNN([784, 2048, 28], post_function=tf.nn.softmax, session=1)
# Train the model using gradient descent


def cross_entropy_with_softmax(model_output, true_output):
    """Summed softmax cross-entropy between logits and target labels."""
    return tf.reduce_sum(
        tf.nn.softmax_cross_entropy_with_logits(labels=true_output,
                                                logits=model_output))


# NOTE(review): this call is truncated by the chunk boundary; further
# keyword arguments follow beyond the visible lines.
network.train(
    train_in,
    train_labels,
    epochs=5,  # was 1100
    batch_size=100,
    lc_interval=300,
コード例 #18
0
from ffnn import FFNN

out_dir = "../../doc/assets/"

# Sample a 100x100 grid over [0, 1]^2 and flatten the coordinates.
N = 100
deg = 8
x = np.linspace(0, 1, N)
y = np.linspace(0, 1, N)
x, y = np.meshgrid(x, y)
x = np.ravel(x)
y = np.ravel(y)
# Degree-8 polynomial design matrix and noisy Franke-function targets.
X = design_matrix(x, y, deg)
Y = franke(x, y, noise_sigma=0.1, noise=True)
X_train, X_test, y_train, y_test = split_and_scale(X, Y, test_size=0.3)

# One hidden layer of 50 units; single scalar output (regression).
layer_structure = [X.shape[1], 50, 1]
training_data = list(zip(X_train, y_train))

# Train one network per learning rate and record convergence curves.
nn1 = FFNN(layer_structure, eta=0.1)
eta01_MSE = nn1.SGD_train(training_data, report_convergence=True)
nn2 = FFNN(layer_structure, eta=0.01)
eta001_MSE = nn2.SGD_train(training_data, report_convergence=True)
# NOTE(review): `nn2` is rebound here, discarding the eta=0.01 network;
# only its recorded MSE curve survives. A distinct name (nn3) would be
# clearer if the network object is ever needed later.
nn2 = FFNN(layer_structure, eta=0.001)
eta0001_MSE = nn2.SGD_train(training_data, report_convergence=True)

# Plot the recorded MSE per batch for each learning rate.
batches = list(range(len(eta01_MSE)))
plt.plot(batches, eta01_MSE, label=r"$\eta = 0.1$")
plt.plot(batches, eta001_MSE, label=r"$\eta = 0.01$")
plt.plot(batches, eta0001_MSE, label=r"$\eta = 0.001$")
plt.legend()
plt.show()
コード例 #19
0
# NOTE(review): `x`, `y` and `deg` are defined earlier, outside this chunk
# (presumably a meshgrid, as in the sibling script above — confirm).
x = np.ravel(x)
y = np.ravel(y)
# Polynomial design matrix and noisy Franke-function targets.
X = design_matrix(x, y, deg)
Y = franke(x, y, noise_sigma=0.1, noise=True)
X_train, X_test, y_train, y_test = split_and_scale(X, Y, test_size=0.3)

load_pickle = input("Load pickled networks (y/n)? ") == "y"
if load_pickle:
    # Restore the three previously trained networks.
    with open("network.pickle", "rb") as f:
        nn_sigmoid, nn_relu, nn_leaky = pickle.load(f)
else:
    do_pickle = input("Pickle the neural networks (y/n)? ") == "y"
    # One hidden layer of 50 units; single scalar output (regression).
    layer_structure = [X.shape[1], 50, 1]
    training_data = list(zip(X_train, y_train))

    # Train one network per activation function for comparison;
    # the first uses the FFNN default activation.
    nn_sigmoid = FFNN(layer_structure, epochs=20)
    nn_sigmoid.SGD_train(training_data)

    nn_relu = FFNN(layer_structure, epochs=20, activation_function=ReLU)
    nn_relu.SGD_train(training_data)

    nn_leaky = FFNN(layer_structure, epochs=20, activation_function=leaky_ReLU)
    nn_leaky.SGD_train(training_data)

    if do_pickle:
        with open("network.pickle", "wb") as f:
            pickle.dump((nn_sigmoid, nn_relu, nn_leaky), f)

# Test-set predictions for each activation variant.
nn_sigmoid_predict = nn_sigmoid.predict(X_test)
nn_relu_predict = nn_relu.predict(X_test)
nn_leaky_predict = nn_leaky.predict(X_test)