Example 1
# imports needed to run this example (Net is assumed to come from the local `net` module, as in Example 2)
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from net import Net

# dataset constants (values assumed here for illustration)
N_SAMPLES = 10000
TEST_SIZE = 0.2

# model settings
lr = 0.01   # learning rate
ep = 1000   # number of epochs
bs = 9000   # batch size

X, y = make_moons(n_samples=N_SAMPLES, noise=0.2, random_state=100)
# X = [[-0.3, 0.5], [1.4, -0.4], ...], y = [0, 1, 1, ...]
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=TEST_SIZE,
                                                    random_state=42)
# split the held-out portion (not the full dataset again) into validation and test halves
X_val, X_test, y_val, y_test = train_test_split(X_test,
                                                y_test,
                                                test_size=0.5,
                                                random_state=42)

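# fully connected network: 2 -> 25 -> 50 -> 50 -> 25 -> 1, with a sigmoid output for binary classification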
net = Net()
(net.add_layer(2, 25, 'relu')
    .add_layer(25, 50, 'relu')
    .add_layer(50, 50, 'relu')
    .add_layer(50, 25, 'relu')
    .add_layer(25, 1, 'sigmoid'))
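# train with Adam, validating on (X_val, y_val) with early stopping and periodic auto-saving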
net.train(X_train,
          y_train,
          ep,
          batch_size=bs,
          learning_rate=lr,
          optimizer='Adam',
          val_x=X_val,
          val_y=y_val,
          early_stop_interval=20,
          auto_save_interval=10,
          log=True)
Example 2
import time

from net import Net
from file_reader import FileReader
from vocabulary import Vocabulary
from trainer import Trainer
from data_set_functions import *  # wildcard import is assumed to provide segment_data and batch_data

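# load labels and reviews via FileReader, build a vocabulary, and featurize each review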
label_d, review_d = FileReader.run()
v = Vocabulary(review_d, label_d)
reviews = v.featurize_reviews()

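# network: input size equal to the vocabulary size, one 20-unit hidden layer, a single output unit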
nn = Net(v.num_of_items())
nn.add_layer(20)
nn.add_output_layer(1)

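# Trainer wraps the network; the 0.1 argument is presumably the learning rate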
t = Trainer(nn, 0.1)

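# pair each featurized review with its label, then split into train/validation/test sets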
train_data, validation_data, test_data = segment_data(
    list(zip(reviews, label_d)))


def train_epoch(epoch_num, training_set, validation_set):
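    """Run one training epoch over training_set, reporting throughput every 100 batches."""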
    start_time = time.time()
    print(f"Epoch number {epoch_num}")
    batches = batch_data(training_set, 1)

    for i, batch in enumerate(batches):
        # print(f"Batch {i + 1} of {len(batches)}")
        if i % 100 == 0:
            time_diff = time.time() - start_time
            per_sec = i / time_diff