Example 1
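A legacy OpenMDAO (0.x) assembly that trains a MetaModel of sin(x) with a neural-net surrogate: a DOEdriver with a FullFactorial generator produces training cases from a Sin component, and a second DOEdriver draws uniform samples to validate the metamodel against a separate Sin instance. The fragment is the body of the Simulation assembly's __init__; a sketch of the surrounding scaffolding follows the snippet.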
    def __init__(self):
        super(Simulation, self).__init__()

        # Components
        self.add("sin_calc", Sin())
        self.add("sin_verify", Sin())
        self.add("sin_meta_model", MetaModel(params=('x',),
                                             responses=('f_x',)))
        self.sin_meta_model.default_surrogate = NeuralNet(n_hidden_nodes=5)

        # Training the MetaModel
        self.add("DOE_Trainer", DOEdriver())
        self.DOE_Trainer.DOEgenerator = FullFactorial()
        # Seems to need a lot of training data for decent prediction of sin(x),
        # at least with default 'cg' method.
        self.DOE_Trainer.DOEgenerator.num_levels = 2500
        self.DOE_Trainer.add_parameter("sin_calc.x", low=0, high=20)
        self.DOE_Trainer.add_response("sin_calc.f_x")

        self.connect('DOE_Trainer.case_inputs.sin_calc.x',
                     'sin_meta_model.params.x')
        self.connect('DOE_Trainer.case_outputs.sin_calc.f_x',
                     'sin_meta_model.responses.f_x')

        # MetaModel Validation
        self.add("DOE_Validate", DOEdriver())
        self.DOE_Validate.DOEgenerator = Uniform()
        self.DOE_Validate.DOEgenerator.num_samples = 100
        self.DOE_Validate.add_parameter(("sin_meta_model.x", "sin_verify.x"),
                                        low=0, high=20)
        self.DOE_Validate.add_response("sin_verify.f_x")
        self.DOE_Validate.add_response("sin_meta_model.f_x")

        # Iteration Hierarchy
        self.driver.workflow.add(['DOE_Trainer', 'DOE_Validate'])
        self.DOE_Trainer.workflow.add('sin_calc')
        self.DOE_Validate.workflow.add(('sin_verify', 'sin_meta_model'))
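Both this fragment and Example 2 read as the body of a Simulation assembly's __init__ against the legacy OpenMDAO 0.x API. A minimal sketch of the scaffolding they assume is below; the module paths and the Sin component are assumptions (these APIs moved between 0.x releases), not code taken from the source.

from math import sin

# Assumed legacy OpenMDAO 0.x import paths; they moved between releases.
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float
from openmdao.lib.drivers.api import DOEdriver
from openmdao.lib.doegenerators.api import FullFactorial, Uniform
from openmdao.lib.components.api import MetaModel
from openmdao.lib.surrogatemodels.api import NeuralNet
# Example 2 additionally uses DBCaseRecorder and SequentialWorkflow (likely
# openmdao.lib.casehandlers.api and openmdao.main.api; also assumed).


class Sin(Component):
    """Reference component computing f_x = sin(x)."""
    # low/high on the variable let Example 2's bare
    # add_parameter("sin_meta_model.x") pick up bounds from metadata.
    x = Float(0.0, iotype='in', low=0, high=20)
    f_x = Float(0.0, iotype='out')

    def execute(self):
        self.f_x = sin(self.x)


class Simulation(Assembly):
    # __init__ body as shown in the snippet above
    ...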
Example 2
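The same sin-metamodel setup written against an older MetaModel API: the surrogate is configured through the surrogate/surrogate_args dictionaries, the wrapped Sin model is trained by firing train_next events, and both drivers record cases with DBCaseRecorders. The scaffolding sketched under Example 1 applies here too.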
    def __init__(self):
        super(Simulation, self).__init__()

        # Components
        self.add("sin_meta_model", MetaModel())
        self.sin_meta_model.surrogate = {"default": NeuralNet()}
        self.sin_meta_model.surrogate_args = {"default": {'n_hidden_nodes': 5}}
        self.sin_meta_model.model = Sin()
        self.sin_meta_model.recorder = DBCaseRecorder()

        # Training the MetaModel
        self.add("DOE_Trainer", DOEdriver())
        self.DOE_Trainer.DOEgenerator = FullFactorial()
        self.DOE_Trainer.DOEgenerator.num_levels = 25
        self.DOE_Trainer.add_parameter("sin_meta_model.x")
        self.DOE_Trainer.case_outputs = ["sin_meta_model.f_x"]
        self.DOE_Trainer.add_event("sin_meta_model.train_next")
        self.DOE_Trainer.recorders = [DBCaseRecorder()]
        self.DOE_Trainer.force_execute = True

        # MetaModel Validation
        self.add("sin_calc", Sin())
        self.add("DOE_Validate", DOEdriver())
        self.DOE_Validate.DOEgenerator = Uniform()
        self.DOE_Validate.DOEgenerator.num_samples = 100
        self.DOE_Validate.add_parameter(("sin_meta_model.x", "sin_calc.x"))
        self.DOE_Validate.case_outputs = ["sin_calc.f_x", "sin_meta_model.f_x"]
        self.DOE_Validate.recorders = [DBCaseRecorder()]
        self.DOE_Validate.force_execute = True

        # Iteration Hierarchy
        self.driver.workflow = SequentialWorkflow()
        self.driver.workflow.add(['DOE_Trainer', 'DOE_Validate'])
        self.DOE_Trainer.workflow.add('sin_meta_model')
        self.DOE_Validate.workflow.add('sin_meta_model')
        self.DOE_Validate.workflow.add('sin_calc')
Example 3
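Training small fully connected networks from the neural_net package: a multi-layer sigmoid classifier with hinge loss and a momentum optimizer on a 2-D dataset, then a regression net on a 1-D cube dataset. Data is loaded from CSV with pandas and split with the package's helpers.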
import pandas as pd

from neural_net.neural_net import NeuralNet, Layer  # as in Example 7
# Assumed: momentum lives alongside RMSProp (see Example 7's imports).
from neural_net.optimizers import momentum
from neural_net.regularizations import L1_regularization
from neural_net.weights import uniform_init
from neural_net.prepare_dataset import x_y_split, x_y_split_by_index
# The activation (sigmoid, linear) and loss (hinge, MSE) names used below are
# not imported in the source snippet, and their modules are not shown anywhere.

# Classification
train = pd.read_csv(
    'datasets/neural_net/classification/data.simple.train.100.csv')
x_train, y_train = x_y_split(train, 'cls')
test = pd.read_csv(
    'datasets/neural_net/classification/data.simple.test.100.csv')
x_test, y_test = x_y_split(test, 'cls')

nn = NeuralNet(2, weight_init=uniform_init)\
    .add_layer(Layer(10, sigmoid))\
    .add_layer(Layer(15, sigmoid))\
    .add_layer(Layer(10, sigmoid))\
    .add_layer(Layer(2, sigmoid))\
    .set_optimizer(momentum.set_params({"coef": 0.05}))\
    .set_loss(hinge)
nn.budget.set_epoch_limit(50).set_detection_limit(1.3)
nn.fit(x_train, y_train, x_test, y_test, learning_rate=0.02, batch_size=8)

# Regression
train = pd.read_csv('datasets/neural_net/regression/data.cube.train.100.csv')
x_train, y_train = x_y_split_by_index(train, -1)
test = pd.read_csv('datasets/neural_net/regression/data.cube.test.100.csv')
x_test, y_test = x_y_split_by_index(test, -1)

nn = NeuralNet(1, weight_init=uniform_init, visualize=True)\
    .add_layer(Layer(10, sigmoid))\
    .add_layer(Layer(15, sigmoid))\
    .add_layer(Layer(1, linear))\
    .set_loss(MSE)
# (The linear output layer and MSE loss are an assumed completion for the
# regression net; the source snippet is truncated at this point.)
Example 4
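A hidden-layer-width sweep: for each size in layer_sizes, a two-hidden-layer tanh network (RMSProp, L1 regularization) is built and fit n times, collecting the final train and test losses. The fragment assumes the same neural_net imports as Examples 3 and 7 plus a dataset object exposing the splits and a task_type flag; the source cuts off inside the inner loop.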
np.random.seed(123)
nns = []
output_activation = softmax if dataset.task_type == "classification" else linear
n_output_neurons = (dataset.y_train.shape[1]
                    if dataset.task_type == "classification" else 1)
loss = hinge if dataset.task_type == "classification" else MSE
error_name = 'Accuracy' if dataset.task_type == "classification" else 'MSE'
error_subplots = 2 if dataset.task_type == "classification" else 1
first_layer_size = 10 if dataset.task_type == "classification" else 50
layer_sizes = [10, 20, 50]
layers_error = []
for layer_size in layer_sizes:
    nn = NeuralNet(dataset.x_train.shape[1], weight_init=uniform_init,
                   name=f"layers: {first_layer_size}, {layer_size}",
                   is_regression=dataset.task_type == "regression") \
        .add_layer(Layer(first_layer_size, tanh)) \
        .add_layer(Layer(layer_size, tanh)) \
        .add_layer(Layer(n_output_neurons, output_activation)) \
        .set_optimizer(RMSProp.set_params({"coef": 0.9})) \
        .set_regularization(L1_regularization.set_params({"coef": 0.0001})) \
        .set_loss(loss)
    nn.budget.set_epoch_limit(100).set_detection_limit(3)
    n = 10
    errors = [np.empty(n), np.empty(n), np.empty(n), np.empty(n)]
    for i in range(n):
        nn.fit(dataset.x_train,
               dataset.y_train,
               dataset.x_test,
               dataset.y_test,
               learning_rate=0.01,
               batch_size=32)
        errors[0][i] = nn.get_loss_train()[-1]
        errors[1][i] = nn.get_loss_test()[-1]
Example 5
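The entry point of a cartpole reinforcement-learning project: it sets up file logging, builds the environment and a NeuralNet policy network, attaches TensorBoard, and hands both to a Trainer for training and/or testing according to TrainingParams.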
import logging
import logging.config
import os
import warnings

from cartpole_params import NeuralNetParams, TrainingParams
from cartpole_environment.environment import Environment
from neural_net.neural_net import NeuralNet
from cartpole_trainer.trainer import Trainer

if __name__ == "__main__":
    logfile = os.path.join('logs', 'logging_file.log')
    print(logfile)
    if not os.path.isdir('logs'):
        os.makedirs('logs')
    logging.config.fileConfig('logging.conf', defaults={'logfile': logfile})
    logging.debug("Starting main")
    warnings.filterwarnings('ignore')
    neural_net_params = NeuralNetParams()
    training_params = TrainingParams()
    environment = Environment(training_params.render_environment)

    neural_net = NeuralNet()
    neural_net.initialize(neural_net_params.state_size,
                          environment.get_action_size(),
                          training_params.learning_rate)
    neural_net.setup_tensorboard(training_params.tensorboard_folder)
    trainer = Trainer(training_params.model_path)
    if training_params.train:
        trainer.train_model(environment, neural_net,
                            training_params.max_training_episodes,
                            training_params.gamma)
    if training_params.test:
        trainer.test_model(environment, neural_net,
                           training_params.max_testing_episodes)
Example 6
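A PyTorch MNIST fragment (the body of a main/training function): the training set is normalized and batched with a DataLoader, a custom NeuralNet/Trainer pair is configured from plain dicts, and training runs over the loader behind a tqdm progress bar. BATCH_SIZE, IMG_SIZE and DEBUG_MODE are constants defined outside the fragment.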
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5,), (0.5,))])

    # read training labels and images
    trainset = datasets.MNIST('dataset/', download=True,
                              train=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                              shuffle=True)

    # set up neural net and optimizer config
    nn_config = {'input_size': IMG_SIZE, 'output_size': 10,
                 'num_hidden_layers': 2, 'hidden_layer_sizes': (128, 16),
                 'dropout': 0.2}
    optim_config = {'learning_rate': 0.005, 'momentum': 0.9}

    # initialize nn and trainer
    nn = NeuralNet(nn_config)
    trainer = Trainer(nn, optim_config)

    # train model
    with tqdm(total=len(trainloader)) as pbar:
        for ii, (imgs, labels) in enumerate(trainloader):

            loss = trainer.train(imgs.view(imgs.shape[0], -1), labels)

            if DEBUG_MODE and (ii % 10) == 9:
                print(loss)

            pbar.update(1)

    # read test labels and images
    testset = datasets.MNIST('dataset/', download=True,
                             train=False, transform=transform)
    # (train=False and the closing of the call are an assumed completion;
    # the source snippet is truncated at this point.)
Example 7
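A compact classification run with the neural_net package: one tanh hidden layer and a softmax output trained with RMSProp, L1 regularization and log loss for three epochs, followed by plotting the data and test-loss curves with Plotter.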
import pandas as pd

from neural_net.neural_net import NeuralNet, Layer
from neural_net.optimizers import RMSProp
from neural_net.regularizations import L1_regularization
from neural_net.weights import uniform_init
from neural_net.prepare_dataset import x_y_split

from neural_net.plot import Plotter
# tanh, softmax, LogLoss and MSE are used below but not imported in the
# source snippet; their modules are not shown anywhere.

train = pd.read_csv(
    'datasets/neural_net/classification/data.simple.train.1000.csv')
test = pd.read_csv(
    'datasets/neural_net/classification/data.simple.test.1000.csv')
x_train, y_train = x_y_split(train, 'cls')
x_test, y_test = x_y_split(test, 'cls')

nn = NeuralNet(2, weight_init=uniform_init)\
    .add_layer(Layer(20, tanh))\
    .add_layer(Layer(2, softmax))\
    .set_optimizer(RMSProp.set_params({"coef": 0.1}))\
    .set_regularization(L1_regularization.set_params({"coef": 0.05}))\
    .set_loss(LogLoss)
nn.budget.set_epoch_limit(3).set_detection_limit(1.3)
nn.fit(x_train, y_train, x_test, y_test, learning_rate=0.02, batch_size=32)

print(f'MSE: {MSE.compute_loss(nn.predict(x_train), y_train)}')

plotter = Plotter(x_test, y_test, [nn])
plotter.plot_data_1d()
plotter.plot_measure_results_data(NeuralNet.get_loss_test, "LogLoss")
plotter.plot_measure_results_data(NeuralNet.get_MSE_test, "MSE test")
Example 8
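Two MNIST runs with the neural_net package comparing hidden-layer sizes; pixels are scaled to [0, 1] by dividing by 255, and accuracy is computed on a hold-out validation split. The snippet opens mid-statement (its head is reconstructed below) and is truncated at the end.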
# The snippet opens mid-statement; the head below is reconstructed from the
# dangling arguments and the x_val / y_val names used later. The
# train_test_split import and the left-hand side are assumptions.
from sklearn.model_selection import train_test_split

x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  train_size=35000,
                                                  random_state=123)


def accuracy_of_model(nn):
    res = nn.predict(x_val / 255)
    res = np.argmax(res, axis=1)
    y_numeric = np.argmax(y_val, axis=1)
    return np.sum(y_numeric.transpose() == res) / y_numeric.shape[0]


np.random.seed(123)
nn = NeuralNet(x_train.shape[1], weight_init=uniform_init, name="mnist",
               is_regression=False) \
    .add_layer(Layer(180, sigmoid)) \
    .add_layer(Layer(40, sigmoid)) \
    .add_layer(Layer(y_train.shape[1], softmax)) \
    .set_loss(MSE)
nn.budget.set_epoch_limit(500)
nn.fit(x_train / 255,
       y_train,
       x_val / 255,
       y_val,
       learning_rate=0.002,
       batch_size=128)
print(accuracy_of_model(nn))

np.random.seed(123)
nn = NeuralNet(x_train.shape[1], weight_init=uniform_init, name="mnist",
               is_regression=False) \
    .add_layer(Layer(250, sigmoid)) \
    .add_layer(Layer(90, sigmoid)) \
    .add_layer(Layer(y_train.shape[1], softmax)) \
    .set_loss(MSE)
# (The softmax output layer and loss are an assumed completion mirroring the
# first network; the source snippet is truncated at this point.)