Example #1
 def __init__(self,
              input_dim: int,
              neuron_numbers: list,
              activation_functions: list,
              loss_function,
              learning_rate: float,
              optimizer=None,
              bias=True,
              seed=42):
     """
     :param input_dim: dimensionality of a single input observation
     :param neuron_numbers: list with the number of neurons in each consecutive layer
     :param activation_functions: list of activation function names, one per layer
     :param loss_function: name of the loss function, resolved via lf.text2func
     :param learning_rate: step size used by the optimizer
     :param optimizer: optimizer instance; a fresh optimizers.Optimizer() when None
     :param bias: whether a bias term is fitted in each layer
     :param seed: seed for NumPy's random number generator
     """
     np.random.seed(seed)
     self.learning_rate = learning_rate
     self.neuron_numbers = neuron_numbers
     self.activation_functions = activation_functions
     self.loss_function, self.loss_function_derivative = lf.text2func(
         loss_function)
     self.cache = {}
     self.loss_on_iteration = None
     self.X = None
     self.y = None
     self.n = input_dim  # dimension of observations
     self.m = None  # number of observations
     self.layers = None
     self.bias = bias
     # avoid the shared-mutable-default pitfall: fresh optimizer per instance
     self.optimizer = optimizer if optimizer is not None else optimizers.Optimizer()
     self.create_layers(self.bias)
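
For reference, a minimal instantiation sketch, assuming this __init__ belongs to the NeuralNetworkCore class used in Example #4 (whose call site matches this signature) and reusing the hyperparameter values from Example #3:

# A minimal sketch, not part of the original source. Assumes the string
# names below are registered in lf.text2func / the activations module.
core = NeuralNetworkCore(input_dim=2,
                         neuron_numbers=[4, 4, 2],
                         activation_functions=['relu', 'relu', 'sigmoid'],
                         loss_function='logistic_loss',
                         learning_rate=0.01,
                         optimizer=optimizers.Optimizer(),
                         bias=True,
                         seed=42)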
Example #2
def main():
    """ Run training and export summaries to data_dir/logs for a single test
    setup and a single set of parameters. Summaries include a) TensorBoard
    summaries, b) the latest train/test accuracies and raw edit distances
    (status.txt), c) the latest test predictions along with test ground-truth
    labels (test_label_seqs.pkl, test_prediction_seqs.pkl), d) visualizations
    as training progresses (test_visualizations_######.png)."""

    args = define_and_process_args()
    print('\n', 'ARGUMENTS', '\n\n', args, '\n')

    log_dir = get_log_dir(args)
    print('\n', 'LOG DIRECTORY', '\n\n', log_dir, '\n')

    standardized_data_path = os.path.join(args.data_dir, args.data_filename)
    if not os.path.exists(standardized_data_path):
        message = '%s does not exist.' % standardized_data_path
        raise ValueError(message)

    dataset = data.Dataset(standardized_data_path)
    train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)
    train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]
    test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]

    train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)
    test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)

    # resolve the model class named by args.model_type from the models module
    Model = getattr(models, args.model_type + 'Model')
    input_size = dataset.input_size
    target_size = dataset.num_classes

    # This is just to satisfy a low-CPU requirement on our cluster
    # when using GPUs.
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        config = tf.ConfigProto(intra_op_parallelism_threads=2,
                                inter_op_parallelism_threads=2)
    else:
        config = None

    with tf.Session(config=config) as sess:
        model = Model(input_size, target_size, args.num_layers,
                      args.hidden_layer_size, args.init_scale,
                      args.dropout_keep_prob)
        optimizer = optimizers.Optimizer(model.loss, args.num_train_sweeps,
                                         args.initial_learning_rate,
                                         args.num_initial_sweeps,
                                         args.num_sweeps_per_decay,
                                         args.decay_factor,
                                         args.max_global_grad_norm)
        train(sess, model, optimizer, log_dir, args.batch_size,
              args.num_sweeps_per_summary, args.num_sweeps_per_save,
              train_input_seqs, train_reset_seqs, train_label_seqs,
              test_input_seqs, test_reset_seqs, test_label_seqs)
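
The getattr lookup above resolves a model class from the string in args.model_type. A minimal, self-contained sketch of that pattern (the module contents and class name here are hypothetical stand-ins):

# Hypothetical stand-ins to illustrate string-to-class resolution.
import types

class LSTMModel:
    """Placeholder model class."""

models = types.SimpleNamespace(LSTMModel=LSTMModel)
model_type = 'LSTM'
Model = getattr(models, model_type + 'Model')
assert Model is LSTMModel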
Example #3
def main():
    data = pd.read_csv("./projekt1/classification/data.simple.train.1000.csv")

    X = np.array(data.loc[:, ['x', 'y']])
    y = data.cls.to_numpy()  # copy the labels out of the DataFrame
    y -= 1  # labels in the file are 1-based; shift them to 0-based
    # one-hot encoding
    y_ohc = np.zeros((y.size, int(np.max(y)) + 1))
    y_ohc[np.arange(y.size), y.astype(int)] = 1
    y = y_ohc

    from sklearn.preprocessing import StandardScaler
    ss = StandardScaler()
    X = ss.fit_transform(X)  # standardize features to zero mean, unit variance

    input_dim = 2
    neuron_numbers = [4, 4, 2]
    activation_functions = ['relu', 'relu', 'sigmoid']
    loss_function = 'logistic_loss'
    learning_rate = 0.01
    optimizer = optimizers.Optimizer()
    batch_size = 128
    val_split = 0.1
    num_epochs = 50
    seed = 42
    dataset_name = "test"
    experiment_name = "test1"

    experiment_dict = {
        "input_dim": input_dim,
        "neuron_numbers": neuron_numbers,  # neurons in consecutive layers
        "activation_functions": activation_functions,
        "loss_function": loss_function,
        "learning_rate": learning_rate,
        "optimizer": optimizer,
        "batch_size": batch_size,
        "validation_split": val_split,
        "num_epochs": num_epochs,
        "seed": seed,
        "dataset_name": dataset_name,
        "experiment_name": experiment_name
    }

    output = experiments_pipeline(X, y, experiment_dict, True)
    print(read_experiment(experiment_dict['experiment_name'] + '_' +
                          experiment_dict['dataset_name'] + '.json'))
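
As a side note on the one-hot step in this example: assuming integer labels that start at 0, an equivalent and more compact construction indexes rows of an identity matrix:

# Equivalent one-hot encoding via identity-matrix indexing (sketch).
import numpy as np

y = np.array([0, 1, 0, 1])        # example 0-based integer labels
num_classes = int(np.max(y)) + 1
y_ohc = np.eye(num_classes)[y]    # row i of the identity is the one-hot vector for class i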
Example #4
 def __init__(self,
              input_dim,
              neuron_numbers,
              activation_functions,
              loss_function,
              learning_rate,
              optimizer=None,
              batch_size=1,
              bias=True,
              seed=42):
     """
     Wrapper for the NeuralNetwork class.
     :param input_dim: input size
     :param neuron_numbers: list of hidden layers' sizes
     :param activation_functions: list of activation functions, one per layer
     :param loss_function: name of the loss function
     :param learning_rate: step size used by the optimizer
     :param optimizer: optimizer instance; a fresh optimizers.Optimizer() when None
     :param batch_size: number of observations per training batch
     :param bias: boolean toggling whether a bias term is fitted
     :param seed: seed for the random number generator
     """
     random.seed(seed)
     self.learning_rate = learning_rate
     self.batch_size = batch_size
     # avoid the shared-mutable-default pitfall: fresh optimizer per instance
     if optimizer is None:
         optimizer = optimizers.Optimizer()
     self.NN = NeuralNetworkCore(input_dim,
                                 neuron_numbers,
                                 activation_functions,
                                 loss_function,
                                 learning_rate,
                                 optimizer,
                                 bias,
                                 seed=seed)
     self.loss_on_epoch = []
     self.validation_split = None
     self.loss_on_epoch_valid = None
     self.cache_weights_on_epoch = None
     self.test_rmse = None
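
For reference, the wrapper is driven the same way perform_experiment does below; a minimal usage sketch, where X_train and y_train are hypothetical NumPy arrays:

# A minimal sketch; the train() signature mirrors the call in
# perform_experiment below, and the data here is hypothetical.
NN = NeuralNetworkWrapper(2, [4, 4, 2],
                          ['relu', 'relu', 'sigmoid'],
                          'logistic_loss',
                          learning_rate=0.01,
                          batch_size=128)
NN.train(X_train, y_train, 50, validation_split=0.1, verbosity=False)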
def perform_experiment(dataset, d, exp_objective, exp_values, num_reps):
    """
    """
    X_train = dataset['X_train']
    y_train = dataset['y_train']
    X_test = dataset['X_test']
    y_test = dataset['y_test']

    d = d.copy()

    for k in exp_values.keys():
        d[k] = {}
        d[k]['test_rmse'] = []

    for i in range(num_reps):

        for k, v in exp_values.items():
            # defaults taken from the base configuration d; the branch below
            # overrides the one hyperparameter selected by exp_objective
            activations = (['relu'] * (len(d['neuron_numbers']) - 1) +
                           d['output_activation'])
            loss_function = d['loss_function']
            learning_rate = d['learning_rate']
            optimizer = optimizers.Optimizer()
            batch_size = d['batch_size']

            if exp_objective == 'lr':
                learning_rate = v
            elif exp_objective == 'activation_function':
                # v is expected to be a one-element list, e.g. ['relu']
                activations = (v * (len(d['neuron_numbers']) - 1) +
                               d['output_activation'])
            elif exp_objective == 'inertia':
                optimizer = optimizers.GDwithMomentum(v)
            elif exp_objective == 'batch_size':
                batch_size = v
            elif exp_objective == 'loss_func':
                loss_function = v
            else:
                raise ValueError('unknown exp_objective: %s' % exp_objective)

            NN = NeuralNetworkWrapper(d['input_dim'],
                                      d['neuron_numbers'],
                                      activations,
                                      loss_function,
                                      learning_rate,
                                      optimizer,
                                      batch_size,
                                      seed=(d['seed'] + i))

            NN.train(X_train,
                     y_train,
                     d['num_epochs'],
                     validation_split=0,
                     test_rmse=(X_test, y_test),
                     verbosity=False)

            d[k]['test_rmse'].append(NN.test_rmse)

    for k in exp_values.keys():
        # aggregate results across repetitions (one row per repetition)
        rmse = np.array(d[k]['test_rmse']).T
        d[k]['test_rmse_mean'] = np.mean(rmse, axis=1)
        d[k]['test_rmse_std'] = np.std(rmse, axis=1)

        d[k] = {
            "RMSE": d[k]['test_rmse_mean'],
            "RMSE std": d[k]['test_rmse_std'],
            "Best RMSE": np.min(d[k]['test_rmse_mean']),
            "Best RMSE std": d[k]['test_rmse_std'][np.argmin(d[k]['test_rmse_mean'])]
        }

    return {k: d[k] for k in exp_values.keys()}
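
A minimal call sketch. The dataset keys mirror those read at the top of the function and the config keys mirror those accessed in the branches; all concrete values are hypothetical:

# Hypothetical configuration; only the dict keys are taken from the code above.
dataset = {'X_train': X_train, 'y_train': y_train,
           'X_test': X_test, 'y_test': y_test}
d = {'input_dim': 2,
     'neuron_numbers': [4, 4, 1],
     'output_activation': ['sigmoid'],  # appended after the hidden-layer activations
     'loss_function': 'logistic_loss',
     'learning_rate': 0.01,
     'batch_size': 128,
     'num_epochs': 50,
     'seed': 42}
results = perform_experiment(dataset, d, 'lr',
                             {'small': 0.001, 'large': 0.1}, num_reps=3)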