    def test_model_iter(self):
        np.random.seed(self.seed)
        random.set_seed(self.seed)

        self.results_1 = []
        models = [build_model_1, build_model_2, build_model_3]
        # loop over three models
        for build_fn in models:
            model = KerasRegressor(build_fn=build_fn,
                                   epochs=100,
                                   batch_size=20,
                                   verbose=0,
                                   shuffle=False)
            kf = KFold(n_splits=3)
            result = cross_val_score(model, self.X, self.y, cv=kf)
            self.results_1.append(result)

        np_testing.assert_array_almost_equal(
            np.array(self.results_1).mean(axis=1),
            np.array(self.exercise.results_1).mean(axis=1),
            decimal=0)
        np_testing.assert_array_almost_equal(
            np.array(self.results_1).std(axis=1),
            np.array(self.exercise.results_1).std(axis=1),
            decimal=0)
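
The build_model_1/2/3 factories referenced above live in the exercise module; as a hedged sketch only, a build_fn compatible with KerasRegressor might look like the following (layer sizes and input_dim are illustrative, not the exercise's actual values):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Hypothetical sketch of a build_fn such as build_model_1; KerasRegressor
# only needs a callable that returns a compiled Keras model.
def build_model_1(input_dim=10):  # input_dim is illustrative
    model = Sequential()
    model.add(Dense(8, activation='relu', input_dim=input_dim))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model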
Example #2
    def test_model_3(self):
        np.random.seed(self.seed)
        random.set_seed(self.seed)

        model_3 = Sequential()
        model_3.add(
            Dense(16, activation='relu', input_dim=self.X_train.shape[1]))
        model_3.add(Dropout(0.2))
        model_3.add(Dense(12, activation='relu'))
        model_3.add(Dropout(0.1))
        model_3.add(Dense(8, activation='relu'))
        model_3.add(Dropout(0.1))
        model_3.add(Dense(4, activation='relu'))
        model_3.add(Dropout(0.1))
        model_3.add(Dense(1, activation='sigmoid'))

        # Choose the loss function to be binary cross entropy and the optimizer to be SGD for training the model
        model_3.compile(optimizer='sgd', loss='binary_crossentropy')
        # train the model
        model_3.fit(self.X_train,
                    self.y_train,
                    epochs=300,
                    batch_size=50,
                    verbose=0)
        loss = model_3.evaluate(self.X_test, self.y_test)
        ex_loss = self.exercise.model_3.evaluate(self.exercise.X_test,
                                                 self.exercise.y_test)
        np_testing.assert_approx_equal(loss, ex_loss, significant=0)

    def test_batch_epoch_iter(self):
        np.random.seed(self.seed)
        random.set_seed(self.seed)

        self.results_2 = []
        epochs = [100, 150]
        batches = [20, 15]

        # Loop over all combinations of epochs and batch_size
        for n_epochs in epochs:
            for n_batch in batches:
                model = KerasRegressor(build_fn=build_model_2,
                                       epochs=n_epochs,
                                       batch_size=n_batch,
                                       verbose=0,
                                       shuffle=False)
                kf = KFold(n_splits=3)
                result = cross_val_score(model, self.X, self.y, cv=kf)
                self.results_2.append(result)

        np_testing.assert_array_almost_equal(
            np.array(self.results_2).mean(axis=1),
            np.array(self.exercise.results_2).mean(axis=1),
            decimal=0)
        np_testing.assert_array_almost_equal(
            np.array(self.results_2).std(axis=1),
            np.array(self.exercise.results_2).std(axis=1),
            decimal=0)
def seed_everything():
    '''
    Seeds everything for reproducible results
    :return: None
    '''
    np.random.seed(42)
    set_seed(42)
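
A minimal call-site sketch, with the imports the helper relies on made explicit (assumed, since the snippet omits them):

# Assumed imports for seed_everything; call it once, before building any
# model or data pipeline, so initialization is reproducible.
import numpy as np
from tensorflow.random import set_seed

seed_everything()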
Example #5
    def test_opt_act_iter(self):
        np.random.seed(self.seed)
        random.set_seed(self.seed)
        self.results_3 = []
        optimizers = ['adam', 'sgd', 'rmsprop']
        # loop over optimizers
        for optimizer in optimizers:
            # forward the optimizer through sk_params (this assumes
            # build_model_2 accepts an `optimizer` keyword argument)
            regressor = KerasRegressor(build_fn=build_model_2,
                                       optimizer=optimizer,
                                       epochs=100,
                                       batch_size=50,
                                       verbose=0)
            model = make_pipeline(StandardScaler(), regressor)
            kfold = KFold(n_splits=self.n_folds,
                          shuffle=True,
                          random_state=self.seed)
            result = cross_val_score(model, self.X, self.y, cv=kfold)
            self.results_3.append(result)

        np_testing.assert_almost_equal(
            np.array(self.results_3).mean(axis=1),
            np.array(self.activity.results_3).mean(axis=1),
            decimal=0)
        np_testing.assert_almost_equal(
            np.array(self.results_3).std(axis=1),
            np.array(self.activity.results_3).std(axis=1),
            decimal=0)
Example #6
    def test_model_iter(self):
        np.random.seed(self.seed)
        random.set_seed(self.seed)

        self.results_1 = []
        models = [build_model_1, build_model_2, build_model_3]
        for build_fn in models:
            regressor = KerasRegressor(build_fn=build_fn,
                                       epochs=100,
                                       batch_size=50,
                                       verbose=0)
            model = make_pipeline(StandardScaler(), regressor)
            kfold = KFold(n_splits=self.n_folds,
                          shuffle=True,
                          random_state=self.seed)
            result = cross_val_score(model, self.X, self.y, cv=kfold)
            self.results_1.append(result)

        np_testing.assert_almost_equal(
            np.array(self.results_1).mean(axis=1),
            np.array(self.activity.results_1).mean(axis=1),
            decimal=0)
        np_testing.assert_almost_equal(
            np.array(self.results_1).std(axis=1),
            np.array(self.activity.results_1).std(axis=1),
            decimal=0)

    def test_opt_act_iter(self):
        np.random.seed(self.seed)
        random.set_seed(self.seed)

        self.results_3 = []
        activations = ['relu', 'tanh']
        optimizers = ['sgd', 'adam', 'rmsprop']
        for optimizer in optimizers:
            for activation in activations:
                # forward the pair through sk_params (this assumes
                # build_model_2_mod accepts `optimizer` and `activation`)
                model = KerasRegressor(build_fn=build_model_2_mod,
                                       optimizer=optimizer,
                                       activation=activation,
                                       epochs=100,
                                       batch_size=20,
                                       verbose=0,
                                       shuffle=False)
                kf = KFold(n_splits=3)
                result = cross_val_score(model, self.X, self.y, cv=kf)
                self.results_3.append(result)

        np_testing.assert_array_almost_equal(
            np.array(self.results_3).mean(axis=1),
            np.array(self.exercise.results_3).mean(axis=1),
            decimal=0)
        np_testing.assert_array_almost_equal(
            np.array(self.results_3).std(axis=1),
            np.array(self.exercise.results_3).std(axis=1),
            decimal=0)
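
Forwarding optimizer and activation through sk_params, as in the loop above, only works if the build function accepts them by name; a hedged sketch of the signature build_model_2_mod would need (layer sizes are illustrative):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Hypothetical shape of build_model_2_mod; KerasRegressor filters its
# keyword arguments against the build function's signature and passes
# the matching ones through.
def build_model_2_mod(optimizer='adam', activation='relu'):
    model = Sequential()
    model.add(Dense(8, activation=activation, input_dim=10))  # sizes illustrative
    model.add(Dense(1))
    model.compile(optimizer=optimizer, loss='mean_squared_error')
    return model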
Example #8
 def test_param_set_4(self):
     def build_model(rate):
         model = Sequential()
         model.add(Dense(10, input_dim=self.X.shape[1], activation='relu'))
         model.add(Dropout(rate))
         model.add(Dense(6, activation='relu'))
         model.add(Dropout(rate))
         model.add(Dense(4, activation='relu'))
         model.add(Dropout(rate))
         model.add(Dense(1, activation='sigmoid'))
         model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
         return model
     
     np.random.seed(self.seed)
     random.set_seed(self.seed)
     
     model = KerasClassifier(build_fn=build_model, verbose=0, shuffle=False)
     rate = [0.0, 0.05, 0.1]
     epochs = [100]
     batch_size = [20]
     param_grid = dict(rate=rate, epochs=epochs, batch_size=batch_size)
     grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=5)
     self.results_4 = grid_search.fit(self.X, self.y)
     
     np_testing.assert_approx_equal(self.activity.results_4.best_score_,
                                    self.results_4.best_score_, significant=2)
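
Beyond best_score_, the fitted search object also reports which candidate won; a short usage sketch (the printed values are illustrative):

# GridSearchCV.fit returns the fitted search object itself, so the
# winning candidate can be inspected directly after the call above.
print(grid_search.best_params_)  # e.g. {'batch_size': 20, 'epochs': 100, 'rate': 0.05}
print(grid_search.best_score_)   # mean cross-validated score of that candidate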
Example #9
    def test_batch_epoch_iter(self):
        np.random.seed(self.seed)
        random.set_seed(self.seed)
        self.results_2 = []
        epochs = [80, 100]
        batches = [50, 25]
        for n_epochs in epochs:
            for n_batch in batches:
                regressor = KerasRegressor(build_fn=build_model_2,
                                           epochs=n_epochs,
                                           batch_size=n_batch,
                                           verbose=0)
                model = make_pipeline(StandardScaler(), regressor)
                kfold = KFold(n_splits=self.n_folds,
                              shuffle=True,
                              random_state=self.seed)
                result = cross_val_score(model, self.X, self.y, cv=kfold)
                self.results_2.append(result)

        np_testing.assert_almost_equal(
            np.array(self.results_2).mean(axis=1),
            np.array(self.activity.results_2).mean(axis=1),
            decimal=0)
        np_testing.assert_almost_equal(
            np.array(self.results_2).std(axis=1),
            np.array(self.activity.results_2).std(axis=1),
            decimal=0)
def model_fit_eval(
    train_paths_labels,
    val_paths_labels,
    test_paths_labels,
    eval_table=None,
    table=None,
    _resize=[250, 250],
    norm=255.0,
    batch_size=128,
    filters=4,
    lr=1e-3,
    epochs=30,
    verbose=1, 
    pretrained_weights=None,
    model_path=None,
    distance=absolute_distance,
    distance_output_shape=None,
    prediction_activation='sigmoid',
    train_ds=None,
    val_ds=None,
    callbacks=None,
    steps_per_epoch=None,
    validation_steps=None,
    prefix='',
#     shuffle=True,
    patience=3,
    kernel_initializer=initialize_weights,
    kernel_initializer_d=initialize_weights_dense,
    kernel_regularizer=l2(2e-4),
    kernel_regularizer_d=l2(1e-3),
    bias_initializer=initialize_bias,
    kernel_size_list=[(10, 10), (7, 7), (4, 4), (4, 4)],
    units=4*64,
    optimizer=None,
    loss='binary_crossentropy',
    metrics=['accuracy', Precision(name='Precision'), Recall(name='Recall')],
    tensorboard_histogram_freq=1,
    random_seed=2,
):
    seed(random_seed)
    set_seed(random_seed)
    model, _ = model_fit(table=table, train_paths_labels=train_paths_labels,
                            val_paths_labels=val_paths_labels, _resize=_resize, norm=norm,
                            batch_size=batch_size, filters=filters, lr=lr, epochs=epochs,
                            loss=loss, metrics=metrics, verbose=verbose,
                            pretrained_weights=pretrained_weights, model_path=model_path,
                            prediction_activation=prediction_activation, 
                            distance=distance, distance_output_shape=distance_output_shape,
                            train_ds=train_ds, val_ds=val_ds, callbacks=callbacks,
                            steps_per_epoch=steps_per_epoch, validation_steps=validation_steps,
                            prefix=prefix, patience=patience, tensorboard_histogram_freq=tensorboard_histogram_freq,
                        )
    scores = model_evaluate(model, images_labels_paths=test_paths_labels, norm=norm, _resize=_resize, verbose=verbose)
    if eval_table is not None:
        eval_table.add_row(scores)
        print(eval_table)
    else:
        print(scores)

    return model
Example #11
def func(X, y, n_fold, n_hid, n_epo, seed=12345):
    '''
    Return the mean train and test MSE across `n_fold` KFold splits.
    '''
    train_score = []
    test_score = []
    set_seed(seed)
    kf = KFold(n_splits=n_fold)
    for train_index, test_index in kf.split(X):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Dense(n_hid, activation=rbf,
                                        input_dim=1))  # hidden layer with RBF activation
        model.add(tf.keras.layers.Dense(1))
        model.compile(loss='mean_squared_error',
                      optimizer=tf.keras.optimizers.Adam(0.01))
        model.fit(X_train, y_train, epochs=n_epo, verbose=0)

        error_train = tf.keras.losses.MSE(
            model.predict(X_train).flatten(), y_train).numpy()
        error_test = tf.keras.losses.MSE(
            model.predict(X_test).flatten(), y_test).numpy()
        train_score.append(error_train)
        test_score.append(error_test)
    return np.mean(train_score), np.mean(test_score)
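
A usage sketch on toy 1-D data (assumes the rbf activation used inside func is defined in the same module):

# Toy regression problem: recover sin(x) from noisy samples and compare
# the average train/test MSE across the folds.
import numpy as np

X = np.linspace(-3, 3, 200).reshape(-1, 1)
y = np.sin(X).ravel() + 0.1 * np.random.randn(200)
train_mse, test_mse = func(X, y, n_fold=5, n_hid=10, n_epo=200)
print(train_mse, test_mse)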
Example #12
def parametric_bootstrap(boots,
                         data,
                         fit,
                         evaluate,
                         predict,
                         sigma=0,
                         seed=23):
    """
    Using a trained network, we 
      1) predict parameters theta
      2) simulate data from these parameters
      3) predict with simulated data
      4) get confidence intervals from empirical distribution of these errors
    To simulate data we will (deterministically) map theta -> u
    and then throw noise on top of u many times.
    """
    # unpack data, fit to training data
    train, test = data
    set_seed(seed)
    fit(train)
    print('test mse:', evaluate(test))
    theta_hats = dict()

    Phi, theta = test
    for b in range(boots):
        boot_Phi, _ = add_noise(Phi, sigma, seed=b)
        boot_test = (boot_Phi, theta)
        theta_hats[b] = predict(boot_test)
    return theta_hats
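
Step 4 of the docstring (the confidence intervals) is left to the caller; a minimal sketch under the assumption that each theta_hats[b] is an array of parameter estimates:

# Percentile confidence intervals from the bootstrap replicates.
import numpy as np

def bootstrap_ci(theta_hats, alpha=0.05):
    draws = np.stack(list(theta_hats.values()))   # shape: (boots, ...)
    lower = np.quantile(draws, alpha / 2, axis=0)
    upper = np.quantile(draws, 1 - alpha / 2, axis=0)
    return lower, upper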
Example #13
def setup_seed():
    os.environ["TF_DETERMINISTIC_OPS"] = "1"
    os.environ["PYTHONHASHSEED"] = str(SEED)
    random.seed(SEED)
    np.random.seed(SEED)
    set_seed(SEED)
    return
Example #14
def dnn(
    input_x,
    output_length=1,
    seed=2020,
    output_activation="linear",
    loss="mse",
    metrics="mse",
):
    """Define a DNN model architecture using Keras.

    Args:
        input_x (int): Number of features.
        output_length (int): Number of output steps.
        seed (int): Seed for random initialization of weights.
        output_activation: Activation function for outputs.
        loss: Loss function used for training.
        metrics: Metrics tracked during training.

    Returns:
        model (keras model): Model to be trained.

    """

    set_seed(seed)

    model = models.Sequential()
    model.add(layers.Dense(16, activation="relu", input_dim=input_x))
    # model.add(layers.Dense(256, activation='relu', input_dim=input_x))
    model.add(layers.Dense(16, activation="relu"))
    # model.add(layers.Dense(32, activation='relu'))
    # model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(output_length, activation=output_activation))
    model.compile(optimizer="adam", loss=loss, metrics=metrics)

    return model
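
A quick smoke test of the factory on synthetic data (shapes and sample counts are illustrative):

# Synthetic check: 8 features in, 1 output step out.
import numpy as np

model = dnn(input_x=8)
X = np.random.rand(64, 8)
y = np.random.rand(64, 1)
model.fit(X, y, epochs=2, verbose=0)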
Example #15
def set_seeds(seed_val=42):
    '''fix seeds for reproducibility.

    '''
    from numpy.random import seed
    seed(seed_val)
    from tensorflow import random
    random.set_seed(seed_val)
Example #16
def Autoencoder(X_train,
                act_func='elu',
                NUM_EPOCHS=100,
                BATCH_SIZE=10,
                visualize=True):
    seed(10)
    set_seed(10)

    # Input layer:
    model = Sequential()
    # First hidden layer, connected to input vector X.
    model.add(
        Dense(10,
              activation=act_func,
              kernel_initializer='glorot_uniform',
              kernel_regularizer=regularizers.l2(0.0),
              input_shape=(X_train.shape[1], )))

    model.add(
        Dense(2, activation=act_func, kernel_initializer='glorot_uniform'))

    model.add(
        Dense(10, activation=act_func, kernel_initializer='glorot_uniform'))

    model.add(Dense(X_train.shape[1], kernel_initializer='glorot_uniform'))

    model.compile(loss='mse', optimizer='adam')

    history = model.fit(np.array(X_train),
                        np.array(X_train),
                        batch_size=BATCH_SIZE,
                        epochs=NUM_EPOCHS,
                        validation_split=0.05,
                        verbose=1)

    X_pred = model.predict(np.asarray(X_train))
    X_pred = pd.DataFrame(X_pred, columns=X_train.columns)
    X_pred.index = X_train.index

    scored = pd.DataFrame(index=X_train.index)
    scored['Loss_mae'] = np.mean(np.abs(X_pred - X_train), axis=1)

    if visualize:
        plt.plot(history.history['loss'], 'b', label='Training loss')
        plt.plot(history.history['val_loss'], 'r', label='Validation loss')
        plt.legend(loc='upper right')
        plt.xlabel('Epochs')
        plt.ylabel('Loss, [mse]')
        plt.ylim([0, .1])
        plt.show()

        plt.figure()
        sns.distplot(scored['Loss_mae'], bins=10, kde=True, color='blue')
        plt.xlim([0.0, .5])

    return model
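
The reconstruction error in scored['Loss_mae'] is what downstream anomaly detection keys on; a minimal thresholding sketch (the 99th-percentile cut-off is an assumption, not part of the original, and scored would need to be returned or recomputed since the function only returns the model):

# Flag the largest reconstruction errors as anomalies; the quantile
# threshold is an illustrative choice.
threshold = scored['Loss_mae'].quantile(0.99)
anomalies = scored[scored['Loss_mae'] > threshold]
print(len(anomalies), 'rows above threshold', threshold)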
Example #17
def create_model(inputSize, hiddenDropout, visibleDropout, noBlocks,
                 noDenseLayer, increaseFilters, learning_rate, pooling_type,
                 architecture_type):
    # Set random seeds to make situation equal for all models
    seed(_NUMPY_SEED_)
    set_seed(_TENSORFLOW_SEED_)

    noFilters = 64
    model = keras.Sequential()

    # Layers before first block
    model.add(
        tf.keras.layers.Conv2D(filters=noFilters,
                               kernel_size=(3, 3),
                               padding='same',
                               input_shape=(inputSize, inputSize, 62)))
    if (visibleDropout != 1):
        model.add(Dropout(visibleDropout))

    # layers in Blocks
    for i in range(noBlocks):
        if (increaseFilters == 1):
            noFilters = 64 * pow(2, i)
        model.add(Conv2D(filters=noFilters, kernel_size=(3, 3),
                         padding='same'))
        model.add(Conv2D(filters=noFilters, kernel_size=(3, 3),
                         padding='same'))

        if (pooling_type == 'MaxPooling'):
            model.add(MaxPooling2D(pool_size=(2, 2)))
        elif (pooling_type == 'AveragePooling'):
            model.add(AveragePooling2D(pool_size=(2, 2)))
        else:
            log('Unknown Pooling type {0} | Default Pooling is using: MaxPooling'
                .format(pooling_type))
            model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(BatchNormalization())
        if (hiddenDropout != 1):
            model.add(Dropout(hiddenDropout))

    if (architecture_type == 1):
        model.add(Dense(1024))
        model.add(Dense(1024))
        model.add(Dense(1024))
        model.add(BatchNormalization())

    # Layers after last block
    for i in range(noDenseLayer - 1):
        model.add(Dense(1024))
    # Last layer
    model.add(Dense(1))

    model_optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss='mape', optimizer=model_optimizer)
    return model
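
A hedged construction call (the hyperparameter values are illustrative; _NUMPY_SEED_ and _TENSORFLOW_SEED_ are assumed module-level constants in the original code):

# Illustrative instantiation of the model factory defined above.
model = create_model(inputSize=32, hiddenDropout=0.25, visibleDropout=0.1,
                     noBlocks=2, noDenseLayer=2, increaseFilters=1,
                     learning_rate=1e-3, pooling_type='MaxPooling',
                     architecture_type=0)
model.summary()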
Example #18
    def trainable(config, reporter):
        """
        Args:
            config (dict): Parameters provided from the search algorithm
                or variant generation.
        """
        if not isinstance(config['update_rule'], str):
            update_rule = update_rules[int(config['update_rule'])]
        else:
            update_rule = config['update_rule']
        K, N, L = int(config['K']), int(config['N']), int(config['L'])

        run_name = f"run-{get_session_num(logdir)}"
        run_logdir = join(logdir, run_name)
        # for each attack, the TPMs should start with the same weights
        initial_weights_tensors = get_initial_weights(K, N, L)
        training_steps_ls = {}
        eve_scores_ls = {}
        losses_ls = {}
        # for each attack, the TPMs should use the same inputs
        seed = tfrandom.uniform([],
                                minval=0,
                                maxval=tfint64.max,
                                dtype=tfint64).numpy()
        for attack in ['none', 'geometric']:
            initial_weights = {
                tpm: weights_tensor_to_variable(weights, tpm)
                for tpm, weights in initial_weights_tensors.items()
            }
            tfrandom.set_seed(seed)

            if tensorboard:
                attack_logdir = join(run_logdir, attack)
                attack_writer = tensorflow.summary.create_file_writer(
                    attack_logdir)
                with attack_writer.as_default():
                    training_steps, sync_scores, loss = run(
                        update_rule, K, N, L, attack, initial_weights)
            else:
                training_steps, sync_scores, loss = run(
                    update_rule, K, N, L, attack, initial_weights)
            training_steps_ls[attack] = training_steps
            eve_scores_ls[attack] = sync_scores
            losses_ls[attack] = loss
        avg_training_steps = tensorflow.math.reduce_mean(
            list(training_steps_ls.values()))
        avg_eve_score = tensorflow.math.reduce_mean(
            list(eve_scores_ls.values()))
        mean_loss = tensorflow.math.reduce_mean(list(losses_ls.values()))
        reporter(
            avg_training_steps=avg_training_steps.numpy(),
            avg_eve_score=avg_eve_score.numpy(),
            mean_loss=mean_loss.numpy(),
            done=True,
        )
Example #19
 def _build_model(self):
     set_seed(84)
     # neural net to approximate Q-value function:
     model = Sequential()
     model.add(
         Dense(5, input_dim=self.state_size,
               activation='relu'))  # 1st hidden layer; states as input
     model.add(Dense(5, activation='relu'))  # 2nd hidden layer
     model.add(Dense(self.action_size, activation='linear')
               )  # 2 actions, so 2 output neurons: 0 and 1 (L/R)
     model.compile(loss='mse', optimizer=Adam(learning_rate=self.learning_rate))
     return model

 def test_model_perf(self):
     np.random.seed(self.seed)
     random.set_seed(self.seed)
     model = KerasRegressor(build_fn=build_model,
                            epochs=100,
                            batch_size=20,
                            verbose=0)
     kf = KFold(n_splits=5)
     self.results = cross_val_score(model, self.X, self.y, cv=kf)
     np_testing.assert_array_almost_equal(self.exercise.results,
                                          self.results,
                                          decimal=0)
Example #21
def cnn(input_x, input_y, output_length=1, seed=2020, kernel_size=2):
    """Define a CNN model architecture using Keras.

    Args:
        input_x (int): Number of time steps to include in each sample, i.e. how
            much history is matched with a given target.
        input_y (int): Number of features for each time step in the input data.
        output_length (int): Number of output steps.
        seed (int): Seed for random initialization of weights.
        kernel_size (int): Size of kernel in CNN.

    Returns:
        model (keras model): Model to be trained.

    """

    set_seed(seed)

    model = models.Sequential()
    model.add(
        layers.Conv1D(filters=64,
                      kernel_size=kernel_size,
                      activation="relu",
                      input_shape=(input_x, input_y),
                      name="input_layer"))
    model.add(
        layers.Conv1D(filters=64,
                      kernel_size=kernel_size,
                      activation="elu",
                      name="conv1d_1"))
    model.add(
        layers.Conv1D(filters=64,
                      kernel_size=kernel_size,
                      activation="relu",
                      name="conv1d_2"))
    # model.add(layers.Conv1D(filters=64, kernel_size=kernel_size,
    #     activation="relu", name="conv1d_3"))
    # model.add(layers.Conv1D(filters=64, kernel_size=kernel_size,
    #     activation="relu", name="conv1d_4"))
    # model.add(layers.MaxPooling1D(pool_size=2, name="pool_1"))
    model.add(layers.Dropout(rate=0.1))
    model.add(layers.Flatten(name="flatten"))
    model.add(layers.Dense(128, activation="relu", name="dense_1"))
    model.add(layers.Dense(64, activation="relu", name="dense_2"))
    # model.add(layers.Dense(32, activation="relu", name="dense_3"))
    model.add(
        layers.Dense(output_length, activation="linear", name="output_layer"))
    model.compile(optimizer="adam", loss="mse")

    return model
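
A shape-checking sketch: each sample is a window of input_x time steps with input_y features per step (the data here is synthetic):

# Synthetic windowed data: 100 samples, 10 time steps, 3 features each.
import numpy as np

model = cnn(input_x=10, input_y=3)
X = np.random.rand(100, 10, 3)
y = np.random.rand(100, 1)
model.fit(X, y, epochs=2, verbose=0)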
Example #22
def test_correct_transformation():
    np.random.seed(1)
    set_seed(2)  # tensorflow

    nrt = initialize_transformer()
    nrt._is_fitted = True

    x = np.ones((1, 3))
    u1 = np.array([[0, 0.69286364]])

    u2 = nrt.transform(x)

    assert_allclose(u1, u2)
Example #23
def grid_sparse(data_encoding, train_data, test_data, results_file_name="grid_sparse_results.csv"):
    """Does a grid search on a sparse network.

       Trains sparse networks, finds the best threshold for each, and records
       the best score at that threshold in a csv.
    """

    # Look at this option for using tensorflow tuning
    # https://medium.com/ml-book/neural-networks-hyperparameter-tuning-in-tensorflow-2-0-a7b4e2b574a1

    #TODO: These shouldn't be needed
    seed(1234)
    random.set_seed(2345)

    column = ['Input Layer', 'Bottleneck Layer', 'Input Sparse', 'Hidden Sparse', 'Thresh', 'Score']
    results = pd.DataFrame(columns=column)

    max_layer_outer = 80
    min_layer_outer = 60
    inc_layer_outer = 5

    min_layer_inner = 50
    inc_layer_inner = 4

    max_sparse_outer = 0.3
    min_sparse_outer = 0.1
    inc_sparse_outer = 0.1

    max_sparse_inner = 0.5
    min_sparse_inner = 0.1
    inc_sparse_inner = 0.1
  
    for i in range(min_layer_outer, max_layer_outer, inc_layer_outer):
        for j in range(min_layer_inner, i, inc_layer_inner):
            for k in np.arange(min_sparse_outer, max_sparse_outer, inc_sparse_outer):
                for l in np.arange(min_sparse_inner, max_sparse_inner, inc_sparse_inner):

                    param_string = str(i) + "_" + str(j) + "_" + str(k) + "_" + str(l)
                    print("Running " + param_string)
                    file_name = "result_" + param_string + ".txt"
                    model_name = param_string + ".h5"

                    params = HyperP.cAutoHyper(model_name,[i, j],[0.005, 0], [k, l, 0])

                    model = IDS.train(train_data, data_encoding, params)
                    thresh, score = IDS.find_best_thresh(model, test_data)

                    # DataFrame.append was removed in pandas 2.0; concatenate a
                    # one-row frame instead (behavior-preserving).
                    new_row = pd.DataFrame([{'Input Layer': i, 'Bottleneck Layer': j,
                                             'Input Sparse': k, 'Hidden Sparse': l,
                                             'Thresh': thresh, 'Score': score}])
                    results = pd.concat([results, new_row], ignore_index=True)
                    results.to_csv(results_file_name)

    print(results)
Example #24
def test_correct_vote():
    np.random.seed(3)
    set_seed(4)  # tensorflow

    X = np.random.randn(1000, 3)
    Y = np.sum(X, axis=1).reshape((1000, 1))

    nrv = NeuralRegressionVoter(epochs=30, lr=1)
    nrv.fit(X, Y)

    X_test = np.ones((5, 3))
    Y_test = 3 * np.ones((5, 1))

    assert_allclose(Y_test, nrv.vote(X_test), atol=1e-4)
Example #25
def create_model(inputSize, hiddenDropout, visibleDropout, noBlocks,
                 noDenseLayer, increaseFilters):
    # Set random seeds to make situation equal for all models
    seed(_NUMPY_SEED_)
    set_seed(_TENSORFLOW_SEED_)

    noFilters = 64
    model = keras.Sequential()

    # Layers before first block
    model.add(
        tf.keras.layers.Conv2D(filters=noFilters,
                               kernel_size=(3, 3),
                               padding='same',
                               activation='relu',
                               input_shape=(inputSize, inputSize, 62)))
    if (visibleDropout != 0):
        model.add(Dropout(visibleDropout))

    # layers in Blocks
    for i in range(noBlocks):
        if (increaseFilters == 1):
            noFilters = 64 * pow(2, i)
        model.add(
            Conv2D(filters=noFilters,
                   kernel_size=(3, 3),
                   padding='same',
                   activation="relu"))
        model.add(
            Conv2D(filters=noFilters,
                   kernel_size=(3, 3),
                   padding='same',
                   activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(BatchNormalization())
        if (hiddenDropout != 0):
            model.add(Dropout(hiddenDropout))

    # Layers after last block
    for i in range(noDenseLayer - 1):
        model.add(Dense(512, activation="relu"))
    # Last layer
    model.add(Dense(1, activation="relu"))

    model.compile('adam', 'mean_squared_error', metrics=['accuracy'])
    # model.compile(loss=keras.losses.poisson, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    # model.compile(optimizer='adam', loss=tf.keras.losses.Poisson())
    return model
Example #26
    def train(self, verbose=0, sigma=0, seed=23, transform=False):
        """
        Compiles the model, prints a summary, fits to data
        The boolean transform rescales the data if True (default),
        and uses raw data otherwise.

        The input sigma controls the noise for the train/val inputs
        """
        # load data and targets
        Phi_train, theta_Phi_train = deepcopy(self.train_data)
        Phi_val, theta_Phi_val = deepcopy(self.val_data)

        # add noise
        Phi_train, train_noise = tools.add_noise(Phi_train, sigma, seed=2)
        Phi_val, val_noise = tools.add_noise(Phi_val, sigma, seed=3)

        self.transformed = transform
        if transform:
            # transform train and val inputs
            Phi_train_tformer = preprocessing.MaxAbsScaler()
            Phi_val_tformer = preprocessing.MaxAbsScaler()
            Phi_train = Phi_train_tformer.fit_transform(Phi_train)
            Phi_val = Phi_val_tformer.fit_transform(Phi_val)

            # transform train and val targets
            theta_Phi_train_tformer = preprocessing.MaxAbsScaler()
            theta_Phi_val_tformer = preprocessing.MaxAbsScaler()
            theta_Phi_train = theta_Phi_train_tformer.fit_transform(
                theta_Phi_train)
            theta_Phi_val = theta_Phi_val_tformer.fit_transform(theta_Phi_val)

        # compile and print summary
        set_seed(seed)
        self.build_model()
        self.model.summary()

        # make callbacks and fit model
        callbacks = self.get_callbacks()
        self.model.fit(x=Phi_train,
                       y=theta_Phi_train,
                       validation_data=(Phi_val, theta_Phi_val),
                       batch_size=self.batch_size,
                       epochs=self.epochs,
                       callbacks=callbacks,
                       verbose=verbose)
        print('test mse:',
              self.model.evaluate(self.test_data[0], self.test_data[1]))
        print('test thetas:', self.model.predict(self.test_data[0]))
Example #27
    def setUp(self):
        import Activity7_02
        self.activity = Activity7_02
        self.seed = 1
        np.random.seed(self.seed)
        random.set_seed(self.seed)

        self.classifier = Sequential()
        self.classifier.add(
            Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPool2D(pool_size=(2, 2)))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPool2D(pool_size=(2, 2)))
        self.classifier.add(Flatten())
        self.classifier.add(Dense(units=128, activation='relu'))
        self.classifier.add(Dense(128, activation='relu'))
        self.classifier.add(Dense(128, activation='relu'))
        self.classifier.add(Dense(128, activation='relu'))
        self.classifier.add(Dense(units=1, activation='sigmoid'))
        self.classifier.compile(optimizer='adam',
                                loss='binary_crossentropy',
                                metrics=['accuracy'])

        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)

        test_datagen = ImageDataGenerator(rescale=1. / 255)
        training_set = train_datagen.flow_from_directory(
            '../dataset/training_set',
            target_size=(64, 64),
            batch_size=32,
            class_mode='binary')

        test_set = test_datagen.flow_from_directory('../dataset/test_set',
                                                    target_size=(64, 64),
                                                    batch_size=32,
                                                    class_mode='binary')
        self.classifier.fit_generator(training_set,
                                      steps_per_epoch=10000,
                                      epochs=2,
                                      validation_data=test_set,
                                      validation_steps=2500,
                                      shuffle=False)
Example #28
def perform_single_experiment(model_name, loss_name, alpha, epochs, batch_size,
                              seed, data_augmentation, subtract_pixel_mean,
                              autolog_freq, cluster_job, dataset_name, decay,
                              gradient_clipping, model_checkpoints, warmup,
                              initial_lr, lr_sched_multipler,
                              reduce_on_plateau, test_run, load_ext_params,
                              temp_scaling):
    config = init_env(autolog_freq=autolog_freq, seed=seed)

    # Reduce logging output
    logging.getLogger("urllib3").setLevel(logging.WARNING)

    with mlflow.start_run():
        # Meta parameters
        loss_type = get_loss_type_by_name(loss_name)
        model_params = ModelParameters()
        model_params.set_parameter("seed", seed)
        random.set_seed(model_params.get_parameter("seed"))
        model_params.set_parameter("batch_size", batch_size)
        model_params.set_parameter("epochs", epochs)
        model_params.set_parameter("model_type",
                                   get_model_type_by_name(model_name))
        model_params.set_parameter("loss_type", loss_type)
        model_params.set_parameter("alpha", alpha)
        model_params.set_parameter("dataset_name", dataset_name)
        model_params.set_parameter("data_augmentation", data_augmentation)
        model_params.set_parameter("subtract_pixel_mean", subtract_pixel_mean)
        model_params.set_parameter("decay", decay)
        model_params.set_parameter("gradient_clipping", gradient_clipping)
        model_params.set_parameter("warmup", warmup)
        model_params.set_parameter("initial_lr", initial_lr)
        model_params.set_parameter("lr_sched_multipler", lr_sched_multipler)
        model_params.set_parameter("reduce_on_plateau", reduce_on_plateau)
        model_params.set_parameter("test_run", test_run)
        model_params.set_parameter("load_ext_params", load_ext_params)
        model_params.set_parameter("temp_scaling", temp_scaling)
        if load_ext_params:
            hyper_params_path = get_hyperparameter_file_path(model_params)
            model_params.load_parameters_from_file(
                os.path.join(ROOT_DIR, hyper_params_path),
                '{}_{}'.format(model_params.get_parameter("model_type"),
                               dataset_name))

        model_params.log_parameters()

        return perform_run(model_params, cluster_job, model_checkpoints,
                           config)
Example #29
    def setUp(self):
        import Exercise8_03
        self.exercise = Exercise8_03

        vgg_model = keras.applications.vgg16.VGG16()

        self.seed = 42
        np.random.seed(self.seed)
        random.set_seed(self.seed)
        last_layer = str(vgg_model.layers[-1])

        self.classifier = Sequential()
        for layer in vgg_model.layers:
            if str(layer) != last_layer:
                self.classifier.add(layer)

        for layer in self.classifier.layers:
            layer.trainable = False
        self.classifier.add(Dense(1, activation='sigmoid'))
        self.classifier.compile(optimizer='adam',
                                loss='binary_crossentropy',
                                metrics=['accuracy'])

        generate_train_data = ImageDataGenerator(rescale=1. / 255,
                                                 shear_range=0.2,
                                                 zoom_range=0.2,
                                                 horizontal_flip=True)

        generate_test_data = ImageDataGenerator(rescale=1. / 255)

        training_dataset = generate_train_data.flow_from_directory(
            '../Data/Dataset/training_set',
            target_size=(224, 224),
            batch_size=32,
            class_mode='binary')

        test_dataset = generate_test_data.flow_from_directory(
            '../Data/Dataset/test_set',
            target_size=(224, 224),
            batch_size=32,
            class_mode='binary')

        self.classifier.fit_generator(training_dataset,
                                      steps_per_epoch=100,
                                      epochs=10,
                                      validation_data=test_dataset,
                                      validation_steps=30,
                                      shuffle=False)
Example #30
    def DNN_train(self, dim=None, seed=None, neurons1=None, neurons2=None, learning_rate=None, epochs=None):

        np.random.seed(seed)
        random.set_seed(seed)

        def norm(X1, dim):
            # column-wise standardization: zero mean, unit variance
            K = np.zeros((len(X1), dim))
            for ii in np.arange(0, dim, 1):
                K[:, ii] = np.reshape((X1[:, ii] - np.mean(X1[:, ii])) / np.std(X1[:, ii]), len(X1))
            return K

        normed_obs_ind = norm(self.obs_ind, dim)
        normed_obs = norm(self.obs.reshape(len(self.obs), 1), 1)

        def build_model():
            model = keras.Sequential([
                layers.Dense(neurons1, activation='softmax', bias_initializer='zeros'),
                layers.Dense(neurons2, activation='softmax', bias_initializer='zeros'),
                layers.Dense(1, bias_initializer='zeros')
            ])

            # optimizer = tf.keras.optimizers.RMSprop(0.001)
            optimizer = tf.keras.optimizers.RMSprop(learning_rate)

            model.compile(loss='mse',
                          optimizer=optimizer,
                          metrics=['mae', 'mse'])
            return model

        model = build_model()

        model.fit(normed_obs_ind, normed_obs,
                  epochs=epochs, validation_split=0.0,
                  shuffle=False)

        # Inspect the fitted model
        model.summary()

        return model