Example #1
def adagrad_optimizer(self, learning_rate):
    optimizer = optimizers.Adagrad(learning_rate=learning_rate)
    return optimizer
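Example #1 only builds and returns the optimizer; a minimal sketch of how the returned object is typically consumed (assuming tf.keras; the toy model is a placeholder):

# Minimal sketch, assuming tf.keras: the wrapper above just returns a configured
# Adagrad instance, which is then handed to model.compile().
from tensorflow.keras import layers, models, optimizers

model = models.Sequential([layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=optimizers.Adagrad(learning_rate=0.01), loss='mse')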
Example #2
def lstm_dense_sunspots(args):
    """
    Main function
    """
    # %%
    # IMPORTS

    # code repository sub-package imports
    from artificial_neural_networks.code.utils.download_monthly_sunspots import \
        download_monthly_sunspots
    from artificial_neural_networks.code.utils.generic_utils import save_regress_model, \
        series_to_supervised, affine_transformation
    from artificial_neural_networks.code.utils.vis_utils import regression_figs

    # %%

    if args.verbose > 0:
        print(args)

    # For reproducibility
    if args.reproducible:
        os.environ['PYTHONHASHSEED'] = '0'
        np.random.seed(args.seed)
        rn.seed(args.seed)
        tf.set_random_seed(args.seed)
        sess = tf.Session(graph=tf.get_default_graph())
        K.set_session(sess)
        # print(hash("keras"))

    # %%
    # Load the Monthly sunspots dataset

    sunspots_path = download_monthly_sunspots()
    sunspots = np.genfromtxt(fname=sunspots_path,
                             dtype=np.float32,
                             delimiter=",",
                             skip_header=1,
                             usecols=1)

    # %%
    # Train-Test split

    L_series = len(sunspots)

    split_ratio = 2 / 3  # between zero and one
    n_split = int(L_series * split_ratio)

    look_back = args.look_back

    train = sunspots[:n_split]
    test = sunspots[n_split - look_back:]

    train_x, train_y = series_to_supervised(train, look_back)
    test_x, test_y = series_to_supervised(test, look_back)

    # %%
    # PREPROCESSING STEP

    scaling_factor = args.scaling_factor
    translation = args.translation

    n_train = train_x.shape[0]  # number of training examples/samples
    n_test = test_x.shape[0]  # number of test examples/samples

    n_in = train_x.shape[1]  # number of features / dimensions
    n_out = train_y.shape[1]  # number of steps ahead to be predicted

    # Reshape training and test sets
    train_x = train_x.reshape(n_train, n_in, 1)
    test_x = test_x.reshape(n_test, n_in, 1)

    # Apply preprocessing
    train_x_ = affine_transformation(train_x, scaling_factor, translation)
    train_y_ = affine_transformation(train_y, scaling_factor, translation)
    test_x_ = affine_transformation(test_x, scaling_factor, translation)
    test_y_ = affine_transformation(test_y, scaling_factor, translation)

    # %%
    # Model hyperparameters and ANN Architecture

    x = Input(shape=(n_in, 1))  # input layer
    h = x

    h = LSTM(units=args.layer_size)(h)  # hidden layer

    out = Dense(units=n_out, activation=None)(h)  # output layer

    model = Model(inputs=x, outputs=out)

    if args.verbose > 0:
        model.summary()

    def root_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

    loss_function = root_mean_squared_error

    metrics = ['mean_absolute_error', 'mean_absolute_percentage_error']

    lr = args.learning_rate
    epsilon = args.epsilon
    optimizer_selection = {
        'Adadelta':
        optimizers.Adadelta(lr=lr, rho=0.95, epsilon=epsilon, decay=0.0),
        'Adagrad':
        optimizers.Adagrad(lr=lr, epsilon=epsilon, decay=0.0),
        'Adam':
        optimizers.Adam(lr=lr,
                        beta_1=0.9,
                        beta_2=0.999,
                        epsilon=epsilon,
                        decay=0.0,
                        amsgrad=False),
        'Adamax':
        optimizers.Adamax(lr=lr,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=epsilon,
                          decay=0.0),
        'Nadam':
        optimizers.Nadam(lr=lr,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=epsilon,
                         schedule_decay=0.004),
        'RMSprop':
        optimizers.RMSprop(lr=lr, rho=0.9, epsilon=epsilon, decay=0.0),
        'SGD':
        optimizers.SGD(lr=lr, momentum=0.0, decay=0.0, nesterov=False)
    }

    optimizer = optimizer_selection[args.optimizer]

    model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics)

    # %%
    # Save trained models for every epoch

    models_path = r'artificial_neural_networks/trained_models/'
    model_name = 'sunspots_lstm_dense'
    weights_path = models_path + model_name + '_weights'
    model_path = models_path + model_name + '_model'
    file_suffix = '_{epoch:04d}_{val_loss:.4f}_{val_mean_absolute_error:.4f}'

    if args.save_weights_only:
        file_path = weights_path
    else:
        file_path = model_path

    file_path += file_suffix

    monitor = 'val_loss'

    if args.save_models:
        checkpoint = ModelCheckpoint(file_path + '.h5',
                                     monitor=monitor,
                                     verbose=args.verbose,
                                     save_best_only=args.save_best,
                                     mode='auto',
                                     save_weights_only=args.save_weights_only)
        callbacks = [checkpoint]
    else:
        callbacks = []

    # %%
    # TRAINING PHASE

    if args.time_training:
        start = timer()

    model.fit(x=train_x_,
              y=train_y_,
              validation_data=(test_x_, test_y_),
              batch_size=args.batch_size,
              epochs=args.n_epochs,
              verbose=args.verbose,
              callbacks=callbacks)

    if args.time_training:
        end = timer()
        duration = end - start
        print('Total time for training (in seconds):')
        print(duration)

    # %%
    # TESTING PHASE

    # Predict preprocessed values
    train_y_pred_ = model.predict(train_x_)[:, 0]
    test_y_pred_ = model.predict(test_x_)[:, 0]

    # Remove preprocessing
    train_y_pred = affine_transformation(train_y_pred_,
                                         scaling_factor,
                                         translation,
                                         inverse=True)
    test_y_pred = affine_transformation(test_y_pred_,
                                        scaling_factor,
                                        translation,
                                        inverse=True)

    train_rmse = sqrt(mean_squared_error(train_y, train_y_pred))
    train_mae = mean_absolute_error(train_y, train_y_pred)
    train_r2 = r2_score(train_y, train_y_pred)

    test_rmse = sqrt(mean_squared_error(test_y, test_y_pred))
    test_mae = mean_absolute_error(test_y, test_y_pred)
    test_r2 = r2_score(test_y, test_y_pred)

    if args.verbose > 0:
        print('Train RMSE: %.4f ' % (train_rmse))
        print('Train MAE: %.4f ' % (train_mae))
        print('Train (1 - R_squared): %.4f ' % (1.0 - train_r2))
        print('Train R_squared: %.4f ' % (train_r2))
        print('')
        print('Test RMSE: %.4f ' % (test_rmse))
        print('Test MAE: %.4f ' % (test_mae))
        print('Test (1 - R_squared): %.4f ' % (1.0 - test_r2))
        print('Test R_squared: %.4f ' % (test_r2))

    # %%
    # Data Visualization

    if args.plot:
        regression_figs(train_y=train_y,
                        train_y_pred=train_y_pred,
                        test_y=test_y,
                        test_y_pred=test_y_pred)

    # %%
    # Save the architecture and the lastly trained model

    save_regress_model(model, models_path, model_name, weights_path,
                       model_path, file_suffix, test_rmse, test_mae, args)

    # %%

    return model
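series_to_supervised, affine_transformation and save_regress_model are imported from the repository's own utils package and are not shown here; a minimal sketch of what a sliding-window series_to_supervised typically does (an assumption, not the repository's exact implementation):

# Hedged sketch of a sliding-window "series to supervised" transform (assumed behaviour).
import numpy as np

def series_to_supervised_sketch(series, look_back):
    x, y = [], []
    for i in range(len(series) - look_back):
        x.append(series[i:i + look_back])   # the previous `look_back` values
        y.append(series[i + look_back])     # the next value to predict
    return np.array(x), np.array(y).reshape(-1, 1)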
Example #3
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('softmax'))
'''

sgd = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)

model.compile(loss='mean_squared_error',
              optimizer=sgd,
              metrics=['accuracy'])

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
Example #4
import tensorflow as tf
from keras import optimizers
from keras.layers import Input
from keras.models import Model
from keras.layers import Dense, Flatten, Reshape, Dropout
from keras.layers import Convolution1D, MaxPooling1D, BatchNormalization
from keras.layers import Lambda


def mat_mul(A, B):
    return tf.matmul(A, B)



# adam = optimizers.Adam(lr=0.1, decay=0.1/epoch_num)
adam = optimizers.Adagrad(lr=learning_rate, decay=0)  # 0.01, 0.7 worked well

# ------------------------------------ Pointnet Architecture
# input_Transformation_net
input_points = Input(shape=(num_points, 3))
x = Convolution1D(64, 1, activation='relu',
                  input_shape=(num_points, 3))(input_points)
x = BatchNormalization()(x)
x = Convolution1D(128, 1, activation='relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(1024, 1, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling1D(pool_size=num_points)(x)
x = Dense(512, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(256, activation='relu')(x)
Example #5
model.add(Dense(256))
model.add(Activation('sigmoid'))

model.add(Dropout(0.5))

model.add(Dense(16))
model.add(Activation('sigmoid'))

model.add(Dropout(0.5))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adagrad(lr=0.0058),
              metrics=['accuracy'])

model.load_weights("Models/Model1/weights.h5")

data = generate_arrays(data_dir)
res = model.predict_classes(data[0], verbose=0)
print("Total: ", len(res))

# true pos, true neg, false pos, false neg
for i in range(len(res)):
    if res[i] == 1 and data[1][i] == 1:
        tp += 1
    elif res[i] == 1 and data[1][i] == 0:
        fp += 1
    elif res[i] == 0 and data[1][i] == 1:
Example #6
def train_VGG19_Model(csv_file, lr, ep):
    def step_decay_schedule(initial_lr=1e-3, decay_factor=0.75, step_size=10):
        def schedule(epoch):
            return initial_lr * (decay_factor ** np.floor(epoch / step_size))
        return LearningRateScheduler(schedule)


    def Read_image(path):
        im = Image.open(path).convert('RGB')
        return im


    X = []
    Y = []
    dataset = pd.read_csv(csv_file)
    for index, row in dataset.iterrows():
        X.append(array(Read_image(row[0]).resize((100, 100))).flatten() / 255.0)
        Y.append(row[1])

    X = np.array(X)
    Y = to_categorical(Y, 2)
    X = X.reshape(-1, 100, 100, 3)
    X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.20, random_state=5)

    # Load the VGG model
    vgg_conv = VGG19(weights='imagenet', include_top=False, input_shape=(100, 100, 3))

    # Create the model
    model = models.Sequential()
    # Freeze all layers except the last 5
    for layer in vgg_conv.layers[:-5]:
        layer.trainable = False

    # Check the trainable status of the individual layers
    for layer in vgg_conv.layers:
        print(layer, layer.trainable)



    model.add(vgg_conv)
    model.summary()
    model.add(layers.Flatten())
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dropout(0.50))
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dropout(0.50))
    model.add(layers.Dense(2, activation='softmax'))

    optimizer = optimizers.Adagrad(lr=lr, epsilon=None, decay=0.0)
    model.compile(optimizer=optimizer,
                  loss="mean_squared_error",
                  metrics=["accuracy"])

    lr_sched = step_decay_schedule(initial_lr=1e-4, decay_factor=0.75, step_size=2)

    epochs = ep
    batch_size = 20

    history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_val, Y_val), verbose=2,callbacks=[lr_sched])
    # Plot the loss and accuracy curves for training and validation
    fig, ax = plt.subplots(3, 1)
    ax[0].plot(history.history['loss'], color='b', label="Training loss")
    ax[0].plot(history.history['val_loss'], color='r', label="validation loss", axes=ax[0])
    legend = ax[0].legend(loc='best', shadow=True)

    ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
    ax[1].plot(history.history['val_acc'], color='r', label="Validation accuracy")
    legend = ax[1].legend(loc='best', shadow=True)


    def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)

        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")

        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')


    # Predict the values from the validation dataset
    Y_pred = model.predict(X_val)
    # Convert predictions classes to one hot vectors
    Y_pred_classes = np.argmax(Y_pred, axis=1)
    # Convert validation observations to one hot vectors
    Y_true = np.argmax(Y_val, axis=1)
    # compute the confusion matrix
    confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
    # plot the confusion matrix
    plot_confusion_matrix(confusion_mtx, classes=range(2))

    image_path = os.getcwd()+"\\Figures"
    Models_path = os.getcwd()+"\\Re_Traind_Models"
    file_number = random.randint(1, 1000000)
    plot_Name = image_path+"\\VGG19_"+str(file_number)+".png"
    Model_Name = Models_path+"\\VGG19_"+str(file_number)+".h5"
    plt.savefig(plot_Name, transparent=True, bbox_inches="tight", pad_inches=2, dpi=50)
    model.save(Model_Name)
    return plot_Name, Model_Name
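step_decay_schedule is invoked above with initial_lr=1e-4, decay_factor=0.75 and step_size=2; a small standalone sketch of the learning-rate values that schedule produces:

# Standalone sketch: the learning rates produced by the schedule above.
import numpy as np

def step_decay(epoch, initial_lr=1e-4, decay_factor=0.75, step_size=2):
    return initial_lr * (decay_factor ** np.floor(epoch / step_size))

for epoch in range(6):
    print(epoch, step_decay(epoch))  # 1e-4, 1e-4, 7.5e-5, 7.5e-5, 5.625e-5, 5.625e-5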
Example #7
def run_nn(**kwargs):
    """
    Run the neural network for the given parameters

    Adapted from the code provided in lecture
    """

    # Start the timer
    start_t = timer()

    # Number of input, hidden, and output nodes
    input_nodes = 784
    hidden_nodes = 200
    output_nodes = 10

    # Set parameters
    learning_rate = kwargs["learning_rate"]
    optimizer = kwargs["optimizer"]
    batch_size = kwargs["batch_size"]
    epochs = kwargs["epochs"]

    # Create the Keras model
    model = Sequential()
    model.add(
        Dense(
            hidden_nodes,
            activation='sigmoid',
            input_shape=(input_nodes, ),
            use_bias=False,
        ))
    model.add(Dense(
        output_nodes,
        activation='sigmoid',
        use_bias=False,
    ))
    # Print the model summary
    model.summary()

    # Set the optimizer
    if optimizer == "adam":
        opt = optimizers.Adam(learning_rate=learning_rate)
    elif optimizer == "sgd":
        opt = optimizers.SGD(learning_rate=learning_rate)
    elif optimizer == "rmsprop":
        opt = optimizers.RMSprop(learning_rate=learning_rate)
    elif optimizer == "adagrad":
        opt = optimizers.Adagrad(learning_rate=learning_rate)
    elif optimizer == "adadelta":
        opt = optimizers.Adadelta(learning_rate=learning_rate)
    elif optimizer == "adamax":
        opt = optimizers.Adamax(learning_rate=learning_rate)
    elif optimizer == "nadam":
        opt = optimizers.Nadam(learning_rate=learning_rate)
    # Default optimizer is adam
    else:
        opt = optimizers.Adam(learning_rate=learning_rate)

    # Define the error criterion, optimizer, and an optional metric to monitor during training
    model.compile(
        loss='categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy'],
    )

    # Load the mnist training data CSV
    df = pd.read_csv("mnist_csv/mnist_train.csv", header=None)

    # Columns 1-784 are the input values
    x_train = np.asfarray(df.loc[:, 1:input_nodes].values)
    x_train /= 255.0

    # Column 0 is the desired label
    labels = df.loc[:, 0].values

    # Convert labels to one-hot vectors
    y_train = np_utils.to_categorical(labels, output_nodes)

    # Train the neural network
    # Train the model
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1)

    # Save the model
    model.save('MNIST_3layer_keras.h5')
    print('model saved')

    # Test the model

    # Load the MNIST test data CSV file into a list
    test_data_file = open('mnist_csv/mnist_test.csv', 'r')
    test_data_list = test_data_file.readlines()
    test_data_file.close()

    # Scorecard for how well the network performs, initially empty
    scorecard = []

    # Go through all the data in the test data set, one by one
    for record in test_data_list:
        # Split the record by the commas
        data_sample = record.split(',')

        # Correct answer is first value
        correct_label = int(data_sample[0])

        # Scale and shift the inputs
        inputs = np.asfarray(data_sample[1:]) / 255.0

        # Make prediction
        outputs = model.predict(np.reshape(inputs, (1, len(inputs))))

        # The index of the highest value corresponds to the label
        label = np.argmax(outputs)

        # Append correct or incorrect to list
        if label == correct_label:
            # Network's answer matches correct answer, add 1 to scorecard
            scorecard.append(1)
        else:
            # Network's answer doesn't match correct answer, add 0 to scorecard
            scorecard.append(0)
            pass

        pass

    # Calculate the accuracy
    scorecard_array = np.asarray(scorecard)
    accuracy = scorecard_array.sum() / scorecard_array.size
    print('accuracy = {}'.format(accuracy))

    # Stop the timer
    end_t = timer()
    execution_time = end_t - start_t
    print('elapsed time = {}'.format(execution_time))

    output = {'accuracy': accuracy, 'execution_time': execution_time}
    return output
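The if/elif chain above plays the same role as the optimizer_selection dictionary in Example #2; a compact sketch of that alternative (assuming Keras 2.3+ or tf.keras, where learning_rate is accepted as a keyword):

# Sketch: dictionary-based optimizer selection equivalent to the chain above.
from keras import optimizers

def select_optimizer(name, learning_rate):
    table = {
        "adam": optimizers.Adam,
        "sgd": optimizers.SGD,
        "rmsprop": optimizers.RMSprop,
        "adagrad": optimizers.Adagrad,
        "adadelta": optimizers.Adadelta,
        "adamax": optimizers.Adamax,
        "nadam": optimizers.Nadam,
    }
    # .get() supplies the same Adam fallback as the final else branch above
    return table.get(name, optimizers.Adam)(learning_rate=learning_rate)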
Example #8
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))

model.add(Dropout(0.75))

model.add(Dense(64))
model.add(Activation('sigmoid'))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adagrad(lr=0.05),
              metrics=['accuracy'])


def generate_arrays_from_dir(path, batchsz):
    db = pd.read_csv(db_file)
    while 1:
        with open(path) as f:
            r = csv.reader(f)
            batchCount = 0
            batchX = []
            batchy = []
            for ln in r:
                X = np.array(list(np.array(ln[1:(len(ln))],dtype=np.uint32).tobytes()))
                y = 1 if db.loc[db['id'] == int(ln[0][1:])].values[0][2] == 'p' else 0
                batchX.append(np.array(X))
Example #9
def create_model(x_train, y_train, x_val, y_val, layer_sizes):
    def coeff_determination(y_true, y_pred):
        SS_res = K.sum(K.square(y_true - y_pred))
        SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
        return (1 - SS_res / (SS_tot + K.epsilon()))

    model = models.Sequential()

    model.add(
        layers.Dense(
            {{
                choice([
                    np.power(2, 0),
                    np.power(2, 1),
                    np.power(2, 2),
                    np.power(2, 3),
                    np.power(2, 4),
                    np.power(2, 5),
                    np.power(2, 6),
                    np.power(2, 7)
                ])
            }},
            activation={{
                choice([
                    'relu', 'selu', 'tanh', 'softmax', 'softplus', 'linear',
                    None
                ])
            }},
            input_shape=(len(data.columns), )))

    model.add(
        layers.Dense(
            {{
                choice([
                    np.power(2, 0),
                    np.power(2, 1),
                    np.power(2, 2),
                    np.power(2, 3),
                    np.power(2, 4),
                    np.power(2, 5),
                    np.power(2, 6),
                    np.power(2, 7)
                ])
            }},
            activation={{
                choice([
                    'relu', 'selu', 'tanh', 'softmax', 'softplus', 'linear',
                    None
                ])
            }}))

    model.add(
        layers.Dense(
            {{
                choice([
                    np.power(2, 0),
                    np.power(2, 1),
                    np.power(2, 2),
                    np.power(2, 3),
                    np.power(2, 4),
                    np.power(2, 5),
                    np.power(2, 6),
                    np.power(2, 7)
                ])
            }},
            activation={{
                choice([
                    'relu', 'selu', 'tanh', 'softmax', 'softplus', 'linear',
                    None
                ])
            }}))

    model.add(
        layers.Dense(1,
                     activation={{
                         choice([
                             'relu', 'selu', 'tanh', 'softmax', 'softplus',
                             'linear', None
                         ])
                     }}))

    RMS = optimizers.Adagrad(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})

    model.compile(optimizer=RMS,
                  loss={{
                      choice([
                          'mean_absolute_error', 'mean_squared_error',
                          'mean_absolute_percentage_error',
                          'mean_squared_logarithmic_error', 'hinge',
                          'squared_hinge', 'logcosh'
                      ])
                  }},
                  metrics=[coeff_determination])

    model.fit(x_train,
              y_train,
              epochs={{choice([25, 50, 75, 100, 500])}},
              batch_size={{choice([10, 16, 20, 32, 64])}},
              validation_data=(x_val, y_val))

    score, acc = model.evaluate(x_val, y_val, verbose=0)
    print('Validation accuracy:', acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
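The {{choice(...)}} placeholders and STATUS_OK belong to the hyperas/hyperopt setup that surrounds this snippet; a minimal sketch, under that assumption, of the driver that usually runs such a template (the data() stub and max_evals value are placeholders, not from the source):

# Hedged sketch of the usual hyperas driver for a template like create_model above.
import numpy as np
from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # Placeholder: must return x_train, y_train, x_val, y_val, layer_sizes
    # to match create_model's signature.
    x = np.random.rand(100, 8).astype('float32')
    y = np.random.rand(100, 1).astype('float32')
    return x[:80], y[:80], x[80:], y[80:], None

best_run, best_model = optim.minimize(model=create_model, data=data,
                                      algo=tpe.suggest, max_evals=10,
                                      trials=Trials())
print(best_run)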
Example #10
    #Train Conciseness Model
    cvscores_conciseness = []
    for train, test in kfold.split(X_train, y_train):
        print("train", train)
        print("test", test)

        y_train_cat = keras.utils.to_categorical(y_train, 2)
        #Build keras model
        model = Sequential()
        model.add(Dense(units=100, activation='relu', input_dim=shape[1]))
        model.add(Dropout(rate=0.2))
        model.add(Dense(units=50, activation='relu'))
        model.add(Dense(units=2, activation='softmax'))

        sgd = optimizers.Adagrad(lr=0.01)
        model.compile(loss='mean_squared_error',
                      optimizer=sgd,
                      metrics=['mse'])

        #Train model
        model.fit(X_train[train],
                  y_train_cat[train],
                  epochs=150,
                  batch_size=128,
                  validation_data=(X_train[test], y_train_cat[test]))

        loss_and_metrics = model.evaluate(X_train[test],
                                          y_train_cat[test],
                                          batch_size=128)
        print(loss_and_metrics)
Example #11
def get_model(name, **kwargs):
    """
    Instantiate and obtain a model with the given hyperparameters
    Args:
        name: string of the model name
        kwargs: hyperparameters
    Returns:
        model: Keras network
        optimizer: Keras optimizer
        criterion: Keras loss function
        kwargs: hyperparameters with sane defaults
    """
    #    cuda = kwargs.setdefault('cuda', False)
    n_classes = kwargs['n_classes']
    #name = kwargs['dataset']
    n_bands = kwargs['n_bands']
    weights = np.ones(n_classes)
    #    weights[torch.LongTensor(kwargs['ignored_labels'])] = 0.
    weights = kwargs.setdefault('weights', weights)
    model_name = None
    if name == 'nn':
        kwargs.setdefault('patch_size', 1)
        center_pixel = True

        #model,model_name = Baseline(n_bands, n_classes, kwargs.setdefault('dropout', True))
        model, model_name = _1April(n_bands, n_classes,
                                    kwargs.setdefault('dropout', True))
        lr = kwargs.setdefault('learning_rate', 0.0001)
        optimizer = optimizers.Adam(lr=lr)
        #       optimizer = optim.Adam(model.parameters(), lr=lr)
        criterion = 'categorical_crossentropy'
        kwargs.setdefault('epoch', 100)
        kwargs.setdefault('batch_size', 100)
    elif name == 'hu':
        kwargs.setdefault('patch_size', 1)
        kwargs.setdefault('epoch', 100)
        kwargs.setdefault('batch_size', 100)
        center_pixel = True
        #        input_channels=((kwargs['batch_size'],n_bands))
        model, model_name = HuEtAl.build(n_bands, n_classes)
        lr = kwargs.setdefault('learning_rate', 0.01)
        optimizer = optimizers.Adam(lr=lr)
        criterion = 'categorical_crossentropy'
    elif name == 'hamida':
        patch_size = kwargs.setdefault('patch_size', 5)
        center_pixel = True
        model, model_name = HamidaEtAl.build(n_bands,
                                             n_classes,
                                             patch_size=patch_size)
        lr = kwargs.setdefault('learning_rate', 0.01)
        optimizer = optimizers.SGD(lr=lr, decay=0.0005)
        kwargs.setdefault('batch_size', 100)
        criterion = 'categorical_crossentropy'
    elif name == 'lee':
        kwargs.setdefault('epoch', 200)
        patch_size = kwargs.setdefault('patch_size', 5)
        center_pixel = True
        model, model_name = LeeEtAl.build(n_bands, n_classes)
        lr = kwargs.setdefault('learning_rate', 0.001)
        optimizer = optimizers.Adam(lr=lr)
        criterion = 'categorical_crossentropy'

    elif name == 'chen':
        patch_size = kwargs.setdefault('patch_size', 27)
        center_pixel = True
        model, model_name = ChenEtAl.build(n_bands,
                                           n_classes,
                                           patch_size=patch_size)
        lr = kwargs.setdefault('learning_rate', 0.003)
        optimizer = optimizers.SGD(lr=lr)
        criterion = 'categorical_crossentropy'
        kwargs.setdefault('epoch', 400)
        kwargs.setdefault('batch_size', 100)
    elif name == 'li':
        patch_size = kwargs.setdefault('patch_size', 5)
        center_pixel = True
        model, model_name = LiEtAl.build(n_bands,
                                         n_classes,
                                         n_planes=16,
                                         patch_size=patch_size)
        lr = kwargs.setdefault('learning_rate', 0.01)
        optimizer = optimizers.SGD(lr=lr, momentum=0.9, decay=0.0005)
        epoch = kwargs.setdefault('epoch', 200)
        criterion = 'categorical_crossentropy'
    elif name == 'he':
        kwargs.setdefault('patch_size', 7)
        kwargs.setdefault('batch_size', 40)
        lr = kwargs.setdefault('learning_rate', 0.01)
        center_pixel = True
        model, model_name = HeEtAl.build(n_bands,
                                         n_classes,
                                         patch_size=kwargs['patch_size'])
        # For Adagrad, we need to load the model on GPU before creating the optimizer

        optimizer = optimizers.Adagrad(lr=lr, decay=0.01)
        criterion = 'categorical_crossentropy'
    elif name == 'luo':
        # All the experiments are settled by the learning rate of 0.1,
        # the decay term of 0.09 and batch size of 100.
        kwargs.setdefault('patch_size', 3)
        kwargs.setdefault('batch_size', 100)
        lr = kwargs.setdefault('learning_rate', 0.1)
        center_pixel = True
        model, model_name = LuoEtAl.build(n_bands,
                                          n_classes,
                                          patch_size=kwargs['patch_size'])
        optimizer = optimizers.SGD(lr=lr, decay=0.09)
        criterion = 'categorical_crossentropy'
    elif name == 'sharma':
        # We train our S-CNN from scratch using stochastic gradient descent with
        # momentum set to 0.9, weight decay of 0.0005, and with a batch size
        # of 60.  We initialize an equal learning rate for all trainable layers
        # to 0.05, which is manually decreased by a factor of 10 when the validation
        # error stopped decreasing. Prior to the termination the learning rate was
        # reduced two times at 15th and 25th epoch. [...]
        # We trained the network for 30 epochs
        kwargs.setdefault('batch_size', 60)
        epoch = kwargs.setdefault('epoch', 30)
        lr = kwargs.setdefault('lr', 0.05)
        center_pixel = True
        # We assume patch_size = 64
        kwargs.setdefault('patch_size', 64)
        model, model_name = SharmaEtAl.build(n_bands,
                                             n_classes,
                                             patch_size=kwargs['patch_size'])
        optimizer = optimizers.SGD(lr=lr, decay=0.0005)
        criterion = 'categorical_crossentropy'
    elif name == 'liu':
        kwargs['supervision'] = 'semi'
        # "The learning rate is set to 0.001 empirically. The number of epochs is set to be 40."
        kwargs.setdefault('epoch', 40)
        lr = kwargs.setdefault('lr', 0.001)
        center_pixel = True
        patch_size = kwargs.setdefault('patch_size', 9)
        model, model_name = LiuEtAl.build(n_bands, n_classes, patch_size)
        optimizer = optimizers.SGD(lr=lr)
        criterion = ['categorical_crossentropy',
                     'mean_squared_error']  #weighted_loss(1.0)


#        K.mean(K.square(rec, squeeze_all(data[:,:,:,patch_size//2,patch_size//2])))
#        kwargs.setdefault('scheduler', optim.lr_scheduler.MultiStepLR(optimizer, milestones=[epoch // 2, (5 * epoch) // 6], gamma=0.1))
    elif name == 'boulch':
        kwargs['supervision'] = 'semi'
        kwargs.setdefault('patch_size', 1)
        kwargs.setdefault('epoch', 100)
        lr = kwargs.setdefault('lr', 0.001)
        center_pixel = True
        model, model_name = BoulchEtAl.build(n_bands, n_classes)
        optimizer = optimizers.SGD(lr=lr)
        criterion = ['categorical_crossentropy',
                     'mean_squared_error']  #weighted_loss(0.1)
    elif name == 'mou':
        kwargs.setdefault('patch_size', 1)
        center_pixel = True
        kwargs.setdefault('epoch', 100)
        # "The RNN was trained with the Adadelta algorithm [...] We made use of a
        # fairly  high  learning  rate  of  1.0  instead  of  the  relatively  low
        # default of  0.002 to  train the  network"
        lr = kwargs.setdefault('lr', 1.0)
        model, model_name = MouEtAl.build(n_bands, n_classes)
        # For Adadelta, we need to load the model on GPU before creating the optimizer
        #        model = model.to(device)
        optimizer = optimizers.Adadelta(lr=lr)
        criterion = 'categorical_crossentropy'
    elif name == 'squeezenet':
        kwargs.setdefault('patch_size', 3)
        kwargs.setdefault('batch_size', 40)
        kwargs.setdefault('epoch', 100)
        lr = kwargs.setdefault('learning_rate', 0.5)
        center_pixel = True
        model, model_name = Squeezenet().build(n_bands,
                                               n_classes,
                                               patch_size=kwargs['patch_size'])
        optimizer = optimizers.Adadelta(lr=lr)
        criterion = 'categorical_crossentropy'
    else:
        raise KeyError("{} model is unknown.".format(name))

    epoch = kwargs.setdefault('epoch', 100)
    #kwargs.setdefault('scheduler', None)
    kwargs.setdefault('batch_size', 100)
    kwargs.setdefault('dataset', None)
    kwargs.setdefault('supervision', 'full')
    kwargs.setdefault('flip_augmentation', False)
    kwargs.setdefault('radiation_augmentation', False)
    kwargs.setdefault('mixture_augmentation', False)
    kwargs['center_pixel'] = center_pixel
    return model, model_name, optimizer, criterion, kwargs
Example #12
# generate dataset
train_choking_x, train_choking_y = data_loader.read("train_choking")
vali_choking_x, vali_choking_y = data_loader.read("validation_choking")

num, time, w, h, color = train_choking_x.shape

model_recog = models.Sequential()
model_recog.add(layers.Reshape((time, -1)))
model_recog.add(
    layers.LSTM(units=64,
                dropout=0.1,
                recurrent_dropout=0.1,
                return_sequences=True))
model_recog.add(layers.LSTM(units=32, dropout=0.1, recurrent_dropout=0.1))
model_recog.add(layers.Dense(1, activation="sigmoid"))
model_recog.compile(optimizer=optimizers.Adagrad(),
                    loss='binary_crossentropy',
                    metrics=['acc'])
history = model_recog.fit(train_choking_x,
                          train_choking_y,
                          epochs=10,
                          batch_size=30,
                          validation_data=(vali_choking_x, vali_choking_y))

model_recog.save("choking_model.h5")
#y_pred = model.predict(test_choking_x)
#print("real_label", vali_choking_y)

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
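The snippet stops after extracting acc, val_acc and loss from history.history; a minimal sketch of the plotting step that typically follows (matplotlib assumed, not part of the original source):

# Hedged sketch: plotting the accuracy curves extracted above.
import matplotlib.pyplot as plt

def plot_history(acc, val_acc):
    epochs_range = range(1, len(acc) + 1)
    plt.plot(epochs_range, acc, 'b', label='Training acc')
    plt.plot(epochs_range, val_acc, 'r', label='Validation acc')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()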
Example #13
def cnnClassifier():
    img_height_rows = 32
    img_width_cols = 32
    im_shape = (img_height_rows, img_width_cols, 1)
    print(im_shape)
    x_train = X_train.reshape(
        X_train.shape[0],
        *im_shape)  # Python tip: the * operator unpacks the tuple
    x_test = X_test.reshape(X_test.shape[0], *im_shape)
    global cnn
    kernelSize = (3, 3)
    ip_activation = 'relu'
    ip_conv_0 = Conv2D(filters=4,
                       kernel_size=kernelSize,
                       input_shape=im_shape,
                       activation=ip_activation)
    cnn.add(ip_conv_0)
    # Add the next Convolutional+Activation layer
    ip_conv_0_1 = Conv2D(filters=4,
                         kernel_size=kernelSize,
                         activation=ip_activation)
    cnn.add(ip_conv_0_1)
    # Add the Pooling layer
    pool_0 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")
    cnn.add(pool_0)
    ip_conv_1 = Conv2D(filters=4,
                       kernel_size=kernelSize,
                       activation=ip_activation)
    cnn.add(ip_conv_1)
    ip_conv_1_1 = Conv2D(filters=4,
                         kernel_size=kernelSize,
                         activation=ip_activation)
    cnn.add(ip_conv_1_1)
    pool_1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")
    cnn.add(pool_1)
    flat_layer_0 = Flatten()
    cnn.add(flat_layer_0)
    # Now add the Dense layers
    h_dense_0 = Dense(units=20,
                      activation=ip_activation,
                      kernel_initializer='uniform')
    cnn.add(h_dense_0)
    # Let's add one more before proceeding to the output layer
    h_dense_1 = Dense(units=1024,
                      activation=ip_activation,
                      kernel_initializer='uniform',
                      name='dense11')
    cnn.add(h_dense_1)
    n_classes = 36
    op_activation = 'softmax'
    output_layer = Dense(units=n_classes,
                         activation=op_activation,
                         kernel_initializer='uniform')
    cnn.add(output_layer)
    opt = optimizers.Adagrad(lr=0.001)
    loss = 'categorical_crossentropy'
    metrics = ['accuracy']
    # Compile the classifier using the configuration we want
    cnn.compile(optimizer=opt, loss=loss, metrics=metrics)
    history = cnn.fit(x_train,
                      T_train,
                      batch_size=300,
                      epochs=5,
                      validation_data=(x_test, T_test))
    scores = cnn.evaluate(x_test, T_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))
Example #14
        s.append(v)
        df[col] = (df[col] - m) / v
        scale.append(s)

# In[7]:

x_train = df.values
y_train = Y

# In[8]:

model_1 = Sequential()
model_1.add(
    Dense(len(x_train[0]), activation='relu', input_dim=len(x_train[0])))
model_1.add(Dense(1, activation='sigmoid'))
model_1.compile(optimizer=optimizers.Adagrad(lr=0.0123),
                loss='binary_crossentropy',
                metrics=['binary_accuracy'])
model_1.summary()

# In[9]:

history1 = model_1.fit(x_train,
                       y_train,
                       epochs=50,
                       batch_size=128,
                       validation_split=0.1)

# In[10]:

model_2 = Sequential()
Example #15
"""

# ----------------------------------------------
# Training

#def top_6(y_true, y_pred):
#    return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=6)

#from keras.utils import multi_gpu_model
#model = multi_gpu_model(model, gpus=2)

model.compile(
    loss='categorical_crossentropy',  #loss='binary_crossentropy',
    #optimizer='adagrad',
    #optimizer=optimizers.RMSprop(lr=0.01),
    optimizer=optimizers.Adagrad(lr=0.01),
    metrics=['accuracy'])

train_steps_per_epoch = math.ceil(train_generator.n /
                                  train_generator.batch_size)
validation_steps = math.ceil(valid_generator.n / valid_generator.batch_size)
print('train data size:', train_generator.n)
print('train steps per epoch:', train_steps_per_epoch)
print('valid data size:', valid_generator.n)
print('validation_steps:', validation_steps)

history = model.fit_generator(train_generator,
                              steps_per_epoch=train_steps_per_epoch,
                              epochs=300,
                              validation_data=valid_generator,
                              validation_steps=validation_steps)
Example #16
    ])),
])

print('start pipeline')
start_time = time.time()
train = full_pipe.fit_transform(train)
print("full_pipe fit_time: ", time.time() - start_time)

start_time  = time.time()
model_in = Input(shape=(train.shape[1],), dtype='float32', sparse=True)
out = layers.Dense(192, activation='relu')(model_in)
out = layers.Dense(64, activation='relu')(out)
out = layers.Dense(64, activation='relu')(out)
out = layers.Dense(1)(out)
model = Model(model_in, out)
model.compile(optimizer=optimizers.Adagrad(), loss='mse')   

for i in range(3):
    model.fit(train, y, batch_size=2**(11+i), epochs=1, verbose=0)

print("model fit_time: ",time.time()-start_time)

del train
gc.collect()

print('start predicting')


def load_test():
    for df in pd.read_csv('../input/test_stg2.tsv', sep='\t', chunksize=700000):
        yield df
Example #17
def main():
    parser = argparse.ArgumentParser(
        description='simple 3D convolution for action recognition')
    parser.add_argument('--batch', type=int, default=32)
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--videos',
                        type=str,
                        default='UCF101',
                        help='directory where videos are stored')
    parser.add_argument('--nclass', type=int, default=249)
    parser.add_argument('--output', type=str, required=True)
    parser.add_argument('--skip', type=bool, default=True)
    parser.add_argument('--depth', type=int, default=16)

    args = parser.parse_args()

    #Initializing the dimensions of the frames
    img_rows, img_cols, frames = 32, 32, args.depth
    channel = 3
    nb_classes = args.nclass
    fname_npz = 'dataset_test_{}_{}_{}.npz'.format(args.nclass, args.depth,
                                                   args.skip)
    vid3d = videoto3d1.Videoto3D(img_rows, img_cols, frames)

    #If the dataset is already stored in npz file:
    if os.path.exists(fname_npz):
        loadeddata = np.load(fname_npz)
        X, Y = loadeddata["X"], loadeddata["Y"]
    else:
        #If not, we load the data with the helper function and save it for future use:
        x, y = loaddata(args.videos, vid3d, args.nclass, args.output,
                        args.skip)
        Y = np_utils.to_categorical(y, nb_classes)
        X = x.reshape((x.shape[0], img_rows, img_cols, frames, channel))
        X = X.astype('float32')
        np.savez(fname_npz, X=X, Y=Y)
        print('Saved test dataset to dataset_test.npz.')

    print('X_shape:{}\nY_shape:{}'.format(X.shape, Y.shape))

    # Define model
    model = model_from_json(
        open('3dcnnresult/3dcnn_500_32_adam2.json', 'r').read())
    model.load_weights('3dcnnresult/3dcnn_500_32_adam2.h5')
    model.summary()
    print("Loaded model from disk")

    #List of Optimizers we used:
    adam = optimizers.Adam(lr=0.01, decay=0.0001, amsgrad=False)
    sgd = optimizers.SGD(lr=0.001, momentum=0.9, decay=0.001, nesterov=True)
    ada = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
    nadam = optimizers.Nadam(lr=0.01,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=None,
                             schedule_decay=0.004)

    #Compiling and fitting the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    #Evaluating the model on the test_set
    loss, acc = model.evaluate(X, Y, verbose=1)
    print('Test loss:', loss)
    print('Test accuracy:', acc)
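The model is read back from 3dcnn_500_32_adam2.json and .h5; a minimal sketch of how such files are typically written after training (standard Keras serialization; model stands for the trained network, which is not shown here):

# Hedged sketch: writing the architecture/weights files that main() loads above.
# `model` is assumed to be a trained keras.models.Model.
with open('3dcnnresult/3dcnn_500_32_adam2.json', 'w') as json_file:
    json_file.write(model.to_json())
model.save_weights('3dcnnresult/3dcnn_500_32_adam2.h5')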
Example #18
nb_classes = Y_train.shape[1]
print(nb_classes, 'classes')

rate = 0.1

model = Sequential()
model.add(Dense(64, input_shape=(dims,), activation='relu'))
model.add(Dropout(0.5, noise_shape=None, seed=42))
model.add(Dense(64, init='uniform', activation='relu'))
model.add(Dropout(0.5, noise_shape=None, seed=42))
model.add(Dense(1, init='uniform', activation='sigmoid'))


sgd = optimizers.SGD(lr=0.01, decay=0.005, momentum=0.5, nesterov=True)
rmsprop = optimizers.RMSprop(lr=0.01, rho=0.9, epsilon=1e-08, decay=0.001)
adagrad = optimizers.Adagrad(lr=0.01, epsilon=1e-09, decay=0.0001)
adadelta = optimizers.Adadelta(lr=0.1, rho=0.95, epsilon=1e-08, decay=0.005)
adamax = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.001)
adam = optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.001)

model.compile(optimizer=adagrad, loss='binary_crossentropy', metrics=["accuracy"])
model.summary()

print("Model fitting...")

fBestModel = 'best_model2.h5' 
#early_stop = EarlyStopping(monitor='val_loss', patience=500, verbose=1) 
best_model = ModelCheckpoint(fBestModel, verbose=0, save_best_only=True)

model.fit(X_train, Y_train, validation_data = (X_val, Y_val), 
          epochs=30000, batch_size=dims, verbose=True,
Example #19
def get_optimizer(opt,
                  decay=None,
                  lr=None,
                  momentum=0.0,
                  nesterov=False,
                  beta_1=0.9,
                  beta_2=0.999,
                  epsilon=1e-8,
                  rho=None):
    """
    get_optimizer is a wrapper for Keras optimizers.

    Parameters
    ----------
    beta_1 : `float`
        adam optimizer parameter in range [0, 1) for updating bias first
        moment estimate
    beta_2 : `float`
        adam optimizer parameter in range [0, 1) for updating bias second
        moment estimate
    decay : `None` or `float`
        learning rate decay
    epsilon : `float`
        parameter for numerical stability
    opt : `str`
        Keras optimizer. Options: "sgd", "adam", "nadam", "rmsprop",
        "adagrad", "adamax" and "adadelta"
    lr : `None` or `float`
        optimizer learning rate
    momentum : `float`
        accelerate the gradient descent in the direction that dampens
        oscillations
    nesterov : `bool`
        use Nesterov Momentum
    rho : `None` or `float`
        gradient history

    Returns
    -------
    optimizer : :class:`keras.optimizer`
        keras optimizer object
    """

    ###############################
    # Stochastic Gradient Descent #
    ###############################
    if opt == 'sgd':

        if lr is None:
            lr = 0.01

        if decay is None:
            decay = 0.0

        optimizer = optimizers.SGD(lr=lr,
                                   momentum=momentum,
                                   decay=decay,
                                   nesterov=nesterov)
    ########
    # Adam #
    ########
    elif opt == 'adam':

        if lr is None:
            lr = 0.001

        if decay is None:
            decay = 0.0

        optimizer = optimizers.Adam(lr=lr,
                                    beta_1=beta_1,
                                    beta_2=beta_2,
                                    epsilon=epsilon,
                                    decay=decay)
    ##########
    # Adamax #
    ##########
    elif opt == 'adamax':

        if lr is None:
            lr = 0.002

        if decay is None:
            decay = 0.0

        optimizer = optimizers.Adamax(lr=lr,
                                      beta_1=beta_1,
                                      beta_2=beta_2,
                                      epsilon=epsilon,
                                      decay=decay)
    #########
    # Nadam #
    #########
    # It is recommended to leave the parameters of this
    # optimizer at their default values.
    elif opt == 'nadam':

        if lr is None:
            lr = 0.002

        if decay is None:
            decay = 0.004

        optimizer = optimizers.Nadam(lr=lr,
                                     beta_1=beta_1,
                                     beta_2=beta_2,
                                     epsilon=epsilon,
                                     schedule_decay=decay)

    ###########
    # RMSprop #
    ###########
    # It is recommended to leave the parameters of this
    # optimizer at their default values (except the learning
    # rate, which can be freely tuned).
    elif opt == 'rmsprop':

        if lr is None:
            lr = 0.001

        if decay is None:
            decay = 0.0

        if rho is None:
            rho = 0.9

        optimizer = optimizers.RMSprop(lr=lr,
                                       rho=rho,
                                       epsilon=epsilon,
                                       decay=decay)
    ###########
    # Adagrad #
    ###########
    # It is recommended to leave the parameters of this
    # optimizer at their default values.
    elif opt == 'adagrad':

        if lr is None:
            lr = 0.01

        if decay is None:
            decay = 0.0

        optimizer = optimizers.Adagrad(lr=lr, decay=decay, epsilon=epsilon)

    ############
    # Adadelta #
    ############
    # It is recommended to leave the parameters of this
    # optimizer at their default values.
    elif opt == 'adadelta':

        if lr is None:
            lr = 1.0

        if decay is None:
            decay = 0.0

        if rho is None:
            rho = 0.95

        optimizer = optimizers.Adadelta(lr=lr,
                                        rho=rho,
                                        epsilon=epsilon,
                                        decay=decay)
    else:
        print('ERROR: Unknown optimizer')
        sys.exit(1)

    return optimizer
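A minimal sketch of how the get_optimizer wrapper above is typically used (the toy model and hyperparameter values are placeholders):

# Sketch: building an optimizer via the wrapper and compiling a placeholder model.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(10, input_shape=(20,), activation='softmax')])
opt = get_optimizer('adagrad', lr=0.01, decay=1e-4)  # None values fall back to defaults
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])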
Example #20
def main():

    global svrg
    global adaGrad

    # Set up data

    # -------- Make sure you change the featureSize variable below to the correct dimension --------

    #result = loadData('a1a.t', 30956, 123)
    result = loadData('w8a.txt', 59245, 300)
    #result = loadData('covtype.libsvm.binary.scale', 581012, 54)
    
    examples = result[0]
    labels = result[1]

    #np.save("examples.npy", examples)
    #np.save("labels.npy", labels)

    #examples = np.load('examples.npy')
    #labels = np.load('labels.npy')


    global featureSize
    featureSize = 300

    # Test Keras
    estimatedIters = testKeras(examples, labels)

    print("Choosing GD Plan...")

    while(1):

        print("0 - SGD")
        print("1 - Momentum")
        print("2 - Nesterov-Momentum")
        print("3 - Adagrad")
        print("4 - Adadelta")
        print("5 - RMSprop")
        print("6 - Adam")

        algoChoice = int(input("Choose Algo: "))

        # Create Model for Keras
        model = Sequential()
        model.add(Dense(units=1, activation='linear', input_dim=featureSize))

        # Choose GD Algorithm for Keras
        if (algoChoice == 0):
            myOpt = optimizers.SGD(lr=0.01, momentum=0., decay=0., nesterov=False)
        elif (algoChoice == 1):
            myOpt = optimizers.SGD(lr=0.01, momentum=0.9, decay=0., nesterov=False)
        elif (algoChoice == 2):
            myOpt = optimizers.SGD(lr=0.01, momentum=0.9, decay=0., nesterov=True)
        elif (algoChoice == 3):
            myOpt = optimizers.Adagrad(lr=0.01, epsilon=1e-6)
        elif (algoChoice == 4):
            myOpt = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-6)
        elif (algoChoice == 5):
            myOpt = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-6)
        elif (algoChoice == 6):
            myOpt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)

        model.compile(optimizer=myOpt, loss=logloss)

        # To get results, use early stop, double iter.
        customCallback = PrintDeltaLossAndTime()
        myCallbacks = [customCallback]
        output = model.fit(examples, labels, epochs=estimatedIters[algoChoice], batch_size=int(len(examples)/50), callbacks=myCallbacks)
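logloss and PrintDeltaLossAndTime are defined elsewhere in the source file; a minimal sketch of what such a timing / loss-delta callback could look like (an assumption, not the original implementation):

# Hedged sketch of a callback in the spirit of PrintDeltaLossAndTime (assumed, not the original).
import time
from keras.callbacks import Callback

class PrintDeltaLossAndTimeSketch(Callback):
    def on_train_begin(self, logs=None):
        self.start = time.time()
        self.prev_loss = None

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        loss = logs.get('loss')
        delta = None if self.prev_loss is None else self.prev_loss - loss
        print("epoch %d  loss %.6f  delta %s  elapsed %.2fs"
              % (epoch, loss, delta, time.time() - self.start))
        self.prev_loss = loss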
Example #21
def train():
    """
    Def trains a Keras model of SqueezeDet and stores the checkpoint after each epoch
    """

    #create config object
    cfg = load_dict(CONFIG)

    #create subdirs for logging of checkpoints and tensorboard stuff
    checkpoint_dir = log_dir_name + "/checkpoints"
    tb_dir = log_dir_name + "/tensorboard"

    #delete old checkpoints and tensorboard stuff
    if tf.gfile.Exists(checkpoint_dir) and cfg.init_file == 'none':
        tf.gfile.DeleteRecursively(checkpoint_dir)

    if tf.gfile.Exists(tb_dir):
        tf.gfile.DeleteRecursively(tb_dir)

    tf.gfile.MakeDirs(tb_dir)
    tf.gfile.MakeDirs(checkpoint_dir)

    #add stuff for documentation to config
    cfg.EPOCHS = EPOCHS
    cfg.OPTIMIZER = OPTIMIZER
    cfg.CUDA_VISIBLE_DEVICES = CUDA_VISIBLE_DEVICES
    cfg.GPUS = GPUS
    cfg.REDUCELRONPLATEAU = REDUCELRONPLATEAU

    if cfg.init_file != 'none':
        # ------------------ ImageTagger -----------------------
        # img_file = 'imagetagger\\jpg\\train_jpg.txt'
        # gt_dir = 'imagetagger\\train_jpg.json'
        # base = "D:\\Humanoid\\squeezeDet\\Embedded_Object_Detection\\imagetagger\\jpg\\TRAIN\\"

        # ------------------ Small COCO ------------------------
        img_file = 'dataset\\train_small.txt'
        gt_dir = 'dataset\\annotations\\train_small.json'
        base = "D:\\Humanoid\\squeezeDet\\Embedded_Object_Detection\\dataset\\train2017_small\\"

    else:
        # ---------------------- COCO ------------------------
        # img_file = 'dataset\\images.txt'
        # gt_dir = 'dataset\\annotations\\ann_train_clean.json'
        # base = 'D:\\Humanoid\\squeezeDet\\Embedded_Object_Detection\\dataset\\train2017\\'

        # ------------------ Small COCO ------------------------
        # img_file = 'dataset\\train_small.txt'
        # gt_dir = 'dataset\\annotations\\train_small.json'
        # base = "D:\\Humanoid\\squeezeDet\\Embedded_Object_Detection\\dataset\\train2017_small\\"

        # ------------------ ImageTagger -----------------------
        img_file = 'imagetagger\\train_jpg.txt'
        gt_dir = 'imagetagger\\train_jpg.json'
        base = "D:\\Humanoid\\squeezeDet\\Embedded_Object_Detection\\imagetagger\\jpg\\TRAIN\\"

    #open the file listing the images with their full path names
    with open(img_file) as imgs:
        img_names = imgs.read().splitlines()
    imgs.close()

    #set gpu
    if GPUS < 2:
        os.environ['CUDA_VISIBLE_DEVICES'] = CUDA_VISIBLE_DEVICES
    else:
        gpus = ""
        for i in range(GPUS):
            gpus += str(i) + ","
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus

    #scale batch size to gpus
    cfg.BATCH_SIZE = cfg.BATCH_SIZE * GPUS

    if STEPS is not None:
        nbatches_train = STEPS
    else:
        nbatches_train, _ = divmod(len(img_names), cfg.BATCH_SIZE)

    #tf config and session
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    K.set_session(sess)

    #instantiate model
    squeeze = SqueezeDet(cfg)

    #callbacks
    cb = []

    # print some run info
    print("Number of epochs:  {}".format(EPOCHS))
    print("Batch size: {}".format(cfg.BATCH_SIZE))
    print("STEPS ", nbatches_train)

    # set optimizer
    # multiply by number of workers to adjust for increased batch size
    if OPTIMIZER == "adam":
        if cfg.init_file != 'none':
            cfg.LR = 1e-6 * GPUS
            opt = optimizers.Adam(lr=cfg.LR * GPUS, clipnorm=cfg.MAX_GRAD_NORM)
            print("Adam with learning rate ", cfg.LR)
        else:
            cfg.LR = 1e-5 * GPUS
            opt = optimizers.Adam(lr=cfg.LR * GPUS, clipnorm=cfg.MAX_GRAD_NORM)
            print("Adam with learning rate ", cfg.LR)
    elif OPTIMIZER == "rmsprop":
        opt = optimizers.RMSprop(lr=0.001 * GPUS, clipnorm=cfg.MAX_GRAD_NORM)
        cfg.LR = 0.001 * GPUS
    elif OPTIMIZER == "adagrad":
        opt = optimizers.Adagrad(lr=1.0 * GPUS, clipnorm=cfg.MAX_GRAD_NORM)
        cfg.LR = 1 * GPUS
    # use default if nothing was given
    else:
        # create sgd with momentum and gradient clipping
        cfg.LR = 1e-4 * GPUS
        opt = optimizers.SGD(lr=cfg.LR,
                             decay=0,
                             momentum=cfg.MOMENTUM,
                             nesterov=False,
                             clipnorm=cfg.MAX_GRAD_NORM)
        print("SGD with learning rate: {}".format(cfg.LR))

        #add manual learning rate decay
        #lrCallback = LearningRateScheduler(schedule)
        #cb.append(lrCallback)

    # save config file to log dir
    with open(log_dir_name + '/config.pkl', 'wb') as f:
        pickle.dump(cfg, f, pickle.HIGHEST_PROTOCOL)

    # add tensorboard callback
    tbCallBack = TensorBoard(log_dir=tb_dir,
                             histogram_freq=0,
                             write_graph=True,
                             write_images=True)

    cb.append(tbCallBack)

    # if flag was given, add reducelronplateu callback
    # Reduce learning rate when a metric has stopped improving
    if REDUCELRONPLATEAU:
        reduce_lr = ReduceLROnPlateau(monitor='loss',
                                      factor=0.1,
                                      verbose=1,
                                      patience=5,
                                      min_lr=0.0)
        cb.append(reduce_lr)

    # print keras model summary
    if VERBOSE:
        print(squeeze.model.summary())

    if cfg.init_file != "none":
        print("Weights initialized by name from {}".format(cfg.init_file))
        load_only_possible_weights(squeeze.model,
                                   cfg.init_file,
                                   verbose=VERBOSE)

        # these layers already existed in the ckpt and were loaded, so reinitialize them here. TODO: set a flag for that
        for layer in squeeze.model.layers:
            for v in layer.__dict__:
                v_arg = getattr(layer, v)
                if "fire10" in layer.name or "fire11" in layer.name or "conv12" in layer.name:
                    if hasattr(v_arg, 'initializer'):
                        initializer_method = getattr(v_arg, 'initializer')
                        initializer_method.run(session=sess)
                        print('reinitializing layer {}.{}'.format(
                            layer.name, v))

    #create train generator
    with open(gt_dir, 'r') as f:
        data = json.load(f)
    f.close()
    print("File read")

    train_generator = generator_from_data_path(img_names,
                                               data,
                                               base,
                                               config=cfg)

    #make model parallel if specified
    if GPUS > 1:
        #use multigpu model checkpoint
        ckp_saver = ModelCheckpointMultiGPU(
            checkpoint_dir + "/model.{epoch:02d}-{loss:.2f}.hdf5",
            monitor='loss',
            verbose=0,
            save_best_only=False,
            save_weights_only=True,
            mode='auto',
            period=1)

        cb.append(ckp_saver)

        print("Using multi gpu support with {} GPUs".format(GPUS))
        # make the model parallel
        parallel_model = multi_gpu_model(squeeze.model, gpus=GPUS)
        parallel_model.compile(optimizer=opt,
                               loss=[squeeze.loss],
                               metrics=[
                                   squeeze.loss_without_regularization,
                                   squeeze.bbox_loss, squeeze.class_loss,
                                   squeeze.conf_loss
                               ])

        #actually do the training
        parallel_model.fit_generator(train_generator,
                                     epochs=EPOCHS,
                                     steps_per_epoch=nbatches_train,
                                     callbacks=cb)
    else:
        # add a checkpoint saver
        ckp_saver = ModelCheckpoint(checkpoint_dir +
                                    "/model.{epoch:02d}-{loss:.2f}.hdf5",
                                    monitor='loss',
                                    verbose=0,
                                    save_best_only=False,
                                    save_weights_only=True,
                                    mode='auto',
                                    period=1)
        cb.append(ckp_saver)

        print("Using single GPU")
        # compile the model from the squeeze object; the loss is not a direct function of the model
        squeeze.model.compile(optimizer=opt,
                              loss=[squeeze.loss],
                              metrics=[
                                  squeeze.loss_without_regularization,
                                  squeeze.bbox_loss, squeeze.class_loss,
                                  squeeze.conf_loss
                              ])

        #actually do the training
        squeeze.model.fit_generator(train_generator,
                                    epochs=EPOCHS,
                                    steps_per_epoch=nbatches_train,
                                    callbacks=cb,
                                    verbose=1)

    gc.collect()
def testKeras(examples, labels, subsetPercent = 0.2, desiredError = 0.001, timeLimit = 30):

    # Test each algorithm on a smaller dataset.

    exampleSubset = examples[0:int(len(examples)*subsetPercent)]
    labelSubset = labels[0:int(len(labels)*subsetPercent)]

    max_iterations = 10000
    estimatedIters = []

    allResults = []

    for i in range(7):

        plt.figure(i+1)

        # Create Model for Keras
        model = Sequential()
        model.add(Dense(units=1, activation='linear', input_dim=featureSize))

        # Choose GD Algorithm for Keras
        if (i == 0):
            myOpt = optimizers.SGD(lr=0.01, momentum=0., decay=0., nesterov=False)
            plt.title("SGD")
        elif (i == 1):
            myOpt = optimizers.SGD(lr=0.01, momentum=0.9, decay=0., nesterov=False)
            plt.title("Momentum")
        elif (i == 2):
            myOpt = optimizers.SGD(lr=0.01, momentum=0.9, decay=0., nesterov=True)
            plt.title("Nesterov-Momentum")
        elif (i == 3):
            myOpt = optimizers.Adagrad(lr=0.01, epsilon=1e-6)
            plt.title("Adagrad")
        elif (i == 4):
            myOpt = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-6)
            plt.title("Adadelta")
        elif (i == 5):
            myOpt = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-6)
            plt.title("RMSprop")
        elif (i == 6):
            myOpt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
            plt.title("Adam")

        model.compile(optimizer=myOpt, loss=logloss)

        # Create the custom callback and run gradient descent. The loss history is saved in `output`; use it to find the change in loss per iteration.
        customCallback = EarlyStoppingByDeltaLossOrTime(desiredError, timeLimit)
        myCallbacks = [customCallback]
        output = model.fit(exampleSubset, labelSubset, epochs=max_iterations, batch_size=int(len(exampleSubset)/50), callbacks=myCallbacks)
        losses = np.array(output.history['loss'])
        deltaLosses = -np.diff(losses)

        # Run again on the full dataset for a few iterations; use this to find the average time per iteration.
        # Rebuild the model and reset the callback to clear the elapsed time and the loss history.
        model = Sequential()
        model.add(Dense(units=1, activation='linear', input_dim=featureSize))
        model.compile(optimizer=myOpt, loss=logloss)
        customCallback = EarlyStoppingByDeltaLossOrTime(desiredError, timeLimit)
        myCallbacks = [customCallback]
        output = model.fit(examples, labels, epochs=5, batch_size=int(len(examples)/50), callbacks=myCallbacks)
        losses = np.array(output.history['loss'])
        timePerIter = myCallbacks[0].timeElapsed/len(losses)

        # Pass in the following:
        # 1. The array of delta losses; the number of iterations is the length of the array.
        # 2. The average time per iteration on the full dataset.
        results = fitGD(deltaLosses, timePerIter, desiredError)
        estimatedIters.append(results[0])
        print("ETI: ", results[0])
        print("ETA: ", results[1])

        allResults.append(results)

    for i in range(len(allResults)):
        print("Algo", i, "Iterations:", allResults[i][0], "ETA:", allResults[i][1])

    plt.show()

    return estimatedIters
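
# Hypothetical sketch (not part of the original) of what an
# `EarlyStoppingByDeltaLossOrTime`-style callback could look like: stop
# training once the per-epoch loss improvement drops below `desiredError`
# or once `timeLimit` seconds have elapsed, while exposing `timeElapsed`.
import time
from keras.callbacks import Callback

class EarlyStoppingByDeltaLossOrTime(Callback):
    def __init__(self, desiredError, timeLimit):
        super().__init__()
        self.desiredError = desiredError
        self.timeLimit = timeLimit
        self.timeElapsed = 0.0

    def on_train_begin(self, logs=None):
        # record the wall-clock start time and reset the loss memory
        self.startTime = time.time()
        self.lastLoss = None

    def on_epoch_end(self, epoch, logs=None):
        loss = logs.get('loss')
        self.timeElapsed = time.time() - self.startTime
        # stop when the loss change is small enough or the time budget is spent
        if self.lastLoss is not None and abs(self.lastLoss - loss) < self.desiredError:
            self.model.stop_training = True
        if self.timeElapsed > self.timeLimit:
            self.model.stop_training = True
        self.lastLoss = loss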
Beispiel #23
0
feat_bilstm_c = Subtract()([bilstm_q1_c, bilstm_q2_c])
feat_bilstm_c = Lambda(lambda x: x**2)(feat_bilstm_c)

# ------ last hidden -------
pair = Concatenate()([feat_bilstm_w, feat_bilstm_c])
pair = layer_dense1_c(pair)
pair = Dropout(0.5)(pair)

# ------ predict -------
predict = Dense(1, activation='sigmoid')(pair)

LEARNING_RATE = 0.005
# optimizer = optimizers.SGD(lr=LEARNING_RATE, decay=1e-6, momentum=0.9, nesterov=True)
# optimizer = optimizers.Adam(lr=LEARNING_RATE, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)
optimizer = optimizers.Adagrad(lr=LEARNING_RATE, epsilon=1e-06)
model = Model(inputs=[input_q1_w, input_q1_c, input_q2_w, input_q2_c],
              outputs=predict)
# model.compile(loss='binary_crossentropy',
#               optimizer=optimizer,
#               metrics=['binary_crossentropy'])
model.compile(loss=focal_loss,
              optimizer=optimizer,
              metrics=['binary_crossentropy'])

###############################################
# train
###############################################
# train/test split
import random
random.shuffle(trainpair_idx)
Beispiel #24
0
def main():
    args = read_args()
    X_train, X_test, y_train, y_test_orginal = load_dataset()

    # TODO 2: Convert the labels to categorical
    y_test_orginal_categ = keras.utils.to_categorical(y_test_orginal, 2)
    y_train_categ = keras.utils.to_categorical(y_train, 2)

    print()
    # TODO 3: Build the Keras model
    model = Sequential()
    # Add all the layers

    units_output_layer = y_test_orginal_categ.shape[1]  #one per class

    if len(args.h_units) > 0:
        # Input to hidden layer
        model.add(
            Dense(args.h_units[0],
                  input_dim=X_train.shape[1],
                  kernel_regularizer=regularizers.l2(args.reg_l2)))
        model.add(Dropout(args.dropout[0]))
        model.add(Activation('relu'))

        for i in range(1, len(args.h_units)):
            model.add(
                Dense(args.h_units[i],
                      input_dim=X_train.shape[1],
                      kernel_regularizer=regularizers.l2(args.reg_l2)))
            model.add(Dropout(args.dropout[i]))
            model.add(Activation('relu'))

        # Hidden to output layer
        model.add(Dense(units_output_layer))
        model.add(Activation('softmax'))
    else:
        dropout = 0
        if len(args.dropout) > 0:
            dropout = args.dropout[0]
        model = Sequential([(Dense(units_output_layer,
                                   input_shape=(X_train.shape[1], ))),
                            Activation('softmax')])

    print(model.summary())

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adagrad(),
                  metrics=['accuracy'])

    # TODO 4: Fit the model
    history = model.fit(X_train,
                        y_train_categ,
                        batch_size=args.batch_size,
                        epochs=args.epochs)

    # TODO 5: Evaluate the model, calculating the metrics.
    # Option 1: Use the model.evaluate() method. For this, the model must be
    # already compiled with the metrics.
    # performance = model.evaluate(X_test, y_test_orginal_categ)

    # Option 2: Use the model.predict() method and calculate the metrics using
    # sklearn. We recommend this because you can store the predictions if you
    # need more analysis later. Also, if you calculate the metrics in a
    # notebook, you can compare multiple classifiers.
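    # Illustrative sketch only (not part of the original): with a softmax
    # output, Option 2 can also be computed by taking the argmax over the
    # class probabilities and comparing against the original integer labels:
    # class_predictions = model.predict(X_test).argmax(axis=1)
    # acc = accuracy_score(y_test_orginal, class_predictions)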
    predictions = model.predict(X_test)

    for sample in predictions:
        if sample[0] < 0.5:
            sample[0] = 0
        else:
            sample[0] = 1

        if sample[1] < 0.5:
            sample[1] = 0
        else:
            sample[1] = 1

    acc = accuracy_score(y_test_orginal_categ, predictions)

    print('accuracy: ' + str(acc))
    predictions = numpy.argmax(predictions, axis=1)
    # TODO 6: Save the results.
    results = pandas.DataFrame(y_test_orginal, columns=['y_test_orginal'])
    results.loc[:, 'predicted'] = predictions
    if args.experiment_name is None or args.experiment_name == "":
        args.experiment_name = str(args.batch_size) + "-" + str(
            args.epochs) + "-" + str(args.h_units) + "-" + str(
                args.reg_l2) + "-" + str(args.dropout)
    results.to_csv('predictions_{}.csv'.format(args.experiment_name),
                   index=False)

    score_file = open('score_{}.txt'.format(args.experiment_name), 'w')
    score_file.write(str(acc) + '\n')
    score_file.close()
Beispiel #25
0
if algorithm == 'rmsprop':
    optimizer = opt.RMSprop(lr=0.001,
                            rho=0.9,
                            epsilon=1e-06,
                            clipnorm=clipnorm,
                            clipvalue=clipvalue)
elif algorithm == 'sgd':
    optimizer = opt.SGD(lr=0.01,
                        momentum=0.0,
                        decay=0.0,
                        nesterov=False,
                        clipnorm=clipnorm,
                        clipvalue=clipvalue)
elif algorithm == 'adagrad':
    optimizer = opt.Adagrad(lr=0.01,
                            epsilon=1e-06,
                            clipnorm=clipnorm,
                            clipvalue=clipvalue)
elif algorithm == 'adadelta':
    optimizer = opt.Adadelta(lr=1.0,
                             rho=0.95,
                             epsilon=1e-06,
                             clipnorm=clipnorm,
                             clipvalue=clipvalue)
elif algorithm == 'adam':
    optimizer = opt.Adam(lr=0.001,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         clipnorm=clipnorm,
                         clipvalue=clipvalue)
elif algorithm == 'adamax':
Beispiel #26
0
BS = 10
LR = 0.001
DECAY = 0.00
EPSILON = 0.99
DROPOUT = 0.5
DROP = 0.90
EPOCHS_DROP = 25
NUM_TRAIN_IMAGES = 0
NUM_TEST_IMAGES = 0

# instantiate L1, L2 regularizers
reg1 = regularizers.l1(0.02)
reg2 = regularizers.l2(0.125)

# optimizer parameters
adagrad = optimizers.Adagrad(lr=LR, epsilon=EPSILON, decay=DECAY)


# define data input generator:
def csv_image_generator(inputPath, bs, lb, mode="train"):
    # open the CSV and PGS files for reading
    f = open(inputPath, "r")
    # loop indefinitely
    while True:
        # initialize our batches of images and labels
        images = []
        labels = []
        # keep looping until we reach our batch size
        while len(images) < bs:  # images and PGSs have the same length
            # attempt to read the next line of the CSV file
            line = f.readline()
Beispiel #27
0
def test_adagrad():
    _test_optimizer(optimizers.Adagrad())
    _test_optimizer(optimizers.Adagrad(decay=1e-3))
train_col_stds = train_col_stds + (train_col_stds == 0) * 1  # add 1 where the stdev is 0 to avoid division by zero
train_x = (train_x - train_col_means) / train_col_stds

test_x = mens_data_sql_processed_with_history[features][
    mens_data_sql_processed_with_history['date'] >= train_to_date].values
test_x = test_x.astype(float)
test_y = mens_data_sql_processed_with_history['player1Wins'][
    mens_data_sql_processed_with_history['date'] >= train_to_date].values
test_x = (test_x - train_col_means) / train_col_stds

# optimizers
optim_rmsprop = optimizers.RMSprop(lr=0.0001, rho=0.9)
optim_sgd = optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.5, nesterov=True)
optim_adagrad = optimizers.Adagrad(lr=0.01, decay=0)
optim_adam = optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, decay=0)

# model
number_epochs = 20
batch_sizes = 2**7
val_split = 0.1
dropout = 0.0
#weights = np.zeros(train_x.shape[0])+1
#weights = 0.9 + np.asarray(mens_data_sql_processed_with_history['player1Wins'][mens_data_sql_processed_with_history['date']<train_to_date]*0.2)
weights = np.asarray(mens_data_sql_processed_with_history['b365P1'][
    mens_data_sql_processed_with_history['date'] < train_to_date])
#weights = np.asarray(mens_data_sql_processed_with_history['b365P2'][mens_data_sql_processed_with_history['date']<train_to_date])

input_dimension = len(features)
def lstm_dense_sunspots(args):
    """
    Main function
    """
    # %%
    # IMPORTS

    # code repository sub-package imports
    from artificial_neural_networks.code.utils.download_monthly_sunspots import \
        download_monthly_sunspots
    from artificial_neural_networks.code.utils.generic_utils import save_regress_model,  \
        series_to_supervised, affine_transformation
    from artificial_neural_networks.code.utils.vis_utils import regression_figs

    # %%

    if args.verbose > 0:
        print(args)

    # For reproducibility
    if args.reproducible:
        os.environ['PYTHONHASHSEED'] = '0'
        np.random.seed(args.seed)
        rn.seed(args.seed)
        tf.set_random_seed(args.seed)
        sess = tf.Session(graph=tf.get_default_graph())
        K.set_session(sess)
        # print(hash("keras"))

    # %%
    # Load the Monthly sunspots dataset

    sunspots_path = download_monthly_sunspots()
    sunspots = np.genfromtxt(fname=sunspots_path,
                             dtype=np.float32,
                             delimiter=",",
                             skip_header=1,
                             usecols=1)

    # %%
    # Train-Test split

    L_series = len(sunspots)

    split_ratio = 2 / 3  # between zero and one
    n_split = int(L_series * split_ratio)

    look_back = args.look_back
    steps_ahead = args.steps_ahead

    train = sunspots[:n_split + (steps_ahead - 1)]
    test = sunspots[n_split - look_back:]

    train_x, train_y = series_to_supervised(train, look_back, steps_ahead)
    test_x, test_y = series_to_supervised(test, look_back, steps_ahead)

    train_y_series = train[look_back:train.shape[0] - (steps_ahead - 1)]
    test_y_series = test[look_back:]

    # %%
    # PREPROCESSING STEP

    scaling_factor = args.scaling_factor
    translation = args.translation

    n_train = train_x.shape[0]  # number of training examples/samples
    n_test = test_x.shape[0]  # number of test examples/samples

    n_in = train_x.shape[1]  # number of features / dimensions
    n_out = train_y.shape[1]  # number of steps ahead to be predicted

    # Reshape training and test sets
    train_x = train_x.reshape(n_train, n_in, 1)
    test_x = test_x.reshape(n_test, n_in, 1)

    # Apply preprocessing
    train_x_ = affine_transformation(train_x, scaling_factor, translation)
    train_y_ = affine_transformation(train_y, scaling_factor, translation)
    test_x_ = affine_transformation(test_x, scaling_factor, translation)
    test_y_ = affine_transformation(test_y, scaling_factor, translation)
    train_y_series_ = affine_transformation(train_y_series, scaling_factor,
                                            translation)
    test_y_series_ = affine_transformation(test_y_series, scaling_factor,
                                           translation)

    # %%
    # Model hyperparameters and ANN Architecture

    stateful = args.stateful

    if stateful:
        x = Input(shape=(n_in, 1), batch_shape=(1, n_in, 1))  # input layer
    else:
        x = Input(shape=(n_in, 1))  # input layer
    h = x

    h = LSTM(units=args.layer_size, stateful=stateful)(h)  # hidden layer

    out = Dense(units=n_out, activation=None)(h)  # output layer

    model = Model(inputs=x, outputs=out)

    if args.verbose > 0:
        model.summary()

    def root_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

    loss_function = root_mean_squared_error

    metrics = ['mean_absolute_error', 'mean_absolute_percentage_error']

    lr = args.lrearning_rate
    epsilon = args.epsilon
    optimizer_selection = {
        'Adadelta':
        optimizers.Adadelta(lr=lr, rho=0.95, epsilon=epsilon, decay=0.0),
        'Adagrad':
        optimizers.Adagrad(lr=lr, epsilon=epsilon, decay=0.0),
        'Adam':
        optimizers.Adam(lr=lr,
                        beta_1=0.9,
                        beta_2=0.999,
                        epsilon=epsilon,
                        decay=0.0,
                        amsgrad=False),
        'Adamax':
        optimizers.Adamax(lr=lr,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=epsilon,
                          decay=0.0),
        'Nadam':
        optimizers.Nadam(lr=lr,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=epsilon,
                         schedule_decay=0.004),
        'RMSprop':
        optimizers.RMSprop(lr=lr, rho=0.9, epsilon=epsilon, decay=0.0),
        'SGD':
        optimizers.SGD(lr=lr, momentum=0.0, decay=0.0, nesterov=False)
    }

    optimizer = optimizer_selection[args.optimizer]

    model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics)

    # %%
    # Save trained models for every epoch

    models_path = r'artificial_neural_networks/trained_models/'
    model_name = 'sunspots_lstm_dense'
    weights_path = models_path + model_name + '_weights'
    model_path = models_path + model_name + '_model'
    file_suffix = '_{epoch:04d}_{val_loss:.4f}_{val_mean_absolute_error:.4f}'

    if args.save_weights_only:
        file_path = weights_path
    else:
        file_path = model_path

    file_path += file_suffix

    monitor = 'val_loss'

    if args.save_models:
        checkpoint = ModelCheckpoint(file_path + '.h5',
                                     monitor=monitor,
                                     verbose=args.verbose,
                                     save_best_only=args.save_best,
                                     mode='auto',
                                     save_weights_only=args.save_weights_only)
        callbacks = [checkpoint]
    else:
        callbacks = []

    # %%
    # TRAINING PHASE
    """
    if stateful:
        shuffle = False
    else:
        shuffle = True
    """

    if args.time_training:
        start = timer()

    for i in range(0, args.n_epochs):
        if args.verbose > 0:
            print('Epoch: {0}/{1}'.format(i + 1, args.n_epochs))

        model.fit(x=train_x_,
                  y=train_y_,
                  validation_data=(test_x_, test_y_),
                  batch_size=args.batch_size,
                  epochs=1,
                  verbose=args.verbose,
                  callbacks=callbacks,
                  shuffle=True)

        if stateful:
            model.reset_states()

    if args.time_training:
        end = timer()
        duration = end - start
        print('Total time for training (in seconds):')
        print(duration)

    # %%

    def model_predict(x_, y_):
        """
        Predict using the LSTM Model (Multi-step ahead Forecasting)
        """
        n_y_ = y_.shape[0]

        y_pred = np.zeros(n_y_)

        if args.recursive:  # Recursive Strategy # TODO
            if args.verbose > 0:
                print('Following Recursive Strategy ...')

            n_x_ = x_.shape[0]

            n_iter = int(np.floor(n_x_ / steps_ahead))
            L_last_window = n_x_ % steps_ahead

            first = 0

            # Multi-step ahead Forecasting of all the full windows
            for i in range(0, n_iter):
                """ if args.verbose > 0:
                    print('Completed: {0}/{1}'.format(i + 1, n_iter + 1)) """

                pred_start = i * steps_ahead
                pred_end = pred_start + steps_ahead

                # first time step of each window (no recursion possible)
                j = pred_start
                k = j - pred_start  # (always zero and unused)
                x_dyn = np.copy(x_[j:j + 1])  # use actual values only
                y_dyn = model.predict(x_dyn)[:, first]
                y_pred[j:j + 1] = y_dyn

                # remaining time steps of each window (with recursion)
                for j in range(pred_start + 1, pred_end):
                    k = j - pred_start
                    x_dyn = np.copy(x_[j:j +
                                       1])  # use actual values (if possible)
                    x_start = np.max([0, look_back - k])
                    y_start = np.max([0, k - look_back]) + pred_start
                    # y_start = np.max([pred_start, j - look_back])
                    x_dyn[0, x_start:look_back,
                          0] = np.copy(y_pred[y_start:j])  # use pred. values
                    y_dyn = model.predict(x_dyn)[:, first]
                    # y_after = np.max([0, y_dyn]) + 0.015 * np.random.randn()
                    y_pred[j:j + 1] = np.max([0, y_dyn])
                    # y_pred[j:j + 1] = y_dyn

            # Multi-step ahead Forecasting of the last window
            if L_last_window > 0:
                """ if args.verbose > 0:
                    print('Completed: {0}/{1}'.format(n_iter + 1, n_iter + 1)) """

                pred_start = n_x_ - L_last_window
                pred_end = n_y_

                # first time step of the last window (no recursion possible)
                j = pred_start
                k = j - pred_start  # (always zero and unused)
                x_dyn = np.copy(x_[j:j + 1])  # use actual values only
                y_dyn = model.predict(x_dyn)[:, first]
                y_pred[j:j + 1] = y_dyn

                # remaining time steps of the last window (with recursion)
                for j in range(pred_start + 1, pred_end):
                    k = j - pred_start
                    x_dyn = np.roll(x_dyn, -1)  # use act. values (if possible)
                    x_start = np.max([0, look_back - k])
                    y_start = np.max([0, k - look_back]) + pred_start
                    # y_start = np.max([pred_start, j - look_back])
                    x_dyn[0, x_start:look_back,
                          0] = np.copy(y_pred[y_start:j])  # use pred. values
                    y_dyn = model.predict(x_dyn)[:, first]
                    # y_after = np.max([0, y_dyn]) + 0.015 * np.random.randn()
                    y_pred[j:j + 1] = np.max([0, y_dyn])
                    # y_pred[j:j + 1] = y_dyn
            """
            # One-step ahead Forecasting

            n_x_ = x_.shape[0]
            for i in range(0, n_x_):
                x_dyn = x_[i:i+1]
                y_dyn = model.predict(x_dyn)[0, 0]
                y_pred[i] = y_dyn

            for i in range(n_x_, n_y):
                x_dyn[0, :, 0] = y_[i - look_back:i]
                y_dyn = model.predict(x_dyn)[0, 0]
                y_pred[i] = y_dyn
            """
        else:  # Multiple Output Strategy # TODO
            if args.verbose > 0:
                print('Following Multiple Output Strategy ...')

            n_iter = int(np.floor(n_y_ / steps_ahead))
            L_last_window = n_y_ % steps_ahead

            y_dyn = x_[0, steps_ahead]
            # Multi-step ahead Forecasting of all the full windows
            for i in range(0, n_iter):
                pred_start = i * steps_ahead
                pred_end = pred_start + steps_ahead
                x_dyn = x_[pred_start:pred_start + 1]  # TODO
                y_dyn = model.predict(x_dyn)[0]
                y_pred[pred_start:pred_end] = y_dyn

            # Multi-step ahead Forecasting of the last window
            if L_last_window > 0:
                pred_start = n_y_ - L_last_window
                pred_end = n_y_
                x_dyn[0, :, 0] = y_[pred_end - look_back:pred_end]
                y_dyn = model.predict(x_dyn)[0]
                y_pred[pred_start:pred_end] = y_dyn[:L_last_window]

        return y_pred

    # %%
    # TESTING PHASE

    # Predict preprocessed values
    train_y_sum = [0]
    test_y_sum = [0]
    reps = 1
    for i in range(reps):
        train_y_pred_ = model_predict(train_x_, train_y_series_)
        test_y_pred_ = model_predict(test_x_, test_y_series_)

        train_y_sum = np.sum([train_y_sum, train_y_pred_], axis=0)
        test_y_sum = np.sum([test_y_sum, test_y_pred_], axis=0)

    train_y_pred_ = train_y_sum / reps
    test_y_pred_ = test_y_sum / reps

    # Remove preprocessing
    train_y_pred = affine_transformation(train_y_pred_,
                                         scaling_factor,
                                         translation,
                                         inverse=True)
    test_y_pred = affine_transformation(test_y_pred_,
                                        scaling_factor,
                                        translation,
                                        inverse=True)

    train_rmse = sqrt(mean_squared_error(train_y_series, train_y_pred))
    train_mae = mean_absolute_error(train_y_series, train_y_pred)
    train_r2 = r2_score(train_y_series, train_y_pred)

    test_rmse = sqrt(mean_squared_error(test_y_series, test_y_pred))
    test_mae = mean_absolute_error(test_y_series, test_y_pred)
    test_r2 = r2_score(test_y_series, test_y_pred)

    if args.verbose > 0:
        print('Train RMSE: %.4f ' % (train_rmse))
        print('Train MAE: %.4f ' % (train_mae))
        print('Train (1 - R_squared): %.4f ' % (1.0 - train_r2))
        print('Train R_squared: %.4f ' % (train_r2))
        print('')
        print('Test RMSE: %.4f ' % (test_rmse))
        print('Test MAE: %.4f ' % (test_mae))
        print('Test (1 - R_squared): %.4f ' % (1.0 - test_r2))
        print('Test R_squared: %.4f ' % (test_r2))

    # %%
    # Data Visualization

    if args.plot:
        regression_figs(train_y=train_y_series,
                        train_y_pred=train_y_pred,
                        test_y=test_y_series,
                        test_y_pred=test_y_pred)

    # %%
    # Save the architecture and the lastly trained model

    save_regress_model(model, models_path, model_name, weights_path,
                       model_path, file_suffix, test_rmse, test_mae, args)

    # %%

    return model
Beispiel #30
0
def ANN_Model(X, Y, test_X, test_y):
    """----------测试集原数据作图----------------"""
    # plt.figure(0)  # 创建图表1
    # plt.title('observe')
    # plt.scatter([_ for _ in range(test_y.shape[0])], test_y)

    # 训练次数
    # epochs = input('输入训练批次:\n')

    # loss_func = input('loss function ('
    #                   'mae[mean_absolute_error]\n'
    #                   'mse[mean_squared_error]\n'
    #                   'msle[mean_squared_logarithmic_error]\n'
    #                   'squared_hinge[squared_hinge]\n'
    #                   'logcosh[logcosh]\n'
    #                   '):\n')
    loss_func = 'mse'
    """----------配置网络模型----------------"""
    # 配置网络结构
    model = Sequential()

    # hidden_units = input('Number of hidden-layer units:\n')
    hidden_units = 20
    # first hidden layer configuration: 17 inputs, 20 outputs
    if layers_num == 1:
        model.add(
            Dense(hidden_units,
                  input_dim=len(InputIndex),
                  activation='sigmoid'))
        model.add(Dense(1, activation='sigmoid'))
    else:
        hidden_units1 = 20
        hidden_units2 = 16
        model.add(
            Dense(hidden_units1,
                  input_dim=len(InputIndex),
                  activation='sigmoid'))
        model.add(Dense(hidden_units2, activation='sigmoid'))
        model.add(Dense(1))

    # compile the model, specifying the cost function and the update method
    Ada = optimizers.Adagrad(lr=0.018, epsilon=1e-06)
    model.compile(loss=loss_func, optimizer=Ada, metrics=[loss_func])
    """----------训练模型--------------------"""
    print("training starts.....")
    model.fit(X, Y, epochs=epochs, verbose=1, batch_size=256)
    """----------评估模型--------------------"""
    # 用测试集去评估模型的准确度
    cost = model.evaluate(test_X, test_y)
    print('\nTest cost:', cost)
    """----------模型存储--------------------"""
    save_model(model, weight_file_path)

    # de-normalize the data
    trueTestYv = org_teY
    temp = model.predict(test_X).reshape(-1, 1)
    predTestYv = (temp.T * npscale.reshape(-1, 1)[-1, :] +
                  npminthred.reshape(-1, 1)[-1, :]).T

    save_data = {
        'Test': list(trueTestYv.T[0]),
        'Predict': list(predTestYv.T[0])
    }
    predict_predYv = pd.DataFrame(save_data)
    predict_predYv.to_csv('data/predict_test_value.csv')
    """----------计算R^2--------------------"""
    testYv = test_y.values.flatten()
    predYv = model.predict(test_X).flatten()
    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(
        testYv, predYv)
    print('R square is: ', r_value**2)

    # de-normalize the data
    trueAllYv = org_target.values
    temp = model.predict(train).reshape(-1, 1)
    predAllYv = (temp.T * npscale.reshape(-1, 1)[-1, :] +
                 npminthred.reshape(-1, 1)[-1, :]).T

    save_data = {
        'TrueData': list(trueAllYv.T[0]),
        'PredictData': list(predAllYv.T[0])
    }
    predict_AllYv = pd.DataFrame(save_data)
    predict_AllYv.to_csv('data/predict_all_value.csv')

    # compute the partial derivatives, then plot them directly

    # get the weights of each layer
    weights = {}
    for layer in model.layers:
        weight = layer.get_weights()
        info = layer.get_config()
        weights[info['name']] = weight
        if info['name'] == 'dense_1':
            df_weights = pd.DataFrame(weight[0].T, columns=InputIndex)
        else:
            df_weights = pd.DataFrame(weight[0].T)

        df_bias = pd.DataFrame(weight[1].T, columns=['bias'])
        df = pd.concat([df_weights, df_bias], axis=1)
        df.to_csv('weights/' + info['name'] + '.csv')

    res = []
    for RawNo in range(train.shape[0]):
        TargetValue = list(train.loc[RawNo].values)
        x = UTPM.init_jacobian(TargetValue)
        # choose the derivative function based on the number of hidden layers:
        # 1 layer: DerivativeExpression_jac1
        # 2 layers: DerivativeExpression_jac2
        if layers_num == 1:
            y = DerivativeExpression_jac1(x, weights)
        else:
            y = DerivativeExpression_jac2(x, weights)
        algopy_jacobian = UTPM.extract_jacobian(y)
        # insert NEE as the last column
        res.append(list(algopy_jacobian))
    res = np.array(res)
    save_data = {
        'TrueNEE': list(trueAllYv.T[0]),
        'PredNEE': list(predAllYv.T[0])
    }
    predict_AllYv = pd.DataFrame(save_data)

    deriColumns = ['d' + str(_) for _ in train.columns.tolist()]
    result = pd.DataFrame(res, columns=deriColumns)
    result = pd.concat([result, original_data, predict_AllYv], axis=1)
    result.to_csv('data/result_jacobian.csv')

    result.dropna(inplace=True)

    for i in range(len(InputIndex)):
        plt.figure(i)  # create a new figure
        IndexName = InputIndex[i]

        # result = result[(result['d'+IndexName] > -5000) & (result['d'+IndexName] < 5000)]

        y = abs(result['d' + IndexName].values *
                scale[IndexName]) / result.shape[0]
        x = result[IndexName].values
        plt.xlabel(IndexName)
        plt.ylabel("NEE-" + IndexName)
        plt.scatter(x, y, s=1)
        plt.savefig("res/" + IndexName + ".png")
    plt.show()