Example #1
def main(args):
    model_id = build_model_id(args)
    model_path = build_model_path(args, model_id)
    setup_model_dir(args, model_path)

    rng = np.random.RandomState(args.seed)

    json_cfg = load_model_json(args, x_train=None, n_classes=None)
    model_cfg = ModelConfig(**json_cfg)
    if args.verbose:
        print("model_cfg " + str(model_cfg))

    # Make the user-supplied model module importable, then pull in its entry points.
    # (The bare `import model` is dropped: it was unused and would be shadowed by
    # the `model` variable below.)
    sys.path.append(args.model_dir)
    from model import build_model, fit_model, load_train, load_validation

    train_data = load_train(args, model_cfg)
    validation_data = load_validation(args, model_cfg)

    if args.verbose:
        print("loading model")
    model = build_model(model_cfg, train_data, validation_data)
    fit_model(model, train_data, validation_data, args)
Example #2
import json
import pickle

import requests

# RabbitMQ consumer callback: handles 'fit_model' and 'forecast' tasks.
# Assumes module-level model_url, broker_url, the mdl module, and the
# cached-state globals declared below.
def callback(ch, method, properties, body):
    global no_data, model_timestamp, data, model

    print(f"Received {body}")
    task = json.loads(body)

    # Process fit model task
    if task['type'] == 'fit_model':
        # Fit model
        data = task['data']
        model = mdl.fit_model(data)

        # Load model to server
        requests.post(model_url + 'post_data', data=pickle.dumps(data))
        requests.post(model_url + 'post_model', data=pickle.dumps(model))

        # Announce that fit model task is complete
        requests.post(broker_url + 'fit_model_complete')
        print('New model has been created')

    # Process compute forecast task
    elif task['type'] == 'forecast':
        # Check if latest model is loaded. If not, get it from server
        latest_model_timestamp = requests.get(
            model_url + 'get_model_timestamp').json()['model_timestamp']
        if no_data or latest_model_timestamp != model_timestamp:
            data = pickle.loads(requests.get(model_url + 'get_data').content)
            model = pickle.loads(requests.get(model_url + 'get_model').content)
            model_timestamp = latest_model_timestamp
            no_data = False

        # Calculate forecast
        forecast_result = mdl.forecast(data, model, task['num_steps'])
        forecast_result = {
            'id': task['id'],
            'forecast_result': list(forecast_result)
        }

        # Send forecast result
        requests.post(broker_url + 'forecast_result', json=forecast_result)
        print(f'New forecast result has been computed {forecast_result}')
Example #3
    start = datetime.datetime.now()

    if args.perplexity is not None:
        config.training_params["perplexity"] = args.perplexity
    if args.epochs is not None:
        config.training_params["n_epochs"] = args.epochs
    if args.batchsize is not None:
        config.training_params["batch_size"] = args.batchsize

    report_config = json.dumps({
        "settings": settings,
        "optimization": config.optimization_conf,
        "training": config.training_params
    })

    train_dl, val_dl = split_train_val(
        points_ds,
        val_size=0.2,
        batch_size=config.training_params["batch_size"],
        seed=config.seed)
    fit_model(ffnn,
              train_dl,
              val_dl,
              opt,
              **config.training_params,
              epochs_to_save_after=config.epochs_to_save_after,
              save_dir_path=config.save_dir_path,
              configuration_report=report_config)

    fin = datetime.datetime.now()
    print("Training time:", fin - start, flush=True)
Example #4
    plt.gcf().savefig('loss.png')


if __name__ == '__main__':
    # Load the dataset and split them into training and test sets
    X_train, X_test, Y_train, Y_test = get_dataset()

    # Create the model and compile it
    model = create_model()
    compile_model(model)

    model.summary()  # summary() prints itself; print(model.summary()) would also print "None"
    print()

    print('Training model...')
    training_history = fit_model(model, X_train, Y_train)
    print()

    print('Evaluating model...')
    metrics = evaluate_model(model, X_test, Y_test)
    print()

    print('Loss on test set is:', metrics[0])
    print('Accuracy on test set is:', metrics[-1])
    print()

    # Uncomment to see the plot of the training and validation losses (loss.png)
    # print('Plotting training history...')
    # plot_training_history(training_history)
    # print('Done')
Example #5
            # (The opening of the eval_pairs list is not shown in this snippet.)
            #                 ('all->pol2 (train on all and test on poles (with no negs))', pol_pos_test, empty_test),
            ('all->pol3 (train on all and test on poles)', pol_pos_test,
             pol_neg_test)
        ])
]

# for (name, (train_pos, train_neg), (test_pos, test_neg)) in eval_pairs:
for train_tuple, test_tuples in eval_pairs:
    name, epochs, train_pos, train_neg = train_tuple
    log('Exp: ' + name + ', epochs: ' + str(epochs))
    X, y = create_dataset(train_pos, train_neg)
    X, y = shuffle(X, y)
    train_split = int(0.8 * X.shape[0])
    train_set = get_neon_set(X[:train_split], y[:train_split])
    val_set = get_neon_set(X[train_split:], y[train_split:])
    model = fit_model(train_set, val_set, num_epochs=epochs)
    train_error = test_model(model, train_set)
    log('Train Misclassification error = %.2f%%' % train_error)
    val_error = test_model(model, val_set)
    log('Val Misclassification error = %.2f%%' % val_error)
    for test_tuple in test_tuples:
        name, test_pos, test_neg = test_tuple
        log('Exp: ' + name)
        X_test, y_test = create_dataset(test_pos, test_neg)
        test_set = get_neon_set(X_test, y_test)
        test_error = test_model(model, test_set)
        log('  Test Misclassification error = %.2f%%' % test_error)
    log('')

    model.get_description()
    model.save_params('eq_polar_params.p')
Example #6
    plt.plot(history.history['val_loss'], label='validation')
    plt.title('lrate=' + str(learning_rate))
    plt.legend(loc="upper right")


#make a list of learning rates to try out
learning_rates = [1E-3, 1E-4, 1E-7, 0.01]
#fixed number of epochs
num_epochs = 100
#fixed batch size
batch_size = 10

for i in range(len(learning_rates)):
    plot_no = 420 + (i + 1)  #4x2 subplot grid: positions 421..424
    plt.subplot(plot_no)
    fit_model(features_train, labels_train, learning_rates[i], num_epochs, batch_size)

plt.tight_layout()
plt.savefig('static/images/my_plot.png')  #save before show(), which clears the figure
plt.show()
print("See the plot on the right with learning rates", learning_rates)
import app  #don't worry about this. This is to show you the plot in the browser.


######################################################################################################################################################
#Manual tuning: batch size
#The batch size is a hyperparameter that determines how many training samples are seen before updating the network’s parameters (weight and bias matrices).
#When the batch contains all the training examples, the process is called batch gradient descent. 
#If the batch has a single sample, the process is called stochastic gradient descent. Finally,
#when 1 < batch size < number of training points, it is called mini-batch gradient descent.
#An advantage of using batches is that GPUs can parallelize the network computations across the samples in a batch.
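
#Below is a minimal sketch of how the same pattern could sweep batch sizes instead;
#it assumes the fit_model(features, labels, lrate, epochs, batch_size) helper from
#above, and the batch-size list and output filename are illustrative, not from the
#original exercise (the helper's subplot title still shows the learning rate).
batch_sizes = [1, 2, 16, 64]  #from pure SGD (1) up to larger mini-batches
fixed_lrate = 1E-3
for i in range(len(batch_sizes)):
    plot_no = 420 + (i + 1)
    plt.subplot(plot_no)
    fit_model(features_train, labels_train, fixed_lrate, num_epochs, batch_sizes[i])
plt.tight_layout()
plt.savefig('static/images/batch_plot.png')  #hypothetical path, mirroring my_plot.png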
Example #7
def main():
    # parse the raw data files first
    normal_file_raw = 'dataset/normalTrafficTraining.txt'
    anomaly_file_raw = 'dataset/anomalousTrafficTest.txt'
    normal_test_raw = 'dataset/normalTrafficTest.txt'

    normal_test_parse = 'dataset/normalRequestTest.txt'
    normal_file_parse = 'dataset/normalRequestTraining.txt'
    anomaly_file_parse = 'dataset/anomalousRequestTest.txt'

    # Parse the files to decode the URLs in the raw HTTP requests and write them in a proper format
    parse_file(normal_file_raw, normal_file_parse)
    parse_file(anomaly_file_raw, anomaly_file_parse)
    parse_file(normal_test_raw, normal_test_parse)

    # Convert each HTTP request into a string and append each of these strings to a list
    X_train = to_string(normal_file_parse)  # read back the parsed files written above
    X_test_bad = to_string(anomaly_file_parse)
    X_test_good = to_string(normal_test_parse)

    # Label the good requests and bad requests
    # 0 --> good --> [1. 0.]
    # 1 --> bad -->  [0. 1.]
    y_train = [0] * len(X_train)
    y_bad = [1] * len(X_test_bad)
    y_good = [0] * len(X_test_good)

    # Put all the requests in the X and y lists
    y_unshuffled = y_bad + y_good + y_train
    X_unshuffled = X_test_bad + X_test_good + X_train

    # Shuffle the data
    X_shuffled, y_shuffled = shuffle(X_unshuffled, y_unshuffled)
    # use categorical output
    y_shuffled = to_categorical(y_shuffled)

    # set parameters:
    subset = None

    # Maximum length. Longer gets chopped. Shorter gets padded.
    maxlen = 1000

    # Model params
    # Filters for conv layers
    nb_filter = 64
    # Number of units in the dense layer
    dense_outputs = 64
    # Conv layer kernel size
    filter_kernels = [7, 7]
    # Number of units in the final output layer. Number of classes.
    cat_output = 2

    # Compile/fit params
    batch_size = 128
    nb_epoch = 20

    print('Loading data...')
    # Expect X to be a list of sentences and y to be the index of each category.
    (xt, yt), (x_test, y_test) = load_data(X_shuffled, y_shuffled)

    print('Creating vocab...')
    vocab, reverse_vocab, vocab_size, alphabet = create_vocab_set()

    print('Compile model...')
    model = create_model(filter_kernels, dense_outputs, maxlen, vocab_size,
                         nb_filter, cat_output)
    # Encode data
    xt = encode_data(xt, maxlen, vocab)
    x_test = encode_data(x_test, maxlen, vocab)

    print('Chars vocab: {}'.format(alphabet))
    print('Chars vocab size: {}'.format(vocab_size))
    print('X_train.shape: {}'.format(xt.shape))
    model.summary()

    print('Fit model...')
    patience = 5  # number of epochs with no improvement after which training stops
    history = fit_model(model, xt, yt, patience, batch_size, nb_epoch)

    print("Testing model...")
    score = test_model(x_test, y_test, batch_size)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # Graphs and data visualisation
    # Training Accuracy Vs validation Accuracy
    plt.figure(0, figsize=(10, 10))  # plt.figsize = ... has no effect; pass figsize to plt.figure
    plt.plot(history.history['acc'], 'r')
    plt.plot(history.history['val_acc'], 'g')
    plt.xticks(np.arange(0, 20, 1.0))
    plt.xlabel("Num of Epochs")
    plt.ylabel("Accuracy")
    plt.title("Training Accuracy Vs validation Accuracy")
    plt.legend(['train', 'validation'])

    # Training Loss Vs Validation Loss
    plt.figure(1, figsize=(10, 10))  # separate figure so the loss curves don't overwrite the accuracy plot
    plt.plot(history.history['loss'], 'r')
    plt.plot(history.history['val_loss'], 'g')
    plt.xticks(np.arange(0, 20, 1.0))
    plt.yticks(np.arange(0, 0.5, 0.1))
    plt.xlabel("Num of Epochs")
    plt.ylabel("Loss")
    plt.title("Training Loss Vs validation Loss")
    plt.legend(['train', 'validation'])

    # Confusion matrix
    y_pred = model.predict(x_test)
    y_pred1 = (y_pred > 0.5)
    matrix = confusion_matrix(y_test.argmax(axis=1), y_pred1.argmax(axis=1))
    print(matrix)
    plt.matshow(matrix, cmap=plt.cm.gray)
    plt.show()

    row_sum = matrix.sum(axis=1, keepdims=True)
    norm_conf = matrix / row_sum
    print(norm_conf)
    plt.matshow(norm_conf, cmap=plt.cm.gray)
    plt.show()
Example #8
"""This script is used to train your model. You can modify it if you want."""

import numpy as np
import sys
import pandas as pd

# This script expects the dataset path as a sys.argv argument.
input_dataset = '../training.csv'  # The default value.
if len(sys.argv) >= 2:
    input_dataset = sys.argv[1]

# Load the dataset.
input_data = pd.read_csv(input_dataset)
Xraw = input_data.drop(columns=['claim_amount'])
yraw = input_data['claim_amount'].values

# Create a model, train it, then save it.
import model

new_model = model.fit_model(Xraw, yraw)

model.save_model(new_model)
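
# A minimal sketch of the companion model module this script imports. The
# fit_model/save_model names match the calls above, but the estimator choice
# and the pickle filename are assumptions, not the original implementation.
import pickle

from sklearn.ensemble import GradientBoostingRegressor


def fit_model(X_raw, y_raw):
    """Train a regressor on the raw features and claim amounts."""
    estimator = GradientBoostingRegressor()
    # Numeric columns only, NaNs zero-filled, purely for simplicity of the sketch.
    estimator.fit(X_raw.select_dtypes('number').fillna(0), y_raw)
    return estimator


def save_model(trained_model, path='trained_model.pickle'):
    """Serialise the trained model to disk."""
    with open(path, 'wb') as f:
        pickle.dump(trained_model, f)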
Example #9
def train(self):
    self.sine = SiNE(self.nodes_num, 40, 20)
    # The numeric arguments are training hyperparameters passed positionally
    # to this project's fit_model.
    fit_model(self.sine, self.training_data, 1, 0.5,
              len(self.training_data), 500, 0.0001)
    print("3. Training finished!")
    torch.save(self.sine.state_dict(), './epinions_parameters')

def design_model_no_dropout(X, learning_rate):
    model = Sequential(name="my_first_model")
    input_layer = layers.InputLayer(input_shape=(X.shape[1], ))  # avoid shadowing the builtin input()
    model.add(input_layer)
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(24, activation='relu'))
    model.add(layers.Dense(1))
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss='mse', metrics=['mae'], optimizer=opt)
    return model
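
#design_model_dropout is used below but not defined in this snippet; here is a
#minimal sketch of a plausible version: the same stack as above with Dropout
#layers after the first two hidden layers (the 0.1/0.2 rates are assumptions).
def design_model_dropout(X, learning_rate):
    model = Sequential(name="my_first_model_with_dropout")
    model.add(layers.InputLayer(input_shape=(X.shape[1], )))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.1))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(24, activation='relu'))
    model.add(layers.Dense(1))
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss='mse', metrics=['mae'], optimizer=opt)
    return model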


#using early stopping in fit_model
learning_rate = 0.001
num_epochs = 200
#train the model without dropout
history1 = fit_model(design_model_no_dropout(features_train, learning_rate),
                     features_train, labels_train, learning_rate, num_epochs)
#train the model with dropout
history2 = fit_model(design_model_dropout(features_train, learning_rate),
                     features_train, labels_train, learning_rate, num_epochs)

plot(history1, 'static/images/no_dropout.png')

plot(history2, 'static/images/with_dropout.png')

import app