def optimize_cnn(hype_space):
    """Build a convolutional neural network and train it."""
    if not is_gpu_available():
        tf.logging.warning('GPUs are not available')

    tf.logging.debug("Hyperspace: ", hype_space)
    tf.logging.debug("\n")
    try:
        model, model_name, result, _ = build_and_train(
            hype_space, log_for_tensorboard=True)

        tf.logging.info("Training ended with success:")
        tf.logging.info("Model name: %s", model_name)

        # Save training results to disk with unique filenames
        save_json_result(model_name, result)

        export_model(model_name)

        K.clear_session()
        del model
        tf.logging.info('before return result')
        return result

    except Exception as err:
        err_str = str(err)
        tf.logging.error(err_str)
        traceback_str = str(traceback.format_exc())
        tf.logging.error(traceback_str)
        return {
            'status': STATUS_FAIL,
            'err': err_str,
            'traceback': traceback_str
        }
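
For reference, save_json_result above writes each Hyperopt trial's result dict to its own file. A minimal sketch of such a helper, assuming plain-dict results and a hypothetical results/ directory (not necessarily this repo's layout):

import json
import os

def save_json_result(model_name, result):
    """Write one trial's result dict to results/<model_name>.json (sketch)."""
    os.makedirs('results', exist_ok=True)
    with open(os.path.join('results', '{}.json'.format(model_name)), 'w') as f:
        json.dump(result, f, indent=2, sort_keys=True, default=str)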
Example #2
def exporting_model(self):
    vocab_to_id = get_vocab_to_id(self.train_data_path, self.vocab_file,
                                  False)
    with tf.Session() as sess:
        rnn_model = RNNModel(self.rnn_size,
                             self.embedding_size, self.class_num,
                             len(vocab_to_id), self.learning_rate,
                             self.model_path)
        ckpt = tf.train.get_checkpoint_state(self.model_dir)
        rnn_model.saver.restore(sess, ckpt.model_checkpoint_path)
        export_model(sess, rnn_model, "export/", "1", vocab_to_id)
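
The exporting_model method above restores the latest checkpoint and hands the live session to export_model. In TF 1.x such a helper typically ends in a SavedModel export; the sketch below assumes the model exposes input and prediction tensors (both attribute names are assumptions, as is leaving vocab_to_id aside):

import os
import tensorflow as tf

def export_model(sess, model, export_base, version, vocab_to_id):
    """Write a versioned TF 1.x SavedModel for serving (sketch)."""
    export_dir = os.path.join(export_base, version)
    tf.saved_model.simple_save(
        sess,
        export_dir,
        inputs={'tokens': model.input},        # assumed input placeholder
        outputs={'scores': model.prediction})  # assumed output tensor
    # vocab_to_id could be persisted next to the export for serving-time lookup.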
    # Test-set input_fn (same feature dict as the predict input below).
    test_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": np.array(test_set['x'], dtype=np.float32),
                                                          "weight": teweights},
                                                       y=np.array(test_set['y']),
                                                       num_epochs=1,
                                                       shuffle=False)

    print('Testing...', end='', flush=True)
    # Evaluate accuracy.
    accuracy_score = classifier.evaluate(input_fn=test_input_fn)
    print('done')
    # print(accuracy_score)
    # print("\nTest Accuracy: {0:f}%\n".format(accuracy_score*100))
    new_samples = np.array(test_set['x'], dtype=np.float32)
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(x={
        "x": new_samples,
        "weight": teweights
    },
                                                          num_epochs=1,
                                                          shuffle=False)

    predictions = list(classifier.predict(input_fn=predict_input_fn))
    predictions = [
        predictions[i]["class_ids"][0] for i in range(len(predictions))
    ]
    total = len(predictions)
    cor = 0
    for i in range(len(predictions)):
        if test_set['y'][i] == predictions[i]:
            cor += 1
    print('Accuracy: {}'.format(cor / total * 100))
    print(confusion_matrix(test_set['y'], predictions))
    export_model(classifier, model_name, num_input)
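
The hand-rolled accuracy loop above can be collapsed into one vectorized comparison; given the same predictions list of class ids and test_set['y'], this prints the identical figure:

import numpy as np

y_true = np.asarray(test_set['y'])
y_pred = np.asarray(predictions)  # class ids extracted above
print('Accuracy: {}'.format(np.mean(y_pred == y_true) * 100))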
test_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": np.array(test_set['x'])},
                                                   y=np.array(test_set['y']),
                                                   num_epochs=1,
                                                   shuffle=False)

print('Testing...', end='', flush=True)
# Evaluate accuracy.
accuracy_score = regressor.evaluate(input_fn=test_input_fn)
print('done')
print(accuracy_score)
samples = np.array(test_set['x'])
predict_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": samples},
                                                      num_epochs=1,
                                                      shuffle=False)

predictions = list(regressor.predict(input_fn=predict_input_fn))
error = np.zeros(num_targets)
for i in range(nTest):
    for j in range(num_targets):
        error[j] += fabs(
            round(predictions[i]['predictions'][j], 4) -
            test_set['y'][i][j]) * 100

avg = 0
for i in range(num_targets):
    error[i] /= nTest
    error[i] = round(error[i], 2)
    avg += error[i]
print("\nError rate for each output")
print(error)
print('Average error rate: {}%'.format(round(avg / num_targets, 2)))
export_model(regressor, model_name, num_input)
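
The nested loop above computes a per-target mean absolute error scaled by 100. An equivalent vectorized form (matching the original up to its 4-decimal rounding of each prediction):

import numpy as np

pred = np.array([p['predictions'] for p in predictions])  # (nTest, num_targets)
truth = np.array(test_set['y'], dtype=np.float32)         # (nTest, num_targets)
per_target = np.round(np.abs(pred - truth).mean(axis=0) * 100, 2)
print("\nError rate for each output")
print(per_target)
print('Average error rate: {}%'.format(round(per_target.mean(), 2)))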
                # train
                history = current_model.fit(
                    x=X_train, y=Y_train,
                    epochs=general_conf['iterations'],
                    batch_size=general_conf['batch_size'],
                    validation_data=(X_test, Y_test),
                    callbacks=callbacks)
                # load best weights
                current_model.load_weights('{}-weights.h5'.format(outdir(out_name)))
                # test
                preds = current_model.evaluate(x=X_test, y=Y_test)
                show_stats(start_time, preds)

                # output results
                predictions = current_model.predict(X_test)
                Y_pred = ohe_to_label(predictions)
                Y_true = ohe_to_label(Y_test)

                conf_matrix(Y_true, Y_pred, class_conversion, out_name, save=True)

                # export model
                if general_conf['export_models']:
                    export_model(current_model, out_name)
                # notify
                telegram_send.send(['Training {}/{} finished'.format(cycle, total_cycles)])
            cycle += 1

# send finish signal via telegram and close aws instance
if general_conf['prod']:
    terminate()
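
The loop above reloads '{}-weights.h5'.format(outdir(out_name)) after fitting, which implies a checkpoint callback writing the best weights to that same path. A plausible callbacks list under that assumption (the monitored metric and patience are guesses):

from keras.callbacks import ModelCheckpoint, EarlyStopping

callbacks = [
    ModelCheckpoint('{}-weights.h5'.format(outdir(out_name)),
                    monitor='val_loss', save_best_only=True,
                    save_weights_only=True),
    EarlyStopping(monitor='val_loss', patience=10),
]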
Example #6
        print('    Training relative attack error: %.3f' %
              (training_n_wrong_attack / training_info.attack_n))
        print('    Training relative normal error: %.3f' %
              (training_n_wrong_normal / training_info.normal_n))

        validation_error, validation_y_pred, validation_n_wrong_normal, validation_n_wrong_attack = analyze(
            model, validation_set)
        validation_info = validation_set.analyze()
        print('    Validation error: %.3f' % validation_error)
        print('    Validation relative attack error: %.3f' %
              (validation_n_wrong_attack / validation_info.attack_n))
        print('    Validation relative normal error: %.3f' %
              (validation_n_wrong_normal / validation_info.normal_n))

        print('    Exporting model')
        path = '../data/%s.json' % model_type
        utils.export_model(path, model)
        print('    Model exported in file: %s' % path)
    elif model_type == 'load_random_forest_bin':
        # total attack: 301
        # wrong attack: 276
        # total normal: 4699
        # wrong normal: 32
        # Total training error: 0.062
        # Attack training relative error: 0.917
        # Normal training relative error: 0.007
        # slowhttptest -u http://10.0.0.1 -c 5000 -i 10 -p 5 -r 100

        # dataset.crop(5000)
        dataset.matching(FlowPolicy(['rudy', 'slowread', 'slowloris']))
        dataset.binarize()
Example #7
accuracy_score = classifier.evaluate(input_fn=test_input_fn)['accuracy']
#print(accuracy_score)
print('done')
print("\nTest Accuracy: {0:f}%\n".format(accuracy_score*100))

new_samples = np.array(
    [[(19.042930756538436 - latmin) / (latmax - latmin),
      (72.89973760314945 - lngmin) / (lngmax - lngmin), 4/6, 14/23, 50/59],
     [(19.053680376327897 - latmin) / (latmax - latmin),
      (72.88806462951663 - lngmin) / (lngmax - lngmin), 4/6, 14/23, 50/59]],
    dtype=np.float32)
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": new_samples},
    num_epochs=1,
    shuffle=False)

predictions = list(classifier.predict(input_fn=predict_input_fn))
predicted_classes = [p["class_ids"] for p in predictions]
print(predicted_classes) # 3 2
# new_samples = np.array(training_set['x'], dtype=np.float32)
# predict_input_fn = tf.estimator.inputs.numpy_input_fn(
#     x={"x": new_samples},
#     num_epochs=1,
#     shuffle=False)

# predictions = list(classifier.predict(input_fn=predict_input_fn))
# acc = [predictions[i]["class_ids"][0] == training_set['y'][i] for i in range(len(predictions))]
# c = 0
# for a in acc:
#     if a:
#         c += 1
# print(c/len(acc)*100)
export_model(classifier, 'nn', num_input)
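
The inline arithmetic in new_samples above is min-max normalization of the latitude/longitude features plus fractional encodings of what look like day/hour/minute fields. A small hypothetical helper makes that intent explicit:

def min_max(value, vmin, vmax):
    """Scale a raw feature into [0, 1], as done inline above (sketch)."""
    return (value - vmin) / (vmax - vmin)

# e.g. min_max(19.042930756538436, latmin, latmax) reproduces the first feature.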
for layer in full_model.layers[:5]:
    layer.trainable = False

full_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy', f1])

if not Path("weights-ae-stacked-long-gaus.h5").is_file():
    full_model.fit(x=X_train_4ch, y=Y_train,
                   epochs=general_conf['iterations'],
                   batch_size=general_conf['batch_size'],
                   callbacks=callbacks('ae-stacked-long-gaus', True),
                   validation_data=(X_test_4ch, Y_test))

full_model.load_weights('weights-ae-stacked-long-gaus.h5')

##########################
#### CHECKING RESULTS ####

preds = full_model.evaluate(x=X_test_4ch, y=Y_test)

start_time = time.time()
show_stats(start_time, preds)

# output results
predictions = full_model.predict(X_test_4ch)
Y_pred = ohe_to_label(predictions)
Y_true = ohe_to_label(Y_test)

conf_matrix(Y_true, Y_pred, class_conversion, general_conf['model_name'], save=True)

export_model(full_model, general_conf['model_name'])
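
Here export_model(full_model, name) receives only a Keras model and a name. One common way such a helper is written, offered as a sketch rather than this repo's actual code, is to split the architecture from the weights:

def export_model(model, name):
    """Persist a Keras model as JSON architecture plus HDF5 weights (sketch)."""
    with open('{}.json'.format(name), 'w') as f:
        f.write(model.to_json())
    model.save_weights('{}-weights.h5'.format(name))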