コード例 #1
0
def test_model(x1_code, y1_label, x2_code, y2_label):
    """Train a fresh network on the combined opcode samples and print
    standard classification metrics on a held-out split.

    Args:
        x1_code, x2_code: lists of opcode sequences (lists of ints).
            NOTE: x1_code and y1_label are mutated in place (extended with
            the second set); callers should not reuse them afterwards.
        y1_label, y2_label: lists of binary labels
            (presumably 0 = benign, 1 = webshell — confirm with caller).
    """
    global model_record

    # Merge the two sample sets; train_test_split re-shuffles them below.
    x1_code.extend(x2_code)
    y1_label.extend(y2_label)

    print('serializing opcodes')
    training.serialize_codes(x1_code)

    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        x1_code, y1_label, shuffle=True)
    print('training set size: {0}'.format(len(x_train)))
    print('testing set size: {0}'.format(len(x_test)))

    # Hyper-parameters come from the JSON record written at training time.
    # Use a context manager so the file handle is not leaked (the original
    # json.load(open(...)) never closed it).
    with open(model_record, 'r') as record_file:
        record = json.load(record_file)
    # Pad every sequence to the length of the longest one.
    seq_length = len(max(x1_code, key=len))
    optimizer = record['optimizer']
    learning_rate = record['learning_rate']
    loss = record['loss']
    n_epoch = record['n_epoch']
    batch_size = record['batch_size']

    x_train = tflearn.data_utils.pad_sequences(x_train, maxlen=seq_length, value=0.)
    x_test = tflearn.data_utils.pad_sequences(x_test, maxlen=seq_length, value=0.)

    # Only the training labels need the one-hot form the network expects;
    # y_test stays as integer labels for the sklearn metrics below.
    y_train = tflearn.data_utils.to_categorical(y_train, nb_classes=2)

    network = training.create_network(
        seq_length,
        optimizer=optimizer,
        learning_rate=learning_rate,
        loss=loss
    )
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(
        x_train, y_train,
        n_epoch=n_epoch,
        shuffle=True,
        validation_set=0.1,
        show_metric=True,
        batch_size=batch_size,
        run_id='webshell')

    # Collapse the 2-class softmax output back to integer class labels.
    y_pred = model.predict(x_test)
    y_pred = argmax(y_pred, axis=1)

    print('metrics.accuracy_score:')
    print(metrics.accuracy_score(y_test, y_pred))
    print('metrics.confusion_matrix:')
    print(metrics.confusion_matrix(y_test, y_pred))
    print('metrics.precision_score:')
    print(metrics.precision_score(y_test, y_pred))
    print('metrics.recall_score:')
    print(metrics.recall_score(y_test, y_pred))
    print('metrics.f1_score:')
    print(metrics.f1_score(y_test, y_pred))
コード例 #2
0
def check_with_model(file_id):
    """Classify one previously uploaded file with the loaded global model.

    Args:
        file_id: identifier of the file inside app.config['UPLOAD_FOLDER'].

    Returns:
        dict with keys:
            'judge'  - bool, True when the predicted class index is nonzero.
            'chance' - float, softmax score of the predicted class.
    """
    global model

    file = TempFile(os.path.join(app.config['UPLOAD_FOLDER']), file_id)
    file_opcodes = [training.get_file_opcode(file.get_path())]
    training.serialize_codes(file_opcodes)
    # NOTE(review): seq_length is read from module scope — assumes it was
    # set when the model was loaded; confirm against the loader.
    file_opcodes = tflearn.data_utils.pad_sequences(file_opcodes,
                                                    maxlen=seq_length,
                                                    value=0.)

    res_raw = model.predict(file_opcodes)
    # Compute the predicted class index once instead of running argmax
    # twice over the same prediction.
    pred_class = argmax(res_raw, axis=1)[0]
    res = {
        # revert from categorical
        'judge': bool(pred_class),
        'chance': float(res_raw[0][pred_class])
    }
    return res
コード例 #3
0
def test_model(x1_code, y1_label, x2_code, y2_label):
    """Evaluate the saved model (global model_path) on the combined opcode
    samples and print standard classification metrics.

    Args:
        x1_code, x2_code: lists of opcode sequences (lists of ints).
            NOTE: x1_code and y1_label are mutated in place (extended with
            the second set); callers should not reuse them afterwards.
        y1_label, y2_label: lists of binary labels
            (presumably 0 = benign, 1 = webshell — confirm with caller).
    """
    global model_path, model_record

    x1_code.extend(x2_code)
    y1_label.extend(y2_label)

    x_test, y_test = shuffle(x1_code, y1_label)
    print('testing set size: {0}'.format(len(x_test)))

    print('serializing opcodes...')
    training.serialize_codes(x_test)

    # Use a context manager so the file handle is not leaked (the original
    # json.load(open(...)) never closed it).
    with open(model_record, 'r') as record_file:
        record = json.load(record_file)
    seq_length = record['seq_length']
    optimizer = record['optimizer']
    learning_rate = record['learning_rate']
    loss = record['loss']

    x_test = tflearn.data_utils.pad_sequences(x_test,
                                              maxlen=seq_length,
                                              value=0.)
    # BUG FIX: the original one-hot encoded y_test with to_categorical, but
    # every sklearn metric below compares it against the 1-D integer
    # predictions from argmax — a shape/semantics mismatch. Keep y_test as
    # integer labels (matching the sibling test_model in this file).

    network = training.create_network(seq_length,
                                      optimizer=optimizer,
                                      learning_rate=learning_rate,
                                      loss=loss)
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.load(model_path)

    # Collapse the 2-class softmax output back to integer class labels.
    y_pred = model.predict(x_test)
    y_pred = argmax(y_pred, axis=1)

    print('metrics.accuracy_score:')
    print(metrics.accuracy_score(y_test, y_pred))
    print('metrics.confusion_matrix:')
    print(metrics.confusion_matrix(y_test, y_pred))
    print('metrics.precision_score:')
    print(metrics.precision_score(y_test, y_pred))
    print('metrics.recall_score:')
    print(metrics.recall_score(y_test, y_pred))
    print('metrics.f1_score:')
    print(metrics.f1_score(y_test, y_pred))