Example #1
from flask import request

import load_data  # project-local helpers: form_query*(), load_data(), clean_data()


def nofilter():
    """Look up a character in the selected mythology without applying filters."""
    mythology = request.args.get('mythology')
    character = request.args.get('character0')
    result = {}
    if mythology == 'Indian' or mythology == 'Greek':
        if character:
            # Indian and Greek characters live in the same Fuseki dataset
            query = load_data.form_query4(character)
            data = load_data.load_data(query,
                                       "http://3.101.82.158:3030/SER531")
            result = load_data.clean_data(data)
    elif mythology == "Noted Fictional Characters":
        if character:
            # Fictional characters are fetched from the public DBpedia endpoint
            query = load_data.form_query5(character)
            data = load_data.load_data(query, "http://dbpedia.org/sparql")
            result = data
    elif mythology == "Chinese":
        if character:
            # Chinese characters are served by a separate Fuseki instance
            query = load_data.form_query4(character)
            data = load_data.load_data(query,
                                       "http://54.183.203.151:3030/Chinese")
            result = load_data.clean_data(data)
    return result
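The view functions in these examples assume a running Flask app; the original route decorators were stripped from the snippet. A minimal sketch of the wiring (the route path and app object are assumptions, not taken from the source):

from flask import Flask

app = Flask(__name__)

# Hypothetical registration; the endpoint name defaults to the function name
app.add_url_rule('/nofilter', view_func=nofilter)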
Example #2
from flask import request, send_file

import graph  # project-local graphviz renderer
import load_data


def process():
    character = request.args.get('character0')

    query = load_data.form_query4(character)
    data = load_data.load_data(query, "http://3.101.82.158:3030/SER531")
    result = load_data.clean_data(data)
    # Render the character's relation graph; the PDF lands next to the app as unix.gv.pdf
    graph.draw_graph(character, result)
    # attachment_filename only takes effect when as_attachment=True is set
    return send_file('unix.gv.pdf', as_attachment=True,
                     attachment_filename='something.pdf')
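Note that Flask 2.0 renamed `attachment_filename` to `download_name`; on current Flask versions the return line would read (assuming Flask >= 2.0):

return send_file('unix.gv.pdf', as_attachment=True,
                 download_name='something.pdf')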
Example #3
from flask import request

import load_data


def show_result():
    """Run a filtered character query for the selected mythology."""
    mythology = request.args.get('mythology')
    data = None
    if mythology == 'Indian' or mythology == 'Greek':
        character = request.args.get('character0')
        # Two filters and three result variables arrive as separate query parameters
        filters = [request.args.get('filter0'), request.args.get('filter1')]
        # renamed from `vars` to avoid shadowing the built-in
        variables = [request.args.get('var1'),
                     request.args.get('var2'),
                     request.args.get('var3')]
        query = load_data.form_query2(character, filters, variables)
        data = load_data.load_data(query, "http://3.101.82.158:3030/SER531")
        data = load_data.clean_data(data)
    elif mythology == "Noted Fictional Characters":
        character1 = request.args.get('character')
        query = load_data.form_query4(character1)
        data = load_data.load_data(query, "http://dbpedia.org/sparql")
    return data
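A hypothetical client call matching the parameter names read above (the route path, host, and all parameter values are assumptions for illustration only):

import requests

resp = requests.get('http://localhost:5000/show_result', params={
    'mythology': 'Greek',
    'character0': 'Zeus',
    'filter0': 'parent', 'filter1': 'weapon',
    'var1': 'subject', 'var2': 'predicate', 'var3': 'object',
})
print(resp.json())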
Example #4
    # After this step the feature dataset is written as feature_dataSet --> 'data/train_three_train_feature.csv'

    ###############################################################################
    #   Merge positive and negative datasets (labeling, negative-sample          #
    #   downsampling, balanced merging of positive and negative samples)         #
    ###############################################################################
    # Labeling plus balanced merging of positive and negative samples
    # finally yields 'data/one_train_dataSet_final.csv'
    combine_feature_dataSet.main_combine()

    one_train_dataSet_final_path = 'data/one_train_dataSet_final.csv'
    two_train_dataSet_final_path = 'data/two_train_dataSet_final.csv'
    three_train_dataSet_final_path = 'data/three_train_dataSet_final.csv'
    # Dropping unused users and items finally yields the feature sets below:
    one_train_dataSet_after_clean_path = 'data/one_train_dataSet_after_clean.csv'
    two_train_dataSet_after_clean_path = 'data/two_train_dataSet_after_clean.csv'
    three_train_dataSet_after_clean_path = 'data/three_train_dataSet_after_clean.csv'
    load_data.clean_data(one_train_dataSet_final_path,
                         one_train_dataSet_after_clean_path, '_one')
    load_data.clean_data(two_train_dataSet_final_path,
                         two_train_dataSet_after_clean_path, '_two')
    load_data.clean_data(three_train_dataSet_final_path,
                         three_train_dataSet_after_clean_path, '_three')

    ###############################################################################
    #                Model estimation with a random forest (RF)                  #
    ###############################################################################

    # After feature selection:
    one_train_dataSet_after_clean_path = 'data/one_train_dataSet_after_clean.csv'
    two_train_dataSet_after_clean_path = 'data/two_train_dataSet_after_clean.csv'
    three_train_dataSet_after_clean_path = 'data/three_train_dataSet_after_clean.csv'
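The `load_data.clean_data` helper itself is not shown; a minimal pandas sketch consistent with the call sites above (the column names and the "drop users without positive labels" rule are assumptions):

# Hypothetical sketch of load_data.clean_data(src, dst, suffix); the column
# names and the cleaning rule are assumptions, not taken from the source.
import pandas as pd

def clean_data(src_path, dst_path, suffix):
    df = pd.read_csv(src_path)
    # Keep only users that ever appear with a positive label
    active_users = df.loc[df['label'] == 1, 'user_id'].unique()
    df = df[df['user_id'].isin(active_users)]
    # Tag derived feature columns with the stage suffix ('_one', '_two', ...)
    keep = {'user_id', 'item_id', 'label'}
    df = df.rename(columns={c: c + suffix for c in df.columns if c not in keep})
    df.to_csv(dst_path, index=False)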
Example #5
from feature_selection import sort_pop
from feature_selection import random_crossover
from feature_selection import mutation
from feature_selection import half_crossover
from feature_selection import remove_duplicates
# Assumption: read_data, clean_data and fitness_function live in the same
# module as the helpers above; the snippet uses them without importing them.
from feature_selection import read_data, clean_data, fitness_function
import pandas as pd
import random
import time
import matplotlib.pyplot as plt

# Seed the RNG so feature-selection runs are reproducible
random.seed(1)
time.sleep(1)

dataset = read_data()
dataset = clean_data(dataset)
print("Rows after cleaning:", len(dataset.index))

print("Fitness if using all features", fitness_function(dataset))

# Use the standard 13 features as a benchmark to measure against
standard_dataset = dataset[[
    "age", "sex", "CP", "trestbps", "chol", "FBS", "restecg", "thalach",
    "exang", "oldpeak", "slope", "ca", "thal", "num"
]]

print("Fitness of the standard features typically included",
      fitness_function(standard_dataset))

population = []
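The snippet ends just before the search loop is built; a hedged sketch of how the imported helpers could drive the genetic algorithm (the individual encoding, population size, loop count, and helper signatures are all assumptions, not from the source):

# Hypothetical continuation: individuals are feature-name subsets, and each
# helper is assumed to take a population list and return the updated list.
feature_names = [c for c in dataset.columns if c != 'num']

# Random initial population: each individual keeps the target column 'num'
for _ in range(20):
    subset = [f for f in feature_names if random.random() < 0.5] or [feature_names[0]]
    population.append(subset + ['num'])

for generation in range(10):
    population = sort_pop(population)           # rank by fitness_function
    population = half_crossover(population)     # breed the top half
    population = random_crossover(population)   # mix random pairs
    population = mutation(population)           # flip a few features
    population = remove_duplicates(population)  # drop repeated individuals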