Example #1

import numpy as np
import pandas as pd
from flask import render_template, request
from sklearn.model_selection import train_test_split

# Project-specific helpers, assumed to be importable as in Example #2 below
# (DBManager, ETL, KerasClassifier, get_model_info come from models.models).
from models.models import *

# Assumption: the database manager is created once at module level.
base = DBManager()

def modeling():
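    """Render the model-configuration form; on submit, train a Keras
    classifier on the Iris data and return its loss history for plotting."""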

    options = [{
        'title': 'iris',
        'id': 1,
    }]
    print(options)

    elements = [
        {
            'title': 'Network layers number',
            'id': 'layers_n',
            'type': '',
            'default': 1,
        },
        {
            'title': 'Neuron number',
            'id': 'nn',
            'type': '',
            'default': 10,
        },
        {
            'title': 'Activation functions list',
            'id': 'func',
            'type': '',
            'default': 'sigmoid',
        },
        {
            'title': 'Metrics',
            'id': 'metrics',
            'type': '',
            'default': 'accuracy',
        },
        {
            'title': 'Loss',
            'id': 'loss',
            'type': '',
            'default': 'categorical_crossentropy',
        },
        {
            'title': 'Epoch number',
            'id': 'ep',
            'type': '',
'default': 100,
        },
        {
            'title': 'Datasets',
            'id': 'dataset',
            'type': '',
            'options': options,
            'default': options[0]['id'],
        },
    ]
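    # `elements` is passed to both templates; each entry's id doubles as the
    # query-string parameter name read back via request.args in the branch below.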
    return_url = "/"

    if request.args.get('result'):
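        # A 'result' query parameter signals that the configuration form was
        # submitted; without it, the empty input form (else branch) is rendered.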

        dataset_to_comps = [
            'sepal_length', 'sepal_width', 'petal_length', 'petal_width'
        ]  # TODO: support datasets other than Iris
        model_info, datasets = get_model_info(base, request.args.get('models'))

        # Parse the network hyperparameters submitted via the query string.
        neurons = [int(n) for n in request.args.get('nn').split(',')]
        # Only the first layer needs an explicit input dimension (the number
        # of features); 0 acts as a placeholder for the remaining layers.
        input_dim = [len(dataset_to_comps)] + [0] * (len(neurons) - 1)
        activation = request.args.get('func').split(',')

        etl = ETL(manager=base)
        load_data_instr = {"category_name": 'Iris Fisher'}
        path = 'local_files/iris.csv'
        etl.load_supervised_data(path=path,
                                 ctg_name=load_data_instr["category_name"])
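        # The ETL step appears to register the raw CSV with the database under
        # the given category; the features are read back from the file below.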

        # Alternative: pull each feature column individually from the database,
        # e.g. x1 = pd.DataFrame(base.get_raw_data(RateName=dataset_to_comps[0])[2].float_value)

        X = pd.read_csv(path)
        y = X['species']
        X = X.drop('species', axis=1)

        X = X.to_numpy()  # DataFrame.as_matrix() was removed in pandas 1.0
        train_X, test_X, train_y, test_y = train_test_split(X,
                                                            y,
                                                            train_size=0.7,
                                                            random_state=42)
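        # One-hot encode the class labels: pd.get_dummies produces one 0/1
        # indicator column per species, the format expected by a softmax
        # output layer trained with categorical_crossentropy.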
        train_y_ohe = np.array(pd.get_dummies(train_y), dtype=np.float64)
        test_y_ohe = np.array(pd.get_dummies(test_y), dtype=np.float64)

        # Assemble the model configuration from the submitted hyperparameters
        # (the previously hard-coded two-layer setup corresponds to nn=16,3
        # and func=relu,sigmoid; the final layer size must equal the number
        # of classes, 3 for Iris).
        build_args = {
            'build_args': [{
                'neurons': neurons[i],
                'input_dim': input_dim[i],
                'init': 'normal',
                'activation': activation[i],
            } for i in range(len(neurons))],
            'compile_args': {
                'loss': request.args.get('loss'),
                'optimizer': 'adam',
                'metrics': request.args.get('metrics'),
            }
        }
        fit_args = {
            'epochs': int(request.args.get('ep')),
            'batch_size': 1,
            'verbose': 1,
        }
        evaluate_args = {'verbose': 0}
        predict_args = {}

        print(build_args)

        m = KerasClassifier(name='iris', args=build_args)
        history = m.fit(train_X, train_y_ohe, fit_args=fit_args)
        loss, accuracy = m.evaluate(test_X, test_y_ohe, evaluate_args)
        prediction = m.predict(train_X)
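        # Only the loss history is plotted below; the evaluation metrics and
        # predictions are computed but not passed to the template.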

        # Drop the first epoch's loss so one large initial value does not
        # flatten the rest of the plot.
        loss_data = history.history['loss'][1:]

        return render_template("modeling.html",
                               elements=elements,
                               return_url=return_url,
                               loss=request.args.get('loss'),
                               loss_data=list(enumerate(loss_data)))
    else:

        return render_template(
            "input.html",
            elements=elements,
            return_url=return_url,
        )
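For reference, a request that exercises the training branch of Example #1 can
be built as in the sketch below. The route path and parameter set are
assumptions (the snippet does not show how modeling() is registered, and
get_model_info additionally reads a 'models' parameter not listed in
elements); the parameter names follow the ids in the elements list above.

from urllib.parse import urlencode

params = {
    'result': 1,                 # triggers the training branch
    'nn': '16,3',                # neurons per layer
    'func': 'relu,sigmoid',      # activation per layer
    'loss': 'categorical_crossentropy',
    'metrics': 'accuracy',
    'ep': 100,                   # epochs
    'dataset': 1,
}
print('/modeling?' + urlencode(params))
# prints something like /modeling?result=1&nn=16%2C3&func=relu%2Csigmoid&...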
Example #2
import numpy as np
import pandas as pd
import datetime
from models.models import *


if __name__ == '__main__':
    # Connect to the database and assemble the schema
    DB = DBManager()
    etl = ETL(manager=DB)

##### LOADING DATA FROM VARIOUS SOURCES

    # Load a local file for supervised learning
    load_data_instr = {"category_name": 'Iris Fisher'}
    etl.load_supervised_data(path='local_files/iris.csv', ctg_name=load_data_instr["category_name"])

    # Define categories for JapanExchange_Derivatives_ex2
    cats = [Category(name='futures', description='azaza'),
            Category(name='call', description='azaza'),
            Category(name='put', description='azaza'),
            Category(name='cbr', description='azaza')]
    DB.session.add_all(cats)
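    # Note: depending on how DBManager handles transactions, an explicit
    # DB.session.commit() may be required here to persist the categories.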

    # Import futures data (Kospi quotes)
    c, r, rh = etl.get_Kospi_data_ex1('../Kospi Quotes Eikon Loader.xlsx')

    # Load the local file 'rb_e20161027.txt.csv'
    etl.get_JapanExchange_Derivatives_ex2('../rb_e20161027.txt.csv')

    # Import data from pdf