Code Example #1
    def test_fit(self):
        #file_path = "../Dataset/00-91-Drugs-All-In-One-File.csv"
        #loaded_data = FileManager.load_file(file_path)

        read_data = ReadData()
        loaded_data = read_data.read_data_and_set_variable_settings("../Dataset/00-91-Drugs-All-In-One-File.csv", "../Dataset/VariableSetting.csv")

        data_manager = DataManager(normalizer=None)
        data_manager.set_data(loaded_data)
        data_manager.split_data_into_train_valid_test_sets()

        model = svm.SVR()

        velocity = Velocity()
        velocity_matrix = velocity.create_first_velocity()

        # define the first population (each row is generated randomly and validated)
        population = Population(velocity_matrix=velocity_matrix)
        population.create_first_population()

        debpso = DEBPSO(population.population_matrix[1])
        debpso.fit(data_manager.inputs[SplitTypes.Train], data_manager.targets[SplitTypes.Train])
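
These tests index inputs and targets with SplitTypes.Train and friends, which none of the excerpts define. A minimal sketch of the constants the usage implies (names inferred from usage; the real project may define them differently, e.g. as an Enum):

# Sketch of the SplitTypes constants assumed by the tests above;
# the actual definition is not shown in these excerpts.
class SplitTypes:
    Train = 0
    Valid = 1
    Test = 2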
Code Example #2
    def test_transform(self):
        read_data = ReadData()
        loaded_data = read_data.read_data_and_set_variable_settings("../Dataset/00-91-Drugs-All-In-One-File.csv", "../Dataset/VariableSetting.csv")


        data_manager = DataManager(normalizer=None)
        data_manager.set_data(loaded_data)
        data_manager.split_data_into_train_valid_test_sets()

        model = svm.SVR()

        velocity = Velocity()
        velocity_matrix = velocity.create_first_velocity()

        # define the first population (each row is generated randomly and validated)
        population = Population(velocity_matrix=velocity_matrix)
        population.create_first_population()

        debpso = DEBPSO(population.population_matrix[0])
        debpso.fit(data_manager.inputs[SplitTypes.Train], data_manager.targets[SplitTypes.Train])
        data_manager.transformed_input[SplitTypes.Train] = debpso.transform(data_manager.inputs[SplitTypes.Train])
        print("Population 0 row sum ", population.population_matrix[0].sum())
        print("Selected feature descriptors",debpso.sel_descriptors_for_curr_population)
        print("Transformed array", data_manager.transformed_input[SplitTypes.Train])
Code Example #3
    def test_run_experiment_for_DEBPSO_population_With_Velocity(self):
        read_data = ReadData()
        loaded_data = read_data.read_data_and_set_variable_settings("../Dataset/00-91-Drugs-All-In-One-File.csv", "../Dataset/VariableSetting.csv")

        #output_filename = FileManager.create_output_file()

        #rescaling_normalizer = RescalingNormalizer()
        #scikit_normalizer = ScikitNormalizer()
        #data_manager = DataManager(normalizer=scikit_normalizer)

        data_manager = DataManager(normalizer=None)
        data_manager.set_data(loaded_data)
        data_manager.split_data_into_train_valid_test_sets()

        #data_manager.feature_selector = debpso
        feature_selection_algo = None
        model = None

        if VariableSetting.Feature_Selection_Algorithm == 'GA' and VariableSetting.Model == 'SVM':
            #feature_selection_algo = GA()
            model = svm.SVR()
        elif VariableSetting.Feature_Selection_Algorithm == 'DEBPSO' and VariableSetting.Model == 'SVM':
            feature_selection_algo = DEBPSO()
            model = svm.SVR()
        experiment = Experiment(data_manager, model, feature_selection_algo)

        experiment.run_experiment()
Code Example #4
    def test_run_experiment_for_DEBPSO_population_With_Velocity(self):
        read_data = ReadData()
        loaded_data = read_data.read_data_and_set_variable_settings("../Dataset/00-91-Drugs-All-In-One-File.csv", "../Dataset/VariableSetting.csv")

        #output_filename = FileManager.create_output_file()

        zero_one_normalizer = ZeroOneMinMaxNormalizer()
        data_manager = DataManager(normalizer=zero_one_normalizer)

        data_manager.set_data(loaded_data)
        data_manager.split_data_into_train_valid_test_sets()

        print("Train Data", data_manager.inputs)
Code Example #5
async def inline(callback: types.CallbackQuery):
    if 'ans_' in callback.data:
        question_id = callback.data.split('_')[1]

        question = dm.get_question_by_id(question_id)
        dm.update_responsible(question.id, callback.message.chat.id)

        await Form.answer_question.set()
        print(dm.get_question_by_id(question.id))

        text = f"""
Вопрос: <i>{question.questions_text}</i>

Введите ответ:
        """
        keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True,
                                             resize_keyboard=True)
        keyboard.row('Отмена')  # "Cancel" button
        await bot.send_message(callback.message.chat.id,
                               text,
                               reply_markup=keyboard,
                               parse_mode='html')

    elif 'cls_' in callback.data:
        question = dm.get_question_by_id(callback.data.split('_')[1])
        dm.update_answer(question.id, '')
        await bot.send_message(callback.message.chat.id,
                               f'Вопрос: /ques{question.id} закрыт.')

    elif callback.data == 'get_question':
        questions = DataManager.get_questions_by_status('Открыт')
        question = random.choice(questions)
        print(question)
        keyboard = types.InlineKeyboardMarkup()
        keyboard.row(
            types.InlineKeyboardButton('Ответить',
                                       callback_data='ans_{}'.format(
                                           question.id)))
        keyboard.row(
            types.InlineKeyboardButton('Закрыть',
                                       callback_data='cls_{}'.format(
                                           question.id)))

        text = f"""
<b>Вопрос:</b>
/ques{question.id}

Статус: {question.status}
От пользователя: {question.from_user}
Текст: {question.questions_text}
        """
        await bot.send_message(callback.message.chat.id,
                               text,
                               reply_markup=keyboard,
                               parse_mode='html')
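
This handler reads callback.data and callback.message, i.e. it consumes aiogram 2.x callback queries (inline-button presses); the registration decorator is not part of the excerpt. A sketch of how it would plausibly be wired up, assuming the dp dispatcher set up in Code Example #11 below:

# Assumed registration (aiogram 2.x style); the original decorator is not shown.
@dp.callback_query_handler(
    lambda c: c.data.startswith(('ans_', 'cls_')) or c.data == 'get_question')
async def inline(callback: types.CallbackQuery):
    ...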
Code Example #6
    def test_fit(self):
        file_path = "../Dataset/00-91-Drugs-All-In-One-File.csv"
        loaded_data = FileManager.load_file(file_path)

        data_manager = DataManager(normalizer=None)
        data_manager.set_data(loaded_data)
        data_manager.split_data_into_train_valid_test_sets(test_split=0.15,
                                                           train_split=0.70)

        model = svm.SVR()

        velocity = Velocity()
        velocity_matrix = velocity.create_first_velocity()

        # define the first population (each row is generated randomly and validated)
        population = Population(velocity_matrix=velocity_matrix)
        population.create_first_population()

        debpso = DEBPSO(population.population_matrix[1])
        debpso.fit(data_manager.inputs[SplitTypes.Train],
                   data_manager.targets[SplitTypes.Train])
        print("Population 1 row sum ", population.population_matrix[1].sum())
        print("Selected feature descriptors",
              debpso.sel_descriptors_for_curr_population)
Code Example #7
import pandas as pd
from pathlib import Path
from src.DataManager import DataManager
from src.FeatureManager import FeatureManager
from src.Preprocessing import Preprocessor
import src.TrainManager as TrainManager
from src.configuration import config
from src.utils import DfCustomPrintFormat

# Load data
data = DataManager()
data.LoadData()

# Feature Engineering
features = FeatureManager()
features.EngineerFeatures(data)

# Preprocessing
preprocessor = Preprocessor()
preprocessor.Preprocess(data, features)

# Train
TrainManager.Train(preprocessor, data)
Code Example #8
required_r2 = {
    SplitTypes.Train: .6,
    SplitTypes.Valid: .5,
    SplitTypes.Test: .5,
}

file_path = "../Dataset/00-91-Drugs-All-In-One-File.csv"
loaded_data = FileManager.load_file(file_path)
output_filename = FileManager.create_output_file()


#rescaling_normalizer = RescalingNormalizer()
#scikit_normalizer = ScikitNormalizer()
#data_manager = DataManager(normalizer=scikit_normalizer)

data_manager = DataManager(normalizer=None)
data_manager.set_data(loaded_data)
data_manager.split_data(test_split=0.15, train_split=0.70)

model = svm.SVR()

population = Population()
population.load_data()


'''

    TrainX, TrainY, ValidateX, ValidateY, TestX, TestY = FromDataFileMLR_DE_BPSO.getAllOfTheData()
    TrainX, ValidateX, TestX = FromDataFileMLR_DE_BPSO.rescaleTheData(TrainX, ValidateX, TestX)

    velocity = createInitVelMat(numOfPop, numOfFea)
Code Example #9
from src.DataManager import DataManager
from copy import deepcopy
from datetime import datetime

DM = DataManager()

DM.load_original_file("./data/AirQualityData/QualitatAire2016TotCatalunya.csv")

number_polutants = len(DM.list_all_polutants())
number_of_stations = len(DM.list_all_stations())
print('Full file data:')
print('Number of rows in originalDF: {}'.format(len(DM.originalDF)))
print('Number of polutants: {}'.format(number_polutants))
print('Number of stations: {}'.format(number_of_stations))
print('\n\n----------------------\n\n')

original_data_frame = DM.originalDF  # keep a reference to the unfiltered frame

DM.originalDF = DM.filter_by_time('2016-07-01', '2016-07-15')

number_polutants = len(DM.list_all_polutants())
number_of_stations = len(DM.list_all_stations())
print('Filtered by time data:')
print('Number of rows in originalDF: {}'.format(len(DM.originalDF)))
print('Number of polutants: {}'.format(number_polutants))
print('Number of stations: {}'.format(number_of_stations))

DM.split_by_polutant()

PolutantsDF_xvpca_format = deepcopy(DM.by_polutant_dataframes)
Code Example #10
from src.tools import data_initializer
from src.analysis import base_nlp_scenario

from src.DataManager import DataManager

DataManager.create_db()
Code Example #11
from aiogram import Bot, types, executor
from aiogram.dispatcher import Dispatcher
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import State, StatesGroup
import random

from src.settings import TG_TOKEN, ADMINS
from src.DataManager import DataManager
from src.predictor import model, predict_class

bot = Bot(token=TG_TOKEN)
dp = Dispatcher(bot, storage=MemoryStorage())
dm = DataManager()


class Form(StatesGroup):
    text_question = State()
    id_questions = State()
    answer_question = State()


@dp.message_handler(state=Form.text_question, content_types=['text'])
async def new_question(message: types.Message, state: FSMContext):
    if message.text == 'Отмена':  # "Cancel"
        await bot.send_message(message.chat.id, 'Отменено')  # "Cancelled"
        await state.finish()
    else:
        async with state.proxy() as data:
            data['text_question'] = message.text
Code Example #12
from src.FileManager import FileManager
from src.DataManager import DataManager
from src.VariableSetting import VariableSetting
from src.Velocity import Velocity
# imports assumed by the calls below; module paths follow the src.* pattern
from src.ReadData import ReadData
from src.DEBPSO import DEBPSO
from sklearn import svm
from sklearn.svm import LinearSVC

read_data = ReadData()
loaded_data = read_data.read_data_and_set_variable_settings(
    "../Dataset/00-91-Drugs-All-In-One-File.csv",
    "../Dataset/VariableSetting.csv")

output_filename = FileManager.create_output_file()

#normalizer = ZeroOneMinMaxNormalizer()
#normalizer = MinMaxScaler()
normalizer = None
data_manager = DataManager(normalizer=normalizer)
data_manager.set_data(loaded_data)
data_manager.split_data_into_train_valid_test_sets()

#data_manager.feature_selector = debpso
#set feature selection algorithm based on variable settings
feature_selection_algo = None
if VariableSetting.Feature_Selection_Algorithm == 'DEBPSO':
    feature_selection_algo = DEBPSO()
if VariableSetting.Feature_Selection_Algorithm == 'LinearSVC':
    feature_selection_algo = LinearSVC()

#set model based on variable settings
if VariableSetting.Model == 'SVM':
    model = svm.SVR()
elif VariableSetting.Model == 'BayesianRidge':
Code Example #13
File: routes.py  Project: jcoumont/us-income
from flask import Flask
from flask import jsonify, render_template
from src.DataManager import DataManager
from src.AccuracyManager import AccuracyManager
from src.RFClassifierProvider import RFClassifierProvider

import random
import os

app = Flask(__name__)

dataManager = DataManager()
accuracyManager = AccuracyManager()
rfClassifierProvider = RFClassifierProvider()

X_train, y_train, X_test, y_test = dataManager.get_train_test()

# Default model
model = rfClassifierProvider.get_classifier()
model.fit(X_train, y_train)

acc_model_train = accuracyManager.check_model_accuracy(model, X_train, y_train)
acc_model_test = accuracyManager.check_model_accuracy(model, X_test, y_test)
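
check_model_accuracy is not shown in this excerpt; for a classifier it plausibly reduces to sklearn's accuracy_score over the model's predictions. A sketch under that assumption (not the project's AccuracyManager):

from sklearn.metrics import accuracy_score

# Plausible body of AccuracyManager.check_model_accuracy (an assumption).
def check_model_accuracy(model, X, y):
    return accuracy_score(y, model.predict(X))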

# Hyperparameter-tuned model
model_tuned = rfClassifierProvider.get_classifier(True)  # True presumably requests the tuned variant
model_tuned.fit(X_train, y_train)

acc_model_tuned_train = accuracyManager.check_model_accuracy(
    model_tuned, X_train, y_train)
acc_model_tuned_test = accuracyManager.check_model_accuracy(
Code Example #14
params = {'database_name': 'fig_share_data',
          'dataset': 'FigShare',
          'feature_option': 'image_and_k_space',
          'img_shape': 128,
          'num_subjects': 'all'}

print(len(dataManager.dataCollection['FigShare']))
print(len(dataManager.data_splits['FigShare'][0]))
print(len(dataManager.data_splits['FigShare'][1]))
print(len(dataManager.data_splits['FigShare'][2]))

dataManager.compile_dataset(params)
'''

# Example to extract data from the ADNI dataset
dataManager = DataManager(
    r'C:/Users/eee/workspace_python/Image Reconstruction/data/', ['ADNI'])

params = {
    'database_name': 'data_tumor_21_05_2018',
    'dataset': 'ADNI',
    'batch_size': 32,
    'feature_option': 'add_tumor',
    'slice_ix': 0.32,  #0.32, #0.52,
    'img_shape': 128,
    'consec_slices': 120,  #120,#30,
    'num_subjects': 'all',
    'scan_type': 'T2',
    'acquisition_option': 'cartesian',
    'phase_map': 'None',  #constant, sinusoid
    'sampling_percent': 1,  #0.0625,
    'accel_factor': 0,  # How to implement this?