Example #2
import os  # used for the MLFLOW_PWD environment lookup below


def create_mlflow_client():
    """
    A helper function to create a connection to the mlflow database tables
    :return: an MLFlowAPIClient for the configured server and database
    """
    config = ConfigHelper().get_config()
    server = config.get(MLFLOW, "server")
    db = config.get(MLFLOW, "db")
    user = config.get(MLFLOW, "user")
    password = os.getenv(MLFLOW_PWD) or (config.get(MLFLOW, "password")
                                         if config.has_option(MLFLOW, "password") else None)
    if not password:
        raise ValueError("Cannot connect to mlflow database. Please provide a password.")
    return MLFlowAPIClient(server=server, db=db, user=user, password=password)
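
A minimal usage sketch, assuming the config's [MLFLOW] section supplies server, db, and user, and that the MLFLOW_PWD constant names the "MLFLOW_PWD" environment variable (both assumptions; neither is shown above). The environment variable takes precedence over a password stored in the config:

import os

os.environ["MLFLOW_PWD"] = "s3cret"  # hypothetical value; keeps the password out of the config file
client = create_mlflow_client()
models = client.list_models()  # assumes MLFlowAPIClient exposes list_models, as the Client class below uses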
Example #3
import time

import unittest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager

from pages.login_page import LoginPage
from pages.home_page import HomePage
from pages.boutique_detail_page import BoutiqueDetail
from pages.product_details_page import ProductDetails
from helpers.config_helper import ConfigHelper
from locators.home_page_locators import HomePageLocators
from locators.boutique_detail_page_locators import BoutiqueDetailPageLocators

config = ConfigHelper().config_load()


class TestAddItemToCart(unittest.TestCase):
    # set browser from the following options: chrome or firefox
    BROWSER = 'chrome'

    def setUp(self):
        if self.BROWSER == 'chrome':
            self.driver = webdriver.Chrome(ChromeDriverManager().install())

        elif self.BROWSER == 'firefox':
            self.driver = webdriver.Firefox(
                executable_path=GeckoDriverManager().install())

        else:
            raise ValueError("Unsupported browser: {}".format(self.BROWSER))
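
The snippet is cut off before any tear-down or test methods. A sketch of how the class might continue; the page-object method names and config keys below are assumptions, only the imports above are real:

    def tearDown(self):
        # always close the browser, even if a test failed
        self.driver.quit()

    def test_add_item_to_cart(self):
        # hypothetical flow built from the imported page objects
        login_page = LoginPage(self.driver)
        login_page.login(config['email'], config['password'])  # assumed method and config keys
        home_page = HomePage(self.driver)
        home_page.open_boutique(HomePageLocators.FIRST_BOUTIQUE)  # assumed locator name
        BoutiqueDetail(self.driver).add_first_product_to_cart()   # assumed method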
Example #4
import time

import allure
import pytest

# Project-local helpers (SSMHelper, ComparisonHelper, UtilityHelper, ConfigHelper,
# OracleManager, AthenaManager) are imported elsewhere in the original module.


class TestInjestionAutomation:

    ssm_helper = SSMHelper()
    comparison_helper = ComparisonHelper()
    utility = UtilityHelper()
    config_helper = ConfigHelper()
    oracle_manager = OracleManager()
    athena_manager = AthenaManager()

    Entities = config_helper.get_entities()

    qa_raw_df_name = "QA Raw"
    qa_processed_df_name = "QA Processed"
    dev_raw_df_name = "DEV Raw"
    dev_processed_df_name = "DEV Processed"


    @pytest.fixture(params=Entities, scope='class')
    def Entity(self, request):
        EntityName = request.param

        global qa_raw_query
        global qa_raw_df
        global qa_processed_query
        global qa_processed_df
        global dev_raw_query
        global dev_raw_df
        global dev_processed_query
        global dev_processed_df
        global new_qa_raw_df
        global new_qa_processed_df
        try:
            entity_json_data = self.config_helper.json_config_data(self.utility.path_finder('/resources/entity-configurations/entityConfigQA.json'), EntityName, 'raw')
            if entity_json_data[0] == "BANNER":
                qa_raw_query = entity_json_data[1]
                qa_raw_df = self.oracle_manager.get_qa_raw_or_procseed_dataframes(qa_raw_query)
                # oracle login here
            elif entity_json_data[0] == "CSV":
                qa_raw_query = entity_json_data[1]
                qa_raw_df = self.athena_manager.get_raw_or_procseed_dataframes(qa_raw_query)

            entity_json_data = self.config_helper.json_config_data(self.utility.path_finder('/resources/entity-configurations/entityConfigQA.json'), EntityName, 'processed')
            if entity_json_data[0] == "BANNER":
                qa_processed_query = entity_json_data[1]
                qa_processed_df = self.oracle_manager.get_qa_raw_or_procseed_dataframes(qa_processed_query)
            elif entity_json_data[0] == "CSV":
                qa_processed_query = entity_json_data[1]
                qa_processed_df = self.athena_manager.get_raw_or_procseed_dataframes(qa_processed_query)
        except Exception as e:
            raise AssertionError("Couldn't create test data for the expected (QA) data set") from e

        try:
            entity_json_data = self.config_helper.json_config_data(self.utility.path_finder('/resources/entity-configurations/entityConfigDev.json'), EntityName, 'raw')
            dev_raw_query = entity_json_data[1]
            dev_raw_df = self.athena_manager.get_raw_or_procseed_dataframes(dev_raw_query)

            entity_json_data = self.config_helper.json_config_data(self.utility.path_finder('/resources/entity-configurations/entityConfigDev.json'), EntityName, 'processed')
            dev_processed_query = entity_json_data[1]
            dev_processed_df = self.athena_manager.get_raw_or_procseed_dataframes(dev_processed_query)
        except Exception as e:
            raise AssertionError("Couldn't create test data for the actual (DEV) data set") from e


        try:
            list_raw = self.utility.dataframe_match_dataTypes(qa_raw_df, dev_raw_df)
            new_qa_raw_df = list_raw[0]
            dev_raw_df = list_raw[1]

            list_processed = self.utility.dataframe_match_dataTypes(qa_processed_df, dev_processed_df)
            new_qa_processed_df = list_processed[0]
            dev_processed_df = list_processed[1]

            new_qa_raw_df = self.utility.sort_all_columns_dataframe(new_qa_raw_df)
            dev_raw_df = self.utility.sort_all_columns_dataframe(dev_raw_df)

            new_qa_processed_df = self.utility.sort_all_columns_dataframe(new_qa_processed_df)
            dev_processed_df = self.utility.sort_all_columns_dataframe(dev_processed_df)
        except Exception as e:
            raise AssertionError("Couldn't align and sort the dataframes for comparison") from e

        return request.param


    @allure.feature('Metadata verification')
    @allure.story('Row & Column count verification of Raw Data')
    @allure.title("Row & Column count verification of Raw Data")
    def test_raw_row_column_count(self, Entity):

        self.comparison_helper.verify_dataframes_row_column_count(dev_raw_df, qa_raw_df, self.dev_raw_df_name, self.qa_raw_df_name)

    @allure.feature('Data verification')
    @allure.story('Data verification of Raw Data')
    @allure.title("Data verification of Raw Data")
    def test_raw_data_comparison(self, Entity):
        self.comparison_helper.verify_dataframes_data_comparsion(dev_raw_df, new_qa_raw_df, self.dev_raw_df_name, self.qa_raw_df_name, Entity, 'raw')



    @allure.feature('Additional column verification')
    @allure.story('Additional column verification in Processed Data')
    @allure.title("Additional column verification in Processed Data")
    def test_dev_processed_additional_column_count(self, Entity):
        self.comparison_helper.verify_datframe_additional_columns_addition(dev_processed_df, self.dev_processed_df_name)

    @allure.feature('Row Gui ID verification')
    @allure.story('Row Gui ID verification in Processed File')
    @allure.title("Row Gui ID verification in Processed File")
    def test_dev_processed_row_gui_id(self, Entity):
        time.sleep(4)
        self.comparison_helper.verify_datframe_row_gui_id(dev_processed_df, self.dev_processed_df_name)


    @allure.feature('Metadata verification')
    @allure.story('Row & Column count verification of Processed Data')
    @allure.title("Row & Column count verification of Processed Data")
    def test_processed_row_column_count(self, Entity):
        self.dev_processed_df = self.utility.drop_proceesed_data_frame_columns(dev_processed_df)
        self.comparison_helper.verify_dataframes_row_column_count(self.dev_processed_df, qa_processed_df, self.dev_processed_df_name, self.qa_processed_df_name)

    @allure.feature('Data verification')
    @allure.story('Data verification of Processed Data')
    @allure.title("Data verification of Processed Data")
    def test_processed_data_comparison(self, Entity):
        self.dev_processed_df = self.utility.drop_proceesed_data_frame_columns(dev_processed_df)
        self.comparison_helper.verify_dataframes_data_comparsion(self.dev_processed_df, new_qa_processed_df, self.dev_processed_df_name, self.qa_processed_df_name, Entity, 'processed')
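
The fixture above publishes its results through module-level globals, which every test then reads. A self-contained sketch of the alternative pattern, returning the prepared data from the fixture so each test receives it explicitly (entity names and frames are made up):

import pytest

ENTITIES = ["STUDENT", "COURSE"]  # hypothetical entity names


@pytest.fixture(params=ENTITIES, scope="class")
def entity_data(request):
    # Build the per-entity data once and hand it over, instead of mutating globals.
    return {"entity": request.param, "raw": [3, 1, 2]}


class TestSketch:
    def test_receives_fixture_data(self, entity_data):
        assert sorted(entity_data["raw"]) == [1, 2, 3]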
Example #5
import logging
import os
import pickle
import tempfile
import zipfile
from datetime import datetime

import joblib
import pandas as pd

# Project-local names used below (ConfigHelper, create_mlflow_client, create_data_client,
# PyModel, get_username3, MLFLOW, TBL_MODEL_STORE) are imported elsewhere in the original module.


class Client:
    """
    This class handles the MLFlow API calls to upload and deploy models. It also takes care of
    evaluating and comparing models in a training pipeline.
    """
    def __init__(self,
                 name=None,
                 email=None,
                 session=None,
                 input_config=None,
                 output_config=None,
                 mlflow_config=None):
        self.__log.info("Initialiizing the Client...")
        self.name = name
        self.email = email
        self.session = session
        self.config = ConfigHelper().get_config()
        self.__log.info("Creating MLFlow API client....")
        #self.httpClient = create_http_client() # This will make HTTP Calls to MlFlow's Orchestration APIs
        #self.sql_client = create_mlflow_client() # This is a work around for MLFLow's Orchestration APIs as APIs are not yet
        # implemented. TODO: Remove this when we have the apis ready
        self.api_client = create_mlflow_client()

        self.__log.info("Successfully created MLFlow API client")
        # The input and output DataClients connect to the specified data sources, to load the
        # training data and to store the predictions back, respectively.
        if input_config is not None:
            self.__log.info("Creating input connection....")
            self.input_data_client = create_data_client(input_config)
            self.__log.info("Successfully created input connection")

        if output_config is not None:
            self.__log.info("Creating output connection....")
            self.output_data_client = create_data_client(output_config)
            self.__log.info("Successfully created output connection")

    def list_models(self, text=None, uuid=None, tag=None):
        """
        Lists all the models from the db. If text is given, then runs a full-text search on uuid and tag
        :param text:
        :param uuid:
        :param tag:
        :return:
        """
        return self.api_client.list_models(text, uuid, tag)

    def upload_model(self, model):
        """
        Uploads an MLFlow model to the Model Repo
        :param model:
        :return:
        """
        self.__log.info("Uploading the model to Model Store....")
        if model is None:
            raise ValueError("Model must be provided")

        # Upload the model metadata and artifacts as one row of the model_store table.
        # TODO: replace the direct SQL write with an orchestration API call once available.
        try:
            table = self.config.get(MLFLOW, "model_store")
            schema = self.config.get(MLFLOW, "model_store.schema")
            model_store_df = model.metadata.to_table()
            model_store_df["Model"] = model.artifacts
            model_store_df.to_sql(table,
                                  schema=schema,
                                  con=self.sql_client.engine,
                                  if_exists='append',
                                  index=False)
            self.__log.info("Model uploaded to the Model Store successfully.")
        except Exception as e:
            msg = "Error uploading model. Error: {}".format(str(e))
            self.__log.error(msg)
            raise RuntimeError(msg)

    def verify_model(self, model):
        """
        Verifies the model. Here it first stores the model to LeaderBoard
        :param model:
        :return:
        """
        self.__log("Verifying the model....")
        if model is None:
            assert False, "Model must be provided"

        # Create LeaderBoard dataframe
        data = [{
            "model_UUID": model.metadata.model_uuid,
            "created_date": datetime.now(),
            "created_by": get_username3()
        }]
        lb_df = pd.DataFrame(data)

        # Upload the model uuid to LeaderBoard
        try:
            table = self.config.get(MLFLOW, "model_leaderboard")
            schema = self.config.get(MLFLOW, "model_leaderboard.schema")
            data.to_sql(table,
                        schema=schema,
                        con=self.sql_client.engine,
                        if_exists='append',
                        index=False)
            self.__log.info("Model moved to Leader Board successfully.")
        except Exception as e:
            msg = "Error uploading model to Leader Board. Error: {}".format(
                str(e))
            self.__log.error(msg)
            assert False, msg

    def deploy_model(self, model):
        """
        Deploys the model
        :param model:
        :return:
        """
        # TODO: deployment is not implemented yet
        pass

    def load_data(self, sql):
        """
        Loads the data from the database specified in input_config, executes the sql, and returns a pandas dataframe
        :param sql:
        :return:
        """
        data = pd.read_sql(sql, self.input_data_client)
        return data

    def download_model(self,
                       model_uuid=None,
                       model_tag=None,
                       model_version=None):
        """
        Downloads the model from the database using either model_uuid or model_tag and version
        :param model_uuid:
        :param model_tag:
        :param model_version:
        :return:
        """
        pymodel = PyModel(model_uuid=model_uuid,
                          model_tag=model_tag,
                          model_version=model_version)
        return pymodel

    def load_model(self, model_uuid=None, model_tag=None, model_version=None):
        """
        Loads the model from the database using either model_uuid or model_tag (plus an optional
        model_version) and converts it to a PyModel
        :param model_uuid:
        :param model_tag:
        :param model_version:
        :return:
        """
        if model_uuid is not None:
            return self._load_model_by_uuid(model_uuid)
        return self._load_model_by_tag(model_tag, model_version)

    def _load_model_by_uuid(self, model_uuid):
        """
        Loads the model from database for a given model_uuid
        :param model_uuid:
        :return:
        """
        model_df = pd.read_sql(
            "select * from {} where model_uuid = '{}'".format(
                TBL_MODEL_STORE, model_uuid), self.sql_client.engine)
        return self.create_pymodel_from_pandas(model_df)

    def _load_model_by_tag(self, model_tag, model_version=None):
        """
        Loads the latest model from the database for a given model_tag and optional model_version
        :param model_tag:
        :param model_version:
        :return:
        """
        sql = "SELECT TOP 1 * FROM {} WHERE model_tag = '{}'"
        if model_version is not None:
            sql = sql + " AND model_version = '{}'"
        sql = sql + " ORDER BY created_date DESC"
        sql = sql.format(TBL_MODEL_STORE, model_tag, model_version)

        model_df = pd.read_sql(sql, self.sql_client.engine)
        return self.create_pymodel_from_pandas(model_df)

    def create_pymodel_from_pandas(self, model_df):
        """
        Creates a PyModel from a single-row pandas dataframe read from the Model Store
        :param model_df:
        :return:
        """
        row = model_df.iloc[0]  # the load queries above return one model row
        # first extract the model from the binary BLOB data:
        # write the binary data to a zip file in a temp directory, then unzip it
        tmp_dir = tempfile.mkdtemp()
        zip_file_path = os.path.join(tmp_dir, row["model_uuid"] + ".zip")
        with open(zip_file_path, "wb") as model_zip_file:
            model_zip_file.write(bytearray(row["model_artifacts"]))
        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
            zip_ref.extractall(tmp_dir)

        model = joblib.load(os.path.join(tmp_dir, "model.pkl"))
        # feature_column.pkl is assumed to live in the extracted archive alongside model.pkl
        with open(os.path.join(tmp_dir, "feature_column.pkl"), "rb") as f:
            features = pickle.load(f)
        pymodel = PyModel(model=model,
                          features=features,
                          split=row["split"],
                          feature_columns=row["feature_columns"],
                          target_column=row["target_column"],
                          model_uuid=row["model_uuid"],
                          model_tag=row["model_tag"],
                          model_version=row["model_version"],
                          maximising_metric=row["maximising_metric"],
                          hyperparams=row["hyperparams"],
                          training_args=row["training_args"],
                          train_actuals=row["train_actuals"],
                          train_pred_probs=row["train_pred_probs"],
                          valid_actuals=row["valid_actuals"],
                          valid_pred_probs=row["valid_pred_probs"])
        return pymodel
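
A sketch of how this Client might be driven end to end; the input_config shape, tag, and version values are hypothetical, and a populated [MLFLOW] config section is assumed:

client = Client(name="jdoe",
                email="jdoe@example.com",
                input_config={"type": "sql", "dsn": "training_db"})  # shape is an assumption
train_df = client.load_data("SELECT * FROM training_data")
model = client.load_model(model_tag="churn-classifier", model_version="3")
client.verify_model(model)  # records the model on the Leader Board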
Example #6
File: bot.py Project: vcokltfre/robo-vco
import discord
import helpers.pregenerator

from discord.ext import commands
from discord_logger import DiscordLogger
from helpers.config_helper import ConfigHelper

cfg = ConfigHelper().read()


class Bot(commands.Bot):
    """A subclassed commands.Bot"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logger = DiscordLogger(webhook_url=cfg["logger_hook"], **cfg["logger_ops"])
        self.logger.construct(title="Startup", description="Robo Vco is starting up.")
        self.logger.send()

    def load_extensions(self, cogs: list):
        """Loads a list of cogs and returns the success/failure counts"""
        loading_data = {
            "success": 0,
            "failure": 0
        }
        for cog in cogs:
            try:
                super().load_extension(cog)
                loading_data["success"] += 1
            except Exception:
                loading_data["failure"] += 1
        return loading_data
Example #8
class PermissionManager:
    def __init__(self):
        self.conf = ConfigHelper("./data/persist/perms.json")
        self.cfg = self.conf.read()

    def has_perms(self, roles: list, uid: str, required: int):
        # A per-user override, when present, decides on its own (it can also lower perms).
        overrides = self.cfg["overrides"]
        if uid in overrides:
            return overrides[uid] >= required

        # Otherwise any role at or above the required level grants access.
        role_perms = self.cfg["roles"]
        for role in roles:
            if role in role_perms:
                if role_perms[role] >= required:
                    return True
        return False

    def add_role(self, name: str, level: int):
        perms = self.conf.read()
        perms["roles"][name] = level
        self.conf.write(perms)

    def modify_role(self, name: str, level: int):
        self.add_role(name, level)

    def del_role(self, name: str):
        perms = self.conf.read()
        perms["roles"].pop(name)
        self.conf.write(perms)

    def add_override(self, uid: str, level: int):
        perms = self.conf.read()
        perms["overrides"][uid] = level
        self.conf.write(perms)

    def modify_override(self, uid: str, level: int):
        self.add_override(uid, level)

    def del_override(self, uid: str):
        perms = self.conf.read()
        perms["overrides"].pop(uid)
        self.conf.write(perms)
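
A short usage sketch; the role names, UIDs, and levels below are made up:

pm = PermissionManager()
pm.add_role("moderator", 5)
pm.add_override("123456789012345678", 2)  # a per-user override decides on its own

pm.has_perms(["moderator"], "987654321098765432", 5)  # True: the role meets the required level
pm.has_perms(["moderator"], "123456789012345678", 5)  # False: the low override (2) short-circuits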