示例#1
0
    def test_import_from_file(self, mock_open, mock_load, mock_isfile):
        """Test process-service file import.

        Calls ProcessService.import_from_file through ``__func__`` so that a
        purpose-built mock class can be passed in place of ``cls``.  The three
        mock arguments are presumably injected by ``@mock.patch`` decorators
        outside this view (patching open, a pickle-style load function, and an
        isfile check) -- TODO confirm against the decorator list.
        """

        # set return values: the file "exists", and open() yields a mock that
        # works as a context manager returning itself
        mock_isfile.return_value = True
        mock_file = mock.MagicMock(name='service_file')
        mock_file.__enter__.return_value = mock_file
        mock_open.return_value = mock_file

        # create mock process-service class and instance
        logger = Logger()
        ps_cls = type('ps_cls', (), {'persist': True, 'logger': logger})
        ps = mock.Mock(name='ProcessService_instance')
        # reassign __class__ so instance-of-ps_cls checks on the loaded object succeed
        ps.__class__ = ps_cls

        # test normal import: the deserialized instance is returned as-is,
        # the file is opened exactly once in binary-read mode, and the
        # loader is called exactly once with the open file object
        mock_load.return_value = ps
        ps_ = ProcessService.import_from_file.__func__(ps_cls, 'mock_file_path')
        self.assertIs(ps_, ps, 'unexpected process-service instance returned')
        mock_open.assert_called_once_with('mock_file_path', 'rb')
        mock_load.assert_called_once_with(mock_file)
        mock_open.reset_mock()
        mock_load.reset_mock()

        # test importing instance of incorrect type: load yields None, which
        # is not an instance of ps_cls, so a TypeError is expected
        mock_load.return_value = None
        with self.assertRaises(TypeError):
            ProcessService.import_from_file.__func__(ps_cls, 'mock_file_path')
        mock_open.reset_mock()
        mock_load.reset_mock()

        # test import with non-persisting service: import returns None
        ps_cls.persist = False
        ps_ = ProcessService.import_from_file.__func__(ps_cls, 'mock_file_path')
        self.assertIs(ps_, None, 'unexpected return value for non-persisting service')
示例#2
0
class TimePeriod(ArgumentsMixin):
    """Time period.

    Base class for time-period types; derived classes must implement
    :meth:`period_index`.
    """

    # class-level logger shared by all instances and classmethods
    logger = Logger()

    def __init__(self, **kwargs):
        """Initialize TimePeriod instance.

        :param kwargs: keyword arguments; accepted for interface
            compatibility, ignored in this base class
        """
        pass

    def period_index(self, dt):
        """Get number of periods until date/time "dt".

        :param dt: specified date/time
        :raises NotImplementedError: always; implement in a derived class
        """
        self.logger.fatal(
            'period_index method not implemented for {cls}; please implement derived class.',
            cls=self.__class__.__name__)
        raise NotImplementedError('period_index function is not implemented.')

    @classmethod
    def parse_time_period(cls, period):
        """Try to parse specified time period.

        :param period: specified period; either a single value or a dict of
            keyword arguments for :class:`pandas.Timedelta`
        :returns: period length in nanoseconds
        :raises Exception: if the period cannot be parsed by pandas
        """
        # catch single value
        if not isinstance(period, dict):
            period = dict(value=period)

        # try to parse specified period
        try:
            return pd.Timedelta(**period).delta
        except Exception:
            cls.logger.fatal('Unable to parse period: {period!s}.',
                             period=period)
            # bare raise preserves the original exception and traceback
            raise

    @classmethod
    def parse_date_time(cls, dt):
        """Try to parse specified date/time.

        :param dt: specified date/time, in any form accepted by
            :class:`pandas.Timestamp`
        :returns: timestamp in nanoseconds since the epoch
        :raises Exception: if the date/time cannot be parsed by pandas
        """
        try:
            return pd.Timestamp(dt).value
        except Exception:
            cls.logger.fatal('Unable to parse date/time: {dt!s}', dt=dt)
            # bare raise preserves the original exception and traceback
            raise
示例#3
0
"""

from eskapade import ConfigObject, Chain
from eskapade import process_manager
from eskapade import root_analysis
from eskapade.core import persistence
from eskapade.logger import Logger
from eskapade.root_analysis import roofit_utils

# make sure Eskapade RooFit library is loaded
roofit_utils.load_libesroofit()

import ROOT
from ROOT import RooFit

logger = Logger()

logger.debug(
    'Now parsing configuration file esk411_weibull_predictive_maintenance')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk411_weibull_predictive_maintenance'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

msg = r"""
    with values True or False assigned per record.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, resources, Chain
from eskapade import core_ops, analysis
from eskapade import process_manager
from eskapade.logger import Logger

logger = Logger()

logger.debug('Now parsing configuration file esk207_record_vectorizer')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk207_record_vectorizer'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

# --- Set path of data
data_path = resources.fixture('dummy.csv')
示例#5
0
    datasets in chunks.
    Similar to esk209, but here the reading of the dataset is forked into different processes.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
import pandas as pd

from eskapade import analysis, core_ops, process_manager, resources, ConfigObject, Chain
from eskapade.logger import Logger

logger = Logger()

logger.debug('Now parsing configuration file esk211_fork_read_data_itr')

#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk211_fork_read_data_itr'
settings['version'] = 0

# no need to set this normally, but illustrates how to throttle the number of concurrent processes.
# default is set to number of available cpu cores.
process_manager.num_cpu = 4

#########################################################################################
Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

import numpy as np

from eskapade import ConfigObject, Chain
from eskapade import data_mimic
from eskapade import process_manager
from eskapade.logger import Logger, LogLevel

logger = Logger()
logger.debug('Now parsing configuration file esk703_mimic_data')

#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk703_mimic_data'
settings['version'] = 0

np.random.seed(42)

ch = Chain('DataPrep')
ch.logger.log_level = LogLevel.DEBUG

sim_data = data_mimic.MixedVariablesSimulation(store_key='df',
                                               n_obs=100000,
Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import core_ops, analysis, root_analysis
from eskapade import process_manager
from eskapade import resources
from eskapade.logger import Logger, LogLevel

logger = Logger()

logger.debug(
    'Now parsing configuration file esk405_simulation_based_on_binned_data')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk405_simulation_based_on_binned_data'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

settings['high_num_dims'] = False
示例#8
0
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

import sys

import ROOT

from eskapade import process_manager, ConfigObject, Chain
from eskapade.logger import Logger

from esroofit.links import WsUtils

logger = Logger()

logger.debug('Now parsing configuration file tutorial_5.')

###############################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'tutorial_5'
settings['version'] = 0

###############################################################################
# - First create, compile and load your pdf model. We can either create it
#   on the fly or load if it has already been created.
pdf_name = 'MyPdf'
pdf_lib_base = pdf_name + '_cxx'
pdf_lib_ext = '.so'
    Tutorial macro for applying a SQL-query to one or more objects in the
    DataStore. Such SQL-queries can for instance be used to filter data.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import process_manager, ConfigObject, Chain
from eskapade.logger import Logger
from eskapadespark import SparkManager, SparkDfReader, SparkExecuteQuery, resources

logger = Logger()

logger.debug('Now parsing configuration file esk604_spark_execute_query.')

##########################################################################
# Minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk604_spark_execute_query'
settings['version'] = 0

##########################################################################
# Start Spark session

spark = process_manager.service(SparkManager).create_session(eskapade_settings=settings)
    with a default plotter link.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import core_ops, visualization, root_analysis
from eskapade import process_manager
from eskapade.logger import Logger

logger = Logger('macro.esk404_workspace_createpdf_simulate_fit_plot')

logger.debug('Now parsing configuration file esk404_workspace_createpdf_simulate_fit_plot')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk404_workspace_createpdf_simulate_fit_plot'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

settings['generate_fit_plot'] = True
settings['summary'] = True
    Do not forget to clean the results directory when testing.

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from pyspark.streaming import StreamingContext

from eskapade import process_manager, ConfigObject, DataStore, Chain
from escore.core import persistence
from eskapade.logger import Logger
from eskapadespark import SparkManager, SparkStreamingWordCount, SparkStreamingWriter, SparkStreamingController

logger = Logger()
logger.debug('Now parsing configuration file esk610_spark_streaming.')

##########################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk610_spark_streaming'
settings['version'] = 0


# check command line
def check_var(var_name, local_vars=vars(), settings=settings, default=False):
    """Set setting and return it."""
    var_value = default
    if var_name in local_vars:
示例#12
0
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

import datetime
import os
import shutil
import sys

from eskapade.logger import Logger
from eskapade import resources

logger = Logger(__name__)


def get_absolute_path(path):
    """Return the absolute form of the given path.

    A leading ``~`` is expanded to the user's home directory first; the
    result is then made absolute, normalizing any ``.`` or ``..`` components.

    :param path: path to absolutize
    :returns: the absolute path
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)


def create_dir(path):
    """Create a leaf directory and all intermediate ones.
示例#13
0
    Macro to say hello to the world with Eskapade!

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import core_ops
from eskapade import process_manager
from eskapade.logger import Logger, LogLevel

logger = Logger()

logger.debug('Now parsing configuration file esk101_helloworld')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk101_helloworld'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

#     E.g. define flags turn on or off certain chains with links.
#     by default all set to false, unless already configured in
示例#14
0
    Macro to illustrate how to control the contents of the datastore

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import core_ops
from eskapade import process_manager
from eskapade.logger import Logger

logger = Logger()

logger.debug(
    'Now parsing configuration file esk104_basic_datastore_operations.')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk104_basic_datastore_operations'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

# some dummy information to use in this macro
示例#15
0

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, resources, Chain
from eskapade import analysis, visualization
from eskapade import process_manager
from eskapade.logger import Logger

logger = Logger()

logger.debug('Now parsing configuration file esk305_correlation_summary.')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk305_correlation_summary'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

settings['input_path'] = resources.fixture('correlated_data.sv.gz')
settings['reader'] = 'csv'
示例#16
0
Description:
    Macro shows how to boxplot the content of a dataframe in a nice summary
    pdf file.

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import analysis, visualization
from eskapade import process_manager
from escore.core import persistence
from eskapade.logger import Logger

logger = Logger()

logger.debug('Now parsing configuration file esk304_df_boxplot.')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk304_df_boxplot'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

msg = r"""
示例#17
0
    visualization links into one big report.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, resources, Chain
from eskapade import analysis, visualization
from eskapade import process_manager
from eskapade.logger import Logger, LogLevel

logger = Logger()

logger.debug('Now parsing configuration file esk306_concatenate_reports.')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk306_concatenate_reports'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

input_files = resources.fixture('correlated_data.sv.gz')
示例#18
0
    Tutorial macro for applying map functions on groups of rows
    in Spark data frames

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import process_manager, ConfigObject, DataStore, spark_analysis, Chain
from eskapade.logger import Logger
from eskapade.spark_analysis import SparkManager

logger = Logger()

logger.debug('Now parsing configuration file esk609_map_df_groups')

##########################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk609_map_df_groups'
settings['version'] = 0

##########################################################################
# --- start Spark session

spark = process_manager.service(SparkManager).create_session(
    eskapade_settings=settings)
    Macro serves as input to other three esk105 example macros.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import core_ops
from eskapade import process_manager
from eskapade.logger import Logger

logger = Logger()

logger.debug('Now parsing configuration file esk105_datastore_pickling.')

#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk105_datastore_pickling'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

msg = r"""

The setup consists of three simple chains that add progressively more information to the datastore.
Description:
    Tutorial macro for reading CSV files into a Spark data frame

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import process_manager, ConfigObject, resources, spark_analysis, Chain
from eskapade.logger import Logger
from eskapade.spark_analysis import SparkManager

logger = Logger()

logger.debug('Now parsing configuration file esk602_read_csv_to_spark_df.')

##########################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk602_read_csv_to_spark_df'
settings['version'] = 0

##########################################################################
# --- start Spark session

spark = process_manager.service(SparkManager).create_session(eskapade_settings=settings)
示例#21
0
Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

import pyspark

from eskapade import process_manager, ConfigObject, DataStore, spark_analysis, Chain
from eskapade.logger import Logger
from eskapade.spark_analysis import SparkManager

logger = Logger()

logger.debug('Now parsing configuration file esk606_convert_spark_df.')

##########################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk606_convert_spark_df'
settings['version'] = 0

##########################################################################
# --- start Spark session

spark = process_manager.service(SparkManager).create_session(
    eskapade_settings=settings)
示例#22
0
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import core_ops, visualization, root_analysis
from eskapade import process_manager
from eskapade.logger import Logger
from eskapade.root_analysis import roofit_utils

# make sure Eskapade RooFit library is loaded
roofit_utils.load_libesroofit()

import ROOT
from ROOT import RooFit

logger = Logger()

logger.debug('Now parsing configuration file esk408_classification_error_propagation_after_fit')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk408_classification_error_propagation_after_fit'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

#########################################################################################
# --- now set up the chains and links based on configuration flags
示例#23
0
    Macro that illustrates how to loop over multiple (possibly large!)
    datasets in chunks, in each loop fill a (common) histogram, and plot the
    final histogram.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import analysis, core_ops, process_manager, resources, visualization, ConfigObject, Chain
from eskapade.logger import Logger, LogLevel

logger = Logger()

logger.debug('Now parsing configuration file esk302_histogram_filler_plotter.')

#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk302_histogram_filler_plotter'
settings['version'] = 0

#########################################################################################

msg = r"""

The plots and latex files produced by link hist_summary can be found in dir:
{path}
Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain, process_manager
from eskapade import core_ops, analysis
from eskapade.logger import Logger, LogLevel

from esroofit import resources
from esroofit.links import RooDataHistFiller

logger = Logger()

logger.debug('Now parsing configuration file esk402_roodatahist_fill')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk402_roodatahist_fill'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

input_files = [resources.fixture('mock_accounts.csv.gz')]
Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import ConfigObject, Chain
from eskapade import analysis, root_analysis, visualization
from eskapade import process_manager
from eskapade import resources
from eskapade.logger import Logger, LogLevel

logger = Logger()

logger.debug('Now parsing configuration file esk410_testing_correlations_between_categories')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk410_testing_correlations_between_categories'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

input_files = [resources.fixture('mock_accounts.csv.gz')]
Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from pyspark.sql import types, functions

from eskapade import process_manager, ConfigObject, Chain
from eskapade.logger import Logger
from eskapadespark import SparkManager, SparkDfReader, SparkWithColumn, resources

logger = Logger()

logger.debug('Now parsing configuration file esk607_spark_with_column')

##########################################################################
# Minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk607_spark_with_column'
settings['version'] = 0

##########################################################################
# Start Spark session

spark = process_manager.service(SparkManager).create_session(
    eskapade_settings=settings)
示例#27
0
Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

import numpy as np
import pandas as pd

from eskapade.logger import Logger

NUM_NS_DAY = 24 * 3600 * int(1e9)

logger = Logger()


def plot_histogram(hist, x_label, y_label=None, is_num=True, is_ts=False, pdf_file_name='', top=20):
    """Create and plot histogram of column values.

    :param hist: input numpy histogram = values, bin_edges
    :param str x_label: Label for histogram x-axis
    :param str y_label: Label for histogram y-axis
    :param bool is_num: True if observable to plot is numeric
    :param bool is_ts: True if observable to plot is a timestamp
    :param str pdf_file_name: if set, will store the plot in a pdf file
    :param int top: only print the top 20 characters of x-labels and y-labels. (default is 20)
    """
    # import matplotlib here to prevent import before setting backend in
    # core.execution.eskapade_run
示例#28
0
Description:
    Macro that illustrates how to loop over multiple (possibly large!)
    datasets in chunks.

Authors:
    KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands

Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

from eskapade import analysis, core_ops, process_manager, resources, ConfigObject, Chain
from eskapade.logger import Logger

logger = Logger()

logger.debug('Now parsing configuration file esk209_read_big_data_itr')

#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk209_read_big_data_itr'
settings['version'] = 0

#########################################################################################

# when chunking through an input file, pick up only N lines in each iteration.
chunk_size = 5

#########################################################################################
示例#29
0
from eskapade import process_manager
from eskapade.logger import Logger
from eskapade.root_analysis import RooFitManager, TruncExpGen, TruncExpFit
from eskapade.root_analysis.roofit_models import TruncExponential

MODEL_NAME = 'voucher_redeem'
REDEEM_DATA_KEY = 'voucher_redeems'
AGE_DATA_KEY = 'voucher_ages'

MAX_AGE = 1500  # days
FAST_REDEEM_RATE = -0.01  # per day
SLOW_REDEEM_RATE = -0.001  # per day
FAST_FRAC = 0.4
REDEEM_FRAC = 0.6

logger = Logger()

logger.debug('Now parsing configuration file esk409_unredeemed_vouchers.')

###############################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk409_unredeemed_vouchers'
settings['version'] = 0

###############################################################################
# --- create voucher redeem model

# create model if it is not read from persisted services of first chain
if not settings.get('beginWithChain'):
示例#30
0
"""

import ROOT
from ROOT import RooFit

from eskapade import ConfigObject, Chain, process_manager
from eskapade import core_ops
from eskapade.logger import Logger

from esroofit import roofit_utils
from esroofit.links import WsUtils, PrintWs

# make sure Eskapade RooFit library is loaded
roofit_utils.load_libesroofit()

logger = Logger()

logger.debug(
    'Now parsing configuration file esk407_classification_unbiased_fit_estimate'
)

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk407_classification_unbiased_fit_estimate'
settings['version'] = 0

#########################################################################################
# --- now set up the chains and links based on configuration flags