Example #1
    def __init_log(cls) -> bool:
        """ Initialize the logging system for this module and set
        a ColoredFormatter.
        """
        # create format string for this module
        format_str = config.logging.fmt.replace('[name]', 'EXECUTOR')
        formatter = colorlog.ColoredFormatter(format_str, datefmt=config.logging.datefmt)

        # create stream
        stream = logging.StreamHandler()
        stream.setLevel(logging.DEBUG)
        stream.setFormatter(formatter)

        # assign log method and set handler
        cls.log = logging.getLogger('executor')
        cls.log.setLevel(logging.DEBUG)
        cls.log.addHandler(stream)

        # if requested, enable slack notifications
        if config.notification.slack:

            # channel
            channel = config.notification.slack_channel

            # create slack handler
            slack_handler = SlackerLogHandler(config.notification.slack_token, channel, stack_trace=True,
                                              username='******', icon_emoji=':dizzy:', fail_silent=True)

            # add slack handler to logger
            cls.log.addHandler(slack_handler)

            # define the minimum level of log messages
            slack_handler.setLevel(logging.INFO)

        return True
Example #2
import logging

from slacker_log_handler import SlackerLogHandler, NoStacktraceFormatter


def reporting(msg, channel, bot):
    """
    Send an error message to a Slack channel.

    msg (str): error message to send
    """
    # Create slack handler
    slack_handler = SlackerLogHandler(channel, bot, stack_trace=True)

    # Create logger
    logger = logging.getLogger('Send Conf.')
    logger.addHandler(slack_handler)

    # Format
    formatter = NoStacktraceFormatter('%(asctime)s --> ERROR - %(message)s')
    slack_handler.setFormatter(formatter)
    slack_handler.setLevel(logging.DEBUG)

    # Message to send
    logger.error(msg)
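
# Hypothetical usage (an assumption, not part of the original snippet). Because
# SlackerLogHandler's first positional argument is the API token, the `channel`
# parameter above actually carries the token and `bot` carries the channel name:
if __name__ == '__main__':
    import os
    reporting('nightly export failed', os.getenv('SLACK_API_TOKEN'), '#alerts')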
Example #3
import logging
import sys

from slacker_log_handler import SlackerLogHandler, NoStacktraceFormatter


def setup_loggers(slack):
    for name in ['default', 'txns', 'cron']:
        username = "******".format(slack['username'], name)
        channel = slack['channel']
        if name == 'txns':
            channel = slack.get('txns-channel', channel)
        handler = SlackerLogHandler(slack['key'],
                                    channel,
                                    username=username,
                                    icon_emoji=slack['emojis'].get(name))

        formatter = NoStacktraceFormatter(FMT)
        handler.setFormatter(formatter)
        handler.setLevel(logging.INFO)
        logger = logging.getLogger(name)
        logger.setLevel(logging.INFO)
        logger.addHandler(handler)

    sys.excepthook = handle_exception
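

# The snippet wires sys.excepthook to a handle_exception function that is not
# shown above. A common shape for such a hook (an assumption, not the original
# code) logs uncaught exceptions through one of the loggers configured above:
def handle_exception(exc_type, exc_value, exc_traceback):
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    logging.getLogger('default').error("Uncaught exception",
                                       exc_info=(exc_type, exc_value, exc_traceback))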
Example #4
def startBetLogging(application):
    """
    Initial the main logging function for comeon. 
    
    Normal message level is Info
    = Debug --> not show
    = Info --> Console (Rundeck)
    > Warn --> Console (Rundeck) & Slack
    
    Please use the following loglevels for messages:
        
        Debug : Application internal messages (just for debugging)
        Info  : Status information and some stuff to show on the console
        Warn  : Messages to the customer (no action required)
        Error : Messages to the customer (action required)
        Critical : not defined yet
        
    
    Args:
        applicatoin (str): Name of the applicaton 
        
    Returns:
        logger: the logger object
        
    Todo:
    * Send all Debug messaged to Kafka
        
    """

    logger = logging.getLogger(application)
    logger.setLevel(logging.DEBUG)

    SLACK_API_TOKEN = cfg['log']['slack']['api_key']

    if application == 'surebet':
        SLACK_CHANNEL = cfg['log']['slack']['surebet']['channel']
    elif application == 'laybet':
        SLACK_CHANNEL = cfg['log']['slack']['laybet']['channel']
    elif application == 'balance':
        SLACK_CHANNEL = cfg['log']['slack']['balance']['channel']
    elif application == 'etl':
        SLACK_CHANNEL = cfg['log']['slack']['etl']['channel']
    elif application == 'p_l':
        SLACK_CHANNEL = cfg['log']['slack']['p_l']['channel']
    else:
        SLACK_CHANNEL = cfg['log']['slack']['common']['channel']

    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)

    sh = SlackerLogHandler(SLACK_API_TOKEN, SLACK_CHANNEL, stack_trace=True)
    sh.setLevel(logging.WARN)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)

    logger.addHandler(ch)
    logger.addHandler(sh)

    return logger
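

# Hypothetical usage (an assumption, not part of the original snippet), showing
# the routing described in the docstring: Info goes to the console only, Warn
# and above also go to Slack.
if __name__ == '__main__':
    log = startBetLogging('surebet')
    log.debug('internal detail')        # dropped: both handlers are above DEBUG
    log.info('placed 3 bets')           # console (Rundeck) only
    log.warning('balance below limit')  # console and Slack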
Example #5
import logging
import os
from platform import python_version

from slacker_log_handler import SlackerLogHandler, NoStacktraceFormatter

SLACK_API_TOKEN = os.getenv('SLACK_API_TOKEN')
SLACK_CHANNEL = os.getenv('SLACK_CHANNEL')

slack_handler = SlackerLogHandler(SLACK_API_TOKEN, SLACK_CHANNEL, stack_trace=True, ping_users=["@ose", "slackbot"], ping_level=logging.ERROR)

logger = logging.getLogger('debug_application')
logger.addHandler(slack_handler)
logger.setLevel(logging.DEBUG)

formatter = NoStacktraceFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
slack_handler.setFormatter(formatter)

logger.info('Python version is {}'.format(python_version()))

logger.debug('Test DEBUG')
logger.info('Test INFO')
logger.warning('Test WARNING')
logger.error('Test ERROR')
logger.fatal('Test FATAL')
logger.critical('Test CRITICAL')

try:
    raise Exception('Test exception')
except Exception as e:
    logger.exception(e)
Example #6
import json
import logging

from slacker_log_handler import SlackerLogHandler, NoStacktraceFormatter

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s',
                    filename='/tmp/stream_handler_test.txt',
                    filemode='a')

# Create slack handler
# NOTE: get api_key from here: https://api.slack.com/custom-integrations/legacy-tokens

with open('/home/xuananh/Dropbox/Work/Other/slack-token-api-key.json',
          "r") as in_file:
    SLACK_API_KEY = json.load(in_file)['phungxuananh']
# NOTE: the channel argument works with or without a leading '#'
slack_handler = SlackerLogHandler(api_key=SLACK_API_KEY,
                                  channel='general',
                                  stack_trace=True,
                                  username='******')

# Create logger
logger = logging.getLogger('debug_application')
logger.addHandler(slack_handler)

# OPTIONAL: Define a log message formatter.
# If you have set stack_trace=True, any exception stack traces will be included as Slack message attachments.
# You therefore need to use NoStacktraceFormatter as a base to exclude the trace from the main message text.
log_formatter = '`[%(asctime)s]` - %(name)s - %(levelname)s - %(message)s'
datetime_formatter = "%Y-%m-%d %H:%M:%S"
formatter = NoStacktraceFormatter(log_formatter, datetime_formatter)
slack_handler.setFormatter(formatter)

# Define the minimum level of log messages you want to send to Slack
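# A typical continuation (an assumption; the exact level is not shown above):
slack_handler.setLevel(logging.INFO)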
Example #7
import logging
import sys

import yaml
from slacker_log_handler import SlackerLogHandler


def change_slack_settings(slack_handler, option, settings):
    # option=True means an info message, option=False means an error message
    if option:
        slack_handler.icon_emoji = ":white_check_mark:"
        slack_handler.channel = settings["slack_channel_info"]
    else:
        slack_handler.icon_emoji = ":bangbang:"
        slack_handler.channel = settings["slack_channel_error"]


with open("settings.yaml", 'r') as yaml_file:
    settings = yaml.safe_load(yaml_file)

slack_handler = SlackerLogHandler(str(settings["web_hook_url"]),
                                  channel=str(settings["slack_channel"]),
                                  username=str(settings["slack_username"]))

logger = logging.getLogger('debug_application')
logger.addHandler(slack_handler)

formatter = logging.Formatter(
    '%(asctime)s|%(levelname)s|%(filename)s:%(lineno)s|%(funcName)20s()|%(message)s'
)

slack_handler.setFormatter(formatter)
slack_handler.setLevel(logging.DEBUG)
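
# Hypothetical usage of change_slack_settings (an assumption, not part of the
# original snippet): flip the handler between the info and error channels.
logger.setLevel(logging.INFO)                          # let INFO records through
change_slack_settings(slack_handler, True, settings)   # -> slack_channel_info
logger.info("nightly job finished")
change_slack_settings(slack_handler, False, settings)  # -> slack_channel_error
logger.error("nightly job failed")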

logging.basicConfig(
    stream=sys.stdout,
    format=
Example #8
import logging
import os

from slacker_log_handler import SlackerLogHandler

# logging.ERROR / WARNING


def hello():
    logger.info("heellloooo")


# formatter = NoStacktraceFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter(
    '%(asctime)s - %(filename)s : %(lineno)d line - %(funcName)s - %(levelname)s - %(message)s'
)
log_slack_handler = SlackerLogHandler(
    'xoxb-707533402582-707555674182-4IzzBXtLwPqNseiUSbu11s8V',
    '#dev',
    stack_trace=True)
log_slack_handler.setLevel(logging.WARNING)
log_slack_handler.setFormatter(formatter)

log_file_handler = logging.FileHandler(os.path.join('py_log', 'log.txt'))
log_file_handler.setLevel(logging.INFO)
log_file_handler.setFormatter(formatter)

log_stream_handler = logging.StreamHandler()
# log_stream_handler.setLevel(logging.INFO)
log_stream_handler.setFormatter(formatter)
# Create logger
logger = logging.getLogger(__name__)

# logger.addHandler(log_slack_handler)
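# Likely continuation (an assumption, not shown above): attach the handlers
# created earlier so the logger actually emits somewhere.
logger.setLevel(logging.DEBUG)
logger.addHandler(log_file_handler)
logger.addHandler(log_stream_handler)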
Example #9
def run(domain, org='totvstechfin', ignore_sheet=False, is_painel=False):
    # avoid all tasks starting at the same time.

    org = org
    app_name = "techfinplatform"
    connector_name = 'protheus_carol'
    connector_group = 'protheus'
    app_version = '0.2.8'

    if ignore_sheet:
        techfin_worksheet = None
    else:
        time.sleep(round(1 + random.random() * 6, 2))
        techfin_worksheet = sheet_utils.get_client()

    process_name = 'processAll'
    app_settings = {'clean_dm': True, 'clean_etls': True, 'skip_pause': False}

    to_drop_stagings = [
        'se1_acresc', 'cv3_outros', 'se1_decresc', 'se2_acresc', 'se2_decresc'
    ]

    to_look = [
        'arInvoices',
        'apInvoices',
        'mdCurrencies',
        'mdBusinessPartners',
    ]
    drop_etl_stagings = {
        'se1': [{
            'se1_decresc',
        }, {
            'se1_acresc',
        }],
        'se2': [{
            'se2_decresc',
        }, {
            'se2_acresc',
        }]
    }

    drop_data_models = [
        'apbankbearer',
        'apbankbearerlot',
        'appaymentsbank',
        'appaymentscard',
        'appaymentscheckbook',
        'apbankpayment',
        'apcardpayment',
        'apcheckbook',
        'arbankbearer',
        'arbankbearerlot',
        'arpaymentscard',
        'arpaymentscheckbook',
        'arcardpayment',
        'archeckbook',
        'arappayments',
        'cashflowevents',
    ]

    # Create slack handler
    slack_handler = SlackerLogHandler(
        os.environ["SLACK"],
        '#techfin-reprocess',  # "@rafael.rui",
        username='******')
    slack_handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    slack_handler.setFormatter(formatter)
    logger = logging.getLogger(domain)
    logger.addHandler(slack_handler)
    logger.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logger.addHandler(console)

    current_cell = sheet_utils.find_tenant(techfin_worksheet, domain)
    if current_cell is None and ignore_sheet:
        pass
    else:
        current_cell = current_cell.row
        status = techfin_worksheet.row_values(current_cell)[-1].strip().lower()

        skip_status = [
            'done', 'failed', 'wait', 'running', 'installing', 'reprocessing'
        ]
        if any(i in status for i in skip_status):
            logger.info(f"Nothing to do in {domain}, status {status}")
            return

    login = carol_login.get_login(domain, org, app_name)
    sheet_utils.update_start_time(techfin_worksheet, current_cell)

    try:
        current_version = carol_apps.get_app_version(login, app_name,
                                                     app_version)
    except Exception:
        logger.error(f"error fetching app version {login.domain}", exc_info=1)
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "failed - fetching app version")
        return

    # Drop DMs
    sheet_utils.update_status(techfin_worksheet, current_cell,
                              "running - drop DMs")
    try:
        carol_task.remove_dms(login, drop_data_models)
    except Exception:
        logger.error("error dropping Dms", exc_info=1)
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "failed - dropping Dms")
        return

    # Drop stagings
    sheet_utils.update_status(techfin_worksheet, current_cell,
                              "running - drop stagings")

    tasks, fail = carol_task.drop_staging(login,
                                          staging_list=to_drop_stagings,
                                          connector_name=connector_name,
                                          logger=logger)
    if fail:
        logger.error(f"error dropping staging {domain}")
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "failed - dropping stagings")
        return
    try:
        task_list, fail = carol_task.track_tasks(login, tasks, logger=logger)
    except Exception:
        logger.error("error dropping staging", exc_info=1)
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "failed - dropping stagings")
        return

    # Drop ETLs
    sheet_utils.update_status(techfin_worksheet, current_cell,
                              "running - drop ETLs")
    for key, values in drop_etl_stagings.items():
        for value in values:
            try:
                carol_task.drop_single_etl(login=login,
                                           staging_name=key,
                                           connector_name=connector_name,
                                           output_list=value,
                                           logger=logger)
            except Exception:
                logger.error("error dropping ETLs", exc_info=1)
                sheet_utils.update_status(techfin_worksheet, current_cell,
                                          "failed - dropping ETLs")
                return

    fail = False
    task_list = '__unk__'
    if current_version != app_version:

        logger.info(f"Updating app from {current_version} to {app_version}")
        sheet_utils.update_version(techfin_worksheet, current_cell,
                                   current_version)
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "running - app install")
        task_list, fail = carol_apps.update_app(
            login,
            app_name,
            app_version,
            logger,
            connector_group=connector_group)
        sheet_utils.update_version(techfin_worksheet, current_cell,
                                   app_version)
    else:
        logger.info(f"Running version {app_version}")
        sheet_utils.update_version(techfin_worksheet, current_cell,
                                   app_version)
        # return

    if fail:
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  'failed - app install')

        return

    # Cancel unwanted tasks.
    sheet_utils.update_status(techfin_worksheet, current_cell,
                              "running - canceling tasks")
    pross_tasks = carol_task.find_task_types(login)
    pross_task = [i['mdmId'] for i in pross_tasks]
    if pross_task:
        carol_task.cancel_tasks(login, pross_task)

    sync_type = sheet_utils.get_sync_type(techfin_worksheet,
                                          current_cell) or ''
    if 'painel' in sync_type.lower().strip() or is_painel:
        # deleting all data from techfin
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "running - deleting DM from techfin")

        try:
            r = techfin_task.delete_and_track(
                login.domain,
                to_look=to_look,
            )
        except Exception:
            logger.error("failed - deleting DM from techfin", exc_info=1)
            sheet_utils.update_status(techfin_worksheet, current_cell,
                                      "failed - deleting DM from techfin")
            return
        if r:
            logger.error("failed - deleting DM from techfin", )
            sheet_utils.update_status(techfin_worksheet, current_cell,
                                      "failed - deleting DM from techfin")
            return

    # prepare process All
    sheet_utils.update_status(techfin_worksheet, current_cell,
                              "running - processAll")
    carol_task.change_app_settings(login=login,
                                   app_name=app_name,
                                   settings=app_settings)

    task = carol_task.start_app_process(login,
                                        app_name=app_name,
                                        process_name=process_name)
    tasks = [task['data']['mdmId']]
    try:
        task_list, fail = carol_task.track_tasks(login, tasks, logger=logger)
    except Exception:
        logger.error("failed - processAll", exc_info=1)
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "failed - processAll")
        return
    if fail:
        logger.info(f"'failed - processAll'")
        sheet_utils.update_status(techfin_worksheet, current_cell,
                                  "failed - processAll")
        return

    logger.info(f"Finished all process {domain}")
    sheet_utils.update_status(techfin_worksheet, current_cell, "Done")
    sheet_utils.update_end_time(techfin_worksheet, current_cell)

    return task_list
Example #10
import logging
import os
from platform import python_version

from slacker_log_handler import SlackerLogHandler, NoStacktraceFormatter

SLACK_API_TOKEN = os.getenv('SLACK_API_TOKEN')
SLACK_CHANNEL = os.getenv('SLACK_CHANNEL')

slack_handler = SlackerLogHandler(SLACK_API_TOKEN,
                                  SLACK_CHANNEL,
                                  stack_trace=True)

logger = logging.getLogger('debug_application')
logger.addHandler(slack_handler)
logger.setLevel(logging.DEBUG)

formatter = NoStacktraceFormatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
slack_handler.setFormatter(formatter)

logger.info('Python version is {}'.format(python_version()))

logger.debug('Test DEBUG')
logger.info('Test INFO')
logger.warning('Test WARNING')
logger.error('Test ERROR')
logger.fatal('Test FATAL')
logger.critical('Test CRITICAL')

try:
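    # Likely continuation, matching the identical snippet in Example #5:
    raise Exception('Test exception')
except Exception as e:
    logger.exception(e)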
Example #11
    def _handle_logdir(self, log_dir, run_id, overwrite, reuse):
        if log_dir is None:
            if run_id is None:
                run_id = haikunator.haikunate()
            self.log_dir = os.path.join(repo_root, 'log', run_id)
        else:
            self.log_dir = log_dir
            run_id = os.path.basename(self.log_dir)
        self.run_id = run_id

        if os.path.exists(self.log_dir):
            if overwrite:
                print("Clearing %s" % self.log_dir, file=sys.stderr)
                shutil.rmtree(self.log_dir)
            elif not reuse:
                raise ValueError("path " + self.log_dir + " exists")

        os.makedirs(self.log_dir, mode=0o744, exist_ok=reuse or overwrite)
        print("Logdir = ", self.log_dir, file=sys.stderr)
        self.logger = logging.getLogger(run_id)
        self.logger.propagate = False
        self.logger.setLevel(logging.DEBUG)

        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(logging.Formatter(log_fmt))
        self.logger.addHandler(ch)

        hdlr = logging.FileHandler(os.path.join(self.log_dir, "model.log"))
        file_log_fmt = logging.Formatter(
            '%(asctime)s [%(levelname)s] %(name)s: %(message)s')
        hdlr.setFormatter(file_log_fmt)
        hdlr.setLevel(logging.DEBUG)
        self.logger.addHandler(hdlr)
        self.logger.debug("pl")
        if "SLACK_TOKEN" in os.environ:
            username = "******" % (run_id, hostname)
            self.logger.info("Adding slack logger")
            slack_handler = SlackerLogHandler(os.environ['SLACK_TOKEN'],
                                              hostname,
                                              stack_trace=True,
                                              username=username)

            slack_handler.setFormatter(file_log_fmt)
            slack_handler.setLevel(logging.INFO)
            print("Ignoring Slack INFO handler", file=sys.stderr)
            # self.logger.addHandler(slack_handler)

            slack_handler = SlackerLogHandler(os.environ['SLACK_TOKEN'],
                                              'error',
                                              username=username)

            slack_handler.setFormatter(file_log_fmt)
            slack_handler.setLevel(logging.ERROR)
            self.logger.addHandler(slack_handler)
        self.logger.info("Logdir %s", self.log_dir)
Example #12
next_motion_alert_allowed = 0
last_watchdog = 0
cur_timer = None

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', port))
s.setblocking(0)

if testing or debugging:
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
    rebootCore()
else:
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

slack_handler = SlackerLogHandler(config.get('config', 'slack_api'),
                                  log_channel,
                                  username="******")
logger.addHandler(slack_handler)

th = Timer(150.0, checkAlive)
th.daemon = True
th.start()

logger.info("alarmbot now monitoring")

if not testing:
    try:
        slack.chat.post_message(
            to_channel,
            '',
            username=my_name,
Example #13
import logging
import os
from platform import python_version

from slacker_log_handler import SlackerLogHandler

SLACK_API_TOKEN = os.getenv('SLACK_API_TOKEN')
SLACK_CHANNEL = os.getenv('SLACK_CHANNEL')

slack_handler = SlackerLogHandler(SLACK_API_TOKEN, SLACK_CHANNEL)

logger = logging.getLogger('debug_application')
logger.addHandler(slack_handler)
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
slack_handler.setFormatter(formatter)

logger.info('Python version is {}'.format(python_version()))

logger.debug('Test DEBUG')
logger.info('Test INFO')
logger.warning('Test WARNING')
logger.error('Test ERROR')
logger.fatal('Test FATAL')
logger.critical('Test CRITICAL')

try:
    raise Exception('Test exception')
except Exception as e:
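    # Likely continuation, matching the identical snippet in Example #5:
    logger.exception(e)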
Example #14
def run(domain, org='totvstechfin'):
    # avoid all tasks starting at the same time.
    time.sleep(round(3 + random.random() * 6, 2))
    org = 'totvstechfin'
    app_name = "techfinplatform"
    app_version = '0.0.70'
    connector_name = 'protheus_carol'
    connector_group = 'protheus'

    techfin_worksheet = sheet_utils.get_client()

    consolidate_list = [
        'se1',
        'se2',
    ]
    compute_transformations = True  # needed to force the old data through the staging transformations.

    # Create slack handler
    slack_handler = SlackerLogHandler(
        os.environ["SLACK"],
        '#techfin-reprocess',  # "@rafael.rui",
        username='******')
    slack_handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    slack_handler.setFormatter(formatter)
    logger = logging.getLogger(domain)
    logger.addHandler(slack_handler)
    logger.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logger.addHandler(console)

    current_cell = sheet_utils.find_tenant(techfin_worksheet, domain)
    status = techfin_worksheet.row_values(current_cell.row)[-1].strip().lower()

    skip_status = [
        'done', 'failed', 'wait', 'running', 'installing', 'reprocessing'
    ]
    if any(i in status for i in skip_status):
        logger.info(f"Nothing to do in {domain}, status {status}")
        return

    login = carol_login.get_login(domain, org, app_name)
    sheet_utils.update_start_time(techfin_worksheet, current_cell.row)

    dag = custom_pipeline.get_dag()
    dag = list(reduce(set.union, custom_pipeline.get_dag()))
    dms = [i.replace('DM_', '') for i in dag if i.startswith('DM_')]
    staging_list = [i for i in dag if not i.startswith('DM_')]

    try:
        current_version = carol_apps.get_app_version(login, app_name,
                                                     app_version)
    except Exception:
        logger.error(f"error fetching app version {login.domain}", exc_info=1)
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - fetching app version")
        return

    if current_version != app_version and current_version < "0.0.63":
        # Dropping stagings.
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "running - drop stagings")
        logger.info(f"Starting process {domain}")
        st = carol_task.get_all_stagings(login, connector_name=connector_name)
        st = [i for i in st if i.startswith('se1_') or i.startswith('se2_')]
        tasks, fail = carol_task.drop_staging(login,
                                              staging_list=st,
                                              connector_name=connector_name,
                                              logger=logger)
        if fail:
            logger.error(f"error dropping staging {domain}")
            sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                      "failed - dropping stagings")
            return
        try:
            task_list, fail = carol_task.track_tasks(login,
                                                     tasks,
                                                     logger=logger)
        except Exception as e:
            logger.error("error dropping staging", exc_info=1)
            sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                      "failed - dropping stagings")
            return

        # Drop ETL SE1, SE2.
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "running - drop ETLs")
        to_drop = ['se1', 'se2']
        to_delete = [
            i for i in carol_task.get_all_etls(login,
                                               connector_name=connector_name)
            if (i['mdmSourceEntityName'] in to_drop)
        ]

        try:
            carol_task.drop_etls(login, etl_list=to_delete)
        except Exception:
            logger.error("error dropping ETLs", exc_info=1)
            sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                      "failed - dropping ETLs")
            return

    current_version = carol_apps.get_app_version(login, app_name, app_version)
    fail = False
    task_list = '__unk__'
    if current_version != app_version:

        # Stop pub/sub if any.
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "running - stop pubsub")
        try:
            carol_task.pause_and_clear_subscriptions(login, dms, logger)
        except Exception as e:
            logger.error("error stop pubsub", exc_info=1)
            sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                      "failed - stop pubsub")
            return

        logger.info(f"Updating app from {current_version} to {app_version}")
        sheet_utils.update_version(techfin_worksheet, current_cell.row,
                                   current_version)
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "running - app install")
        task_list, fail = carol_apps.update_app(
            login,
            app_name,
            app_version,
            logger,
            connector_group=connector_group)
        sheet_utils.update_version(techfin_worksheet, current_cell.row,
                                   app_version)
    else:
        logger.info(f"Running version {app_version}")
        sheet_utils.update_version(techfin_worksheet, current_cell.row,
                                   app_version)
        sheet_utils.update_status(techfin_worksheet, current_cell.row, "Done")
        sheet_utils.update_end_time(techfin_worksheet, current_cell.row)
        return

    if fail:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  'failed - app install')
        return

    # Cancel unwanted tasks.
    sheet_utils.update_status(techfin_worksheet, current_cell.row,
                              "running - canceling tasks")
    pross_tasks = carol_task.find_task_types(login)
    pross_task = [i['mdmId'] for i in pross_tasks]
    if pross_task:
        carol_task.cancel_tasks(login, pross_task)

    # pause ETLs.
    carol_task.pause_etls(login,
                          etl_list=staging_list,
                          connector_name=connector_name,
                          logger=logger)
    # pause mappings.
    carol_task.pause_dms(
        login,
        dm_list=dms,
        connector_name=connector_name,
    )
    time.sleep(round(10 + random.random() * 6, 2))  # pause have affect

    # consolidate
    sheet_utils.update_status(techfin_worksheet, current_cell.row,
                              "running - consolidate")
    task_list = carol_task.consolidate_stagings(
        login,
        connector_name=connector_name,
        staging_list=consolidate_list,
        n_jobs=1,
        logger=logger,
        compute_transformations=compute_transformations)

    try:
        task_list, fail = carol_task.track_tasks(login,
                                                 task_list,
                                                 logger=logger)
    except Exception as e:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - consolidate")
        logger.error("error after consolidate", exc_info=1)
        return
    if fail:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - consolidate")
        logger.error("error after consolidate")
        return

    # Stop pub/sub if any.
    sheet_utils.update_status(techfin_worksheet, current_cell.row,
                              "running - stop pubsub")
    try:
        carol_task.pause_and_clear_subscriptions(login, dms, logger)
    except Exception as e:
        logger.error("error stop pubsub", exc_info=1)
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - stop pubsub")
        return
    try:
        carol_task.play_subscriptions(login, dms, logger)
    except Exception as e:
        logger.error("error playing pubsub", exc_info=1)
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - playing pubsub")
        return

    # delete stagings.
    sheet_utils.update_status(techfin_worksheet, current_cell.row,
                              "running - delete stagings")
    st = carol_task.get_all_stagings(login, connector_name=connector_name)
    st = [i for i in st if i.startswith('se1_') or i.startswith('se2_')]
    task_list = carol_task.par_delete_staging(login,
                                              staging_list=st,
                                              connector_name=connector_name,
                                              n_jobs=1)
    try:
        task_list, fail = carol_task.track_tasks(login,
                                                 task_list,
                                                 logger=logger)
    except Exception as e:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - delete stagings")
        logger.error("error after delete DMs", exc_info=1)
        return
    if fail:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - delete stagings")
        logger.error("error after delete DMs")
        return

    # delete DMs
    sheet_utils.update_status(techfin_worksheet, current_cell.row,
                              "running - delete DMs")
    task_list = carol_task.par_delete_golden(login, dm_list=dms, n_jobs=1)
    try:
        task_list, fail = carol_task.track_tasks(login,
                                                 task_list,
                                                 logger=logger)
    except Exception as e:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - delete DMs")
        logger.error("error after delete DMs", exc_info=1)
        return
    if fail:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - delete DMs")
        logger.error("error after delete DMs")
        return

    sync_type = sheet_utils.get_sync_type(techfin_worksheet, current_cell.row)
    if 'painel' in sync_type.lower().strip():
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "running - delete payments techfin")
        try:
            res = techfin_task.delete_payments(login.domain)
        except Exception as e:
            sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                      "failed - delete payments techfin")
            logger.error("error after delete payments techfin", exc_info=1)
            return

    sheet_utils.update_status(techfin_worksheet, current_cell.row,
                              "running - processing")
    try:
        fail = custom_pipeline.run_custom_pipeline(
            login, connector_name=connector_name, logger=logger)
    except Exception:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - processing")
        logger.error("error after processing", exc_info=1)
        return
    if fail:
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "failed - processing")
        logger.error("error after processing")
        return

    sync_type = sheet_utils.get_sync_type(techfin_worksheet, current_cell.row)
    if 'painel' in sync_type.lower().strip():
        sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                  "running - add pub/sub")
        try:
            techfin_task.add_pubsub(login.domain)
        except Exception:
            sheet_utils.update_status(techfin_worksheet, current_cell.row,
                                      "failed - add pub/sub")
            logger.error("error after add pub/sub", exc_info=1)
            return

    logger.info(f"Finished all process {domain}")
    sheet_utils.update_status(techfin_worksheet, current_cell.row, "Done")
    sheet_utils.update_end_time(techfin_worksheet, current_cell.row)

    return task_list
Example #15
import logging
import os
from platform import python_version

from slacker_log_handler import SlackerLogHandler, NoStacktraceFormatter

SLACK_API_TOKEN = os.getenv('SLACK_API_TOKEN')
SLACK_CHANNEL = os.getenv('SLACK_CHANNEL')

slack_handler = SlackerLogHandler(SLACK_API_TOKEN,
                                  SLACK_CHANNEL,
                                  stack_trace=True,
                                  ping_users=["@ose", "slackbot"],
                                  ping_level=logging.ERROR)

logger = logging.getLogger('debug_application')
logger.addHandler(slack_handler)
logger.setLevel(logging.DEBUG)

formatter = NoStacktraceFormatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
slack_handler.setFormatter(formatter)

logger.info('Python version is {}'.format(python_version()))

logger.debug('Test DEBUG')
logger.info('Test INFO')
logger.warning('Test WARNING')
logger.error('Test ERROR')
logger.fatal('Test FATAL')
logger.critical('Test CRITICAL')
Example #16
import logging
import os
import ssl
import sys

import django
import shopify

sys.path.append("..")  # here store is root folder(means parent).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
django.setup()

from app.models import Store
from django.conf import settings
from slacker_log_handler import SlackerLogHandler

# Overrides the default function for context creation with the function to create an unverified context.
ssl._create_default_https_context = ssl._create_unverified_context

slack_handler = SlackerLogHandler(settings.SLACK_API_KEY,
                                  'production-logs',
                                  stack_trace=True)

logger = logging.getLogger(__name__)

if settings.DEVELOPMENT_MODE == 'PRODUCTION':
    logger.addHandler(slack_handler)


def authenticate(func):
    def wrapper(stores_obj, *args, **kwargs):
        session = shopify.Session(stores_obj.store_name,
                                  stores_obj.permanent_token)
        shopify.ShopifyResource.activate_session(session)
        return func(stores_obj, *args, **kwargs)
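    # Likely continuation (an assumption, not shown above): the decorator
    # returns the wrapper it built.
    return wrapper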
Example #17
import json
import os
from logging import ERROR, Formatter
from logging.handlers import RotatingFileHandler

from flask import Flask, render_template

from moderator import modBot

app = Flask(__name__, static_folder='static')
app.secret_key = 'F12Zr47j\3yX R~X@H!jmM]Lwf/,?KT'
dbSetUp()

SLACK_TOKEN = os.environ['SLACK']
PAGE_LIMIT = 30

from slacker_log_handler import SlackerLogHandler
slack_handler = SlackerLogHandler(SLACK_TOKEN,
                                  'post',
                                  stack_trace=True,
                                  username='******')
slack_handler.setFormatter(Formatter('%(message)s'))
file_handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)

file_handler.setLevel(ERROR)
app.logger.addHandler(slack_handler)


@app.route('/test')
def test():
    return render_template('index2.html')


@app.errorhandler(404)
def page_not_found(e):
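    # Likely continuation (an assumption; the template name is hypothetical):
    return render_template('404.html'), 404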