Beispiel #1
0
    def run(self):
        """Aggregate one day's chart data into the dated chartdata table.

        Walks the Charts table in batches of PER_FETCH rows using keyset
        pagination on Charts.id, handing each chart to self.process_chart().
        A failure on a single chart is logged and rolled back without
        aborting the whole run.
        """
        util.setup_logging('aggregate_daily', False, self.args.verbose)

        logging.info('Aggregating chart data of %s.' % self.args.date.strftime('%Y-%m-%d'))

        self.session = sessionmaker(bind=engine_sakuya_db)()
        # Bind the per-day chartdata model (table name carries the date).
        self.Chartdata = ChartdataTmpl.gen(self.args.date.strftime('%Y%m%d'))

        last_id = 0
        while True:
            charts = self.session.\
                     query(Charts).\
                     filter(Charts.id > last_id).\
                     order_by(Charts.id).\
                     limit(PER_FETCH).\
                     all()
            # Detach fetched rows so they are not kept in the session's
            # identity map across batches.
            self.session.expunge_all()

            for chart in charts:
                try:
                    self.process_chart(chart)
                except Exception:
                    # Was `except Exception, e` — Python-2-only syntax, and
                    # the bound name was never used. Log and keep going.
                    logging.exception('Fail to process chart %d' % chart.id)
                    self.session.rollback()

            # A short batch means we reached the end of the table.
            if len(charts) < PER_FETCH:
                break

            last_id = charts[-1].id
Beispiel #2
0
    def __init__(self, args):
        """Store CLI args, configure logging/signals and load host groups.

        :param args: parsed command-line arguments.
        """
        self.args = args

        util.setup_logging('suite_basic', True)

        # Shut down cleanly on both TERM and Ctrl-C.
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGINT, self.signal_handler)

        # safe_load: the hosts file is plain YAML data; bare yaml.load
        # would allow arbitrary Python object construction from the file.
        with open(get_config('webapp')['suite_hosts']) as f:
            self.groups = yaml.safe_load(f.read())

        self.cmdgen = cmdgen.CommandGenerator()
Beispiel #3
0
    def run(self):
        """Create any missing haopan charts and refresh the local id cache.

        Builds every (prod, chart, city, channel, entry) combination, asks
        haopan.fill_data for the resulting chart keys, creates the charts
        not yet present in the msgpack cache file, and rewrites the cache
        (keeping a timestamped backup) when anything was created.
        """
        util.setup_logging('hp_create')

        self.sakuya_db = sessionmaker(bind=engine_sakuya_db)()

        # Cartesian product over all chart dimensions; the leading 0 is a
        # placeholder slot expected by haopan.fill_data.
        data = []
        for i in haopan.PRODS:
            for j in haopan.CHARTS:
                for k in haopan.CITIES:
                    for l in haopan.CHANNELS:
                        for m in haopan.ENTRIES:
                            data.append((0, i[0], j[0], k[0], l[0], m[0]))

        chart_keys = haopan.fill_data(data).keys()

        # Best-effort read of the key -> chart-id cache; any failure
        # (missing or corrupt file) just means we start from empty.
        charts = None
        try:
            # open() in binary mode: msgpack data is bytes, and file()
            # was a Python-2-only builtin.
            with open(CHARTFILE, 'rb') as f:
                charts = msgpack.unpackb(f.read())
        except Exception:
            pass

        if not isinstance(charts, dict):
            charts = {}

        # stats[0] = charts created, stats[1] = creations that failed.
        stats = [0, 0]
        for key in chart_keys:
            if key not in charts:
                chart_id = self.new_chart(key)  # renamed: `id` shadowed the builtin
                if chart_id is not None:
                    charts[key] = chart_id
                    stats[0] += 1
                else:
                    stats[1] += 1
            else:
                logging.info('skip %s' % str(key))

        if stats[0]:
            # Keep a timestamped backup before overwriting the cache.
            if os.path.isfile(CHARTFILE):
                shutil.copyfile(
                    CHARTFILE, CHARTFILE + '.' + time.strftime('%Y%m%d%H%M%S'))

            with open(CHARTFILE, 'wb') as f:
                f.write(msgpack.packb(charts))

        logging.info('%d charts created, %d failed.' % tuple(stats))
Beispiel #4
0
    def run(self):
        """Create any missing haopan charts and refresh the local id cache.

        Builds every (prod, chart, city, channel, entry) combination, asks
        haopan.fill_data for the resulting chart keys, creates the charts
        not yet present in the msgpack cache file, and rewrites the cache
        (keeping a timestamped backup) when anything was created.
        """
        util.setup_logging('hp_create')

        self.sakuya_db = sessionmaker(bind=engine_sakuya_db)()

        # Cartesian product over all chart dimensions; the leading 0 is a
        # placeholder slot expected by haopan.fill_data.
        data = []
        for i in haopan.PRODS:
            for j in haopan.CHARTS:
                for k in haopan.CITIES:
                    for l in haopan.CHANNELS:
                        for m in haopan.ENTRIES:
                            data.append((0, i[0], j[0], k[0], l[0], m[0]))

        chart_keys = haopan.fill_data(data).keys()

        # Best-effort read of the key -> chart-id cache; any failure
        # (missing or corrupt file) just means we start from empty.
        charts = None
        try:
            # open() in binary mode: msgpack data is bytes, and file()
            # was a Python-2-only builtin.
            with open(CHARTFILE, 'rb') as f:
                charts = msgpack.unpackb(f.read())
        except Exception:
            pass

        if not isinstance(charts, dict):
            charts = {}

        # stats[0] = charts created, stats[1] = creations that failed.
        stats = [0, 0]
        for key in chart_keys:
            if key not in charts:
                chart_id = self.new_chart(key)  # renamed: `id` shadowed the builtin
                if chart_id is not None:
                    charts[key] = chart_id
                    stats[0] += 1
                else:
                    stats[1] += 1
            else:
                logging.info('skip %s' % str(key))

        if stats[0]:
            # Keep a timestamped backup before overwriting the cache.
            if os.path.isfile(CHARTFILE):
                shutil.copyfile(CHARTFILE, CHARTFILE + '.' + time.strftime('%Y%m%d%H%M%S'))

            with open(CHARTFILE, 'wb') as f:
                f.write(msgpack.packb(charts))

        logging.info('%d charts created, %d failed.' % tuple(stats))
Beispiel #5
0
    def run(self):
        """Set up signals, bind the PULL socket and DB session, then loop."""
        util.setup_logging('hp_aggregate', True)

        # Route both shutdown signals through the same handler.
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, self.signal_handler)

        self.socket = zmq.Socket(zmq.Context.instance(), zmq.PULL)
        self.socket.bind(get_config('webapp')['hp_aggregate_bind'])

        self.sakuya_db = sessionmaker(bind=engine_sakuya_db)()
        self.next_dt = datetime.datetime.now()
        self.data = []

        logging.info('Start looping...')
        self.loop()
        self.socket.close()
Beispiel #6
0
    def run(self):
        """Install signal handlers, open the PULL socket, and run the main loop."""
        util.setup_logging('hp_aggregate', True)

        # Same handler for TERM and INT so either one stops the loop.
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, self.signal_handler)

        ctx = zmq.Context.instance()
        self.socket = zmq.Socket(ctx, zmq.PULL)
        self.socket.bind(get_config('webapp')['hp_aggregate_bind'])

        self.sakuya_db = sessionmaker(bind=engine_sakuya_db)()
        self.next_dt = datetime.datetime.now()
        self.data = []

        logging.info('Start looping...')
        self.loop()
        self.socket.close()
Beispiel #7
0
    def run(self):
        """Bind the rule-logger PULL socket, prepare rule state, and loop."""
        util.setup_logging('rule_logger', True, self.args.verbose)

        # One handler covers both shutdown signals.
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, self.signal_handler)

        self.socket = zmq.Socket(zmq.Context.instance(), zmq.PULL)
        self.socket.bind(get_config('webapp')['rule_logger_endpoint'])

        self.sakuya_db = sessionmaker(bind=engine_sakuya_db)()

        self.init_rule_methods()
        self.datasource_rules = {}
        self.next_time = {}

        logging.info('Start looping...')
        self.loop()

        self.socket.close()
Beispiel #8
0
from collections import OrderedDict
from sakuya.models.sakuya_db import Charts, Categories, Follows, Events, Users, ChartdataTmpl, WarnRules
from sakuya.models import engine_sakuya_db as engine
from sakuya.config import get_config
from sakuya.lib import haopan, util
from sqlalchemy.orm import sessionmaker
from turbomail import Message, interface

NOTIFY_SCREEN_LATENCY = 60 * 10 # 10 min

# make session
Session = sessionmaker(bind=engine)
session = Session()

# setup logging
util.setup_logging('alert', False, True)

# turbomail is chatty below warning level; WARNING is the canonical name
# for the legacy WARN alias (same numeric level).
logging.getLogger('turbomail').setLevel(logging.WARNING)

# Accumulators for this run — presumably keyed/filled per user further
# down the script; verify against the rest of the file.
user_events = {}
skipped_user = set()

# Human-readable labels for event severity levels.
LEVEL_TEXT = {
    Events.CONST_TYPE_OK: 'OK',
    Events.CONST_TYPE_WARNING: 'Warning',
    Events.CONST_TYPE_CRITICAL: 'Critical'
}

# The cron job is started at the beginning of the minute,
# so the check time should be last minute,
# otherwise there'll be no data.
Beispiel #9
0
    def __init__(self):
        """Configure logging and install TERM/INT handlers."""
        util.setup_logging('check_logtime', True)

        # Both shutdown signals go through the same handler.
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, self.signal_handler)
Beispiel #10
0
    def __init__(self):
        """Set up logging, then register the shutdown signal handlers."""
        util.setup_logging('check_logtime', True)

        handler = self.signal_handler
        signal.signal(signal.SIGTERM, handler)
        signal.signal(signal.SIGINT, handler)
Beispiel #11
0
from collections import OrderedDict
from sakuya.models.sakuya_db import Charts, Categories, Follows, Events, Users, ChartdataTmpl, WarnRules
from sakuya.models import engine_sakuya_db as engine
from sakuya.config import get_config
from sakuya.lib import haopan, util
from sqlalchemy.orm import sessionmaker
from turbomail import Message, interface

NOTIFY_SCREEN_LATENCY = 60 * 10  # 10 min

# make session
Session = sessionmaker(bind=engine)
session = Session()

# setup logging
util.setup_logging("alert", False, True)

# WARNING is the canonical name for the legacy WARN alias (same value).
logging.getLogger("turbomail").setLevel(logging.WARNING)

user_events = {}
skipped_user = set()

# Human-readable labels for event severity levels.
LEVEL_TEXT = {Events.CONST_TYPE_OK: "OK", Events.CONST_TYPE_WARNING: "Warning", Events.CONST_TYPE_CRITICAL: "Critical"}

# The cron job is started at the beginning of the minute,
# so the check time should be last minute,
# otherwise there'll be no data.
# NOTE(review): the comment says "last minute" but the code subtracts
# 2 minutes — confirm which is intended.
check_time = util.current_minute() - datetime.timedelta(minutes=2)

no_data = False  # if no-data event happens for storm charts, notify jizhang only.