Example #1
def cp_rewrite_toDB():
    r = redisfactory.getDB(15)
    logger.debug("cp_rewrite_toDB begining...")
    rewrites = []
    for key in r.keys():
        d = {"CHANNEL": key, "REWRITE": r.get(key)}
        rewrites.append(d)
    print(rewrites)
    database.db_session().rewrite.insert(rewrites)
    logger.debug("cp_rewrite_toDB end.")
Example #2
def __init__(self):
    dbcache = db_session()
    self.statistical = dbcache.statistical
    self.cache_user_channels = dbcache.cache_user_channels
    self.cache_user_channels.create_index('username')
    self.cache_channel_devices = dbcache.cache_channel_devices
    self.cache_channel_devices.create_index('channel_code')
    self.cache_channel_firstlayer_devices = dbcache.cache_channel_firstlayer_devices
    self.cache_channel_firstlayer_devices.create_index('channel_code')
    self.cache_user_channels_portal = dbcache.cache_user_channels_portal
    self.cache_user_channels_portal.create_index('username')
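Note: create_index is idempotent, so calling it in every constructor is safe; MongoDB treats it as a no-op when an identical index already exists. A standalone sketch, assuming db_session() returns a pymongo Database (the connection below is a hypothetical stand-in):

from pymongo import MongoClient

dbcache = MongoClient('localhost', 27017)['bermuda']  # hypothetical stand-in for db_session()
dbcache.cache_user_channels.create_index('username')  # no-op if the index already exists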
Example #3
def get_HPCC_num(act_time, end_time):
    # host = "223.202.203.93"
    # db_name = 'bermuda'
    # db_ber = get_host(host, db_name)
    db_ber = database.db_session()
    connect_day = db_ber['devices_status_day']

    device_one = connect_day.find_one(
        {'date': {
            '$gte': act_time,
            '$lt': end_time
        }})
    if not device_one:  # no status document in the window; fall back to the default
        return 1
    dev_dict = device_one.get("HPCC") or {}
    return dev_dict.get('200', 1)
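Because find_one with only a range filter returns an arbitrary matching document, a caller that wants the most recent day's counters could pass pymongo's standard sort keyword; a hedged variant:

    device_one = connect_day.find_one(
        {'date': {'$gte': act_time, '$lt': end_time}},
        sort=[('date', -1)])  # prefer the newest document in the window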
Example #4
    def __init__(self, batch_size=3000, package_size=30):

        self.preload_api_size = 100
        self.batch_size = batch_size
        self.package_size = package_size
        logger.debug('router start. batch_size:%s package_size:%s' %
                     (self.batch_size, self.package_size))
        self.dt = datetime.now()
        self.merged_urls = {}
        self.high_merged_urls = {}
        self.merged_cert = {}
        self.merged_cert_query = {}
        self.merged_transfer_cert = {}
        self.physical_urls = {}

        self.db = database.db_session()
        self.s1_db = database.s1_db_session()
Example #5
def main():
    agent = Agent(db_session())
    agent.setName('mq_agent')
    agent.setDaemon(True)
    agent.run()
Example #6
    Returns:

    '''
    uid = str(url.pop("_id"))
    url['firstLayer'] = url.pop('is_multilayer')
    url['layer_type'] = "two" if url.get('firstLayer') else "one"
    url['r_id'] = str(url.get("r_id"))
    url['recev_host'] = RECEIVER_HOST
    url['id'] = uid
    del url['created_time']
    if username in ('sina_t', 'sina_weibo', 'autohome', 'meipai'):
        url['layer_type'] = "three"
    return url


db = database.db_session()
db_s1 = database.s1_db_session()


@task(ignore_result=True, default_retry_delay=10, max_retries=3)
def submit(refresh_task, urls):
    '''
        Submit the task to the message queue

    Parameters
    ----------
    refresh_task : the refresh task to submit

    ignore_result       whether to store task state; if True, no state is stored and the return value cannot be queried
    default_retry_delay interval in seconds before a failed submission is retried; 10 by default
    max_retries         maximum number of retries; 3 by default
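For reference, a function decorated with @task is normally enqueued through Celery's delay/apply_async rather than called directly; a minimal sketch (the queue name is an assumption):

submit.delay(refresh_task, urls)  # enqueue; retried up to 3 times, 10 s apart, on failure
submit.apply_async(args=(refresh_task, urls), queue='refresh')  # same call with explicit routing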
Example #7
def main():
    beat = Beat(db_session(), query_db_session())
    beat.setName('retry_beat')
    beat.setDaemon(True)
    beat.run()
Example #8
import datetime
import sys
import logging
import traceback
import json
from core import database
from core import sendEmail

LOG_FILENAME = '/Application/bermuda3/logs/fail_details_user.log'
logging.basicConfig(
    filename=LOG_FILENAME,
    format='%(asctime)s - %(name)s - %(levelname)s - %(process)d - Line:%(lineno)d - %(message)s',
    level=logging.INFO)

logger = logging.getLogger('fail_url_task')

conn = database.db_session()


class details_fails_urls(object):
    def __init__(self, conn, begin_date, end_date, username, to_add):
        super(details_fails_urls, self).__init__()
        self.begin_date = begin_date
        self.end_date = end_date
        self.connect = conn
        self.username = username
        self.to_add = to_add
        self.collection_url = ''
        self.collection_dev = ''

    def get_fail_urls(self):
        find_url_dic = {
Example #9
from core import database
import logging
import sys
from core.database import db_session
import os

LOG_FILENAME = '/Application/bermuda3/logs/fail_details.log'
logging.basicConfig(filename=LOG_FILENAME,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(process)d - Line:%(lineno)d - %(message)s',
                    level=logging.INFO)

logger = logging.getLogger('fail_url_task')
logger.setLevel(logging.DEBUG)

# conn = database.db_session()
# conn = ReplicaSetConnection('%s:27017,%s:27017,%s:27017' % ('172.16.12.136', '172.16.12.135', '172.16.12.134'), replicaSet ='bermuda_db')['bermuda']
conn = db_session()
# conn = Connection("223.202.52.135", 27017)['bermuda']

class details_fails_urls(object):
    """TTL: db.ref_err.ensureIndex( { "datetime": -1 }, { expireAfterSeconds: 60*60*24*7 })
        db.ref_err.ensureIndex({"uid":1 },{"unique":true})
        db.ref_err.ensureIndex({"channel_code":1})
        db.ref_err.ensureIndex({"failed.code":1})
        db.ref_err.ensureIndex({"username":1})
    """
    def __init__(self, conn, begin_date, end_date):
        super(details_fails_urls, self).__init__()
        self.begin_date = begin_date
        self.end_date = end_date
        self.connect = conn
        self.collection_url = ''
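The ensureIndex calls quoted in the docstring are mongo-shell syntax; as a sketch, assuming conn is the bermuda database handle from above, the pymongo equivalents would be:

conn.ref_err.create_index([('datetime', -1)], expireAfterSeconds=60 * 60 * 24 * 7)  # 7-day TTL
conn.ref_err.create_index('uid', unique=True)
conn.ref_err.create_index('channel_code')
conn.ref_err.create_index('failed.code')
conn.ref_err.create_index('username')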
Example #10
from core.config import config
# try:
#     from pymongo import ReplicaSetConnection as MongoClient
# except:
#     from pymongo import MongoClient as MongoClient
import logging
import traceback
import codecs
from core.database import db_session
import os

file_path = config.get('success_definition_strategy', 'basic_info_file')

# uri ='mongodb://*****:*****@%s:27017,%s:27017,%s:27017/bermuda?replicaSet=bermuda_db' % ('172.16.12.136', '172.16.12.135', '172.16.12.134')
# uri = 'mongodb://*****:*****@%s:29017/bermuda' %('172.16.21.205')
# conn_url = MongoClient(uri)['bermuda']
conn_url = db_session()

LOG_FILENAME = '/Application/bermuda3/logs/basic_info.log'
logging.basicConfig(filename=LOG_FILENAME,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(process)d - Line:%(lineno)d - %(message)s',
                    level=logging.INFO)

logger = logging.getLogger('basic_info')
logger.setLevel(logging.DEBUG)

def get_basic_info():
    fp = codecs.open(file_path, 'r', encoding='utf-8')
    basic_info = []
    key_list = ['Province', 'City', 'Zone', 'Provinc']
    for line in fp:
        info_dic = {}
Example #11
        }).count():
            verify.verify(task.get("urls"), db, 'FINISHED')
    else:
        db.error_task.save(task)
    if task.get('retryCount') == 1:
        logger.debug(
            "retryCount:3, status:FAILED,task_id : %s dev_id : %s..." %
            (task.get("_id"), task.get("dev_id")))
        verify.verify(task.get("urls"), db, 'FAILED')
        queue.put_json2("error_task", [get_queue(task)])


def get_queue(task):
    queue = {
        "_id": str(task.get("_id")),
        "channel_code": task.get("urls")[0].get("channel_code"),
        'host': task.get("host")
    }
    queue['urls'] = [{
        "url": u.get("url"),
        "username": u.get("username")
    } for u in task.get("urls")]
    return queue


if __name__ == "__main__":
    beat = Beat(database.db_session(), database.query_db_session())
    beat.setName('retry_beat')
    beat.setDaemon(True)
    beat.run()
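For a task carrying a single URL, get_queue yields a message shaped like the following (all values hypothetical):

{
    '_id': '5f2b9c0e1d41c8a9b0000001',
    'channel_code': 'CHN001',
    'host': '10.0.0.1',
    'urls': [{'url': 'http://example.com/a.js', 'username': 'sina_t'}]
}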
Example #12
        for device in devs_dict.keys():
            mailContent += device + ":\n"
            for url in devs_dict.get(device):
                mailContent += url.get("url") + "\n"
            mailContent += u"\n"
        logger.debug(mailContent)
        return mailContent

    def getCustomerUrl(self, username):
        return [
            url for url in self.db.url.find({
                "username": username,
                "status": 'FINISHED',
                "isMonitor": None,
                'created_time': {
                    '$gte': datetime.now() - timedelta(minutes=20)
                }
            })
        ]

    def updateUrl(self, url):
        url["isMonitor"] = 0
        self.db.url.save(url)


if __name__ == "__main__":
    monitor = Monitor(database.db_session())
    monitor.setName('customer_monitor')
    monitor.setDaemon(True)
    monitor.run()
Example #13
def run():

    global DEVICES
    global MAX_COUNT

    hour = datetime.datetime.now().hour
    failed_devices = make_data()

    logger.debug('failed_devices %s' % (failed_devices))

    bak_data()

    if hour != 0 and not failed_devices:
        #print hour
        #print 'no failed devices'
        #os._exit(0)
        return
    # email list configuration
    all_info = db_session().email_management.find(
        {'failed_type': 'alarm_link_failed_devices'})
    for _info in all_info:
        #print '_info', _info
        DEVICES = _info.get('devices')
        if not DEVICES:
            #print 'c1-----------------'
            continue
        MAX_COUNT = _info.get('threshold')
        if not MAX_COUNT:
            #print 'c2-----------------'
            continue
        ads_to = _info.get('email_address', [])
        if not ads_to:
            #print 'c3-----------------'
            continue
        rate = _info.get('rate')
        if not rate:
            #print 'c4-----------------'
            continue
        can_email_hour = range(0, 23 + 1, int(rate))
        if hour not in can_email_hour:
            #print 'c5-----------------'
            #print can_email_hour
            continue
        if rate == 24:
            # daily summary email
            _day_key = (datetime.datetime.now() -
                        datetime.timedelta(days=1)).strftime('%Y-%m-%d')
            all_failed_devices = make_all_data(_day_key)
            # materialize the filter so the emptiness check below also works on Python 3
            _f = list(filter(filter_data, all_failed_devices.items()))
        else:
            _f = list(filter(filter_data, failed_devices.items()))

    # print 'DEVICES', DEVICES
    # print 'MAX_COUNT', MAX_COUNT
    # print 'rate', rate

        if not _f:
            #print 'c6-----------------'
            continue

        #print '_f', _f

        _content = make_content(dict(_f))

        logger.debug('ads_to %s' % (ads_to))
        logger.debug('_content %s' % (_content))

        send_mail(ads_from, ads_to, 'refresh dispatch device failure alarm', _content)

    os._exit(0)
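The rate field gates how often mail goes out: range(0, 23 + 1, int(rate)) enumerates the hours at which sending is allowed. For example:

>>> list(range(0, 23 + 1, 6))   # rate == 6: four send windows a day
[0, 6, 12, 18]
>>> list(range(0, 23 + 1, 24))  # rate == 24: one daily summary at midnight
[0]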
Example #14
def __init__(self, batch_size=100):
    self.batch_size = batch_size
    logger.debug('timer start. batch_size:%s !' % self.batch_size)
    self.dt = datetime.now()
    self.query_db = database.query_db_session()
    self.db = database.db_session()
Example #15
def main():
    scanOverTimeTrace(database.db_session())
Example #16
@contact: [email protected]
@site: 
@software: PyCharm
@file: preload_channel_script.py
@time: 16-11-8 上午11:18
"""
import xlrd
import logging
import traceback
from core import redisfactory, rcmsapi
from core.database import query_db_session, db_session
from datetime import datetime
import sys

PRELOAD_DEVS = redisfactory.getDB(5)
db = db_session()
# LOG_FILENAME = '/Application/bermuda/logs/autodesk_postal.log'
LOG_FILENAME = '/home/rubin/logs/preload_channel_script.log'

# logging.basicConfig(filename=LOG_FILENAME,
#                     format='%(asctime)s - %(name)s - %(levelname)s - %(process)d - Line:%(lineno)d - %(message)s',
#                     level=logging.INFO)
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(process)d - Line:%(lineno)d - %(message)s"
)
fh = logging.FileHandler(LOG_FILENAME)
fh.setFormatter(formatter)

logger = logging.getLogger('preload_channel_script')
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
Example #17
def __init__(self):
    self.dt = datetime.now()
    self.query_db = database.query_db_session()
    self.db = database.db_session()