def main():
    """Drain queued error tasks and fan them out to worker processes.

    Pulls up to 5000 bodies from the 'error_task' queue, splits them into
    roughly `concurrency` chunks, and starts one Process per chunk running
    send_error_mail.run, each with its own DB session.
    """
    logger.debug("error_email start!")
    concurrency = 10
    bodys = queue.get('error_task', 5000)
    logger.debug("bodys count:%s!" % len(bodys))
    # Floor division, explicitly: identical to py2 integer `/`, but still an
    # int (a valid slice/range step) under py3, where `/` would yield a float
    # and break both range() and the slice below.
    step = len(bodys) // concurrency + 1
    steped_bodys = [bodys[i:i + step] for i in range(0, len(bodys), step)]
    for steped_body in steped_bodys:
        Process(target=send_error_mail.run,
                args=(database.query_db_session(), steped_body, )).start()
#! /usr/bin/env python # -*- coding: utf-8 -*- import urllib2 import json import datetime # from pymongo import MongoClient import redis from core import database # conn = MongoClient('mongodb://*****:*****@223.202.52.82:27017/') # db = conn.bermuda db = database.query_db_session() docUrl = 'data' #insert into table preload_channel url #preUrl = 'https://rcmsapi.chinacache.com/customer/' preUrl = 'https://cms3-apir.chinacache.com/customer/' userName = '******' lastUrl = '/channels' #tableName = 'test_rubin' #insert into table preload_config_device url #preCode = 'https://rcmsapi.chinacache.com//channel/' preCode = 'https://cms3-apir.chinacache.com//channel/' lastCode = '/flexicache/firstlayer' #连接redis数据库 REDIS_CLIENT = redis.StrictRedis(host='%s' % '223.202.52.82', port=6379, db=5) class PraseJson(object):
@software: PyCharm @file: receiver_physical_del.py @time: 17-8-7 下午4:53 """ import simplejson as json import traceback from bson import ObjectId import math from util import log_utils from core.database import query_db_session, db_session from core import redisfactory logger = log_utils.get_admin_Logger() db = db_session() q_db = query_db_session() REWRITE_CACHE = redisfactory.getDB(15) def physical_del_channel_qu(username, channel_original): """ according username,channel_name, to find the relationship with the channel_name, :param username: the name of user :param channel_name: the name of url :return: the list of collection physical_del_channel like [{'_id':xxx, 'username':xxxx, 'channel_list':XXXx},\ {'_id':xxx, 'username':xxxx, 'channel_list':XXXx}] """ logger.debug( "physical_del_channel_query username:%s, physical_del_channel:%s" % (username, channel_original))
def main():
    """Entry point: build the retry Beat worker and run it in-process."""
    retry_beat = Beat(db_session(), query_db_session())
    retry_beat.setName('retry_beat')
    retry_beat.setDaemon(True)
    retry_beat.run()
#urllib2.urlopen("http://logmonitor.chinacache.net:8888/mobile/", urllib.urlencode(args), timeout=10) #根总借的帐号 config = {"username":"******","password":"******","mobile":"15910506097;13717961668;15801269880;15910506922","content":message} query = urllib.urlencode(config) print urllib2.urlopen(SMSURL, query, timeout=10).read() mailContent_noc = u"刷新队列堆积报警,请通知刷新组维护人员(张宏安、于善良、马欢)\n" subject_noc = "**队列堆积报警**" noc_email = config.get('refresh_noc_monitor', 'noc_email').split(',') for message in alert_messages_noc: mailContent_noc += message + "\n" logging.warn("refresh queue detail: %s: " % (message)) try: if len(alert_messages) > 0: sendEmail.send(refresh_email, subject, mailContent.encode('utf8')) if len(alert_messages_noc) > 0: sendEmail.send(noc_email, subject_noc, mailContent_noc.encode('utf8')) except Exception, e: logging.debug(e) if __name__ == '__main__': # agent = Monitor( # MongoClient('%s:27017,%s:27017,%s:27017' % ('172.16.12.134', '172.16.12.135', '172.16.12.136'), # replicaSet='bermuda_db')['bermuda']) agent = Monitor(database.query_db_session()) #agent.setName('mq_monitor') #agent.setDaemon(True) #agent.run() agent.do_job() exit()
#SH #s_uri = 'mongodb://*****:*****@%s:27017,%s:27017,%s:27017/bermuda?replicaSet=bermuda_db' % ('172.16.31.222','172.16.32.254','172.16.32.3') #BJ s_uri = 'mongodb://*****:*****@%s:27017,%s:27017,%s:27017/bermuda?replicaSet=bermuda_db' % ( '172.16.12.136', '172.16.12.135', '172.16.12.134') db = MongoClient(s_uri)['bermuda'] #uri = 'mongodb://*****:*****@%s:27017,%s:27017,%s:27017/bermuda?replicaSet=bermuda_db' % ('223.202.52.134', '223.202.52.135', '223.202.52.136') #active_db = ReplicaSetConnection(uri)['bermuda'] # active_db = Connection("mongodb://*****:*****@%s:27018/bermuda"%'223.202.52.135')['bermuda'] active_db = MongoClient("mongodb://*****:*****@%s:27018/bermuda" % '223.202.52.135')['bermuda'] # rubin test rubin_test_db = query_db_session() def update_configure(db_name): try: logger.info('db_name: %s' % db_name) db_list = active_db[db_name].find() print db_list if db_list: db[db_name].remove() db[db_name].insert(db_list) if db_name == "preload_channel": update_redis5(db_list) if db_name == "rewrite_new": update_redis15(db_list) except Exception, ex:
def main():
    """Entry point: build the queue Monitor and run it in-process."""
    monitor = Monitor(query_db_session())
    monitor.setName('mq_monitor')
    monitor.setDaemon(True)
    monitor.run()
# format='%(asctime)s - %(name)s - %(levelname)s - %(process)d - Line:%(lineno)d - %(message)s', # level=logging.INFO) # logger = logging.getLogger('preload_count_deivce') # logger.setLevel(logging.DEBUG) from util import log_utils logger = log_utils.get_rtime_Logger() # uri = 'mongodb://*****:*****@%s:27017,%s:27017,%s:27017/bermuda?replicaSet=bermuda_db' % ( # '172.16.12.136', '172.16.12.135', '172.16.12.134') # conn = MongoClient(uri)['bermuda'] # conn = MongoClient('mongodb://*****:*****@223.202.52.136:27017/').bermuda # online info conn = query_db_session() s1_db = s1_db_session() # 获取上层设备的url #RCMS_DEVICES_LAYER = 'https://rcmsapi.chinacache.com/upperlayer/devices' RCMS_DEVICES_LAYER = 'https://cms3-apir.chinacache.com/upperlayer/devices' # 获取设备类型的url # RCMS_API = 'http://rcmsapi.chinacache.com:36000/device/name/{0}/apps' RCMS_API = 'http://j.hope.chinacache.com:8082/api/device/name/{0}/apps' # 查询设备的mrtg # RCMS_MRTG = 'http://rcmsapi.chinacache.com:36000/device/{0}/mrtgnotes' HOPE_MRTG = 'http://j.hope.chinacache.com:8082/api/device/{0}/mrtgnotes' STATUS_LIST = ['0', '404', '408', '503', '501', '502']
def __init__(self):
    """Record the creation timestamp and acquire both database sessions."""
    self.dt = datetime.now()
    # Tuple assignment evaluates left-to-right, so the session-factory call
    # order is unchanged: query session first, then the default session.
    self.query_db, self.db = database.query_db_session(), database.db_session()
}).count(): verify.verify(task.get("urls"), db, 'FINISHED') else: db.error_task.save(task) if task.get('retryCount') == 1: logger.debug( "retryCount:3, status:FAILED,task_id : %s dev_id : %s..." % (task.get("_id"), task.get("dev_id"))) verify.verify(task.get("urls"), db, 'FAILED') queue.put_json2("error_task", [get_queue(task)]) def get_queue(task): queue = { "_id": str(task.get("_id")), "channel_code": task.get("urls")[0].get("channel_code"), 'host': task.get("host") } queue['urls'] = [{ "url": u.get("url"), "username": u.get("username") } for u in task.get("urls")] return queue if __name__ == "__main__": beat = Beat(database.db_session(), database.query_db_session()) beat.setName('retry_beat') beat.setDaemon(True) beat.run()
def getRConnection():
    """Return the session produced by query_db_session() (legacy name kept)."""
    connection = query_db_session()
    return connection
def __init__(self, batch_size=100):
    """Set up the timer: remember the batch size, log start, open DB sessions.

    :param batch_size: number of items processed per batch (default 100)
    """
    self.batch_size = batch_size
    logger.debug('timer start. batch_size:%s !' % batch_size)
    self.dt = datetime.now()
    # Left-to-right evaluation keeps the original session-creation order.
    self.query_db, self.db = database.query_db_session(), database.db_session()
def getStatus(r_id):
    """Poll refresh request `r_id` until every URL finishes or ~500s elapse.

    Every 30 seconds, re-reads the request and its URLs from the DB, builds a
    human-readable progress report (per-URL status, per-device success and
    first-layer breakdown, aggregate finished/in-progress counts), then prints
    and logs it.

    NOTE(review): reconstructed from whitespace-collapsed source; the nesting
    of a few branches (layer split under `code > 0`, placement of the PROGRESS
    message) is inferred — confirm against the original file.

    :param r_id: request id, any value accepted by ObjectId()
    """
    print "url check begining .......... %s " % os.getpid()
    begin = datetime.datetime.now()
    while True:
        # Per-iteration accumulators for the report.
        firstLayer = []
        notfirstLayer = []
        devices = []
        finishCount = 0
        processCount = 0
        time.sleep(30)  # polling interval
        db = query_db_session()
        request = db.request.find_one({"_id": ObjectId(r_id)})
        refresh_msg = "\n ********************************************** %s " % os.getpid()
        refresh_msg += "\n\t------------------------------------------- "
        refresh_msg += "\n\t request status : %s" % str(request.get('status'))
        urlList = [url for url in db.url.find({"r_id": ObjectId(r_id)})]
        for url in urlList:
            refresh_msg += u"\n\t url : %s , status: %s " % (
                str(url.get("url", "no url").encode('utf-8')),
                str(url.get("status", "NO STATUS")))
            successCount = 0
            if url != None and url.get('dev_id', '') != '':
                device = db.device.find_one({"_id": url.get('dev_id')})
                refresh_msg += "\n\t\t device_id: %s " % (url.get('dev_id'))
                firstLayer = []
                notfirstLayer = []
                devices = device['devices']
                # A device counts as successful when its code is > 0;
                # successes are split by the firstLayer flag.
                for key in devices.keys():
                    if devices.get(key).get('code') > 0:
                        successCount = successCount + 1
                        if devices.get(key).get('firstLayer') == True:
                            firstLayer.append(devices.get(key))
                        else:
                            notfirstLayer.append(devices.get(key))
            else:
                refresh_msg += "\n\t\t no device_id "
            refresh_msg += "\n\t\t total devices: %d ,success device : %d , firstLayer: %d ,notfirstLayer: %d " % (
                len(devices), successCount, len(firstLayer), len(notfirstLayer))
            if len(firstLayer) > 0:
                tiered = u" firstLayer"
            else:
                tiered = u" notfirstLayer"
            if url.get('status') == "FINISHED":
                refresh_msg += u"\n\t\t url refresh FINISHED , %s " % tiered
                finishCount = finishCount + 1
            else:
                # Any successful device means the URL is at least in progress.
                if successCount > 0:
                    processCount = processCount + 1
                refresh_msg += u"\n\t\t url refresh PROGRESS, %s " % tiered
        refresh_msg += "\n\t------------------------------------------- "
        end = datetime.datetime.now()
        refresh_msg += "\n\t result : count: %d , finish count : %d , processing count : %d , not start : %d" % (
            len(urlList), finishCount, processCount,
            (len(urlList) - finishCount - processCount))
        refresh_msg += "\n ************************************ times :%d " % (
            end - begin).seconds
        print refresh_msg
        logger.debug(refresh_msg)
        # Stop when everything finished, or give up after ~500s of polling.
        if len(urlList) == finishCount or (end - begin).seconds > 500:
            break
    print "url check ending ............ %s " % os.getpid()