Example #1
def queue_mnt():
	"""公司称名数据分配进程。
	公司名称的基本数据表命名格式 base_省份简称。
	自动提取以base_为前缀的公司基本表中的数据当对应的队列元素小于
	设定的阀值queue_threshold时。
	   
	"""
	db  = mysqlwrap.get_db()
	index_rd = rediswrap.get_hash('wbsp:gov:baseindex')
	while 1:
		try:
			sql = "SHOW TABLES LIKE 'base_%'"
			res, desc = db.query(sql)
			if res==0 and desc:
				for row in desc:
					tb = list(row.values())[0]
					q  = rediswrap.get_queue('wbsp:gov:queue.%s'%tb)
					#print('tb:%s q_len:%s' % (tb,q.__len__()))
					if len(q) >= queue_threshold:
						continue
					index = index_rd.get(tb,0)
					#print('tb:%s index:%s' % (tb,index))
					sql = "select id,name from %s where id >%s limit %s" % (tb,index,rows_limit)
					#print(sql)
					rs, ds = db.query(sql)
					if rs==0 and ds:
						id = 0
						for d in ds:
							id = d['id']
							q.push(d['name'])
						index_rd.set(tb,id)
		except Exception as e:
			traceback.print_exc()	
						
		time.sleep(10)
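queue_mnt relies on two module-level settings, queue_threshold and rows_limit, and expects the mysqlwrap/rediswrap connections to be configured first (as in Example #27). A minimal launch sketch, with the concrete values chosen purely for illustration:

import threading

queue_threshold = 5000   # assumed: refill a province queue once it falls below this size
rows_limit      = 1000   # assumed: rows pulled from each base_ table per refill

# run the dispatcher as a daemon thread so it keeps topping up the Redis queues
threading.Thread(target=queue_mnt, daemon=True).start()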
Example #2
def doworke():
    db = mysqlwrap.get_db()
    sql_item = {}
    sql_item['table']= 'data_total'
    sql_item['fields']="id,gov_url"
    sql_item['where']=" isnull(reg_no)"
    sql_item['limit']=50
    sql_item['order']="id asc"
    id = 0
    pool     = eventlet.GreenPool(60)
    i=0
    base_where = sql_item['where']
    while True:
        # rebuild the WHERE clause each pass so the id filter does not accumulate
        sql_item['where'] = base_where + " and id >%s" % id
        res,desc = db.query(sql_item)
        #print(res,desc)
        urls = []
        if res==0 and desc:
            for row in desc:
                urls.append(row['gov_url'])
                id = row['id']
        else:
            # no more unprocessed rows (or a query error): stop the loop
            break
        if urls:
            for res in pool.imap(anhui.format_html,urls):
                i+=1
                if res and 'name' in res:
                    print("total_num:%s" % i,id)
                    print(updata(res,'ah'))
                else:
                    print(res) 
Example #3
def savedata(data,table):
	"""保存数据到mysql数据库
	"""
	db = mysqlwrap.get_db()
	if type(data) != list:
		data = [data]
	for item in data:
		fields = []
		values = []
		for k,v in item.items():
			fields.append(k)
			v=mysqlwrap.addslashes(v)
			values.append('"%s"' % v)
		sql = "insert into data_%s(%s)values(%s)" % (table,','.join(fields),','.join(values))
		res,desc = db.query(sql,1)
		#print(sql,res,desc)
		#handle rows that failed to insert
		if res == -1:
			#print('insert_error:',desc)
			if desc.args[0] in [1064,1062]:
				return (0,())
			item['table'] = 'data_%s'%table
			datamodel.get_tmp_queue().push(item)
		#insert into the data_total summary table
		#print(item)
		if 'reg_status' in item and item['reg_status'] in ['存续','在业'] :
			fields.append('prov_tb')
			values.append('"%s"' % table)
			t_table = 'data_total'
			
			sql = "insert into %s(%s)values(%s)" % (t_table,','.join(fields),','.join(values))
			rs,ds = db.query(sql,1)
			#print("insert total tb",rs,ds)
	return (res,desc)
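A minimal call sketch for this savedata variant; the field names and the 'ah' table suffix are illustrative assumptions, not part of the original code:

record = {
    'name': '安徽示例科技有限公司',    # assumed field
    'reg_no': '340100000000000',       # assumed field
    'reg_status': '存续',              # also triggers the extra insert into data_total
}
res, desc = savedata(record, 'ah')     # writes to data_ah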
Example #4
def update_group(gkey,data):
	"""更新
	"""
	stat_db = mysqldb.get_db()
	data.pop('gid')
	res,group = get_groups([gkey])
	if res == -1:
		return res,group
	if res ==0 and not group:
		return [-3,"%s is not registered."%gkey]
	if data:
		
		res,desc = stat_db.update("stat_item_group",data,"gkey='%s'" %gkey)
		if res ==0 and desc:
			#after a successful update, check whether the item or history table markers changed
			if "items_mrk" in data and data['items_mrk'] != group['items_mrk'] \
			or "history_mrk" in data and data['history_mrk'] != group['history_mrk']:
				if "name" not in data:
					data['name'] = group['name']
				if "pid" not in data:
					data['pid'] = group['pid']
				reg_group(gkey,data)
				reg_items2redis(gkey)
			return res,desc
		elif res ==-1:
			return err_handle.db_err
		return [0,[]]
Example #5
def set(gkey,data):
	"""上传值,先从redis缓存中取出item的id,再匹配itemkey入history库
	参数说明:
	gkey group 的gkey
	data 数据集 最好一次不要超过100个
	  {'itemkey':[值,生成时间默认为当前时间戳]}
	"""
	stat_db = mysqldb.get_db()
	rdb = rediswrap.get_redis()
	kys = list(data.keys())
	#append the table marker so the target table can be resolved
	kys.append('mrk')
	itm = rdb.hmget(RD_ITM_KEY_PRFX+gkey,kys)
	itm_dict = {}
	if itm:
		mrk=itm.pop().split(',')
		hst_tb = get_hst_name(mrk[1])
		value = []
		for i in range(0,len(itm)):
			if itm[i] :
				tmp = data[kys[i]]
				clock = tmp[1] if tmp[1] else int(time.time())
				value.append("('%s','%s','%s')" %(itm[i],tmp[0],clock))
		if value:
			res,desc = stat_db.query("insert into %s(itemid,val,clock)values%s" % (hst_tb,','.join(value)),1)
			if res ==-1 :
				return [res,str(desc)]
			return res,desc
		else:
			return [-1,"no items found."]
	return [-2,'items not in redis.']
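A usage sketch for set(); the group key and itemkeys are assumptions, and passing 0 as the clock lets the function fall back to the current timestamp:

payload = {
    'cpu.load': [0.75, 0],            # value, clock (0 -> use time.time())
    'mem.used': [2048, 1700000000],   # value with an explicit unix timestamp
}
result = set('server_stats', payload)  # e.g. (0, rows) on success, [-2, ...] if the keys are not cached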
Example #6
def sys_log(contxt):
	"""写入系统日志表
	"""
	stat_db = mysqldb.get_db()
	sql_item={'table':'stat_syslog','contxt':contxt}
				
	stat_db.query(sql_item)	
Example #7
def reg_items(gkey,itm,prefix=''):
	"""注册items
	参数说明:
	gkey group的标识
	itm items组[[name,key]]
	prefix key的前缀
	
	"""
	#stat_db = dbclass(sconf.SYS['mysql'])	
	#stat_db.connect()
	stat_db = mysqldb.get_db()
	#fetch the group info
	res,desc = get_groups([gkey])
	if res==0 and desc:
		itm_tb = "stat_item_%s" % desc[0]['items_mrk'] if desc[0]['items_mrk'] else "stat_item"
		value = []
		for k in itm:
			value.append("('%s','%s%s',%s)" % (k[0],prefix,k[1],desc[0]['gid']))
		sql = "insert into %s (name,itemkey,gid)values %s " %(itm_tb, ",".join(value))
		res,desc = stat_db.query(sql,1)
		if res == -1:
			
			if desc[0] == 1062:
				return err_handle.item_exis
			return err_handle.db_err
		else:
			return [0,'']
	else:
		return res,desc
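A call sketch for reg_items(); the group key, item names/keys and prefix are assumptions:

items = [
    ['CPU load',    'cpu.load'],
    ['Memory used', 'mem.used'],
]
status = reg_items('server_stats', items, prefix='node1.')
# expected: [0, ''] on success, err_handle.item_exis if an itemkey is already registered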
Example #8
def get(gkey,itm=[],start_time=0,stop_time=0,sort='clock asc',groupby=0,page=None):
	"""
	获取统计数据
	参数说明
	   itm itemid列表 为空时提取整个group的记录
	   start_time 开始时间戮
	   stop_time  结构时间戮
	   sort       排序方式 
	   groupby    分组方式 
	   page       分页参数集 {'site':每页数据量,'num':页码} 默认返回所有记录
	"""
	stat_db = mysqldb.get_db()
	rdb = rediswrap.get_redis()
	sql_item = {'fields':'*'}
	r_itmkey = RD_ITM_KEY_PRFX+gkey
	if itm:
		itm.append('mrk')
		itmids = rdb.hmget(r_itmkey,itm)
		mrk = itmids.pop().split(',')
		
	else:
		mrk = rdb.hget(r_itmkey,'mrk')
		itmids = rdb.hvals(r_itmkey)
		itmids.remove(mrk)
		mrk = mrk.split(',')	

	ids = [k for k in itmids if k ]	
	sql_item['table'] = get_hst_name(mrk[1]) 
	sql_item['where'] = " itemid in (%s) " % ",".join(ids)
	start_time = utils.timestamp(start_time) if start_time else utils.timestamp(0,'d')
	stop_time = utils.timestamp(stop_time) if stop_time else int(time.time())
	sql_item['where'] += " and clock>=%s and clock <%s" % (start_time,stop_time)
	sql_item['order'] = sort

	if groupby:
		if groupby ==1:
			sql_item['group'] = 'itemid'
		elif groupby == 2:
			sql_item['group'] = 'clock'
		else:
			sql_item['group'] = 'itemid,clock' 
		sql_item['fields'] = "itemid,sum(val) as val,clock"
	#paging: marked for review, to be finalised
	if page:
		s = page['num']*page['site']
		sql_item['limit'] = "%s,%s" %(s,page['site'])
	
	res,desc = stat_db.query(sql_item)
	#look up the item names
	item_lab = {}
	if res == 0 and desc:
		itm_tb = "stat_item_" + mrk[0] if mrk[0] else "stat_item"
		rs, ds = stat_db.query("select name,id from %s where id in(%s)" %(itm_tb,",".join(ids) ))
		if rs==0 and ds:
			for row in ds :
				item_lab[row['id']]=row['name']
		return 0,[item_lab,desc]
	return 0,[{},[]]
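A call sketch for get(), assuming the itemkeys were cached by reg_items2redis(); the gkey, keys, time window and paging values are illustrative:

import time

status, (labels, rows) = get(
    'server_stats',
    itm=['node1.cpu.load', 'node1.mem.used'],   # cached keys resolved to ids via the Redis hash
    start_time=int(time.time()) - 86400,        # assumed to be a unix timestamp
    groupby=1,                                  # sum values per itemid
    page={'site': 50, 'num': 0},                # first page of 50 rows
)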
Example #9
def update_item_name(gkey,itemkey,name):
	stat_db = mysqldb.get_db()		
	res,desc = get_groups([gkey])	
	if res==0 and desc:
		itm_tb = get_itm_name(desc[0]['items_mrk'])	
		res,desc = stat_db.update(itm_tb,{"name":name},"itemkey='%s'" % itemkey)
	if res ==-1 :
		return err_handle.db_err
	return res,desc
Example #10
def exist_corp(name,table):
	"""检查公司数据是否已存在
	"""
	db = mysqlwrap.get_db()
	sql = "select id from %s where name='%s'" %(table,name)
	res,desc = db.query(sql)
	if res == 0 and desc:
		return True
	return False
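A call sketch; the company name and table are assumptions:

if not exist_corp('安徽示例科技有限公司', 'data_ah'):
    print('not stored yet, queue it for crawling')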
Example #11
def check_exist_corp(biz):
	"""对比数据表
	"""
	res,desc = mysqlwrap.get_db().query("select max(id) as mx,min(id) as mi from base_%s" % biz)

	mx = desc[0]['mx']
	mi = desc[0]['mi']
	while mi <=mx:
		step = mi + 1000
		sql = "select name from base_%s where id >%s and id <%s" %(biz,mi,step)
		res,desc = mysqlwrap.get_db().query(sql)
		for row in desc:
			s = "select id from data_%s where name='%s'" % (biz,row['name'])
			rs,ds = mysqlwrap.get_db().query(s)
			if rs==0 and ds:
				sql = "update base_%s set flag=1 where name='%s'" % (biz,row['name'])
				result = mysqlwrap.get_db().query(sql,1)
				#print(result)
		mi=step
Example #12
	def run(self):
		db = mysqlwrap.get_db()
		queue = datamodel.get_tmp_queue()
		while 1:
			try:
				row = queue.pop(3)
				if row:
					tb = row.pop('table')[-2:]
					res,desc = savedata(row,tb)

					if desc:
						print(tb,res,desc)
			except:
				traceback.print_exc()
Example #13
def reg_items2redis(gkey,itm=[]):
	"""将items存入redis缓存
	"""
	rdb = rediswrap.get_redis()
	stat_db = mysqldb.get_db()
	hash_tb = rediswrap.get_hash(gkey, system='default',serialized_type='string')
	res,desc = get_groups([gkey])
	if res==0 and desc:
		itm_tb = get_itm_name(desc[0]['items_mrk'])
		rdb.hsetnx(RD_ITM_KEY_PRFX+gkey,'mrk',"%s,%s" %(desc[0]['items_mrk'],desc[0]['history_mrk']))
		gid = desc[0]['gid']
		
		i=0
		#an itemkey list was supplied: look up ids by itemkey
		if itm:
			itm_len = len(itm)
			while True:
				j = i+50 if i+50 < itm_len else itm_len 
				if i == itm_len:
					break
				key = ['"%s"' % k for k in itm[i:j]]
				sql = "select id,itemkey from %s where itemkey in (%s)" % (itm_tb, ",".join(key))

				res,desc = stat_db.query(sql)

				new_dict = {}
				if res ==0 and desc:
					for row in desc:
						new_dict[row['itemkey']]=row['id']
					rs = rdb.hmset(RD_ITM_KEY_PRFX+gkey,new_dict)
				i = j
		#no itemkey list: fetch all items of the group by gkey
		else:
			res, desc = stat_db.query("select min(id) as mnid,max(id) as mxid from %s where gid = %s" % (itm_tb,gid))

			mnid = desc[0]['mnid']
			mxid = desc[0]['mxid']
			limit = 50
			while True: 
				if mnid == mxid:
					break
				sql = "select id,itemkey from %s where id > %s and gid=%s limit %s" % (itm_tb,mnid,gid,limit)
				res,desc = stat_db.query(sql)
				
				new_dict = {}
				if res ==0 and desc:
					for row in desc:
						new_dict[row['itemkey']]=row['id']
						mnid = row['id']
					rdb.hmset(RD_ITM_KEY_PRFX+gkey,new_dict)	
Example #14
def savedata(data,table):
	"""保存数据到mysql数据库
	"""
	db = mysqlwrap.get_db()
	fields = []
	values = []
	for k,v in data.items():
		fields.append(k)
		#v = db.escape_string(v)
		v=v.replace('"'," ")
		values.append('"%s"' % v)
	sql = "insert into data_%s(%s)values(%s)" % (table,','.join(fields),','.join(values))

	res,desc = db.query(sql,1)
	return (res,desc)
Example #15
def update_item_key(gkey,oldkey,newkey):
	stat_db = mysqldb.get_db()
	res,desc = get_groups([gkey])
	rdb = rediswrap.get_redis()
	if res==0 and desc:
		itm_tb = get_itm_name(desc[0]['items_mrk'])	
		res,des = stat_db.update(itm_tb,{"itemkey":newkey},"itemkey='%s'" % oldkey)
		if res ==0 and des:
			id = rdb.hget(RD_ITM_KEY_PRFX+gkey,oldkey)
			if id :
				rdb.hset(RD_ITM_KEY_PRFX+gkey,newkey,id)
		elif res == -1:
			return err_handle.db_err
		return res,des
	return res,desc
Example #16
    def output_data(self,queue,exit_flag):
        """输出结果到数据库
        """
        kword_stat={}
        kcount = 0
        out_db = mysqlwrap.get_db(self._output['dbserver'])
        #insert SQL template; if the record already exists it is updated instead
        tb = self._output['table']      
        insert_sql = "insert into " + tb + "(rowid,contents,tb_name,k_weight) values('%s','%s','" + self._input['data']['table'] + "','%s') on DUPLICATE KEY UPDATE contents='%s',k_weight='%s'"

        while 1:
            if exit_flag.is_set() and queue.qsize()==0:
                import os
                print("out_db exit...",os.getpid())
                #sql = "insert into illegal_log(kcount,taskid,contents)values('%s','%s','%s')" % \
                #(kcount,self.taskid,mysqlwrap.addslashes(json.dumps(kword_stat)))
                
                #res,desc = out_db.query(sql,1)
                get_result_queue(self.pid).push(kword_stat)
                break
            #print("im waiting for data.....",queue.qsize())
            try:
                try:
                    row = queue.get(timeout=5)
                except:
                    continue
                
                id = row.pop('id')
                weight = row.pop('weight')
                #rows at or below the configured output weight are discarded
                if weight <= self._output['weight']:
                    continue
                #count keyword matches
                for ks in row['content'].values():
                    for k in ks:
                        kcount+=1
                        if k in kword_stat:
                            kword_stat[k]+=1
                        else:
                            kword_stat[k]=1
                            
                content = mysqlwrap.addslashes(json.dumps(row.get('content')))
                
                res, desc = out_db.query(insert_sql % (id, content, weight,content, weight))
                
            except Exception as e:
                
                logger.error(str(e))
Example #17
def stat_tabls_rows(tb_prefix):
	"统计同一前缀的表记录数"	
	db  = mysqlwrap.get_db()
	sql = "SHOW TABLES LIKE '"+tb_prefix+"%'"

	res, desc = db.query(sql)
	total=0
	if res==0 and desc:
		for row in desc:
			tb = list(row.values())[0]
			sql = "select count(0) as n from %s" % tb
			rs, ds = db.query(sql)
			if rs ==0 and ds:
				total+=ds[0]['n']
				print(tb,ds[0]['n'])
	print("total:%s" % total)
Example #18
def get_items(gkey,item=[]):
	"""提取items
	"""
	stat_db = mysqldb.get_db()
	res,desc = get_groups([gkey])
	if res==0 and desc:
		itm_tb = get_itm_name(desc[0]['items_mrk'])
		sql_item={"table":itm_tb,"fields":"*",'where':"gid=%s"%desc[0]['gid']}
		if item:
			itms = ["'%s'" % k for k in item]
			sql_item['where']+=" and itemkey in (%s)" % ",".join(itms)
		res,desc = stat_db.query(sql_item)
		if res ==-1:
			return err_handle.db_err
		return [res,desc]
	else:
		return res,desc
Example #19
def get_groups(gkey=[]):
	"""提取group信息
	返回值:[status,info]
	status: 0 成功
			-1 gkey未设置
	        -100 数据库错误
	"""
	stat_db = mysqldb.get_db()
	item = {"table":"stat_item_group","fields":"*"}
	if  gkey and type(gkey) in (list,tuple):
		gk = ["'%s'" % k for k in gkey]
		item['where'] = "gkey in (%s)" % ",".join(gk)
	elif (type(gkey) == str and gkey != 'all') or not gkey:
		return err_handle.gkey_not_set
	res,desc = stat_db.query(item)
	if res == -1:
		return err_handle.db_err
	return res,desc
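A usage sketch for get_groups(); the gkey is an assumption:

status, groups = get_groups(['server_stats'])
if status == 0 and groups:
    print(groups[0]['gid'], groups[0]['items_mrk'])
elif status == 0:
    print('group not registered yet')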
Example #20
def updata(data,table):
	"""更新数据
	"""
	db = mysqlwrap.get_db()
	if type(data) != list:
		data = [data]	
	for item in data:
		for k,v in item.items():
			v=mysqlwrap.addslashes(v)
			item[k]=v
		where = "name='%s'" % item['name']
		res,desc = db.update("data_%s" % table,item,where)
		#also update the data_total summary table for active companies
		if 'reg_status' in item and item['reg_status'] in ['存续','在业','存续(在营、开业、在'] :
			item['prov_tb']='"%s"' % table
			t_table = 'data_total'
			res,desc = db.update(t_table,item,where)
			print("insert total tb",res,desc)
	return (res,desc)	
Example #21
    def init(self):         
        global KWORDS
        if self._input is None:
            raise Exception("not set input config!")
        if self._kwsource is None and self._keywords is None:
            raise Exception("not set keywords!")
        
        if self._dbsource is None:
            raise Exception("not set dbsource!")
        #register the database connections
        conn_rd = get_conn_hash()
        info = conn_rd.get(self._input['dbserver'])['info']
        info['dbname']=self._input['data']['dbname']
        mysqlwrap.setup_db(self._input['dbserver'],info)
        
        info = conn_rd.get(self._kwsource['dbserver'])['info']
        info['dbname']=self._kwsource['data']['dbname']
        mysqlwrap.setup_db(self._kwsource['dbserver'],info) 

        info = conn_rd.get(self._output['dbserver'])['info']
        info['dbname']=self._output['dbname']
        mysqlwrap.setup_db(self._output['dbserver'],info)    
        
        mysqlwrap.pool_monitor()
        
        #build the keyword list
        if self._keywords:
            self.keywords = self._keywords
            KWORDS = self.keywords
            
        else:
            db = mysqlwrap.get_db(self._kwsource['dbserver'])
            res,desc = db.query(self._kwsource['data'])

            if res == -1 or not desc:
                print(res,desc)
                raise Exception("no keywords!")
            
            for row in desc:
                if row['k_word']:
                    self.keywords.append(row['k_word'])
                    self.keyweight[row['k_word']]=row['k_weight']
Example #22
def savedata(data,table):
	"""保存数据到mysql数据库
	"""
	db = mysqlwrap.get_db()
	fields = []
	values = []
	for k,v in data.items():
		fields.append(k)
		#v = db.escape_string(v)
		v=v.replace('"'," ")
		values.append('"%s"' % v)
	sql = "insert into data_%s(%s)values(%s)" % (table,','.join(fields),','.join(values))
	res,desc = db.query(sql,1)

	#handle rows that failed to insert
	if res == -1:
		if desc.args[0] == 1064:
			print(desc)
			return
		data['table'] = 'data_%s'%table
		datamodel.get_tmp_queue().push(data)
Example #23
    def run(self):
        manager = mp.Manager()
        queue = manager.Queue()
        in_queue = manager.Queue()
        out_db_exit = mp.Event()
        input_db = mysqlwrap.get_db(self._input['dbserver'])
        sql_item = self._input['data']
        #determine the id range
        if 'range' in sql_item and sql_item['range']:
            minid, maxid = sql_item.pop('range')
        else:
            res, desc = input_db.query("select min(id) as min,max(id) as max from %s" % sql_item['table'])
            minid, maxid = desc[0]['min'], desc[0]['max']
        sql_where = sql_item.pop('where','')
        
        #field weights
        weight = sql_item.pop('weight')
        
        if sql_where:   sql_where+= " and "
        
        #derive the worker-process count from the run level
        #pool = mp.Pool(get_process_count(self.powerlevel))
        """run the workers as daemon processes"""
        pnum = get_process_count(self.powerlevel)
        pool = []
        q_pool=[]
        for i in range(0,pnum):
            q = manager.Queue()
            q_pool.append(q)
            pool.append(mp.Process(target=fielt_srv, args=(self.keywords,self.keyweight,weight,q,queue,self.exit_flag)))
            pool[i].daemon = True
            pool[i].start()
        """-"""
        p = None
        #start the output process
        if self._output:
            
            p=mp.Process(target=self.output_data, args=(queue,out_db_exit))
            p.start()
            #pool.append(p)
            #pool.apply_async(output_data,(self,queue,self.exit_flag))
            #output_data(self,queue,self.exit_flag)
        p_start = time.time()
        #e_pool = eventlet.GreenPool(10)
        item=[]
        while minid < maxid:
            #check the exit status; the worker processes in `pool` are joined after the loop
            if self.exit_flag.is_set() or global_list.G_EXIT:
                self.close()
                break
            #check the task status
            if global_list.TASK_STAUS[self.taskid] == 1: #paused
                time.sleep(1)
                continue
            if global_list.TASK_STAUS[self.taskid] == 2: #stop requested
                #pool.close()
                #pool.join()
                #self.close()
                break
            stop_id = minid + self.rowstep
            if stop_id > maxid : stop_id = maxid
            _sql_item=sql_item.copy()
            _sql_item['where'] = "%s id >= %s and id < %s" % (sql_where, minid, stop_id)
            res, desc = input_db.query(_sql_item)
            """这里sql_item存在同步问题,有共享冲突"""
            #e_pool.spawn_n(self.data_source,input_db,q_pool,_sql_item)
            #item.append(_sql_item)
            minid = stop_id
            if res == -1:
                print(desc)
                time.sleep(3)
                continue
            #result = []
            q_index = 0
            for row in desc:
                #ps = pool.apply_async(func=fielt,args=(row,self.keywords,self.keyweight,weight,queue))
                
                #fielt(row,self.keywords,self.keyweight,weight,queue)
                #if self._return: result.append(ps)      
                #data-queue mode
                if q_index == len(q_pool):q_index=0
                q_pool[q_index].put(row)
                q_index+=1
            
            """返回结果的代码
            if 1:#self._return:
                for i in result:
                    _res = i.get()
                    print(_res)
                    if _res:
                        self.result.append(_res)
            
            """  

        #task finished
        #pool.close()
        #pool.join()
        self.close()
        #if p: p.join()
        #import pdb
        #pdb.set_trace()
        
        print(len(pool))
        for i in range(0,len(pool)):
            print('join..',pool[i].pid)
            if global_list.TASK_STAUS[self.taskid] == 2:
                pool[i].terminate()
            else:
                pool[i].join()
        print("is done")
        out_db_exit.set()
        if p: p.join()
        print("finish job user :" ,time.time()-p_start)
        jobstatus.done(self.taskid)    
        
Example #24
def sumdelay():
	"""
	检测redis中的统计缓存,根据stat_item_group.sumdelay
	将满足条件的统计数据入库,并清除缓存
	"""
	rdb = rediswrap.get_redis()
	db = mysqlwrap.get_db()
	now = time.localtime()
	fields = rdb.keys(stat_base.RD_ITM_HST_TMP_PRFX+'*')

	if fields:
		gkeys = [f.split(':')[-1] for f in fields]

		todo_gkeys=[]
		step = 20
		#initialise the clock
		clock = int(time.time())-120
		for i in range(0,len(gkeys),step):
			#fetch `step` group records per batch
			ks = ["'%s'" % k for k in gkeys[i:i+step]]
			sql = "select gkey,sumdelay,history_mrk from stat_item_group where gkey in (%s)" % ",".join(ks)
			res, desc = db.query(sql)
			if res==0 and desc:
				for row in desc:
					gk = ""
					#hourly aggregation
					if row['sumdelay'] == 2:
						clock = utils.timestamp(clock,'h')
						gk =row['gkey'] 
					#daily aggregation: the hour is 0
					elif row['sumdelay'] == 1 and now.tm_hour==0:
						clock = utils.timestamp(0,'d')
						gk =row['gkey'] 
					#monthly aggregation: today is the 1st and the hour is 0
					elif row['sumdelay'] == 3 and (now.tm_mday==1 and now.tm_hour==0):
						clock = utils.timestamp(0,'d')
						gk =row['gkey'] 
					#weekly aggregation: today is Monday (tm_wday 0) and the hour is 0
					elif row['sumdelay'] == 4 and (now.tm_wday==0 and now.tm_hour==0):
						clock = utils.timestamp(0,'d')
						gk =row['gkey'] 
					#gk =row['gkey']
					if gk:
						#todo_gkeys.append(row)
						#print(stat_base.RD_ITM_HST_TMP_PRFX+row['gkey'],RD_TMP_PRF+row['gkey'])
						#rdb.rename(stat_base.RD_ITM_HST_TMP_PRFX+row['gkey'],RD_TMP_PRF+row['gkey'])

						key = stat_base.RD_ITM_HST_TMP_PRFX+gk
						r_item = rdb.hgetall(key)
						rdb.delete(key)
						tb = stat_base.get_hst_name(row['history_mrk'])
						cnt = 0
						if r_item:
							
							logger.info("sum stat group %s from %s into %s" % (row['gkey'],key,tb))
							ks = list(r_item.keys())
							#write 50 rows per batch
							for i in range(0,len(ks),50):
								value = ["('%s','%s','%s')" %(j,r_item[j],clock) for j in ks[i:i+50]]
								ins_sql = "insert into %s(itemid,val,clock)values%s" % (tb,','.join(value))
								res,desc = db.query(ins_sql,1)
								if res == -1:
									logger.error(str(desc))
									data_loge.info(ins_sql)
								else:
									cnt+=len(value)
							
							logger.info("[%s] count:[%s] ins:[%s] rows." %(gk,len(ks),cnt))
					
				#flush the aggregated data to the database
Example #25
def reg_group(gkey,info):
	"""
	注册group
	参数说明:
	 gkey group的标识
	 info 组信息 
	返回值:[status,info]
	status: 0 成功
	        -100 数据库错误
	"""
	if gkey == 'default':
		return [0, 1]
	pid = info.get('pid',1)
	name = info.get('name')
	items_mrk = info.get('items_mrk','')
	history_mrk = info.get('history_mrk',items_mrk)
	#check whether the group exists; insert it if it does not
	stat_db = mysqldb.get_db()
	res,desc = get_groups([gkey])
	#print(res,desc)
	if res==0 and not desc:
		data = {'name':name,'pid':pid,'gkey':gkey,'items_mrk':items_mrk,'history_mrk':history_mrk}
		rs, des = stat_db.insert('stat_item_group',data)
		gid = des
	else:
		gid = desc[0]['gid']
	
	#check whether the item table and history table exist; create them if not
	items_table_ddl = """
	  Create Table  stat_item_%s (
	  id int(11) NOT NULL AUTO_INCREMENT COMMENT 'item id',
	  name varchar(255) COLLATE utf8_bin DEFAULT NULL COMMENT 'item name',
	  itemkey varchar(50) COLLATE utf8_bin NOT NULL COMMENT 'item key',
	  gid int(11) NOT NULL DEFAULT '1' COMMENT 'id of the owning item group',
	  sumdelay int(11) DEFAULT '1' COMMENT 'aggregation interval (seconds)',
	  PRIMARY KEY (id),
	  UNIQUE KEY itemkey (itemkey) USING BTREE
	) ENGINE=InnoDB  DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='statistics item table';
	""" % items_mrk
	if items_mrk:
		res,desc = stat_db.query("show tables like 'stat_item_%s'" % items_mrk )
		if res==0 and not desc:
			rs,ds = stat_db.query(items_table_ddl,1)
			if rs== -1:
				return err_handle.db_err
			
	history_table_ddl = """
	  Create Table  stat_history_%s (
	  itemid bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT 'item id',
	  clock int(11) NOT NULL DEFAULT '0' COMMENT 'time the data was produced',
	  val float(16,4) NOT NULL DEFAULT '0.0000' COMMENT 'value',
	  gid int(11) DEFAULT '1' COMMENT 'item group id',
	  KEY history_1 (itemid,clock) USING BTREE
	) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='statistics history data';
	""" % history_mrk
	if history_mrk:
		res,desc = stat_db.query("show tables like 'stat_history_%s'" % history_mrk )
		if res==0 and not desc:
			rs,ds = stat_db.query(history_table_ddl,1)
			if rs ==-1:
				return err_handle.db_err
	return [0,gid]	
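A registration sketch that ties reg_group() to the helpers above; the group key, name and markers are assumptions:

info = {'name': 'Server statistics', 'pid': 1, 'items_mrk': 'srv', 'history_mrk': 'srv'}
status, gid = reg_group('server_stats', info)   # creates stat_item_srv / stat_history_srv if missing
if status == 0:
    reg_items('server_stats', [['CPU load', 'cpu.load']])
    reg_items2redis('server_stats')             # prime the Redis cache used by set() and get()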
Example #26
def chang_flag(corp,flag,table):
	"""更改原始公司表中的flag
	"""
	db = mysqlwrap.get_db()
	sql = "update  base_%s set flag=%s where name='%s'" % (table,flag,corp)
	res,desc = db.query(sql,1)
Example #27
		 				'url_check':'http://gxqyxygs.gov.cn/checkCheckNo.jspx',
		 				'url_list':'http://gxqyxygs.gov.cn/searchList.jspx'}
		 },
		 'ha':{'mod':'anhui','thread_count':50,
		 		'urls':{'host':'http://222.143.24.157',
		 				'url_home':'http://222.143.24.157/search.jspx',
		 				'url_code':'http://222.143.24.157/validateCode.jspx?type=1&id=%s',
		 				'url_check':'http://222.143.24.157/checkCheckNo.jspx',
		 				'url_list':'http://222.143.24.157/searchList.jspx'}
		 },
		 'hl':{'mod':'anhui','thread_count':50,
		 		'urls':{'host':'http://gsxt.hljaic.gov.cn',
		 				'url_home':'http://gsxt.hljaic.gov.cn/search.jspx',
		 				'url_code':'http://gsxt.hljaic.gov.cn/validateCode.jspx?type=1&id=%s',
		 				'url_check':'http://gsxt.hljaic.gov.cn/checkCheckNo.jspx',
		 				'url_list':'http://gsxt.hljaic.gov.cn/searchList.jspx'}
		 },
		 }

if __name__=="__main__":

	dbinfo ={'host':'192.168.10.126','port':3306,'dbname':'gov_corp',
            'user':'******','passwd':'wbsp','charset':'utf8'}
	mysqlwrap.setup_db('default',dbinfo)
	mysqlwrap.get_db().connect()
	rediswrap.setup_redis('default','192.168.10.126',6380)
	for p in SF_DIST.keys():
		sql = Data_tb % p
		mysqlwrap.get_db().query(sql,1)
	#print(get_proxy())