def handle(self, *args, **options):
    print stamp2str(time.time()) + ':begin'
    result = 'success'
    resDict = handleImpl()
    if len(resDict) > 0:
        result = 'failure'
        for (item, errortype) in resDict.items():
            print item + ': ' + errortype
    print stamp2str(time.time()) + result

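# Every command in this file leans on a pair of timestamp helpers that are
# never shown. Below is a minimal sketch consistent with how they are called
# (an assumed implementation, not the project's actual one): stamp2str takes
# a unix timestamp plus an optional `formt` pattern, str2stamp is the inverse.
import time

def stamp2str(stamp, formt='%Y-%m-%d %H:%M:%S'):
    # unix timestamp (seconds) -> formatted local-time string
    return time.strftime(formt, time.localtime(stamp))

def str2stamp(timestr, formt='%Y-%m-%d %H:%M:%S'):
    # formatted local-time string -> unix timestamp (seconds)
    return int(time.mktime(time.strptime(timestr, formt)))
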
def handle(self, *args, **options):
    print stamp2str(time.time()) + ':begin'
    oldgroups = OldConfigGroup.objects.using('configcentre').exclude(
        group_id__icontains='_gray')
    newgroups = ConfigGroup.objects.all()
    sites = Site.objects.all()
    apps = App.objects.all()
    idcs = Room.objects.all()
    for g in oldgroups:
        group_id = g.group_id
        idc_ycc_code = g.idc
        pool = g.pool
        idcarr = idcs.filter(ycc_code=idc_ycc_code)
        if not idcarr.exists():
            continue
        for i in idcarr:
            if idc_ycc_code == 'SH':
                idc = idcs.get(pk=1)
                break
            else:
                idc = i
        newgroup = newgroups.filter(group_id=group_id, idc=idc)
        poolarr = pool.split('/')
        site_id = 0
        app_id = 0
        site_name = ''
        app_name = ''
        if len(poolarr) > 0:
            site_name = poolarr[0]
            siteset = sites.filter(name=site_name)
            if siteset.exists():
                site_id = siteset[0].id
        if len(poolarr) > 1:
            app_name = poolarr[1]
            appset = apps.filter(name=app_name, status=0)
            if appset.exists():
                if site_id != 0:
                    appset = appset.filter(site_id=site_id)
                if appset.exists():
                    app_id = appset[0].id
        if app_id != 0 and not newgroup.filter(site_id=site_id,
                                               site_name=site_name,
                                               app_id=app_id,
                                               app_name=app_name).exists():
            if newgroup.exists():
                newgroup.update(site_id=site_id, site_name=site_name,
                                app_id=app_id, app_name=app_name)
    print stamp2str(time.time()) + ':success'

def handle(self, *args, **options):
    ip = IpTotal.objects.all()
    for item in ip:
        arr = item.ip.split('.')
        item.ip1 = arr[0]
        item.ip2 = arr[1]
        item.ip3 = arr[2]
        item.ip4 = arr[3]
        item.save()
    print stamp2str(time.time()) + ':success'

def tc_only_update():
    print 'begin ' + stamp2str(time.time())
    setup()
    testImpl()
    teardownOld()
    setup()
    if testImpl():
        print 'tc_only_update success ' + stamp2str(time.time())
    teardown()
    print 'end ' + stamp2str(time.time())

def handle(self, *args, **options):
    print 'begin ' + stamp2str(time.time())
    res = 'Fail'
    setup()
    if tc_add() and tc_update() and tc_delete():
        res = 'Success'
    teardown()
    print res + ' ' + stamp2str(time.time())

def handle(self, *args, **options):
    size = 10000
    for i in range(1, 20):
        bb = (i - 1) * size
        cc = i * size
        ip = IpTotal.objects.filter(type=args[0])[bb:cc]
        for item in ip:
            response = os.system("ping -c 2 " + item.ip)
            if response == 0:
                IpFping.objects.get_or_create(ip=item.ip)
    print stamp2str(time.time()) + ':success'

def handle(self, *args, **options):
    type = int(args[1])
    idc = int(args[0])
    is_need_delete = int(args[2])
    if is_need_delete == 1:
        IpFping.objects.all().delete()
    size = 10000
    for i in range(1, 20):
        bb = (i - 1) * size
        cc = i * size
        ip = []  # stays empty when idc matches none of the branches below
        if idc == 1:  # Nanhui
            if type == 1:  # DB
                ip = IpTotal.objects.filter(type=3, status=1, idc=1,
                                            ip2=0)[bb:cc]
            else:
                ip_list = [
                    0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 18, 19, 20, 21, 22, 23,
                    24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
                    38, 39, 40, 41, 42, 43, 44, 225, 248, 249, 250, 251,
                    252, 253, 254
                ]
                ip = IpTotal.objects.filter(type=3, status=1, idc=1, ip2=4,
                                            ip3__in=ip_list)[bb:cc]
        if idc == 4 or idc == 10:
            if type == 1:
                ip = IpTotal.objects.filter(type=3, status=1, idc=4,
                                            ip3__gte=1, ip3__lte=46)[bb:cc]
            elif type == 3:
                ip = IpTotal.objects.filter(type=3, status=1, idc=4, ip2=63,
                                            ip3__gte=12, ip3__lte=15)[bb:cc]
            else:
                ip = IpTotal.objects.filter(type=3, status=1, idc=4,
                                            ip3=223)[bb:cc]
        for item in ip:
            response = os.system("ping -c 2 " + item.ip)
            if response == 0:
                IpFping.objects.get_or_create(ip=item.ip)
    print stamp2str(time.time()) + ':success'

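# The two commands above shell out via os.system("ping -c 2 " + item.ip),
# which inherits shell parsing of the concatenated string and echoes ping
# output to the console. A hedged alternative sketch using subprocess with an
# argument list (assumes the Linux iputils ping and its -W per-reply timeout;
# not what the commands currently do):
import os
import subprocess

def ping_ok(ip, count=2, timeout=5):
    # True when ping exits 0; output is discarded instead of echoed
    with open(os.devnull, 'w') as devnull:
        return subprocess.call(
            ['ping', '-c', str(count), '-W', str(timeout), ip],
            stdout=devnull, stderr=devnull) == 0
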
def handle(self, *args, **options):
    selgroups = []
    hostnum = len(args)
    if hostnum > 0:
        for hostip in args:
            output = ssh('ls /var/www/webapps/config/ycc/snapshot/',
                         hostip, user='******')[2]
            output = output.split('\n')
            selgroups.extend(output)
            print len(output)
            print len(selgroups)
    selgroups = list(set(selgroups))
    print len(selgroups)
    importGroupDatas(selgroups)
    print stamp2str(time.time()) + ':success'

def testImpl():
    print 'testImpl begin ' + stamp2str(time.time())
    group_id = 'site_app'
    olddataids = OldConfigInfo.objects.using('configcentre').filter(
        group_id=group_id, group_version=0, status='approved',
        environment='production')
    dataiddict = {'old': {}, 'new': {}}
    newdataids = ConfigInfo.objects.filter(
        group_status__group__group_id=group_id,
        group_status__status=4, env=7)
    for nd in newdataids:
        dataiddict['new'][nd.data_id] = {}
        dataiddict['new'][nd.data_id]['old_content'] = nd.content
        dataiddict['new'][nd.data_id]['old_md5'] = nd.content_md5
    for od in olddataids:
        dataiddict['old'][od.data_id] = {}
        dataiddict['old'][od.data_id]['old_content'] = od.content
        dataiddict['old'][od.data_id]['old_md5'] = od.md5
        content = stamp2str(time.time(), '%Y-%m-%d %H:%M:%S')
        od.content = content
        od.md5 = md5(content)
        od.status = 'approved'
        od.save()
        dataiddict['old'][od.data_id]['new_content'] = od.content
        dataiddict['old'][od.data_id]['new_md5'] = od.md5
    olddataids.update(status='published')
    newdataids = ConfigInfo.objects.filter(
        group_status__group__group_id=group_id,
        group_status__status=4, env=7)
    for nd in newdataids:
        if nd.data_id not in dataiddict['new']:
            dataiddict['new'][nd.data_id] = {}
        dataiddict['new'][nd.data_id]['new_content'] = nd.content
        dataiddict['new'][nd.data_id]['new_md5'] = nd.content_md5
    same = True
    odlen = len(olddataids)
    if odlen == len(newdataids) or odlen == 0:
        for i in range(odlen):
            if (olddataids[i].content != newdataids[i].content or
                    olddataids[i].md5 != newdataids[i].content_md5):
                same = False
                print olddataids[i].data_id + '...fails'
    else:
        same = False
        print 'data_id Num diffs:' + str(odlen) + ':' + \
            str(len(newdataids)) + '...fails'
    return same

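# The md5() helper used by testImpl() and the tc_* test functions is not
# defined in these snippets. A minimal sketch consistent with its usage,
# assuming it simply wraps hashlib (hypothetical implementation):
import hashlib

def md5(content):
    # hex digest of a string, as stored in the md5/content_md5 columns
    return hashlib.md5(content).hexdigest()
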
def handle(self, *args, **options):
    ip = TmpFpingIpNh.objects.filter()
    for item in ip:
        try:
            server = Server.objects.exclude(server_status_id=400).get(
                mgmt_ip=item.ip)
        except Server.MultipleObjectsReturned:
            print item.ip
            continue
        except Server.DoesNotExist:
            print item.ip
            continue
        new_ip = IpTotal.objects.get(ip=item.ip)
        new_ip.asset_info = server.assetid
        new_ip.is_used = 1
        new_ip.save()
    print stamp2str(time.time()) + ':success'

def accident_list(request):
    accident_status = AccidentStatus.objects.using('accident').all()
    accident_parent_type = AccidentParentType.objects.using(
        'accident').filter(enable=0)
    accident_type = AccidentType.objects.using('accident').filter(enable=0)
    dept_level2 = DdDepartmentNew.objects.filter(deptlevel=2, enable=0)
    domain_list = DdDomainV2.objects.filter(enable=0).exclude(
        id=DOMAIN_HEAD_ID)
    today = date.today()
    start_date = (today - timedelta(7)).strftime("%Y-%m-%d")
    end_date = today.strftime("%Y-%m-%d")
    user_list = DdUsers.objects.filter(enable=0)
    current_time = stamp2str(int(time.time()))
    try:
        cur_rota = Rota.objects.using('default').get(
            promotion=0, duty_domain=DOMAIN_HEAD_ID,
            duty_date_start__lt=current_time,
            duty_date_end__gte=current_time)
        duty_manager = cur_rota.duty_man.all().first()
        back_duty_manager = cur_rota.duty_backup.all().first()
    except (Rota.DoesNotExist, Rota.MultipleObjectsReturned):
        duty_manager = None
        back_duty_manager = None
    edit_visible = False
    group_list = request.user.groups.values()
    group_id_list = [group['id'] for group in group_list]
    if (request.user.is_superuser or
            GROUP_ID['ACCIDENT_MASTER'] in group_id_list or
            GROUP_ID['ACCIDENT_MONITOR'] in group_id_list):
        edit_visible = True
    other_domain_list = AccidentOtherDomain.objects.all()
    return my_render(request, 'accident/accident_list.html', locals())

def perform_create(self, serializer):
    # validate the type and action parameters
    type = self.request.DATA.get('type')
    action = self.request.DATA.get('action')
    app_id = self.request.DATA.get('app_id')
    exists_type, exists_action, app = (None, None, None)
    try:
        exists_type = Type.objects.using('change').get(key=type)
    except Type.DoesNotExist:
        logging.warn("change.views_api.perform_create() "
                     "type does not exist!")
    if exists_type:
        type_id = exists_type.id
        try:
            exists_action = Action.objects.using('change').get(
                key=action, type_id=type_id)
        except Action.DoesNotExist:
            logging.warn("change.views_api.perform_create() "
                         "action does not exist! create failed!")
        if exists_action:
            try:
                app = App.objects.get(id=app_id)
            except App.DoesNotExist:
                logging.warn("app does not exist! skip it")
            serializer.save(created=stamp2str(time.time()),
                            task_id=uuid.uuid4(),
                            action_id=exists_action.id,
                            app=app)
        else:
            raise YAPIException('this action cannot be found!')
    else:
        raise YAPIException('type does not exist!')

def handle(self, *args, **options):
    app_filter = dict()
    app_filter['type'] = 0
    if settings.YCC_ENV == 'production':
        app_filter['status'] = 0
    elif settings.YCC_ENV == 'test':
        app_filter['test_status'] = 0
    else:
        app_filter['id'] = 0
    app = App.objects.filter(**app_filter)
    idc = Room.objects.filter(status=1, ycc_sync=1)
    for item in idc:
        for item_app in app:
            group_name = '%s_%s' % (item_app.site.name, item_app.name)
            old_pool = '%s/%s' % (item_app.site.name, item_app.name)
            group, created = ConfigGroup.objects.get_or_create(
                site_id=item_app.site_id,
                app_id=item_app.id,
                group_id=group_name,
                idc=item,
                status=1,
                defaults={
                    'site_name': item_app.site.name,
                    'app_name': item_app.name,
                    'type': 1,
                    'old_pool': old_pool,
                    'created': int(time.time()),
                    'updated': int(time.time()),
                })
            if not created:
                group.site_name = item_app.site.name
                group.app_name = item_app.name
                group.save()
            else:
                ConfigGroupStatus.objects.get_or_create(
                    group=group, version=0, status=0, pre_version=0)
                ConfigGroupStatus.objects.get_or_create(
                    group=group, version=1, status=4, pre_version=0)
    print stamp2str(time.time()) + ':success'

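# Note on get_or_create above: Django applies the `defaults` dict only when
# it inserts a new row; an existing row comes back unchanged. That is why the
# `if not created` branch re-assigns site_name/app_name and saves explicitly
# instead of relying on defaults.
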
def load_accident(request):
    api_url = '%saccident/accident/all/?%s' % (
        CMDBAPI_URL,
        '&'.join([k + '=' + v for k, v in request.GET.items()]))
    headers = {'Authorization': 'Basic YWNjaWRlbnQ6ODVCKFVQcFkyM00pKl5ibl4='}
    res = requests.get(api_url, headers=headers)
    if res.status_code != 200:
        return HttpResponse(u'获取导出数据出现错误,请联系平台研发刘亚婷')
    data = res.json()
    # style for the excel header row: centered alignment
    head_style = xlwt.XFStyle()
    head_alignment = xlwt.Alignment()
    head_alignment.horz = xlwt.Alignment.HORZ_CENTER
    head_alignment.vert = xlwt.Alignment.VERT_CENTER
    head_style.alignment = head_alignment
    # style for the excel content cells: left-aligned
    content_style = xlwt.XFStyle()
    alignment = xlwt.Alignment()
    alignment.horz = xlwt.Alignment.HORZ_LEFT
    alignment.vert = xlwt.Alignment.VERT_CENTER
    content_style.alignment = alignment
    workbook = xlwt.Workbook(encoding='utf-8')
    table = workbook.add_sheet(u"历史事故列表", cell_overwrite_ok=True)
    table.row(0).set_style(xlwt.easyxf('font:height 380;'))
    # write the header row
    head = [u'编号', u'值班经理', u'值班经理账号', u'事故编号', u'事故等级',
            u'事故名称', u'发生时间', u'影响范围', u'影响时长', u'是否影响可用性',
            u'事故原因', u'责任部门', u'处理经过', u'责任Domain', u'责任人',
            u'事故类型', u'根源分类', u'是否处罚', u'处罚人', u'处罚信息',
            u'Mantis编号', u'事故状态', u'恢复时间', u'基本信息填写SLA',
            u'详细信息填写SLA', u'备注', u'是否电商系统', u'调校系数']
    for col in range(len(head)):
        if col == 0:
            table.col(col).width = 1400
        else:
            table.col(col).width = 4000
        table.write(0, col, head[col], head_style)
    # write one row per accident
    row = 1  # current row in the sheet
    for id, item in enumerate(data):
        value = [id + 1, item['duty_manager_name_ch'],
                 item['duty_manager_name'], item['accidentid'],
                 item['level_name'], item['title'],
                 stamp2str(item['happened_time'], '%Y-%m-%d %H:%M'),
                 item['affect'], item['time_length'],
                 '是' if item['is_available'] else '否',
                 item['reason'], item['duty_dept_names'], item['process'],
                 item['duty_domain_names'], item['duty_users'],
                 item['type_parent_name'], item['type_name'],
                 '是' if item['is_punish'] else '否',
                 item['punish_users'], item['punish_content'],
                 item['mantis_id'], item['status_name'],
                 stamp2str(item['finish_time'], '%Y-%m-%d %H:%M'),
                 item['basic_sla'], item['detail_sla'], item['comment'],
                 item['is_online_str'], item['health']]
        table.write(row, 0, value[0], head_style)
        for col1 in range(1, len(head)):
            table.write(row, col1, value[col1], content_style)
        table.row(row).set_style(xlwt.easyxf('font:height 300;'))
        row += 1
    sio = StringIO.StringIO()
    workbook.save(sio)
    # build the response once; writing the buffer into the response a second
    # time would duplicate the file content
    response = HttpResponse(sio.getvalue(),
                            content_type='application/vnd.ms-excel')
    response['Content-Disposition'] = (
        'attachment; filename=accident_list@%s.xls'
        % date.today().strftime("%Y%m%d"))
    return response

def handle(self, *args, **options):
    print stamp2str(time.time()) + ':begin'
    groupstatus = ConfigGroupStatus.objects.all()
    groupstatus_published = groupstatus.filter(status=4)
    groupstatus_edit = groupstatus.filter(status=0)
    configinfo_pro_published = ConfigInfo.objects.filter(
        env=7, group_status__status=4)
    configinfo_pro_edit = ConfigInfo.objects.filter(
        env=7, group_status__status=0)
    current_time = int(time.time())
    print current_time
    for gsp in groupstatus_published:
        if not groupstatus_edit.filter(group_id=gsp.group_id).exists():
            print 'error-no gse in ' + gsp.group.group_id
            continue
        configinfo_pro_published_gsp = configinfo_pro_published.filter(
            group_status_id=gsp.id)
        if configinfo_pro_published_gsp.exists():
            gse = groupstatus_edit.get(group_id=gsp.group_id)
            configinfo_pro_edit_gse = configinfo_pro_edit.filter(
                group_status_id=gse.id)
            if configinfo_pro_edit_gse.exists():
                print 'error-gse configinfo exists in ' + gsp.group.group_id
                continue
            configinfos = []
            for cppg in configinfo_pro_published_gsp:
                configinfos.append(
                    ConfigInfo(data_id=cppg.data_id,
                               group_status=gse,
                               env=cppg.env,
                               content=cppg.content,
                               content_md5=cppg.content_md5,
                               created_time=current_time,
                               modified_time=0,
                               created_by=cppg.created_by,
                               modified_by='',
                               remark='',
                               file_type=cppg.file_type,
                               cmp=1,
                               config_type=1))
            ConfigInfo.objects.bulk_create(configinfos)
    print stamp2str(time.time()) + ':success'

def create_publish4trident4config(self, user_id, app_id, comment, jiraid,
                                  idc, publishDateTimeFrom, publishDateTimeTo,
                                  restart, publishTimeType, gray_stage_type,
                                  gray_detail_info, restart_interval, zone):
    depid = stamp2str(time.time(), '%Y%m%d%H%M%S') + str(
        random.randint(100000, 999999)) + 'C'
    deploy, created = DeployMainConfig.objects.get_or_create(
        depid=depid,
        defaults={
            'uid': user_id,
            'app_id': app_id,
            'status': 0,
            'comment': comment,
            'jiraid': jiraid,
            'create_time': int(time.time()),
            'last_modified': int(time.time()),
            'idc': idc,
            'publishdatetimefrom': int(publishDateTimeFrom) / 1000,
            'publishdatetimeto': int(publishDateTimeTo) / 1000,
            'restart': int(restart),
            'publishtimetype': publishTimeType,
            'gray_release_info': gray_detail_info.get('grayPercent')
                if gray_stage_type == 1 else None,
            'gray_stage_interval': gray_detail_info.get('stageInterval')
                if gray_stage_type == 1 else None,
            'restart_interval': restart_interval,
            'colony_surplus': gray_detail_info.get('colonySurplus', 75)
                if gray_stage_type == 1 else None,
            'recover_time': gray_detail_info.get('recoverTime', 50)
                if gray_stage_type == 1 else None,
            'gray_rollback_type': gray_detail_info.get('rollbackType', 1)
                if gray_stage_type == 1 else None,
            'zone': Room.objects.get(id=zone) if zone else None
        })
    deploy.status = 1
    deploy.save()
    return depid

def perform_create(self, serializer):
    segment = serializer.validated_data['ip']
    mask = serializer.validated_data['mask']
    ip_network_obj = IPNetwork('/'.join([segment, str(mask)]))
    for ip_segment_obj in IpSegment.objects.filter(status=1):
        old_ip_network = '/'.join(
            [ip_segment_obj.ip, str(ip_segment_obj.mask)])
        old_ip_network_obj = IPNetwork(old_ip_network)
        if set(range(ip_network_obj.first, ip_network_obj.last + 1)) & set(
                range(old_ip_network_obj.first,
                      old_ip_network_obj.last + 1)):
            raise Exception('the new segment overlaps the existing '
                            'segment (%s), creation failed' % old_ip_network)
    instance = serializer.save(created=stamp2str(time.time()))
    is_gen_ip = self.request.DATA.get('is_gen_ip', None)
    if is_gen_ip is not None:
        ip_str = "%s/%d" % (instance.ip, instance.mask)
        records = []
        ipnet = IPNetwork(ip_str)
        status = 1
        for ip in ipnet:
            ip_string = "%s" % ip
            ip_array = ip_string.split('.')
            if instance.type == 2:  # generation rules for management IPs
                if (ip == ipnet.network or ip == ipnet.broadcast or
                        ip_array[3] == '0' or ip_array[3] == '255'):
                    status = 0
                else:
                    status = 1
                if instance.idc == 1 and ip_string.find("10.61.15") == 0:
                    # Nanhui management IPs
                    status = 0
                if instance.idc == 4 and int(ip_array[3]) >= 249:
                    status = 0
            elif instance.type == 3:  # generation rules for intranet IPs
                if ip == ipnet.network or ip == ipnet.broadcast:
                    status = 0
                else:
                    status = 1
                if int(ip_array[3]) > 230:
                    status = 0
                if int(ip_array[3]) >= 191 and int(ip_array[3]) <= 200:
                    status = 0
            records.append(
                IpTotal(ip=ip, ip_segment_id=instance.id,
                        type=instance.type, idc=instance.idc, status=status,
                        ip1=int(ip_array[0]), ip2=int(ip_array[1]),
                        ip3=int(ip_array[2]), ip4=int(ip_array[3])))
        IpTotal.objects.bulk_create(records, batch_size=1024)

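# The overlap test above materializes every address of both networks into
# Python sets, which gets expensive for large segments. Two inclusive integer
# ranges intersect exactly when each starts at or before the other's end, so
# an equivalent constant-time check (a sketch reusing the same .first/.last
# attributes that IPNetwork already exposes) would be:
def networks_overlap(a, b):
    # a and b are IPNetwork objects; .first/.last are integer address bounds
    return a.first <= b.last and b.first <= a.last
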
def save_data(self, server_id, ori_str):
    data = ori_str['values']
    processorinfo = self.format_process(data)
    nicinfo = self.format_nic_info(data)
    try:
        fact_date = stamp2str(
            str2stamp(data.get('last_run'), '%a %b %d %H:%M:%S %Z %Y'))
    except ValueError, e:
        print('data format error(server_id=%s):%s' %
              (str(server_id), data.get('last_run')))
        return

def tc_update():
    group_id = 'site_app'
    olddataids = OldConfigInfo.objects.using('configcentre').filter(
        group_id=group_id, environment='staging', status='published',
        group_version=0)
    for od in olddataids:
        content = stamp2str(time.time(), '%Y-%m-%d %H:%M:%S')
        od.content = content
        od.md5 = md5(content)
        od.save()
    return cmp()

def handle(self, *args, **options):
    IpZabbix.objects.all().delete()
    # the same query runs against each of the four zabbix databases
    for conn_name in ('zabbix_jq_new', 'zabbix_nh_new',
                      'zabbix_jq', 'zabbix_nh'):
        cursor = connections[conn_name].cursor()
        cursor.execute(
            'select distinct ip from interface where hostid in '
            '(select hostid from hosts where status=0)')
        data = cursor.fetchall()
        for item in data:
            IpZabbix.objects.get_or_create(ip=item[0])
        connections[conn_name].close()
    print stamp2str(time.time()) + ':success'

def online_report(redis_host, redis_port, task_dict, action, username,
                  server_change_content, email, poolname, sitename):
    action_time = stamp2str(time.time(), '%Y%m%d')
    for i in range(ONLINE_REPORT['TRIES']):
        if all([task_report(redis_host, redis_port, task_id)['ready']
                for ip, task_id in task_dict.items()]):
            break
        time.sleep(ONLINE_REPORT['WAIT'])
    failure_ip_list = [
        ip for ip, task_id in task_dict.items()
        if task_report(redis_host, redis_port,
                       task_id)['status'] == 'FAILURE'
    ]
    ip_list = ','.join([ip for ip, task_id in task_dict.items()])
    subject = '(%s)主机变更提醒:%s %s' % (
        '失败' if failure_ip_list else '成功', ip_list, action)
    html = loader.render_to_string(
        'deploy/online_report.html', {
            'task_list': [
                task_report(redis_host, redis_port, task_id, ip)
                for ip, task_id in task_dict.items()
            ],
            'oms_host': OMS_HOST,
            'action': action,
            'action_time': action_time,
            'username': username,
            'server_change_content': server_change_content,
            'poolname': poolname,
            'sitename': sitename,
            'ips': ip_list
        })
    send_email(subject=subject, content=html.encode('utf8'),
               recipient_list=email)

def setupOld(olddataidnum):
    group_id = 'site_app'
    for i in range(olddataidnum):
        data_id = 'data_id_%d.properties' % i
        content = stamp2str(time.time() + i * 1000)
        OldConfigInfo.objects.using('configcentre').create(
            data_id=data_id,
            group_id=group_id,
            content=content,
            md5=md5(content),
            gmt_create=content,
            gmt_modified=content,
            environment='production',
            gmt_expired=content,
            group_version=0,
            status='approved',
            created_by='sync_trigger_test',
            updated_by='sync_trigger_test',
            remark='',
            file_type='txt',
            release_type=0)

def accident_center(request):
    try:
        accident = Accident.objects.using('accident').get(is_accident=0,
                                                          status_id=1)
    except Accident.DoesNotExist:
        accident = None
    if accident:
        user_list = DdUsers.objects.using('default').filter(enable=0)
        cmdbv2_url = CMDBV2_URL
        monitor_url = MONITOR_URL
        media_url_reg = 'http://' + OMS_HOST + MEDIA_URL
        current_time = stamp2str(int(time.time()))
        try:
            cur_rota = Rota.objects.using('default').get(
                promotion=0, duty_domain=DOMAIN_HEAD_ID,
                duty_date_start__lt=current_time,
                duty_date_end__gte=current_time)
            duty_manager = cur_rota.duty_man.all().first()
            back_duty_manager = cur_rota.duty_backup.all().first()
        except (Rota.DoesNotExist, Rota.MultipleObjectsReturned):
            duty_manager = None
            back_duty_manager = None
        return my_render(request, 'accident/center_accident.html', locals())
    else:
        return my_render(request, 'accident/center_normal.html', locals())

def handle(self, *args, **options):
    # pool list
    appname_list = [
        'yihaodian/ad-dolphin-go', 'yihaodian/contract',
        'yihaodian/advertise-open-service', 'yihaodian/backend-zeus-app',
        'yihaodian/front-cms', 'yihaodian/promotion',
        'yihaodian/backend-price-web', 'ops/sre',
        'yihaodian/tracker-related', 'yihaodian/security-antifraud',
        'samsclub/lab-pe-front', 'shareservice/tracker-flume',
        'yihaodian/front-homepage', 'yihaodian/brain',
        'shareservice/order-gds', 'yihaodian/front-union-click',
        'yihaodian/mingpin-backend', 'yihaodian/jingpin',
        'yihaodian/front-myyhd-backend', 'yihaodian/ad-dolphin-bidding',
        'yihaodian/backend-finance-invoice',
        'yihaodian/search-mars-platform', 'yihaodian/lab_pe_front'
    ]
    for appname in appname_list:
        print('%s start' % appname)
        site_app = appname.split('/')
        # look up the site and app for this pool
        try:
            site = Site.objects.get(name=site_app[0].strip())
            app = App.objects.get(name=site_app[1].strip(), site_id=site.id)
        except (Site.DoesNotExist, App.DoesNotExist):
            print('error: %s site or app does not exist!' % appname)
            continue
        servers = Server.objects.filter(app_id=app.id, server_type_id=3)
        ids = ','.join([str(s.id) for s in servers])
        last_stg_list = Deployv3StgMain.objects.filter(
            app_id=app.id, deploy_type=0,
            status=2).order_by('-success_update')
        if len(last_stg_list) == 0:
            print('error: %s last stg deploy does not exist!' % appname)
            continue
        last_stg = last_stg_list[0]
        postdata = {
            'app_id': int(app.id),
            'depid': stamp2str(int(time.time()), '%Y%m%d%H%M%S') +
                     str(random.randint(100000, 999999)),
            'uid': 'liuyating1',
            'source_path': str(last_stg.source_path),
            'deploy_type': 0,
            'is_restart': 0,
            'is_need_deploy': 1,
            'server_ids': str(ids)
        }
        # post the stg deploy
        url = '%sdeploy/stg/list/' % CMDBAPI_URL
        headers = {'Authorization': 'Basic amVua2luczp2MEIoVXhtWTQ4TSkqXmJe'}
        response = requests.post(url, data=postdata, headers=headers)
        if response.status_code == 201:
            print('%s create stg deploy success!' % appname)
        else:
            print(u'error: %s create stg deploy error:%s' %
                  (appname, str(response.text)))
    print('end')

def handle(self, *args, **options):
    count_suc = fail1 = fail2 = fail3 = fail4 = fail5 = fail6 = fail7 = 0
    current_date = stamp2str(time.time() - 3600 * 24,
                             formt='%Y-%m-%d 00:00:00')
    next_date = stamp2str(time.time(), formt='%Y-%m-%d 00:00:00')
    print stamp2str(time.time()) + ':start'
    raw_sql = "select * from config_log where log_time>='%s' and " \
              "log_time<'%s' and log_type='%s' group by log_operator" % (
                  current_date, next_date, 'client_metainfo_production')
    queryset = list(ConfigLog.objects.raw(raw_sql))
    count = len(queryset)
    print stamp2str(time.time()) + ':sync %d records' % count
    for q in queryset:
        try:
            server = Server.objects.exclude(server_status_id=400).get(
                ip=q.log_operator)
        except Server.DoesNotExist:
            fail1 += 1
            print stamp2str(time.time()) + \
                ':%s has no Server object' % q.log_operator
            continue
        except Server.MultipleObjectsReturned:
            fail2 += 1
            print stamp2str(time.time()) + \
                ':%s has multiple Server objects' % q.log_operator
            continue
        if q.log_level[-3:] == '_jq':
            idc = 4
            groupid = q.log_level[:-3]
        else:
            idc = 1
            groupid = q.log_level
        main_group = None
        try:
            main_group = ConfigGroup.objects.get(group_id=groupid, idc=idc)
        except ConfigGroup.DoesNotExist:
            print stamp2str(time.time()) + \
                ':%s has no group object' % q.log_operator
        except ConfigGroup.MultipleObjectsReturned:
            print stamp2str(time.time()) + \
                ':%s has multiple group objects' % q.log_operator
        detail = json.loads(q.log_detail)
        tmp_appcode = detail['appCode'].split("/")
        if len(tmp_appcode) > 1:
            tmp_site = tmp_appcode[0]
            tmp_pool = tmp_appcode[1]
            try:
                site = Site.objects.get(name=tmp_site)
            except Site.DoesNotExist:
                print stamp2str(time.time()) + \
                    ':%s has wrong poolid(site:%s)' % (q.id, tmp_site)
                ori_validated_pool_name = ''
            else:
                try:
                    app = App.objects.get(site_id=site.id, name=tmp_pool,
                                          status=0)
                except App.DoesNotExist:
                    print stamp2str(time.time()) + \
                        ':%s has wrong poolid(app:%s)' % (q.id, tmp_pool)
                    ori_validated_pool_name = ''
                except App.MultipleObjectsReturned:
                    print stamp2str(time.time()) + \
                        ':%s has wrong poolid(multiple apps:%s)' % (
                            q.id, tmp_pool)
                    ori_validated_pool_name = ''
                else:
                    ori_validated_pool_name = detail['appCode']
        else:
            ori_validated_pool_name = ''
        config_host, created = ConfigHost.objects.get_or_create(
            server_id=server.id,
            ori_main_group_id=q.log_level,
            defaults={
                'ori_pool_name': detail['appCode'],
                'ori_validated_pool_name': ori_validated_pool_name,
                'pool_name': server.app.site.name + '/' + server.app.name
                    if server.app and server.app.site else '',
                'main_group_id': main_group.id if main_group else 0,
                'create_time': int(time.time())
            })
        if not created:
            config_host.ori_pool_name = detail['appCode']
            config_host.pool_name = (
                server.app.site.name + '/' + server.app.name
                if server.app and server.app.site else '')
            config_host.main_group_id = main_group.id if main_group else 0
            config_host.ori_validated_pool_name = ori_validated_pool_name
            config_host.create_time = int(time.time())
            config_host.save()
        count_suc += 1
        for d in detail['list']:
            if d['resource'] == 'MANIFEST.MF':
                if d['detailState']:
                    content = re.findall(
                        r'Class-Path: (.*) Specification-Vendor',
                        d['memo'].encode('utf-8'))
                    if not content:
                        content = re.findall(
                            r'Class-Path: (.*) Name: Build Information',
                            d['memo'].encode('utf-8'))
                    if not content:
                        print stamp2str(time.time()) + \
                            'wrong jar data structure:host=' + q.log_operator
                        continue
                    all_jar = re.findall(r'.*?\.jar',
                                         content[0].strip().encode('utf-8'))
                    for j in all_jar:
                        config_jar = re.findall(r'(.*?)([-_])([0-9])(.*)',
                                                j.strip())
                        if not config_jar:
                            config_jar = re.findall(
                                r'(.*?)(-[vbrs])([0-9])(.*)', j.strip())
                        if not config_jar:
                            print(u'failure:host=' + q.log_operator +
                                  u' and config_main_group=' + q.log_level +
                                  u': jar ' + j +
                                  u' version name does not follow the rule!')
                            continue
                        # sync the config_jar table
                        jar, created = ConfigJar.objects.get_or_create(
                            config_jar=config_jar[0][0])
                        # sync the config_jar_version table
                        jar_version, vcreated = \
                            ConfigJarVersion.objects.get_or_create(
                                config_jar_id=jar.id,
                                config_jar_version=j.strip(),
                                defaults={'create_time': int(time.time())})
                        if not vcreated:
                            jar_version.create_time = int(time.time())
                            jar_version.save()
                        # sync the config_host_jar_version table
                        host_jar_version, hcreated = \
                            ConfigHostJarVersion.objects.get_or_create(
                                config_host_id=config_host.id,
                                config_jar_version_id=jar_version.id,
                                defaults={'create_time': int(time.time())})
                        if not hcreated:
                            host_jar_version.create_time = int(time.time())
                            host_jar_version.save()
    print stamp2str(time.time()) + ':finished. success %d, error %d' % (
        count_suc, fail1 + fail2)

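# Worked example of the jar-name parsing above (illustrative input, not from
# the source): the first regex splits a filename into
# (artifact, separator, first version digit, version remainder):
#   re.findall(r'(.*?)([-_])([0-9])(.*)', 'commons-lang-2.6.jar')
#     -> [('commons-lang', '-', '2', '.6.jar')]
# so config_jar[0][0] yields the bare artifact name, 'commons-lang'. The
# second regex is the fallback for names like 'foo-v2.jar', where the
# version digit follows a -v/-b/-r/-s marker rather than the separator.
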
            message.append(
                '### duty user mail does not exist: username=%s' % username)
        act_user_email = list(set(act_user_email))
        # act_user_email = ['*****@*****.**']
        # send the Action reminder email
        if act_list and act_user_email:
            t = get_template('mail/accident/action_delay_notice.html')
            html_content = t.render(Context(locals()))
            try:
                sendmail_html(u'【Action提醒】您还有未完成改进措施',
                              html_content, act_user_email)
            except Exception, e:
                error = True
                message.append(
                    '### send mail error: username=%s, detail: %s' %
                    (username, str(e)))
                break
        message.append(stamp2str(time.time()) + ':finish')
    except Exception, e:
        error = True
        message.append('error: %s' % str(e))
    print('\n'.join(message))
    # mail notification when the script itself fails
    if error:
        t = get_template('mail/cmdb/cron_sync_by_cmis_error.html')
        title = '【Action邮件提醒脚本失败】%s' % stamp2str(
            int(time.time()), formt='%Y-%m-%d %H:%M:%S')
        html_content = t.render(Context(locals()))
        sendmail_html(title, html_content, ACCIDENT_CRON_MAILLIST)

def happened_time_format(self):
    return stamp2str(self.happened_time, formt='%Y/%m/%d %H:%M:%S')

def get_eventlistv2(request):
    id_q = request.GET.get('id', '')
    source__id = request.GET.get('source__id', '')
    type__id = request.GET.get('type__id', '')
    level__id = request.GET.get('level__id', '')
    status_q = request.GET.get('status', '')
    converge_id = request.GET.get('converge_id', '')
    # strictly-less-than filter on level_id
    level_id = request.GET.get('level_id', '')
    level__in = request.GET.get('level__in', '')
    source__in = request.GET.get('source__in', '')
    type__in = request.GET.get('type__in', '')
    site_id = request.GET.get('site_id', '')
    pool_id = request.GET.get('pool_id', '')
    start_time = request.GET.get('start_time', '')
    end_time = request.GET.get('end_time', '')
    search = request.GET.get('search')
    order_by_list = ['-get_time']
    self_defined_filters = {}
    undone_event_flag = False
    if id_q:
        self_defined_filters['id'] = id_q
    if source__id:
        self_defined_filters['source__id'] = source__id
    if type__id:
        self_defined_filters['type__id'] = type__id
    if level__id:
        self_defined_filters['level__id'] = level__id
    if status_q:
        try:
            status_q = int(status_q)
        except ValueError:
            raise MyException(u'the parameter \'status\' is incorrect')
        self_defined_filters['status'] = status_q
        # for undone events
        if status_q == 0:
            order_by_list.insert(0, 'level__id')
            undone_event_flag = True
    if converge_id:
        self_defined_filters['converge_id'] = converge_id
    if level_id:
        self_defined_filters['level_id__lt'] = level_id
    if level__in:
        self_defined_filters['level__in'] = level__in.split(',')
    if source__in:
        self_defined_filters['source__in'] = source__in.split(',')
    if type__in:
        self_defined_filters['type__in'] = type__in.split(',')
    if site_id:
        event_detail = EventDetail.objects.filter(site__id=site_id)
        self_defined_filters['id__in'] = [one.event_id
                                          for one in event_detail]
    if pool_id:
        event_detail = EventDetail.objects.filter(
            pool_id=pool_id).values('event_id')
        event_ids = [one['event_id'] for one in event_detail]
        self_defined_filters['id__in'] = event_ids
    if start_time:
        try:
            if start_time.count(':') == 1:
                start_time += ':00'
            self_defined_filters['get_time__gte'] = str2stamp(
                start_time, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            raise MyException(u'time format error')
    else:
        # without an explicit id, start_time defaults to 24h ago
        if not id_q:
            start_time_default = int(time.time()) - 24 * 3600
            self_defined_filters['get_time__gte'] = start_time_default
    if end_time:
        try:
            if end_time.count(':') == 1:
                end_time += ':00'
            self_defined_filters['get_time__lt'] = str2stamp(
                end_time, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            raise MyException(u'time format error')
    else:
        # without an explicit id, end_time defaults to now
        if not id_q:
            end_time_default = int(time.time()) + 60
            self_defined_filters['get_time__lt'] = end_time_default
    if search:
        self_defined_filters['message__contains'] = search
    # page information
    limit = int(request.GET.get('page_size', 30))
    offset = (int(request.GET.get('page', 1)) - 1) * limit
    # the count of the result (about 600ms in local tests)
    count = Event.objects.filter(**self_defined_filters).count()
    # if 0, return directly
    if count == 0:
        return HttpResponse(json.dumps({'count': 0, 'results': []}),
                            content_type="application/json")
    if offset + limit > count:
        limit = count - offset + 1
    # the result set (about 600ms in local tests)
    columns_list = [
        'id', 'level_id', 'level_adjustment_id', 'type_id', 'source_id',
        'title', 'message', 'get_time', 'create_time', 'cancel_time',
        'cancel_user', 'comment', 'status', 'cancel_type'
    ]
    result_set = Event.objects.filter(**self_defined_filters).order_by(
        *order_by_list)[offset:offset + limit].values(*columns_list)
    # event ids in the result
    eventid_list = []
    for event in result_set:
        eventid_list.append(event['id'])
    # event detail, e.g. ip and pool
    ed_columns = ['event_id', 'ip', 'pool_id']
    event_detail_res = EventDetail.objects.filter(
        event_id__in=eventid_list).values(*ed_columns)
    event_detail_map = {}
    pool_id_list = []
    for ed in event_detail_res:
        pool_id_list.append(ed['pool_id'])
        if ed['event_id'] not in event_detail_map:
            event_detail_map[ed['event_id']] = {
                'ip': [ed['ip']],
                'pool_id': ed['pool_id']
            }
        else:
            event_detail_map[ed['event_id']]['ip'].append(ed['ip'])
    # preload app info
    event_global_var.update_app_info(pool_id_list)
    # add extra info
    for row in result_set:
        # how long an unresolved event has been open
        if undone_event_flag:
            row['during_time'] = timelength_format(row['get_time'],
                                                   int(time.time()))
        # time format
        row['get_time'] = stamp2str(row['get_time'])
        row['create_time'] = stamp2str(row['create_time'])
        if row['cancel_time'] == 0:
            row['cancel_time'] = ""
        else:
            row['cancel_time'] = stamp2str(row['cancel_time'])
        detail = event_detail_map.get(row['id'], {})
        row.setdefault('ip', detail.get('ip', []))
        pack_pool_id = detail.get('pool_id', 0)
        row.setdefault('pool_id', pack_pool_id)
        app_info = event_global_var.get_app_info(pack_pool_id)
        if app_info:
            site_pool_name = app_info[0] + "/" + app_info[1]
        else:
            site_pool_name = ''
        row.setdefault('pool_name', site_pool_name)
        row.setdefault('source_name',
                       event_global_var.get_source_info(row['source_id']))
        row.setdefault('level_name',
                       event_global_var.get_level_info(row['level_id']))
        row.setdefault('type_name',
                       event_global_var.get_type_info(row['type_id']))
        # status name
        row['status'] = event_global_var.get_event_status_name(row['status'])
    # materialize the queryset as a list
    result_set = result_set[:]
    return HttpResponse(json.dumps({'count': count, 'results': result_set}),
                        content_type="application/json")

def get_alarmlistv2(request):
    # event parameters
    pool_id = request.GET.get('pool_id', '')
    exclude_source_id = request.GET.get('exclude_source_id', 0)
    source__in = request.GET.get('source__in', '')
    type__in = request.GET.get('type__in', '')
    level__in = request.GET.get('level__in', '')
    # compatible
    source_id = request.GET.get('event__source__id', '')
    type_id = request.GET.get('event__type__id', '')
    level_id = request.GET.get('event__level__id', '')
    search = request.GET.get('search', '')
    event_defined_filters = {}
    event_defined_exclude = {}
    if pool_id:
        event_detail = EventDetail.objects.filter(
            pool_id=pool_id).values('event_id')
        event_ids = [one['event_id'] for one in event_detail]
        event_defined_filters['id__in'] = event_ids
    if exclude_source_id:
        event_defined_exclude['source_id'] = exclude_source_id
    if source__in:
        event_defined_filters['source_id__in'] = source__in.split(',')
    if type__in:
        event_defined_filters['type_id__in'] = type__in.split(',')
    if level__in:
        event_defined_filters['level_id__in'] = level__in.split(',')
    if source_id:
        event_defined_filters['source_id'] = source_id
    if type_id:
        event_defined_filters['type_id'] = type_id
    if level_id:
        event_defined_filters['level_id'] = level_id
    if search:
        event_defined_filters['message__contains'] = search
    # alarm parameters
    id_q = request.GET.get('id', '')
    status_id = request.GET.get('status_id', '')
    method_id = request.GET.get('method_id', '')
    start_time = request.GET.get('start_time', '')
    end_time = request.GET.get('end_time', '')
    order_by_list = ['-create_time']
    alarm_defined_filters = {}
    if id_q:
        alarm_defined_filters['id'] = id_q
    if status_id:
        alarm_defined_filters['status_id'] = status_id
    if method_id:
        alarm_defined_filters['method_id'] = method_id
    if start_time:
        try:
            alarm_defined_filters['create_time__gte'] = str2stamp(
                start_time, '%Y-%m-%d %H:%M')
        except ValueError:
            raise MyException('time format error')
    else:
        # without an explicit id, start_time defaults to 24h ago
        if not id_q:
            start_time_default = int(time.time()) - 24 * 3600
            alarm_defined_filters['create_time__gte'] = start_time_default
    if end_time:
        try:
            alarm_defined_filters['create_time__lt'] = str2stamp(
                end_time, '%Y-%m-%d %H:%M')
        except ValueError:
            raise MyException('time format error')
    else:
        # without an explicit id, end_time defaults to now
        if not id_q:
            end_time_default = int(time.time()) + 60
            alarm_defined_filters['create_time__lt'] = end_time_default
    event_id_list = []
    # decide whether event-level filtering is needed
    if event_defined_filters or event_defined_exclude:
        # filter pass 1: alarm
        alarm_count = Alarm.objects.filter(**alarm_defined_filters).count()
        if alarm_count == 0:
            return HttpResponse(json.dumps({'count': 0, 'results': []}),
                                content_type="application/json")
        alarm_columns = ['event_id']
        alarm_result_set = Alarm.objects.filter(
            **alarm_defined_filters).values(*alarm_columns)
        event_id_list = []
        for one in alarm_result_set:
            event_id_list.append(one['event_id'])
        old_event_ids = event_defined_filters.get('id__in', [])
        if old_event_ids:
            # intersection with the ids already collected
            event_id_merge = list(
                set(event_id_list).intersection(set(old_event_ids)))
            event_defined_filters['id__in'] = event_id_merge
        else:
            event_defined_filters['id__in'] = event_id_list
        # filter pass 2: event
        event_count = Event.objects.filter(
            **event_defined_filters).exclude(**event_defined_exclude).count()
        if event_count == 0:
            return HttpResponse(json.dumps({'count': 0, 'results': []}),
                                content_type="application/json")
        event_columns = ['id']
        event_result_set = Event.objects.filter(
            **event_defined_filters).exclude(
                **event_defined_exclude).values(*event_columns)
        event_id_list = []
        for one in event_result_set:
            event_id_list.append(one['id'])
    # page information
    limit = int(request.GET.get('page_size', 30))
    offset = (int(request.GET.get('page', 1)) - 1) * limit
    # query alarm info
    if event_id_list:
        alarm_defined_filters['event_id__in'] = event_id_list
    alarm_count = Alarm.objects.filter(**alarm_defined_filters).count()
    if alarm_count == 0:
        return HttpResponse(json.dumps({'count': 0, 'results': []}),
                            content_type="application/json")
    count = alarm_count
    if offset + limit > count:
        limit = count - offset + 1
    columns_list = [
        'id', 'event_id', 'method_id', 'result', 'create_time', 'receiver',
        'error'
    ]
    alarm_result_set = Alarm.objects.filter(
        **alarm_defined_filters).order_by(
            *order_by_list)[offset:offset + limit].values(*columns_list)
    event_id_list = []
    for one in alarm_result_set:
        event_id_list.append(one['event_id'])
    # query event info
    event_defined_filters = {'id__in': event_id_list}
    event_columns = [
        'id', 'source_id', 'type_id', 'level_id', 'title', 'message',
        'level_adjustment_id'
    ]
    event_result_set = Event.objects.filter(
        **event_defined_filters).values(*event_columns)
    event_map = {}
    for one in event_result_set:
        event_map.setdefault(one['id'], one)
    # event detail, e.g. ip and pool
    ed_columns = ['event_id', 'ip', 'pool_id']
    event_detail_res = EventDetail.objects.filter(
        event_id__in=event_id_list).values(*ed_columns)
    event_detail_map = {}
    pool_id_list = []
    for ed in event_detail_res:
        pool_id_list.append(ed['pool_id'])
        if ed['event_id'] not in event_detail_map:
            event_detail_map[ed['event_id']] = {
                'ip': [ed['ip']],
                'pool_id': ed['pool_id']
            }
        else:
            event_detail_map[ed['event_id']]['ip'].append(ed['ip'])
    # preload app info
    event_global_var.update_app_info(pool_id_list)
    # add extra info:
    # method_name, receiver_name, source_name, type_name, level_name
    for row in alarm_result_set:
        # alarm
        row['method_name'] = event_global_var.get_alarm_method_name(
            row['method_id'])
        row['create_time'] = stamp2str(row['create_time'])
        # event
        event_id = row['event_id']
        one_event = event_map.get(event_id, {})
        for event_field in one_event:
            if event_field != 'id':
                row[event_field] = one_event[event_field]
        row['source_name'] = event_global_var.get_source_info(
            one_event.get('source_id', 0))
        row['type_name'] = event_global_var.get_type_info(
            one_event.get('type_id', 0))
        row['level_name'] = event_global_var.get_level_info(
            one_event.get('level_id', 0))
        # event_detail
        one_event_detail = event_detail_map.get(event_id, {})
        row['ip'] = one_event_detail.get('ip', [])
        pack_pool_id = one_event_detail.get('pool_id', 0)
        row['pool_id'] = pack_pool_id
        # pool name
        app_info = event_global_var.get_app_info(pack_pool_id)
        if app_info:
            site_pool_name = app_info[0] + "/" + app_info[1]
        else:
            site_pool_name = ''
        row.setdefault('pool_name', site_pool_name)
        # receiver name
        row['receiver'] = event_global_var.get_alarm_receiver_name(
            row['method_id'], row['receiver'])
    # materialize the queryset as a list
    alarm_result_set = alarm_result_set[:]
    return HttpResponse(json.dumps({'count': count,
                                    'results': alarm_result_set}),
                        content_type="application/json")

    if app:
        filters['app'] = app
    if user:
        filters['user'] = user
    if type:
        filters['type'] = type
    if action:
        filters['action'] = action
    if index:
        filters['index__startswith'] = index
    # support unix time and general time format to search
    if input_start_time:
        try:
            start_time_unix = int(input_start_time)
            start_time = stamp2str(start_time_unix)
        except ValueError:
            start_time = input_start_time
        filters['happen_time__gte'] = start_time
    if input_end_time:
        try:
            end_time_unix = int(input_end_time)
            end_time = stamp2str(end_time_unix)
        except ValueError:
            end_time = input_end_time
        filters['happen_time__lte'] = end_time
    return self.queryset.filter(**filters)\
        .exclude(type=settings.DISABLE_CHANGE_TYPES[0])\
        .using('change')\