def handle(self, request, **kwargs):
    """Return the report periods that are ready, based on the earliest
    physical-machine usage record.

    Returns a (count, periods) tuple. For 'day'/'month' a single period
    starting at the first record; otherwise a list of completed weeks.
    """
    report_type = kwargs["type"]
    earliest = PhysicalMachineUseRecord.objects.order_by("pk").first()
    if earliest is None:
        # no usage data collected yet
        return 0, []
    start_dt = datetime.datetime.strptime(earliest.time, "%Y%m%d%H")
    start_ts = int(datetime_to_timestamp(start_dt))
    now = datetime.datetime.now()
    if report_type in {"day", "month"}:
        period_end = get_report_end(start_dt, report_type)
        if now > period_end:
            return 1, [{"start": start_ts}]
        return 0, []
    # weekly report: walk calendar weeks; the first week is truncated so it
    # ends on the Sunday of the start date's week
    cur_start = start_dt
    cur_end = start_dt + datetime.timedelta(days=6 - start_dt.weekday())
    weeks = []
    # only include weeks that ended more than a day ago (data is complete)
    while now - cur_end > datetime.timedelta(days=1):
        weeks.append({
            "start": int(datetime_to_timestamp(cur_start)),
            "end": int(datetime_to_timestamp(cur_end)),
        })
        cur_start = cur_end + datetime.timedelta(days=1)
        cur_end = cur_end + datetime.timedelta(days=7)
    return len(weeks), weeks
def get_instance_last_backuptime(zone, owner):
    """Map each instance uuid to the timestamp of its most recent backup image.

    Queries the DescribeImage API for the owner/zone; returns an empty dict
    (after logging) when the API call fails.
    """
    payload = {
        "zone": zone,
        "owner": owner,
        "action": "DescribeImage",
        "is_system": "False"
    }
    backup_resp = api.get(payload=payload)
    latest = {}
    if backup_resp.get("code") != 0:
        logger.error("cannot get the image list")
        return latest
    for info in backup_resp["data"]["ret_set"]:
        ins_uuid = info.get("instance_uuid")
        if ins_uuid is None:
            # image not tied to an instance — skip
            continue
        candidate = datetime_to_timestamp(info.get("create_datetime"))
        # keep only the newest backup time per instance
        if candidate > latest.get(ins_uuid, 0):
            latest[ins_uuid] = candidate
    return latest
def get_security_groups_info(security_groups, owner):
    """Resolve raw security-group descriptors into console records.

    Args:
        security_groups: list of dicts with at least a "name" key; the name
            'default' triggers a lookup of the owner's non-Web default group.
        owner: username the groups belong to.

    Returns:
        List of dicts with sg_id / sg_name / create_datetime (timestamp).
    """
    sgs = []
    values = set()  # names already processed, for de-duplication
    for sg in security_groups:
        if sg["name"] in values:
            continue
        # BUGFIX: reset per iteration — previously a failed 'default' lookup
        # silently reused sg_obj from the prior loop iteration.
        sg_obj = None
        if sg["name"] == 'default':
            # map the generic 'default' to the owner's designated
            # non-Web default group (sg-desg* id prefix)
            all_security_group \
                = SecurityGroupModel.get_securities_by_owner(owner)
            for single_security_group in all_security_group:
                if str(single_security_group.sg_id).startswith("sg-desg") \
                        and str(single_security_group.sg_name
                                ).startswith("非Web默认"):
                    sg_obj = single_security_group
                    break
        else:
            sg_obj = SecurityGroupModel.get_security_by_id(sg["name"])
        if sg_obj:
            security_group = {}
            security_group["sg_id"] = sg_obj.sg_id
            security_group["sg_name"] = sg_obj.sg_name
            security_group["create_datetime"] = \
                datetime_to_timestamp(sg_obj.create_datetime)
            sgs.append(security_group)
            values.add(sg["name"])
    return sgs
def _format_account_label(username_key):
    """Return 'name/department' for the account with the given username.

    Falls back to u'无名氏' ("anonymous") when the account has no name and to
    '' when it has no department.
    """
    user = Account.objects.get(user__username=username_key)
    department = getattr(user.department, 'name', '')
    username = user.name
    if username is None:
        username = u'无名氏'
    return username + '/' + department


def get_cmdb_ticket(owner, zone):
    """List CMDB configuration-change tickets for the console.

    NOTE(review): owner and zone are currently unused — all CfgRecordModel
    rows are returned; confirm whether per-owner filtering was intended.
    """
    record = CfgRecordModel.objects.all()
    resp = []
    for single_record in record:
        ticket_id = single_record.ticket_id
        ticket_type = FinanceTicketModel.objects.get(
            ticket_id=ticket_id).ticket_type.ticket_name
        # applicant and approver share the same display format
        applicant = _format_account_label(single_record.applicant)
        approve = _format_account_label(single_record.approve)
        create_time = utc_to_local_time(single_record.create_datetime)
        create_time = datetime_to_timestamp(create_time)
        resp.append({
            'ticket_id': ticket_id,
            'ticket_type': ticket_type,
            'applicants': applicant,
            'last_handle': approve,
            'commit_time': create_time,
            'cfg_type': single_record.model,
        })
    return console_response(total_count=len(resp), ret_set=resp)
def handle(self, *args, **kwargs):
    """Create a monitor ticket pre-filled with placeholder data.

    Management-command entry point: builds the fill-in form for the monitor
    ticket's create node and submits it as the account with id=2.
    """
    title = kwargs.get('title')
    now_time = datetime_to_timestamp(datetime.datetime.now())

    def unit(name, attribute, choices, value):
        # one form unit in the shape add_ticket_process expects
        return {'unit_name': name, 'unit_attribute': attribute,
                'unit_choices_list': choices, 'unit_fill_value': value}

    system_choices = [u'核心系统', u'总线系统', u'数据仓库', u'柜面系统',
                      u'网银', u'手机银行', u'电话银行', u'信贷系统',
                      u'短信通知', u'ATM', u'POS', u'国际结算', u'支付',
                      u'电子汇票', u'总账', u'报表', u'中间业务', u'影像',
                      u'客户关系管理']
    node_data = [
        unit(u'标题', 'text', [], title),
        unit(u'系统名称', 'drop', system_choices, u'核心系统'),
    ]
    # plain text fields, all with the same placeholder value
    for text_name in (u'所属应用系统', u'类型', u'监控来源', u'大类',
                      u'子类', u'状态', u'地址', u'告警组', u'编码'):
        node_data.append(unit(text_name, 'text', [], '123'))
    node_data.append(unit(u'首次发生时间', 'date', [], now_time))
    node_data.append(unit(u'末次发生时间', 'date', [], now_time))
    node_data.append(unit(u'摘要', 'textarea', [], '123'))

    fill_data = {
        'cur_node_id': get_monitor_create_node(),
        'next_node_id': get_monitor_second_node(),
        'node_data': node_data,
    }
    owner = Account.objects.get(id=2).user.username
    resp = add_ticket_process(owner=owner, ticket_id=None,
                              ticket_type=1, fill_data=fill_data)
    if 'msg' in resp:
        print('False!')
    else:
        print('OK!')
def get_keypair_info(keypair_id):
    """Describe a keypair by id.

    Returns None for an empty id or when the keypair is unknown.
    """
    if keypair_id == "":
        return None
    keypair_obj = KeypairsModel.get_keypair_by_id(keypair_id)
    if not keypair_obj:
        return None
    return {
        "keypair_id": keypair_id,
        "keypair_name": keypair_obj.name,
        "encryption": keypair_obj.encryption,
        "create_datetime": datetime_to_timestamp(keypair_obj.create_datetime),
    }
def get_disks_info(volumes_attached, zone):
    """Describe each attached volume that exists in the disks table.

    Volumes whose uuid is unknown to the console are silently skipped.
    """
    zone_record = ZoneModel.get_zone_by_name(zone)
    disks = []
    for volume in volumes_attached:
        disk_obj = DisksModel.get_disk_by_uuid(uuid=volume["id"],
                                               zone=zone_record)
        if not disk_obj:
            continue
        disks.append({
            "disk_id": disk_obj.disk_id,
            "disk_name": disk_obj.name,
            "create_datetime": datetime_to_timestamp(disk_obj.create_datetime),
        })
    return disks
def filter_subnet_pubips(ip_set):
    """Build public-ip descriptors for the given floating-ip records.

    Unknown uuids fall back to placeholder id/name and bandwidth -1.
    """
    ip_list = list()
    for item in ip_set:
        ip_uuid = item.get("id")
        binding_resource = item.get("binding_resource")
        # defaults when the uuid is not in the ips table
        # (the 'Unkonw'/'Unknow' typos are runtime values — preserved as-is
        # since the frontend may match on them; TODO confirm before fixing)
        ip_id = 'Unkonw'
        ip_name = 'Unknow'
        bandwidth = -1
        create_datetime = 0
        # look up id, name, bandwidth and allocation time from the ips table
        ip_obj = IpsModel.get_ip_by_uuid(ip_uuid)
        if ip_obj:
            ip_id = ip_obj.ip_id
            ip_name = ip_obj.name
            bandwidth = ip_obj.bandwidth
            create_datetime = datetime_to_timestamp(ip_obj.create_datetime)
        # rewrite the bound instance's uuid into its console id/name
        instance_uuid = binding_resource.get("instance_id")
        if instance_uuid:
            instance_inst = InstancesModel.get_instance_by_uuid(instance_uuid)
            if instance_inst:
                binding_resource["instance_id"] = instance_inst.instance_id
                binding_resource["instance_name"] = instance_inst.name
        # likewise for a bound router
        router_uuid = binding_resource.get("router_id")
        if router_uuid:
            router_inst = RoutersModel.get_router_by_uuid(router_uuid)
            if router_inst:
                binding_resource["router_id"] = router_inst.router_id
                binding_resource["router_name"] = router_inst.name
        ip_list.append({
            "ip_id": ip_id,
            "ip_name": ip_name,
            "ip_addr": item.get("floating_ip_address"),
            "ip_status": item.get("status"),
            "bandwidth": bandwidth,
            "binding_resource": json.dumps(binding_resource),
            "create_datetime": create_datetime
        })
    return ip_list
def get_ip_detail_by_uuid(uuid):
    """Fetch console details for the ip with the given uuid.

    Returns an empty dict when the ip is unknown.
    """
    detail = {}
    try:
        info = IpsModel.get_ip_by_uuid(uuid)
        if info is None:
            # BUGFIX: get_ip_by_uuid can return None (callers elsewhere in
            # this module truth-test its result); previously this fell
            # through to an uncaught AttributeError instead of the intended
            # empty-dict result.
            return detail
        detail["ip_id"] = info.ip_id
        detail["ip_name"] = info.name
        detail["bandwidth"] = info.bandwidth
        detail["is_normal"] = info.is_normal
        detail["billing_mode"] = info.billing_mode
        detail["create_datetime"] = datetime_to_timestamp(info.create_datetime)
        detail["charge_mode"] = info.charge_mode  # was getattr() with no default
    except IpsModel.DoesNotExist:
        # TODO: logging
        pass
    return detail
def filter_needed_disk_info(disk_info, disk_uuid_list, disks=None):
    """Shape raw disk records into the fields the js frontend needs.

    Args:
        disk_info: one dict or a list of dicts from the backend API.
        disk_uuid_list: uuids of disks genuinely in a restore operation.
        disks: optional whitelist of disk ids to keep (None/empty = keep all).
    """
    if disks is None:
        # BUGFIX: was the mutable default argument `disks=list()`
        disks = []
    if isinstance(disk_info, list):
        # keep only disks the console knows about
        disk_info = filter(lambda x: DisksModel.disk_exists_by_id(x["name"]),
                           disk_info)
    else:
        disk_info = [disk_info]
    needed_info = DISK_FILTER_MAP.values()
    d_info_list = []
    for disk in disk_info:
        d_info = {}
        disk_id = disk.get(DISK_FILTER_MAP["disk_id"])
        if len(disks) > 0 and disk_id not in disks:
            continue
        disk_ins = DisksModel.get_disk_by_id(disk_id=disk_id)
        if disk_ins is None:
            logger.error("cannot find disk with disk id %s" % disk_id)
            continue
        disk_uuid = disk_ins.uuid
        disk_name = disk_ins.name
        backup_time = disk_ins.backup_time
        disk.update({"backup_datetime": backup_time})
        for k in needed_info:
            if k == "disk_name":
                # the console name overrides whatever the backend reported
                d_info[REVERSE_FILTER_MAP[k]] = disk_name
                continue
            d_info[REVERSE_FILTER_MAP[k]] = disk.get(k, "")
        if d_info["create_datetime"]:
            timestamp = datetime_to_timestamp(disk_ins.create_datetime,
                                              use_timezone=True)
            d_info["create_datetime"] = timestamp
        d_info["status"] = STATUS_MAP.get(d_info["status"])
        # a disk created from a backup is implemented as create + restore,
        # so show "creating" instead of "recovering" for those
        if d_info["status"] == 'recovering' and disk_uuid not in disk_uuid_list:
            d_info["status"] = 'creating'
        if d_info['disk_type'].startswith('pvc'):
            d_info['disk_type'] = 'pvcd'
        d_info_list.append(d_info)
    return d_info_list
def change_key_model(ticket):
    """Flatten a ticket object into the header fields of TICKET_HEAD_LIST.

    User fields become 'name/department', time fields become local
    timestamps, and the current node is reduced to its name.
    """
    finish_dict = {}
    for key, val in TICKET_HEAD_LIST.items():
        dict_value = getattr(ticket, val, None)
        if val in ('applicants', 'last_handle'):
            account = Account.objects.get(user__username=dict_value.username)
            department = getattr(account.department, 'name', '')
            username = account.name
            if username is None:
                username = u'无名氏'
            dict_value = username + '/' + department
        elif val.endswith('time'):
            if dict_value is not None:
                dict_value = datetime_to_timestamp(
                    utc_to_local_time(dict_value))
        elif val == 'cur_node':
            dict_value = dict_value.name
        finish_dict[val] = dict_value
    return finish_dict
def create(cls, *args, **kwargs):
    """Record a CREATED trash entry for each jumper instance id given.

    Returns (info_list, error): error is None on success, otherwise the
    raised exception; info_list holds entries created before the failure.
    """
    info_list = []
    try:
        for instance_id in kwargs.get('jumper_instance_id_list'):
            jumper = JumperInstanceModel.objects.get(
                jumper_instance=instance_id)
            trash = JumperTrash.objects.create(
                operate_type=TrashOperateType.CREATED, jumper=jumper)
            info_list.append({
                "operate_type": trash.operate_type,
                "operate_time": datetime_to_timestamp(trash.operate_time,
                                                      use_timezone=True),
                "jumper_id": trash.jumper_id,
            })
        return info_list, None
    except Exception as e:
        # best-effort: report what was created plus the failure
        return info_list, e
def filter(owner, zone, offset, limit, hypervisor_type='', search_key=''):
    """Page through un-deleted disk trash entries for an owner/zone,
    renaming the ORM lookup keys to the names the frontend expects."""
    trash_details = DisksTrash.objects.filter(
        delete_datetime=None,
        disk__user__username=owner,
        disk__zone__name=zone,
        disk__availability_zone=hypervisor_type,
        disk__name__icontains=search_key,
    ).values(
        'id',
        'create_datetime',
        'disk__name',
        'disk__disk_type',
        'disk__disk_size',
        'disk__zone__name',
        'disk__user__username',
        'disk__disk_id',
        'disk__availability_zone',
        'disk__attach_instance',
    )[offset:limit + offset]
    # frontend key -> ORM values() key
    renames = (
        ('trash_id', 'id'),
        ('disk_name', 'disk__name'),
        ('disk_zone_name', 'disk__zone__name'),
        ('disk_username', 'disk__user__username'),
        ('disk_id', 'disk__disk_id'),
        ('disk_size', 'disk__disk_size'),
        ('hyper_type', 'disk__availability_zone'),
        ('disk_type', 'disk__disk_type'),
        ('attach_instance', 'disk__attach_instance'),
    )
    for trash_detail in trash_details:
        for new_key, old_key in renames:
            trash_detail[new_key] = trash_detail.pop(old_key)
        # the trash row's create time is when the disk was deleted
        trash_detail['delete_time'] = datetime_to_timestamp(
            trash_detail.pop('create_datetime'))
        if trash_detail['disk_type'].startswith('pvc'):
            trash_detail['disk_type'] = 'pvcd'
    return trash_details
def filter(zone, owner):
    """List loadbalancers currently sitting in the trash for owner/zone.

    Cross-references the console's LoadbalancerTrash rows with the backend's
    DescribeLoadbalancers result; only lbs present in both are returned.
    """
    payload = {
        'owner': owner,
        'zone': zone,
        'action': 'DescribeLoadbalancers',
    }
    # lbs deleted to trash and neither purged nor restored yet
    lb_details = LoadbalancerTrash.objects.filter(
        lb__zone__name=zone,
        lb__user__username=owner,
        delete_datetime=None,
        restore_datetime=None).values('lb__lb_id', 'create_datetime')
    # trash-row create time == the moment the lb was deleted
    lb_delete_datetimes = {
        lb_detail['lb__lb_id']: lb_detail['create_datetime']
        for lb_detail in lb_details
    }
    resp = describe_loadbalancers_api(payload)
    lb_set = resp.get("data", {}).get("ret_set", [])
    lb_list = []
    for single in lb_set:
        # backend "name" carries the console lb_id
        lb_id = single.get("name", None)
        if lb_id not in lb_delete_datetimes:
            continue
        raw_status = single.get("provisioning_status", None)
        # deleted=True: look the lb up among soft-deleted console records
        if lb_id and LoadbalancerModel.lb_exists_by_id(lb_id, deleted=True):
            lb = LoadbalancerModel.get_lb_by_id(lb_id, deleted=True)
        else:
            continue
        info = {
            "lb_id": lb_id,
            "lb_name": lb.name,
            "create_datetime": datetime_to_timestamp(lb.create_datetime),
            "status": transfer_lb_status(raw_status),
        }
        net_info = {"is_basenet": lb.is_basenet}
        if not lb.is_basenet:
            # non-basenet lbs: fetch subnet details to classify the network
            net_payload = {
                "zone": zone,
                "owner": owner,
                "action": "DescribeNets",
                "subnet_id": lb.net_id
            }
            resp = api.get(net_payload)
            net_data = resp['data']['ret_set'][0]
            # a subnet with no gateway ip is considered private
            net_type = 'private' if net_data.get(
                'gateway_ip') is None else 'public'
            net_info.update({"net_type": net_type})
            net_info.update({"net_id": lb.net_id})
            net_info.update({"net_name": net_data['name']})
        info.update({"net": net_info})
        ip_info = {"vip_addr": single.get("vip_address", None)}
        fip_info = single.get("fip_info")
        if fip_info:
            # the lb has a floating ip attached — enrich from the ips table
            fip_uuid = fip_info["ip_uuid"]
            fip_address = fip_info["ip_address"]
            ip = IpsModel.get_ip_by_uuid(fip_uuid)
            ip_info.update({"ip_id": ip.ip_id})
            ip_info.update({"fip_addr": fip_address})
            ip_info.update({"bandwidth": ip.bandwidth})
        info.update({"ip": ip_info})
        info['delete_datetime'] = datetime_to_timestamp(
            lb_delete_datetimes[lb_id])
        lb_list.append(info)
    return lb_list
def collect_virtual_machine_data(date, account, zone):
    """Collect one day of per-hour VM usage and persist it.

    Skips the whole day if records for it already exist. Pulls ceilometer
    memory/cpu series for every active VM, derives absolute usage from the
    flavor totals, and bulk-creates 24 hourly VirtualMachineUseRecord rows
    per VM.
    """
    # idempotence guard: hour-00 record present means this day was collected
    if VirtualMachineUseRecord.objects.filter(
            zone=zone, time=date.strftime("%Y%m%d00")).exists():
        return
    vms = get_all_vms(account, zone)
    # only collect data for running VMs
    vm_ids = [
        vm["id"] for vm in vms if vm.get("OS-EXT-STS:vm_state") == "active"
    ]
    db_vms = InstancesModel.objects.filter(seen_flag=1, deleted=False,
                                           zone=zone, uuid__in=vm_ids)
    vm_dict = {vm.uuid: vm for vm in db_vms}
    vm_ids = vm_dict.keys()
    items = [{'memory.usage': None}, {'cpu_util': None}]
    data_set = [{"item": items, "uuid": vm_id} for vm_id in vm_ids]
    # the API parameter is the END of the wanted window, so pass date + 1 day
    timestamp = datetime_to_timestamp(date + timedelta(days=1))
    payload = {
        "owner": account.user.username,
        "zone": zone.name,
        "action": "ceilometer",
        "data_set": data_set,
        "data_fmt": "one_day_data",
        "timestamp": int(timestamp),
    }
    params = ["data_fmt", "timestamp"]
    res = api.post(payload, urlparams=params)
    if res["code"] != 0:
        logger.error("%scollect_virtual_machine_data: osapi return %s" %
                     (EXCEPTION_PREFIX, res))
        return []
    data = res["data"]["ret_set"]
    records = []
    date_str = date.strftime("%Y%m%d")
    for uuid, items in data.iteritems():
        vm = vm_dict[uuid]
        # flavor totals repeated for each of the 24 hours
        vm_memory_total = [vm.instance_type.memory] * 24
        vm_cpu_total = [vm.instance_type.vcpus] * 24
        # ceilometer reports percentages; convert to 0-1 rates, keeping nulls
        vm_memory_usage_rate = list_cal(merge_value(items[0]["memory.usage"]),
                                        100, "/", null_as="return_null")
        vm_cpu_usage_rate = list_cal(merge_value(items[1]["cpu_util"]), 100,
                                     "/", null_as="return_null")
        vm_memory_used = list_cal(vm_memory_total, vm_memory_usage_rate, "*")
        vm_cpu_used = list_cal(vm_cpu_total, vm_cpu_usage_rate, "*")
        vm_disk_total = [0] * 24  # TODO lack of disk data
        vm_disk_used = [0] * 24
        for hour in range(24):
            create_params = {
                "instance": vm,
                "app_system": vm.app_system,
                "time": "%s%02d" % (date_str, hour),
                "cpu_total": vm_cpu_total[hour],
                "cpu_used": vm_cpu_used[hour],
                "memory_total": vm_memory_total[hour],
                "memory_used": vm_memory_used[hour],
                "disk_total": vm_disk_total[hour],
                "disk_used": vm_disk_used[hour],
                "zone": zone,
            }
            records.append(VirtualMachineUseRecord(**create_params))
    VirtualMachineUseRecord.objects.bulk_create(records)
def post(self, request, *args, **kwargs):
    """List images visible to the owner, shaped for the console frontend.

    Filters by hypervisor type (case-insensitive), optional image id, and
    visibility; internal fortress/waf/backup helper images are hidden.
    Returns a DRF Response with the standard console envelope.
    """
    _data = request.data
    zone = request.data["zone"]
    owner = request.data["owner"]
    hypervisor_type = request.data.get("hypervisor_type", "KVM")
    form = DescribeImagesValidator(data=_data)
    if not form.is_valid():
        return Response(
            {"code": 1, "msg": form.errors, "data": {}, "ret_code": 90001}
        )
    validated_data = form.validated_data
    image_id = validated_data.get("image_id")
    # (a large block of commented-out legacy DB-backed listing code was
    # removed here — see VCS history if it is ever needed again)
    payload = {
        "owner": owner,
        "zone": zone,
        "action": "DescribeImage"
    }
    resp = show_image_by_admin(payload)
    result = []
    # compare frontend and backend hypervisor types case-insensitively
    hypervisor_type = hypervisor_type.lower()
    for image in resp:
        image_hypervisor_type = image.get('hyper_type', "KVM").lower()
        if hypervisor_type != image_hypervisor_type:
            continue
        # private images are only visible to their owner
        if owner != image.get("image_owner") and image.get("visibility") == "private":
            continue
        if image_id is not None and image.get('id') != image_id:
            continue
        image_name = image.get("name")
        # hide internal fortress/waf/backup helper images
        if ("fortress" in image_name or "waf" in image_name
                or "bak-" in image_name):
            continue
        # if this image is actually an instance backup, show its backup name
        backup = InstanceBackupModel.get_backup_by_id(image.get("name"))
        image_name = backup.backup_name if backup else image.get("name")
        tmp_info = dict()
        tmp_info["create_datetime"] = datetime_to_timestamp(
            image.get("created_at"))
        tmp_info["image_name"] = image_name
        tmp_info["platform"] = image.get("image_type")
        tmp_info["size"] = image.get("size")
        # map glance statuses onto the console's three-state model
        if image.get("status") == "active":
            tmp_info["status"] = "available"
        elif image.get("status") == "queued":
            tmp_info["status"] = "queued"
        else:
            tmp_info["status"] = "error"
        tmp_info["system"] = image.get("image_type")
        tmp_info["image_id"] = image.get("id")
        tmp_info["min_disk"] = image.get("min_disk")
        result.append(tmp_info)
    return Response(console_response(ret_set=result))
def collect_physical_data(date, account, zone):
    """Collect one day of per-hour physical-machine usage and persist it.

    Skips the day if already collected. Each monitor item has its own
    collection interval, so items are fetched one by one, retrying while the
    data is still incomplete (the day just ended), then 24 hourly
    PhysicalMachineUseRecord rows are bulk-created per machine.
    """
    # idempotence guard: hour-00 record present means this day was collected
    if PhysicalMachineUseRecord.objects.filter(
            zone=zone, time=date.strftime("%Y%m%d00")).exists():
        return
    time_from = datetime_to_timestamp(date)
    # account = Account.objects.filter(status=AcountStatus.ENABLE).first()
    # collecting physical-machine data is full of pitfalls: every monitor
    # item has a DIFFERENT collection interval
    needed_item_interval_dict = {
        "cpu_util": 60,
        "cpu_num": 120,
        "total_mem": 1,
        "available_mem": 30,
        "disk_total_and_usage": 60,
    }
    data = {}
    for item, interval in needed_item_interval_dict.iteritems():
        while True:
            payload = {
                "owner": account.user.username,
                "zone": zone.name,
                "action": "MonitorFinancialServerNew",
                "items": item,
                "interval": interval,
                "count": 24,
                "poolname": "all",
                "time_from": int(time_from),
            }
            res = api.get(payload)
            if res["code"] != 0:
                logger.error("%scollect_physical_data(%s): osapi return %s" %
                             (EXCEPTION_PREFIX, item, res))
                return
            item_data = res["data"]["ret_set"][0]
            if not is_physical_data_valid(item_data):
                # data may simply not be ready yet: keep retrying while we
                # are within ~90 minutes past the end of the collected day
                if datetime.now() - date <= timedelta(days=1, minutes=90):
                    logger.info("collect_physical_data %s delay " % item)
                    time.sleep(120)  # retry after 2 minutes
                else:
                    logger.error(
                        "%s collect_physical_data %s, data(%s) is unavailable."
                        % (EXCEPTION_PREFIX, item, item_data))
                    return
            else:
                break
        data.update(item_data)
    convert_physical_origin_data(data)
    pms = PhysServModel.objects.all()
    machine_cabinet_dict = {pm.name: pm.cabinet for pm in pms}
    machines = data["cpu_total"].keys()
    records = []
    date_str = date.strftime("%Y%m%d")
    for m in machines:
        for hour in range(24):
            # machines without a known cabinet fall into u"其他" ("other")
            cabinet = machine_cabinet_dict.get(m, u"其他")
            create_params = {
                "hostname": m,
                "cabinet": cabinet,
                "time": "%s%02d" % (date_str, hour),
                "zone": zone,
            }
            items = [
                "cpu_total", "cpu_used", "memory_total", "memory_used",
                "disk_total", "disk_used"
            ]
            for item in items:
                create_params[item] = data[item][m][hour]
            records.append(PhysicalMachineUseRecord(**create_params))
    PhysicalMachineUseRecord.objects.bulk_create(records)
def filter_disk_backup_info(ret_set, backup_status, owner, zone,
                            availability_zone=None):
    """Shape raw disk-backup records for the console.

    Filters by availability zone (when given), backup status (when given)
    and ownership; enriches each record with console names, timestamps and
    charge mode.

    NOTE(review): returns {} instead of [] when nothing matches — callers
    appear to rely on this, so it is preserved.
    """
    BACKUP_FILTER_MAP.update({"resource_id": "volume_id"})
    bak_list = []
    for backup in ret_set:
        # records may lack 'availability_zone' entirely — those pass through
        if availability_zone is not None and 'availability_zone' in backup:
            if backup['availability_zone'] != availability_zone:
                continue
        bak = {}
        backup_ins = DiskBackupModel.get_backup_by_id(backup["name"])
        if backup_ins is None:
            # unknown to the console — skip silently
            continue
        backup_name = backup_ins.backup_name
        for k in BACKUP_FILTER_MAP.keys():
            bak[k] = backup.get(BACKUP_FILTER_MAP[k])
        resource_inst = get_resource_inst_by_uuid("disk", bak["resource_id"],
                                                  zone)
        if resource_inst is None:
            resource_id = "Unknown"
            resource_name = "Unknown"
            # retry including deleted resources to at least recover the name
            resource_inst = get_resource_inst_by_uuid("disk",
                                                      bak["resource_id"],
                                                      zone, True)
            if resource_inst:
                resource_name = getattr(resource_inst, "name")
        else:
            resource_id = getattr(resource_inst, "disk_id")
            resource_name = getattr(resource_inst, "name")
        time_str = getattr(backup_ins, 'create_datetime', '')
        timestamp = datetime_to_timestamp(time_str, use_timezone=True)
        bak.update({"create_datetime": timestamp})
        bak.update({"backup_name": backup_name})
        bak.update({"resource_id": resource_id})
        bak.update({"resource_name": resource_name})
        bak.update({"status": DISK_BAKCUP_STATUS_MAP.get(bak.get("status"))})
        bak.update({"disk_type": backup_ins.disk_type})
        bak.update({"charge_mode": getattr(backup_ins, "charge_mode")})
        # BUGFIX: was backup['availability_zone'], which raised KeyError for
        # records without the key even though the filter above tolerates them
        bak.update({"availability_zone": backup.get('availability_zone')})
        if backup_status is None or backup_status == bak["status"]:
            # keep only backups belonging to the requesting user
            user_id = User.objects.get(username=owner).id
            record_user = DiskBackupModel.get_backup_by_id(
                backup_id=bak["backup_id"])
            if record_user is not None and user_id == record_user.user.id:
                bak_list.append(bak)
    backup_list = filter(
        lambda x: DiskBackupModel.backup_exists_by_id(x["backup_id"]),
        bak_list)
    return backup_list or {}
def filter_instance_backup_info(ret_set, backup_status, owner, zone,
                                hypervisor_type=None, filter_img=None):
    """Shape raw instance-backup records for the console.

    Args:
        filter_img: whether to filter out plain images (names 'img-*').
    """
    BACKUP_FILTER_MAP.update({"resource_id": "instance_uuid"})
    bak_list = []
    for backup in ret_set:
        if filter_img and backup.get('name', '').startswith('img-'):
            continue
        if hypervisor_type == 'KVM':
            # for KVM instance backups the OSAPI data has no
            # hypervisor_type field at all
            if 'hypervisor_type' in backup:
                continue
        if hypervisor_type == 'POWERVM':
            # for POWERVM instance backups the OSAPI hypervisor_type
            # field is 'phyp'
            if 'hypervisor_type' not in backup or backup[
                    'hypervisor_type'] != 'phyp':
                continue
        # it's an image, not an instance backup
        if backup.get("instance_uuid") is None:
            continue
        bak = {}
        backup_ins = InstanceBackupModel.get_backup_by_id(backup["name"])
        if backup_ins is None:
            # unknown to the console — skip silently
            continue
        backup_name = backup_ins.backup_name
        for k in BACKUP_FILTER_MAP.keys():
            bak[k] = backup.get(BACKUP_FILTER_MAP[k])
        resource_inst = get_resource_inst_by_uuid("instance",
                                                  bak["resource_id"], zone)
        if resource_inst is None:
            resource_id = "Unknown"
            resource_name = "Unknown"
            # retry including deleted instances to at least recover the name
            resource_inst = get_resource_inst_by_uuid("instance",
                                                      bak["resource_id"],
                                                      zone, True)
            if resource_inst:
                resource_name = getattr(resource_inst, "name")
        else:
            resource_id = getattr(resource_inst, "instance_id")
            resource_name = getattr(resource_inst, "name")
        time_str = getattr(backup_ins, 'create_datetime', '')
        timestamp = datetime_to_timestamp(time_str, use_timezone=True)
        bak.update({"create_datetime": timestamp})
        bak.update({"backup_name": backup_name})
        bak.update({"resource_id": resource_id})
        bak.update({"resource_name": resource_name})
        bak.update({"charge_mode": getattr(backup_ins, "charge_mode")})
        bak.update({"platform": getattr(backup_ins, "platform")})
        bak.update({"id": getattr(backup_ins, "uuid")})
        bak.update({"can_delete": True})
        # a backup still referenced by a live instance must not be deleted
        if InstancesModel.objects.filter(backup_id=bak['backup_id'],
                                         destroyed=False).exists():
            bak.update({"can_delete": False})
        # fixed nominal sizes by platform — presumably GB; TODO confirm
        if str(backup_ins.platform) == "windows":
            bak.update({"size": 40})
        else:
            bak.update({"size": 20})
        bak.update(
            {"status": INSTANCE_BAKCUP_STATUS_MAP.get(bak.get("status"))})
        if backup_status is None or backup_status == bak["status"]:
            # keep only backups belonging to the requesting user
            user_id = User.objects.get(username=owner).id
            record_user = InstanceBackupModel.get_backup_by_id(
                backup_id=bak["backup_id"])
            if record_user is not None and user_id == record_user.user.id:
                bak_list.append(bak)
    backup_list = filter(
        lambda x: InstanceBackupModel.backup_exists_by_id(x["backup_id"]),
        bak_list)
    return backup_list
def get_instance_details(instance_info, instances, owner, zone):
    """Get instances details, including basic infos and related resources.

    Enriches each raw instance record with image, launch time, power/vm
    state, security groups, keypair, disks, networks and last backup time.
    """
    # instances filter: drop instances unknown to the console
    if isinstance(instance_info, list):
        instance_info = filter(
            lambda x: InstancesModel.instance_exists_by_uuid(x["id"]),
            instance_info)
    else:
        instance_info = [instance_info]
    # get backuptime info
    backuptime_info = get_instance_last_backuptime(zone, owner)
    subnet_uuid_info = get_subnet_uuid_info(zone, owner)
    # get ip info
    ip_info = get_ip_info(zone, owner)
    logger.debug(ip_info)
    info_list = []
    for instance in instance_info:
        info = get_instance_detail_by_uuid(instance["id"])
        # filter by requested instance ids (only when more than one given)
        if len(instances) > 1 and info["instance_id"] not in instances:
            continue
        # image
        image_uuid = instance.get("image", {}).get("id", "")
        info["image"] = get_image_info(image_uuid, zone)
        # launch time — may be empty for never-launched instances
        if "OS-SRV-USG:launched_at" in instance:
            launched_at = instance["OS-SRV-USG:launched_at"]
            if launched_at:
                launched_at = datetime_to_timestamp(
                    instance["OS-SRV-USG:launched_at"])
            info["launched_at"] = launched_at
        # power state
        if "OS-EXT-STS:power_state" in instance:
            info["power_state"] = instance["OS-EXT-STS:power_state"]
        # derive a single display status from vm_state + task_state
        ori_vm_state = instance.get("OS-EXT-STS:vm_state", None)
        ori_task_state = instance.get("OS-EXT-STS:task_state", None)
        new_status = instance_state_mapping(vm_state=ori_vm_state,
                                            task_state=ori_task_state)
        info["instance_state"] = new_status
        # security_groups
        security_groups = instance.pop("security_groups", [])
        info["security_groups"] = get_security_groups_info(
            security_groups, owner)
        # keypairs
        keypair_id = instance.pop("key_name", "")
        info["keypair"] = get_keypair_info(keypair_id)
        # volumes
        volumes_attached = list(
            instance.get("os-extended-volumes:volumes_attached", []))
        info["disks"] = get_disks_info(volumes_attached, zone)
        # nets
        addresses = instance.get("addresses", {})
        info["nets"], info["net_count"] = get_nets_info(addresses,
                                                        instance["id"],
                                                        subnet_uuid_info,
                                                        ip_info,
                                                        zone=zone,
                                                        owner=owner)
        # backup: last backup time, if any backups exist for this instance
        last_backup_time = None
        if backuptime_info is not None:
            last_backup_time = get_last_backup_time(instance["id"],
                                                    backuptime_info)
        info["last_backup_time"] = last_backup_time
        info_list.append(info)
    return info_list
def describe_ticket_process(owner, ticket_id, ticket_type, zone):
    """Build the node fill-in data describing a ticket's workflow.

    Enriches each node's operator info with account details; strips the last
    (pending) node when the caller lacks permission for it, otherwise fills
    its units that need data from other services.
    """
    ticket = None
    if ticket_id is not None:
        ticket = FinanceTicketModel.get_ticket_by_id(ticket_id)
    record_model = TicketRecordModel
    flow_instance = FlowEngine(ticket=ticket, ticket_type=ticket_type,
                               record_model=record_model)
    node_list = flow_instance.get_node_fill_info()
    # no ticket yet: permission is checked against the flow's create node
    if ticket is None:
        node_id = FlowNodeModel.get_create_node_by_type(ticket_type).node_id
    else:
        node_id = ticket.cur_node.node_id
    user = Account.objects.get(user__username=owner)
    # replace each node's raw user_id with display details of that account
    for node in node_list:
        if 'operation_usr_info' not in node:
            continue
        usr_info = node.get('operation_usr_info')
        this_user_id = usr_info.pop('user_id')
        this_user = Account.objects.get(user__username=this_user_id)
        usr_info.update({
            "name": this_user.name,
            "worker_num": this_user.worker_id,
            "phone": this_user.phone,
            "department": getattr(this_user.department, 'name', None)
        })
        operation_time = usr_info.get('operation_time')
        usr_info.update(
            {"operation_time": datetime_to_timestamp(operation_time)})
    if UserPermissionService.check_node_permissions(user, [node_id],
                                                    False)[0] is False:
        # caller may not act on the current node — hide the last node
        node_list.pop()
    else:
        # fill fields on the last node that need data from other services
        need_fill_node = node_list[-1]
        for single_unit in need_fill_node['node_combination']:
            if single_unit['unit_name'] == u'所属应用系统' or single_unit[
                    'unit_name'] == u'影响应用系统':
                # populate system-name choices from the CMDB item list
                payload = {'type': 'sys'}
                from console.finance.cmdb.helper import list_items
                resp = list_items(payload)
                for single_resp in resp['ret_set']:
                    single_unit['unit_choice_list'].append(
                        single_resp.get('name'))
                break
            elif single_unit['unit_name'] == u'配置文件内容':
                # prefill config-file content from the application named on
                # the create node, if any
                create_node_info = node_list[0]
                system_name = ''
                for create_unit in create_node_info['node_data']:
                    if create_unit['unit_name'] == u'所属应用系统':
                        system_name = create_unit['unit_fill_value']
                        break
                if system_name:
                    from console.finance.cmdb.helper import get_application_by_name
                    application = get_application_by_name(system_name)
                    single_unit['unit_fill_value'] = application['cfg']
                else:
                    single_unit['unit_fill_value'] = None
                break
    return node_list