def _get_used_ip_info(subnet_id, tenant_id):
    """Collect usage info for every allocated IP on a subnet.

    For each used IP reported by neutron, resolve the owning VM's name
    either from nova (matching the port's device_id against instance
    uuids) or, for VMs still being created, from a pending task flow
    whose requested port matches the port id.

    :param subnet_id: id of the subnet to inspect
    :param tenant_id: tenant owning the subnet
    :raises: re-raises any underlying lookup error after logging it
    """
    try:
        network_used_ips = yield query_subnet_ips_used_neutron(subnet_id,
                                                               tenant_id)
        ipused_info = []
        device_ids = []
        # port id -> task resource name, for VMs that are still booting
        port_vm = {}
        info = yield get_task_flow()
        for item in info:
            all_net_data = item.get("body", {}).get('server',
                                                    {}).get('networks', {})
            for data in all_net_data:
                port_vm[data.get("port")] = item.get("resource")
        for tmp in network_used_ips:
            if tmp["device_id"]:
                device_ids.append(tmp["device_id"])
        vm = yield query_instance_nova(tuple(device_ids))
        for tmp in network_used_ips:
            vm_name = ""
            for item in vm:
                if tmp["device_id"] == item["uuid"]:
                    vm_name = item["vm"]
                elif tmp["id"] in port_vm:
                    # NOTE(review): only the pending-task match stops the
                    # scan; a nova match keeps iterating -- confirm intended.
                    vm_name = port_vm[tmp['id']]
                    break
            ipused_info.append({
                "used": True,
                "dhcp": tmp["ipdes"] == "network:dhcp",
                "ip": tmp["ip"],
                "vm": vm_name,
                "port": tmp["id"]
            })
        # NOTE(review): ipused_info is built but never returned in the
        # visible code -- confirm whether `raise gen.Return(ipused_info)`
        # is missing here.
    except Exception as e:
        LOG.error("get ips of used error: %s" % e)
        raise e
def execute(self):
    """Handle the 'instance create end' notification.

    On success (state == "active") the create task flow is marked
    finished; on failure the ports reserved for the VM are released and
    the task flow is dropped.  Attached ISO volumes are then marked
    available again and, when a system volume exists, the image
    metadata from the message is copied onto it.

    Errors are logged and swallowed (best-effort event handler).
    """
    try:
        name = self._message.get("hostname")
        vm_id = self._message.get("instance_id")
        status = self._message.get("state")
        LOG.debug("vm create end name is %s status is %s", name, status)
        t_obj = yield task.get_task_flow(resource=name)
        if t_obj:
            if status == "active":
                # status 4: task flow finished successfully
                yield task.update_task_flow_status(t_obj[0].get("id"),
                                                   status=4)
            else:
                # boot failed: free the ports reserved for the VM, then
                # drop the task flow
                networks = yield get_vms_nics(
                    self._message.get("instance_id"))
                for network in networks:
                    port_id = network.get("port_id")
                    yield network_module.request_delete_ports(
                        self._message.get("tenant_id"), port_id)
                yield task.delete_task_flow(t_obj[0].get("id"))
        # NOTE(review): the remainder is assumed to run regardless of the
        # create outcome -- confirm against the original indentation.
        # vd_type=3: ISO volumes -- mark them available again
        iso_list = yield list_server_attach_volume(vm_id, vd_type=3)
        if iso_list:
            for iso in iso_list:
                yield _update_iso_volume_available(iso.get("volume_id"))
        # vd_type=1: system volume -- propagate image metadata onto it
        sys = yield list_server_attach_volume(vm_id, vd_type=1)
        if sys:
            image_meta = self._message.get("image_meta")
            for k, v in image_meta.items():
                yield update_volume_image_metadata(sys[0].get("volume_id"),
                                                   k, v)
    except Exception as e:
        LOG.error("instance create end process error %s" % e)
def execute(self):
    """Handle the 'image volume create end' notification.

    When the image (cache) volume is ready, create the system volume
    for every task flow that was waiting on it, cloning from the image
    volume -- except for ISO-based tasks, which get a blank volume
    because they boot and install from the ISO instead.

    Errors are logged and swallowed (best-effort event handler).
    """
    try:
        name = self._message.get("display_name")
        status = self._message.get("status")
        source_volid = self._message.get("volume_id")
        LOG.debug(
            "image volume create end name is %s status is %s volume_id is %s",
            name, status, source_volid)
        # the image volume carries the waiting task's id in its metadata
        meta = yield get_metadata(source_volid)
        task_id = meta.get("task_id")
        t_obj = yield task.get_task_flow(task_id=task_id)
        for t in t_obj:
            volume = t.get("volume")
            LOG.debug("create vm sys volume is %s", volume)
            if t.get("is_iso"):
                # NOTE(review): this clears source_volid for every later
                # iteration as well, so tasks after the first ISO one also
                # get blank volumes -- confirm that is intended.
                source_volid = None
            yield volume_create(
                size=volume.get("size"),
                tenant_id=volume.get("tenant_id"),
                snapshot_id=None,
                source_volid=source_volid,
                name=volume["name"],
                description=str(1),
                volume_type=volume['volume_type'],
                source_replica=None,
                metadata=None,
                project_id=volume['tenant_id'],
                image_ref=None,
                availability_zone=volume.get('availability_zone'))
    except Exception as e:
        LOG.error("image volume create end create sys volume error %s" % e)
def execute(self):
    """Handle the 'drive volume create end' notification.

    Record the created drive volume on its waiting task flow and
    advance the task's scheduling status: PREPARE_SUCCESS when the
    volume became available (or stay in PREPARE when a windows ISO
    install is still waiting for its system volume), PREPARE_FAIL when
    the volume ended up in any other state.

    Errors are logged and swallowed (best-effort event handler).
    """
    try:
        name = self._message.get("display_name")
        status = self._message.get("status")
        volume_id = self._message.get("volume_id")
        message = {"sys_volume_id": "", "drive_volume_id": ""}
        LOG.debug(
            "drive volume create end name is %s status is %s volume_id is %s",
            name, status, volume_id)
        # the drive volume carries the waiting task's id in its metadata
        meta = yield get_metadata(volume_id)
        task_id = meta.get("task_id")
        t_obj = yield task.get_task_flow(task_id=task_id)
        if t_obj:
            if status == "available":
                message["sys_volume_id"] = t_obj[0].get("sys_volume_id")
                message["drive_volume_id"] = volume_id
                sched_status = SCHED_STATUS_PREPARE_SUCCESS
                if t_obj[0].get("is_iso") and t_obj[0].get("is_windows") and \
                        message["sys_volume_id"] == "":
                    # windows ISO installs also need a system volume; keep
                    # the task in PREPARE until it shows up
                    sched_status = SCHED_STATUS_PREPARE
                yield task.update_task_flow(
                    t_obj[0].get("id"), status=sched_status,
                    message=simplejson.dumps(message))
            else:
                yield task.update_task_flow_status(
                    t_obj[0].get("id"), status=SCHED_STATUS_PREPARE_FAIL)
    except Exception as e:
        # NOTE(review): the message says "sysvolume clone end" but this
        # handler processes drive volume creation -- looks like a
        # copy/paste slip; left as-is pending confirmation.
        LOG.error("sysvolume clone end update task status error %s" % e)
def execute(self):
    """Finish a VM reboot: mark its task flow done, or drop it.

    When the instance reports "active" the task flow is marked
    finished (status 4); any other end state discards the task flow.
    Errors are logged and swallowed (best-effort event handler).
    """
    try:
        vm_resource = self._message.get("instance_id")
        vm_state = self._message.get("state")
        LOG.debug("vm reboot end name is %s status is %s",
                  vm_resource, vm_state)
        flows = yield task.get_task_flow(resource=vm_resource)
        if not flows:
            return
        flow_id = flows[0].get("id")
        if vm_state == "active":
            # reboot succeeded: mark the task flow finished (status 4)
            yield task.update_task_flow_status(flow_id, status=4)
        else:
            yield task.delete_task_flow(flow_id)
    except Exception as e:
        LOG.error("vm reboot end process error %s" % e)
def delete_server(self, vm_id, delete_volume_ids=None):
    """
    Delete a VM together with selected attached volumes.

    A vm_id starting with "vm-" denotes a VM that only exists as a
    pending create task flow; its details are read from the task
    instead of nova.

    :param vm_id: id of the VM to delete ("vm-..." for pending tasks)
    :param delete_volume_ids: comma separated string of attached volume
        ids to delete along with the VM (optional)
    :raises VmNotExist: when a real (non-pending) VM cannot be found
    """
    try:
        vm_info = {}
        if vm_id.startswith("vm-"):
            # pending VM: its description lives on the create task flow
            server_task = yield get_task_flow(name=vm_id)
            if server_task:
                server_task = server_task[0]
                params = server_task.get("param")
                server_info = params["body"]["server"]
                vm_info["network"] = server_info["networks"]
                vm_info["name"] = server_info["name"]
                vm_info["displayname"] = server_info["displayname"]
        else:
            vm_infos = yield compute.list_server(vm_ids=vm_id,
                                                 with_task=False)
            if not vm_infos:
                raise VmNotExist(args=['vm_id', vm_id])
            vm_info = vm_infos[0]
        info = vm_info
        if delete_volume_ids:
            delete_volume_ids = delete_volume_ids.split(",")
        else:
            delete_volume_ids = []
        if not vm_id.startswith("vm-"):
            # real VM: clean up snapshots of the VM and of every volume
            # that is going to be deleted, logging each volume deletion
            # NOTE(review): nesting reconstructed from flattened source --
            # the volume-snapshot cleanup is assumed to apply only to
            # real VMs; confirm against the original indentation.
            yield snapshot.clean_vm_or_volume_snapshot(info["name"])
            if delete_volume_ids:
                del_volumes = yield list_volume(detailed=False,
                                                volume_id=delete_volume_ids)
                for del_volume_item in del_volumes:
                    yield snapshot.clean_vm_or_volume_snapshot(
                        del_volume_item["name"])
                    optLog.write(self.request, Type.VDISK,
                                 str(del_volume_item["name"]),
                                 Operator.DELETE,
                                 str(del_volume_item["name"]))
        yield del_server(vm_id, delete_volume_ids=delete_volume_ids)
        # audit log the VM deletion, including its network layout
        optLog.write(
            self.request, Type.VM, str(info["name"]), Operator.DELETE,
            str(info["displayname"]) + " " +
            gen_network_comment(info['network'].values()))
        self.response(Response())
    except Exception as e:
        LOG.error("delete_server error: %s" % e)
        raise e
def list_server(vm_ids=None, tenant_ids=None, vlan_id=None, subnet_id=None,
                hosts=None, user_ids=None, batch=None, detailed=True,
                with_task=True):
    """
    List servers, optionally merged with pending create/control tasks.

    :param vlan_id: unused in the visible portion of this function
    :param subnet_id: when given, collect the subnet's used IPs
    :param vm_ids: restrict to these server ids
    :param tenant_ids: restrict to these tenants
    :param hosts: restrict to these hypervisor hosts
    :param user_ids: restrict to these users
    :param batch: batch filter passed through to server_list
    :param detailed: when True, also load nics and server metadata
    :param with_task: when True, include pending create tasks
    :return: list of server dicts (via gen.Return)
    """
    vm_ips = []
    if subnet_id:
        subnet_ips = yield get_subnet_ips(subnet_id)
        vm_ips = [subnet["ip"] for subnet in subnet_ips["ipused"]]
    out_servers = []
    servers = yield server_list(server_ids=vm_ids, tenant_ids=tenant_ids,
                                hosts=hosts, user_ids=user_ids, batch=batch)
    # sort by the numeric components of the "xx-N[-M]" server name,
    # newest first
    servers = sorted(
        servers,
        key=lambda d: (int(d["name"].split("-")[1]),
                       int(d["name"].split("-")[2])
                       if len(d["name"].split("-")) > 2 else 0),
        reverse=True)
    servers_name = [server_item["name"] for server_item in servers]
    all_tasks = yield task.get_task_flow()
    # split task flows into pending creates vs pending start/reboots
    servers_task = []
    vm_control_task = []
    vm_control_task_dict = {}
    for t_obj in all_tasks:
        status = t_obj.get("status")
        task_type = t_obj.get("type")
        if task_type == SCHED_TYPE_ACTIONG_CREATE \
                and status < SCHED_STATUS_RUN_SUCCESS:
            servers_task.append(t_obj)
        elif task_type in (SCHED_TYPE_ACTIONG_START,
                           SCHED_TYPE_ACTIONG_REBOOT) \
                and status < SCHED_STATUS_RUNNING:
            vm_control_task.append(t_obj)
    if with_task:
        # servers_task = yield task.get_task_flow(type=0)
        if not servers and not servers_task:
            raise gen.Return(out_servers)
    else:
        if not servers:
            raise gen.Return(out_servers)
    if not vm_ids:
        vm_ids = [server_item["id"] for server_item in servers]
    # vm id -> pending control task type, for status decoration
    for c_task_item in vm_control_task:
        vm_control_task_dict[c_task_item["resource"]] = c_task_item["type"]
    meta = {}
    network_nics = {}
    if detailed:
        network_nics = yield get_vms_nics()
        servers_meta = yield servers_metadata(vm_ids)
        # build meta[server_id][meta_key]; values were stored as python
        # reprs, so try eval() first and fall back to the raw string
        # NOTE(review): eval() on stored metadata executes arbitrary
        # expressions -- safe only if metadata is trusted; verify.
        for meta_item in servers_meta:
            if meta_item["server_id"] in meta:
                try:
                    meta[meta_item["server_id"]][meta_item["meta_key"]] = eval(
                        meta_item["meta_value"])
                except Exception, e:
                    meta[meta_item["server_id"]][
                        meta_item["meta_key"]] = meta_item["meta_value"]
            else:
                meta[meta_item["server_id"]] = {}
                try:
                    meta[meta_item["server_id"]][meta_item["meta_key"]] = eval(
                        meta_item["meta_value"])
                except Exception, e:
                    meta[meta_item["server_id"]][
                        meta_item["meta_key"]] = meta_item["meta_value"]
    # NOTE(review): the function appears truncated here (vm_ips,
    # servers_name and network_nics are never used and out_servers is
    # never populated) -- the tail was likely lost; confirm upstream.
def del_server(vm_id_or_name, delete_volume_ids=list()):
    """
    Delete a VM, or a pending create task flow for one.

    A name starting with "vm-" denotes a pending create: its task flow
    is removed and the tenant's cpu/memory quota usage is recomputed.
    Otherwise the real VM's volumes are put into 'deleting', its data
    volumes are detached, the VM is force-deleted and its ports freed.

    :param vm_id_or_name: VM id, or "vm-..." task resource name
    :param delete_volume_ids: list of attached volume ids to delete
        (NOTE(review): mutable default argument -- harmless here since
        it is only iterated, but fragile)
    :return: nothing
    """
    if vm_id_or_name.startswith("vm-"):
        server_task = yield task.get_task_flow(resource=vm_id_or_name)
        if server_task:
            server_task = server_task[0]
            task_id = server_task.get("id")
            # NOTE(review): delete_server (above) reads the body via
            # server_task.get("param")["body"]; here it is indexed
            # directly -- one of the two is probably wrong; verify.
            server_info = server_task["body"]["server"]
            networks = server_info["networks"]
            LOG.debug("vm %s delete task", vm_id_or_name)
            yield task.delete_task_flow(task_id)
            tenant_id = server_task.get('tenant')
            # recompute and write back the tenant's used quota now that
            # the pending VM no longer counts against it
            LOG.debug("vm %s free quotas", vm_id_or_name)
            used_quotas = yield get_tenant_quota(tenant_id=tenant_id)
            yield update_tenant_vm_quotas(
                tenant_id=tenant_id,
                used_cores=used_quotas["used_cores"],
                used_memory=used_quotas["used_memorys"])
    else:
        try:
            vm = yield get_server(vm_id_or_name, detailed=False)
            for volume_id in delete_volume_ids:
                LOG.debug("delete vm %s with volume %s set deleting status",
                          vm.get("name"), volume_id)
                yield set_volume_status(volume_id, status='deleting')
            if str(vm.get("vm_state")) != 'error':
                LOG.debug("delete vm %s set deleting status", vm.get("name"))
                yield set_or_update_vm_meta(vm_id_or_name, meta_key="status",
                                            meta_value="deleting")
                # vd_type=0: data volumes still attached to the VM
                attach_volumes = yield list_server_attach_volume(
                    vm_id_or_name, vd_type=0)
                if attach_volumes:
                    # detach each data volume first; the actual VM delete
                    # presumably continues once detaching completes
                    for attach_volume in attach_volumes:
                        # if int(attach_volume.get("type")) == TYPE_VDISK:
                        LOG.debug("detach vm %s with volume %s ",
                                  vm.get("name"),
                                  attach_volume['volume_id'])
                        yield set_volume_attach_vm_id(
                            attach_volume.get("volume_id"),
                            attach_vm_id=vm_id_or_name)
                        yield detach_server_volume(
                            vm_id_or_name, attach_volume['volume_id'])
                else:
                    LOG.debug("force delete vm %s ", vm.get("name"))
                    yield server_force_delete(vm_id_or_name)
            else:
                # VM already in error state: skip detaching, just delete
                LOG.debug("force delete vm %s ", vm.get("name"))
                yield server_force_delete(vm_id_or_name)
            try:
                networks = yield get_vms_nics(vm.get("id"))
                for network in networks:
                    port_id = network.get("port_id")
                    yield network_module.request_delete_ports(
                        vm.get("tenant_id"), port_id)
            except Exception as e:
                LOG.error("delete ports error: %s" % e)
                raise DeletePortsFailed
        except Exception as e:
            # on any failure, best-effort reset of the VM's meta status
            try:
                yield set_or_update_vm_meta(vm_id_or_name,
                                            meta_key="status",
                                            meta_value="")
            except Exception as e:
                # NOTE(review): re-raising here rebinds e and skips the
                # LOG.error below, so the original error is lost; verify.
                raise e
            LOG.error("del vm error: %s" % e)
"display_name": user_obj["displayname"] } except Exception: pass network = {} try: networks = yield get_vms_nics(vm_id) for network_item in networks: if network_item["name"] not in network: network[network_item["name"]] = [] network[network_item["name"]].append(network_item["ip"]) except Exception: networks = [] network = {} vm_tasks = yield task.get_task_flow(resource=vm_id) vm_contorl_task_status = None for vm_tasks_item in vm_tasks: status = vm_tasks_item.get("status") task_type = vm_tasks_item.get("type") if task_type in (SCHED_TYPE_ACTIONG_START, SCHED_TYPE_ACTIONG_REBOOT) \ and status < SCHED_STATUS_RUNNING: vm_contorl_task_status = vm_tasks_item["type"] meta_status = meta.get("status", "") status = gen_server_status(server["vm_state"], server["task_state"], control_task_state=vm_contorl_task_status, meta_status=meta_status) extend = meta.get("extend", {}) recover_status = meta.get("recover_status", "")
def create_reboot_start_schedule():
    """Periodic scheduler: dispatch pending create/start/reboot tasks.

    Groups task flows by target host, throttles each host to
    CONF.compute.max_booting concurrently running tasks, garbage
    collects successfully finished tasks older than
    CONF.compute.boot_interval, and fills every free slot with one
    pending task (creates first, then starts, then reboots).

    Errors are logged with a traceback and swallowed so the periodic
    loop keeps running.
    """
    LOG.debug("*************************************************")
    LOG.debug("***********    Compute Control Start ************")
    LOG.debug("*************************************************")
    try:
        all_tasks = yield task.get_task_flow()
        host_schedule = {}
        for t_obj in all_tasks:
            host = t_obj.get("host")
            status = t_obj.get("status")
            task_type = t_obj.get("type")
            if host not in host_schedule:
                host_schedule[host] = {
                    "need_create_tasks": [],
                    "need_reboot_tasks": [],
                    "need_start_tasks": [],
                    "running_num": 0
                }
            # count both running and just-finished tasks against the
            # host's booting budget
            if status in (SCHED_STATUS_RUNNING, SCHED_STATUS_RUN_SUCCESS):
                host_schedule[host]["running_num"] += 1
            if status == SCHED_STATUS_PREPARE_SUCCESS \
                    and task_type == SCHED_TYPE_ACTIONG_CREATE:
                host_schedule[host]["need_create_tasks"].append(t_obj)
            if status == SCHED_STATUS_PREPARE \
                    and task_type == SCHED_TYPE_ACTIONG_START:
                host_schedule[host]["need_start_tasks"].append(t_obj)
            if status == SCHED_STATUS_PREPARE \
                    and task_type == SCHED_TYPE_ACTIONG_REBOOT:
                host_schedule[host]["need_reboot_tasks"].append(t_obj)
            if status == SCHED_STATUS_RUN_SUCCESS:
                now = datetime.datetime.now()
                old = t_obj.get("updated_at")
                # NOTE(review): .seconds ignores the days component of
                # the timedelta, so tasks older than a day may escape
                # cleanup -- total_seconds() is probably what was meant.
                if (now - old).seconds > CONF.compute.boot_interval:
                    yield task.delete_task_flow(t_obj.get("id"))
        for k, v in host_schedule.items():
            need_running = CONF.compute.max_booting - v.get("running_num")
            if need_running > 0:
                for i in range(0, need_running):
                    # creates take priority over starts, starts over
                    # reboots; one task consumed per free slot
                    if v["need_create_tasks"]:
                        t = v["need_create_tasks"].pop()
                        LOG.debug("boot vm name is %s status is %s "
                                  % (t['resource'], t['status']))
                        # update_task_flow_status returning a row count
                        # doubles as a claim on the task
                        row = yield task.update_task_flow_status(
                            t.get("id"), status=SCHED_STATUS_RUNNING)
                        if row:
                            yield __boot_vm(t)
                        continue
                    if v["need_start_tasks"]:
                        t = v["need_start_tasks"].pop()
                        vm_id = t.get("resource")
                        need_reboot = yield get_server_metas(vm_id)
                        LOG.debug("start vm name is %s status is %s ",
                                  vm_id, t['status'])
                        row = yield task.update_task_flow_status(
                            t.get("id"), status=SCHED_STATUS_RUNNING)
                        if row:
                            # vd_type=3: detach any ISO volumes first; the
                            # start itself is apparently issued later, once
                            # detaching has finished -- confirm.
                            iso_list = yield list_server_attach_volume(
                                vm_id, vd_type=3)
                            if iso_list:
                                for iso in iso_list:
                                    volume_id = iso.get("volume_id")
                                    yield _detach_iso_volume(volume_id,
                                                             vm_id)
                            else:
                                if need_reboot.get(NEED_REBOOT):
                                    # a pending hard reboot supersedes the
                                    # plain start request
                                    yield server_action(
                                        vm_id, Control.REBOOT,
                                        info={"type": "HARD"})
                                    yield del_server_meta(vm_id,
                                                          [NEED_REBOOT])
                                else:
                                    yield server_action(vm_id,
                                                        Control.START)
                        continue
                    if v["need_reboot_tasks"]:
                        t = v["need_reboot_tasks"].pop()
                        vm_id = t.get("resource")
                        LOG.debug("reboot vm name is %s status is %s ",
                                  t.get("resource"), t['status'])
                        row = yield task.update_task_flow_status(
                            t.get("id"), status=SCHED_STATUS_RUNNING)
                        if row:
                            iso_list = yield list_server_attach_volume(
                                vm_id, vd_type=3)
                            if iso_list:
                                for iso in iso_list:
                                    volume_id = iso.get("volume_id")
                                    yield _detach_iso_volume(volume_id,
                                                             vm_id)
                            else:
                                yield server_action(t.get("resource"),
                                                    Control.REBOOT,
                                                    info={"type": "SOFT"})
                        continue
        LOG.debug("*************************************************")
        LOG.debug("***********    Compute Control End   ************")
        LOG.debug("*************************************************")
    except Exception as e:
        LOG.error(trace())
        LOG.error(" create start reboot schedule error %s" % e)