def refresh_deployment_status(request):
    logger.debug('---- ajax refresh_deployment_status ----')

    required_fields = set(['topologyId'])
    if not required_fields.issubset(request.POST):
        return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"})

    topology_id = request.POST['topologyId']

    if topology_id == "":
        logger.debug("Found a blank topology_id, returning full hypervisor status")
        return refresh_hypervisor_status(request)

    if configuration.deployment_backend == "openstack":
        logger.info('Refresh openstack deployment status')
        return refresh_openstack_deployment_status(request, topology_id)

    domain_list = libvirtUtils.get_domains_for_topology("t" + topology_id + "_")

    network_list = []
    is_linux = False
    if osUtils.check_is_linux():
        is_linux = True
        network_list = libvirtUtils.get_networks_for_topology("t" + topology_id + "_")

    context = {
        'domain_list': domain_list,
        'network_list': network_list,
        'topologyId': topology_id,
        'isLinux': is_linux
    }
    return render(request, 'ajax/deploymentStatus.html', context)
def launch(request, topology_id):
    logger.debug('---- topology launch ----')

    try:
        topology = Topology.objects.get(pk=topology_id)
    except ObjectDoesNotExist as ex:
        logger.debug(ex)
        return render(request, 'error.html', {'error': "Topology not found!"})

    if configuration.deployment_backend == 'openstack':
        ret = av.deploy_stack(request, topology_id)
        if ret is not None:
            inventory = wistarUtils.get_topology_inventory(topology)
            wistarUtils.send_new_topology_event(inventory)
    else:
        try:
            # let's parse the json and convert to simple lists and dicts
            config = wistarUtils.load_config_from_topology_json(topology.json, topology_id)
            logger.debug("Deploying topology: %s" % topology_id)

            # this is a hack - inline deploy should be moved elsewhere, but the right
            # structure isn't really there for a middle layer other than utility and
            # view layers ... unless I want to mix utility libs
            av.inline_deploy_topology(config)

            inventory = wistarUtils.get_topology_inventory(topology)
            wistarUtils.send_new_topology_event(inventory)
        except Exception as e:
            logger.error('exception: %s' % e)
            return render(request, 'error.html', {'error': str(e)})

    if configuration.deployment_backend != 'kvm':
        # only continue in kvm case, openstack will start instances for us on deployment
        return HttpResponseRedirect('/topologies/')

    domain_list = libvirtUtils.get_domains_for_topology("t%s_" % topology_id)

    network_list = []
    if osUtils.check_is_linux():
        network_list = libvirtUtils.get_networks_for_topology("t%s_" % topology_id)

    for network in network_list:
        logger.debug("Starting network: " + network["name"])
        if libvirtUtils.start_network(network["name"]):
            time.sleep(1)
        else:
            return render(request, 'error.html', {'error': "Could not start network: " + network["name"]})

    num_domains = len(domain_list)
    iter_counter = 1
    for domain in domain_list:
        logger.debug("Starting domain " + domain["name"])
        if libvirtUtils.start_domain(domain["uuid"]):
            if iter_counter < num_domains:
                time.sleep(1)
            iter_counter += 1
        else:
            return render(request, 'error.html', {'error': "Could not start domain: " + domain["name"]})

    logger.debug("All domains started")
    messages.info(request, 'Topology %s launched successfully' % topology.name)
    return HttpResponseRedirect('/topologies/')
def preconfig_junos_domain(request):
    response_data = {"result": True, "message": "success"}

    required_fields = set(['domain', 'user', 'password', 'ip', 'mgmtInterface'])
    if not required_fields.issubset(request.POST):
        return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"})

    domain = request.POST['domain']
    user = request.POST['user']
    password = request.POST['password']
    ip = request.POST['ip']
    mgmt_interface = request.POST['mgmtInterface']

    logger.debug("Configuring domain: " + str(domain))
    try:
        # let's see if we need to kill any webConsole sessions first
        wc_dict = request.session.get("webConsoleDict")
        if wc_dict is not None and domain in wc_dict:
            wc_config = wc_dict[domain]
            wc_port = wc_config["wsPort"]
            server = request.get_host().split(":")[0]
            wistarUtils.kill_web_socket(server, wc_port)

        # FIXME - there is a bug somewhere that this can be blank ?
        if mgmt_interface == "":
            mgmt_interface = "em0"
        elif mgmt_interface == "em0":
            if not osUtils.check_is_linux():
                mgmt_interface = "fxp0"

        if user != "root":
            response_data["result"] = False
            response_data["message"] = "Junos preconfiguration user must be root!"
            return HttpResponse(json.dumps(response_data), content_type="application/json")

        if consoleUtils.preconfig_junos_domain(domain, user, password, ip, mgmt_interface):
            response_data["result"] = True
            response_data["message"] = "Success"
        else:
            response_data["result"] = False
            response_data["message"] = "Could not configure domain"

        return HttpResponse(json.dumps(response_data), content_type="application/json")

    except WistarException as we:
        logger.debug(we)
        response_data["result"] = False
        response_data["message"] = str(we)
        return HttpResponse(json.dumps(response_data), content_type="application/json")
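# A minimal sketch (not part of the view logic above) of exercising
# preconfig_junos_domain with Django's test client. The URL is an assumption
# and should be adjusted to this project's urlconf; the POST fields mirror
# the required_fields set checked by the view, and all values are hypothetical.
def _example_preconfig_junos_request():
    import json as _json

    from django.test import Client

    client = Client()
    response = client.post('/ajax/preconfigJunosDomain/', {
        'domain': 't1_vmx01',        # hypothetical libvirt domain name ("t<topologyId>_<label>")
        'user': 'root',              # the view rejects any non-root user
        'password': 'password123',   # hypothetical credentials
        'ip': '192.168.122.10',
        'mgmtInterface': 'em0',
    })
    # the view always answers with a JSON envelope: {"result": bool, "message": str}
    return _json.loads(response.content)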
def refresh_hypervisor_status(request):
    try:
        domains = libvirtUtils.list_domains()
        if osUtils.check_is_linux():
            networks = libvirtUtils.list_networks()
        else:
            networks = []

        context = {'domain_list': domains, 'network_list': networks}
        return render(request, 'ajax/deploymentStatus.html', context)
    except Exception as e:
        return render(request, 'ajax/ajaxError.html', {'error': e})
def pause_topology(request):
    required_fields = set(['topologyId'])
    if not required_fields.issubset(request.POST):
        return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"})

    topology_id = request.POST['topologyId']
    if topology_id == "":
        logger.debug("Found a blank topoId!")
        return render(request, 'ajax/ajaxError.html', {'error': "Blank Topology Id found"})

    domain_list = libvirtUtils.get_domains_for_topology("t" + topology_id + "_")

    for domain in domain_list:
        if domain["state"] == "running":
            logger.debug("Pausing domain " + domain["name"])
            libvirtUtils.suspend_domain(domain["uuid"])
            time.sleep(5)
        else:
            logger.debug("Domain %s is already shut down" % domain["name"])

    network_list = []
    if osUtils.check_is_linux():
        network_list = libvirtUtils.get_networks_for_topology("t" + topology_id + "_")

    for network in network_list:
        logger.debug("Stopping network: " + network["name"])
        if libvirtUtils.stop_network(network["name"]):
            time.sleep(1)
        else:
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not stop network: " + network["name"]})

    logger.debug("All domains paused")
    return refresh_deployment_status(request)
def deploy_topology(request):
    if 'topologyId' not in request.POST:
        return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"})

    topology_id = request.POST['topologyId']
    try:
        topo = Topology.objects.get(pk=topology_id)
    except ObjectDoesNotExist:
        return render(request, 'ajax/ajaxError.html', {'error': "Topology not found!"})

    try:
        # let's parse the json and convert to simple lists and dicts
        config = wistarUtils.load_config_from_topology_json(topo.json, topology_id)

        # FIXME - should this be pushed into another module?
        inline_deploy_topology(config)

        inventory = wistarUtils.get_topology_inventory(topo)
        wistarUtils.send_new_topology_event(inventory)
    except Exception as e:
        logger.debug("Caught Exception in deploy")
        logger.debug(str(e))
        return render(request, 'ajax/ajaxError.html', {'error': str(e)})

    domain_list = libvirtUtils.get_domains_for_topology("t" + topology_id + "_")

    network_list = []
    if osUtils.check_is_linux():
        network_list = libvirtUtils.get_networks_for_topology("t" + topology_id + "_")

    context = {
        'domain_list': domain_list,
        'network_list': network_list,
        'isLinux': True,
        'topologyId': topology_id
    }
    return render(request, 'ajax/deploymentStatus.html', context)
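# A minimal sketch of driving deploy_topology over AJAX with Django's test
# client; the URL is an assumption based on the view name. Note that render()
# answers with HTTP 200 either way: success returns the deploymentStatus
# fragment, failure returns ajax/ajaxError.html.
def _example_deploy_topology_request():
    from django.test import Client

    client = Client()
    # '1' is a hypothetical Topology primary key
    return client.post('/ajax/deployTopology/', {'topologyId': '1'})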
def inline_deploy_topology(config):
    """
    takes the topology configuration object and deploys to the appropriate hypervisor
    :param config: output of wistarUtils.load_config_from_topology_json
    :return: None, raises on error
    """
    if configuration.deployment_backend != 'kvm':
        raise WistarException('Cannot deploy to KVM, configured deployment backend is %s'
                              % configuration.deployment_backend)

    is_ovs = False
    is_linux = osUtils.check_is_linux()
    is_ubuntu = osUtils.check_is_ubuntu()

    if hasattr(configuration, "use_openvswitch") and configuration.use_openvswitch:
        is_ovs = True

    # only create networks on Linux/KVM
    logger.debug("Checking if we should create networks first!")
    if is_linux:
        for network in config["networks"]:
            network_xml_path = "ajax/kvm/network.xml"

            # do we need openvswitch here?
            if is_ovs:
                # point network_xml_path at a network configuration that defines the ovs bridge type
                network_xml_path = "ajax/kvm/network_ovs.xml"
                if not ovsUtils.create_bridge(network["name"]):
                    err = "Could not create ovs bridge"
                    logger.error(err)
                    raise Exception(err)

            try:
                if not libvirtUtils.network_exists(network["name"]):
                    logger.debug("Rendering networkXml for: %s" % network["name"])
                    network_xml = render_to_string(network_xml_path, {'network': network})
                    logger.debug(network_xml)
                    libvirtUtils.define_network_from_xml(network_xml)
                    time.sleep(.5)

                logger.debug("Starting network")
                libvirtUtils.start_network(network["name"])
            except Exception as e:
                raise Exception(str(e))

    # are we on Linux? are we on Ubuntu Linux? set the kvm emulator accordingly
    vm_env = dict()
    vm_env["emulator"] = "/usr/libexec/qemu-kvm"
    vm_env["pcType"] = "rhel6.5.0"

    # possible values for 'cache' are 'none' (default) and 'writethrough'. Use writethrough if you want
    # to mount the instances directory on a glusterFS or tmpfs volume. This might make sense if you have
    # tons of RAM and want to alleviate IO issues. If in doubt, leave it as 'none'.
    vm_env["cache"] = configuration.filesystem_cache_mode
    vm_env["io"] = configuration.filesystem_io_mode

    if is_linux and is_ubuntu:
        vm_env["emulator"] = "/usr/bin/kvm-spice"
        vm_env["pcType"] = "pc"

    # by default, we use kvm as the hypervisor
    domain_xml_path = "ajax/kvm/"
    if not is_linux:
        # if we're not on Linux, then let's try to use vbox instead
        domain_xml_path = "ajax/vbox/"

    for device in config["devices"]:
        domain_exists = False
        try:
            if libvirtUtils.domain_exists(device['name']):
                domain_exists = True
                device_domain = libvirtUtils.get_domain_by_name(device['name'])
                device['domain_uuid'] = device_domain.UUIDString()
            else:
                device['domain_uuid'] = ''

            logger.debug("Rendering deviceXml for: %s" % device["name"])

            configuration_file = device["configurationFile"]
            logger.debug("using config file: " + configuration_file)
            logger.debug(device)

            image = Image.objects.get(pk=device["imageId"])
            image_base_path = settings.MEDIA_ROOT + "/" + image.filePath.url
            instance_path = osUtils.get_instance_path_from_image(image_base_path, device["name"])

            secondary_disk = ""
            tertiary_disk = ""

            if not osUtils.check_path(instance_path):
                if device["resizeImage"] > 0:
                    logger.debug('resizing image')
                    if not osUtils.create_thick_provision_instance(image_base_path, device["name"],
                                                                   device["resizeImage"]):
                        raise Exception("Could not resize image instance for image: " + device["name"])
                else:
                    if not osUtils.create_thin_provision_instance(image_base_path, device["name"]):
                        raise Exception("Could not create image instance for image: " + image_base_path)

            if "type" in device["secondaryDiskParams"]:
                secondary_disk = wistarUtils.create_disk_instance(device, device["secondaryDiskParams"])

            if "type" in device["tertiaryDiskParams"]:
                tertiary_disk = wistarUtils.create_disk_instance(device, device["tertiaryDiskParams"])

            cloud_init_path = ''
            if device["cloudInitSupport"]:
                # grab the last interface
                management_interface = device["managementInterface"]
                # grab the prefix len from the management subnet which is in the form 192.168.122.0/24
                if '/' in configuration.management_subnet:
                    management_prefix_len = configuration.management_subnet.split('/')[1]
                else:
                    management_prefix_len = '24'

                management_ip = device['ip'] + '/' + management_prefix_len

                # domain_name, host_name, mgmt_ip, mgmt_interface
                script = ""
                script_param = ""

                roles = list()
                if 'roles' in device and type(device['roles']) is list:
                    roles = device['roles']

                if device["configScriptId"] != 0:
                    logger.debug("Passing script data!")
                    script = osUtils.get_cloud_init_template(device['configScriptId'])
                    script_param = device["configScriptParam"]

                logger.debug("Creating cloud init path for linux image")
                cloud_init_path = osUtils.create_cloud_init_img(device["name"], device["label"],
                                                                management_ip, management_interface,
                                                                device["password"], script, script_param,
                                                                roles)
                logger.debug(cloud_init_path)

            device_xml = render_to_string(domain_xml_path + configuration_file, {
                'device': device,
                'instancePath': instance_path,
                'vm_env': vm_env,
                'cloud_init_path': cloud_init_path,
                'secondary_disk_path': secondary_disk,
                'tertiary_disk_path': tertiary_disk,
                'use_ovs': is_ovs
            })
            logger.debug(device_xml)
            libvirtUtils.define_domain_from_xml(device_xml)

            if not domain_exists:
                logger.debug("Reserving IP with dnsmasq")
                management_mac = libvirtUtils.get_management_interface_mac_for_domain(device["name"])
                logger.debug('got management mac')
                logger.debug(management_mac)
                libvirtUtils.reserve_management_ip_for_mac(management_mac, device["ip"], device["name"])
                logger.debug('management ip is reserved for mac')

        except Exception as ex:
            logger.warn("Raising exception")
            logger.error(ex)
            logger.error(traceback.format_exc())
            raise Exception(str(ex))