def start_topology(request):
    """
    Deploy (if necessary) and start all networks and domains for a topology.

    Expects a JSON request body of the form: [{"name": "<topology name>"}].

    :param request: Django request whose body is a JSON list of objects
    :return: JSON response built by apiUtils.return_json
    """
    logger.debug("---- start_topology ---- ")
    json_string = request.body
    try:
        json_body = json.loads(json_string)
    except ValueError:
        # FIX: an unparsable body previously raised out of the view (HTTP 500);
        # return a clean API error instead
        return apiUtils.return_json(False, "Could not parse json payload")

    try:
        if "name" not in json_body[0]:
            # FIX: previously fell through with no return value (None is not a
            # valid Django response)
            return apiUtils.return_json(False, "Missing required parameter: name")

        topology_name = json_body[0]["name"]

        # get the topology by name
        topology = Topology.objects.get(name=topology_name)

        domain_list = libvirtUtils.get_domains_for_topology(
            "t" + str(topology.id) + "_")
        if len(domain_list) == 0:
            # it has not yet been deployed!
            logger.debug("not yet deployed!")
            # let's parse the json and convert to simple lists and dicts
            config = wistarUtils.load_config_from_topology_json(
                topology.json, topology.id)
            if config is None:
                # FIX: apiUtils is a module - call its return_json helper
                # instead of calling the module itself (was a TypeError)
                return apiUtils.return_json(
                    False,
                    "Could not load config for topology: %s" % topology.id)

            logger.debug("Deploying to hypervisor now")
            # FIXME - should this be pushed into another module?
            av.inline_deploy_topology(config)

        # now, topology should be deployed and ready to go!
        network_list = libvirtUtils.get_networks_for_topology(
            "t" + str(topology.id) + "_")
        domain_list = libvirtUtils.get_domains_for_topology(
            "t" + str(topology.id) + "_")

        for network in network_list:
            logger.debug("starting network: %s" % network["name"])
            libvirtUtils.start_network(network["name"])
            time.sleep(.5)

        for domain in domain_list:
            # brief pause between domain starts to avoid disk I/O storms
            time.sleep(.5)
            logger.debug("starting domain: %s" % domain["uuid"])
            libvirtUtils.start_domain(domain["uuid"])

        return apiUtils.return_json(True, 'Topology started!',
                                    topology_id=topology.id)

    except Topology.DoesNotExist:
        return apiUtils.return_json(False, 'Topology Does not Exist')
    except Exception as ex:
        logger.debug(str(ex))
        return apiUtils.return_json(False, 'Could not start topology!')
def start_topology(request):
    """
    Start all networks and domains belonging to an already-deployed topology.

    Reads 'topologyId' (required) and 'delay' (optional, seconds to wait
    between domain starts, default 180) from the POST data.
    """
    if 'topologyId' not in request.POST:
        return render(request, 'ajax/ajaxError.html',
                      {'error': "Invalid Parameters in POST"})

    topology_id = request.POST['topologyId']
    try:
        delay = int(request.POST.get('delay', '180'))
    except ValueError:
        delay = 180

    if topology_id == "":
        logger.debug("Found a blank topoId!")
        return render(request, 'ajax/ajaxError.html',
                      {'error': "Blank Topology Id found"})

    object_prefix = "t" + topology_id + "_"
    domain_list = libvirtUtils.get_domains_for_topology(object_prefix)

    # networks only exist on the kvm backend
    network_list = []
    if configuration.deployment_backend == "kvm":
        network_list = libvirtUtils.get_networks_for_topology(object_prefix)

    for network in network_list:
        logger.debug("Starting network: " + network["name"])
        if not libvirtUtils.start_network(network["name"]):
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not start network: " + network["name"]})
        time.sleep(1)

    total_domains = len(domain_list)
    for position, domain in enumerate(domain_list, start=1):
        logger.debug("Starting domain " + domain["name"])

        # skip already started domains
        if libvirtUtils.is_domain_running(domain["name"]):
            logger.debug("domain %s is already started" % domain["name"])
            continue

        if not libvirtUtils.start_domain(domain["uuid"]):
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not start domain: " + domain["name"]})

        # no need to pause after the final domain has been started
        if position < total_domains:
            time.sleep(delay)

    logger.debug("All domains started")
    return refresh_deployment_status(request)
def launch(request, topology_id):
    """
    Deploy and start the given topology, then redirect to the topology list.

    :param request: Django request
    :param topology_id: primary key of the Topology to launch
    :return: HttpResponseRedirect on success, rendered error page on failure
    """
    logger.debug('---- topology launch ----')
    try:
        topology = Topology.objects.get(pk=topology_id)
    except ObjectDoesNotExist as ex:
        logger.debug(ex)
        return render(request, 'error.html', {'error': "Topology not found!"})

    if configuration.deployment_backend == 'openstack':
        ret = av.deploy_stack(request, topology_id)
        if ret is not None:
            inventory = wistarUtils.get_topology_inventory(topology)
            wistarUtils.send_new_topology_event(inventory)
        # FIX: openstack starts instances for us on deployment, so do not fall
        # through to the kvm inline deploy below (inline_deploy_topology raises
        # for non-kvm backends), and return a real HttpResponse rather than None
        messages.info(request, 'Topology %s launched successfully' % topology.name)
        return HttpResponseRedirect('/topologies/')

    try:
        # let's parse the json and convert to simple lists and dicts
        config = wistarUtils.load_config_from_topology_json(topology.json, topology_id)
        logger.debug("Deploying topology: %s" % topology_id)
        # this is a hack - inline deploy should be moved elsewhere
        # but the right structure isn't really there for a middle layer other
        # than utility and view layers ... unless I want to mix utility libs
        av.inline_deploy_topology(config)
        inventory = wistarUtils.get_topology_inventory(topology)
        wistarUtils.send_new_topology_event(inventory)
    except Exception as e:
        logger.error('exception: %s' % e)
        return render(request, 'error.html', {'error': str(e)})

    if configuration.deployment_backend != 'kvm':
        # only continue in kvm case; other backends start instances on deployment
        # FIX: was a bare 'return' (None), which is not a valid Django response
        return HttpResponseRedirect('/topologies/')

    domain_list = libvirtUtils.get_domains_for_topology("t%s_" % topology_id)
    network_list = []
    if osUtils.check_is_linux():
        network_list = libvirtUtils.get_networks_for_topology("t%s_" % topology_id)

    for network in network_list:
        logger.debug("Starting network: " + network["name"])
        if libvirtUtils.start_network(network["name"]):
            time.sleep(1)
        else:
            return render(request, 'error.html',
                          {'error': "Could not start network: " + network["name"]})

    num_domains = len(domain_list)
    iter_counter = 1
    for domain in domain_list:
        logger.debug("Starting domain " + domain["name"])
        if libvirtUtils.start_domain(domain["uuid"]):
            # no pause needed after the last domain has been started
            if iter_counter < num_domains:
                time.sleep(1)
            iter_counter += 1
        else:
            return render(request, 'error.html',
                          {'error': "Could not start domain: " + domain["name"]})

    logger.debug("All domains started")
    messages.info(request, 'Topology %s launched successfully' % topology.name)
    return HttpResponseRedirect('/topologies/')
def manage_network(request):
    """
    Start, stop, or undefine a libvirt network.

    POST parameters: networkName, action (start|stop|undefine), topologyId.

    :return: refreshed deployment status on success, ajax error page otherwise
    """
    required_fields = set(['networkName', 'action', 'topologyId'])
    if not required_fields.issubset(request.POST):
        return render(request, 'ajax/ajaxError.html',
                      {'error': "Invalid Parameters in POST"})

    network_name = request.POST['networkName']
    action = request.POST['action']

    if action == "start":
        if libvirtUtils.start_network(network_name):
            return refresh_deployment_status(request)
        else:
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not start network!"})
    elif action == "stop":
        if libvirtUtils.stop_network(network_name):
            return refresh_deployment_status(request)
        else:
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not stop network!"})
    elif action == "undefine":
        # clean up ovs bridges if needed
        use_ovs = bool(getattr(configuration, "use_openvswitch", False))

        if libvirtUtils.undefine_network(network_name):
            if use_ovs:
                ovsUtils.delete_bridge(network_name)
            return refresh_deployment_status(request)
        else:
            # FIX: message previously said "Could not stop domain!" - this
            # branch fails when the *network* cannot be undefined
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not undefine network!"})
    else:
        return render(request, 'ajax/ajaxError.html',
                      {'error': "Unknown Parameters in POST!"})
def start_topology_old(request):
    """
    DEPRECATED
    verify the topology exists and is started!
    required parameters: topology_name, id of which to clone, cloud_init data
    returns json { "status": "running|unknown|powered off", "topology_id": "0" }
    """
    # default response payload; "status" is overwritten on each exit path
    context = {"status": "unknown"}
    required_fields = set(
        ['topology_name', 'clone_id', 'script_id', 'script_param'])
    if not required_fields.issubset(request.POST):
        context["status"] = "unknown"
        context["message"] = "Invalid parameters in POST"
        return HttpResponse(json.dumps(context), content_type="application/json")
    topology_name = request.POST['topology_name']
    clone_id = request.POST['clone_id']
    script_id = request.POST['script_id']
    script_param = request.POST['script_param']
    try:
        # get the topology by name
        topo = Topology.objects.get(name=topology_name)
    except ObjectDoesNotExist:
        # uh-oh! it doesn't exist, let's clone it and keep going
        # clone the topology with the new name specified!
        # NOTE(review): a bad clone_id here raises out of the view uncaught -
        # confirm callers always pass a valid pk
        topology = Topology.objects.get(pk=clone_id)
        # get a list of all the currently used IPs defined
        all_used_ips = wistarUtils.get_used_ips()
        logger.debug(str(all_used_ips))
        raw_json = json.loads(topology.json)
        for json_object in raw_json:
            if "userData" in json_object and "wistarVm" in json_object[
                    "userData"]:
                # rewrite each VM's management IP so the clone does not
                # collide with existing topologies
                ud = json_object["userData"]
                ip = ud["ip"]
                ip_octets = ip.split('.')
                # get the next available ip
                next_ip = wistarUtils.get_next_ip(all_used_ips, 2)
                # mark it as used so it won't appear in the next iteration
                all_used_ips.append(next_ip)
                # only the final octet is replaced - assumes a /24-style
                # management network; TODO confirm
                ip_octets[3] = str(next_ip)
                newIp = ".".join(ip_octets)
                ud["ip"] = newIp
                ud["configScriptId"] = script_id
                ud["configScriptParam"] = script_param
        description = "Clone from: %s\nScript Id: %s\nScript Param: %s" % (
            clone_id, script_id, script_param)
        topo = Topology(name=topology_name,
                        description=description,
                        json=json.dumps(raw_json))
        topo.save()
    try:
        # by this point, the topology already exists
        logger.debug("Got topo " + str(topo.id))
        # domains are namespaced by "t<topology id>_" prefixes in libvirt
        domain_status = libvirtUtils.get_domains_for_topology("t" + str(topo.id) +
                                                              "_")
        if len(domain_status) == 0:
            # it has not yet been deployed!
            logger.debug("not yet deployed!")
            # let's parse the json and convert to simple lists and dicts
            config = wistarUtils.load_config_from_topology_json(
                topo.json, topo.id)
            logger.debug("Deploying to hypervisor now")
            # FIXME - should this be pushed into another module?
            av.inline_deploy_topology(config)
            time.sleep(1)
    except Exception as e:
        logger.debug(str(e))
        context["status"] = "unknown"
        context["message"] = "Exception"
        return HttpResponse(json.dumps(context),
                            content_type="application/json")
    try:
        # at this point, the topology now exists and is deployed!
        network_list = libvirtUtils.get_networks_for_topology("t" + str(topo.id) + "_")
        domain_list = libvirtUtils.get_domains_for_topology("t" + str(topo.id) + "_")
        # networks must be running before any attached domains are started
        for network in network_list:
            libvirtUtils.start_network(network["name"])
            time.sleep(1)
        for domain in domain_list:
            # long pause between domain starts to avoid disk I/O storms
            time.sleep(10)
            libvirtUtils.start_domain(domain["uuid"])
        context = {
            'status': 'booting',
            'topologyId': topo.id,
            'message': 'sandbox is booting'
        }
        logger.debug("returning")
        return HttpResponse(json.dumps(context),
                            content_type="application/json")
    except Exception as ex:
        logger.debug(str(ex))
        context["status"] = "unknown"
        context["message"] = "Caught Exception %s" % ex
        return HttpResponse(json.dumps(context),
                            content_type="application/json")
def start_topology(request):
    """
    Deploy (if necessary) and start all networks and domains for a topology.

    Expects a JSON request body of the form:
        [{"name": "<topology name>", "start_delay": <optional seconds>}]

    :param request: Django request whose body is a JSON list of objects
    :return: JSON response built by apiUtils.return_json
    """
    logger.debug("---- start_topology ---- ")
    json_string = request.body
    try:
        json_body = json.loads(json_string)
    except ValueError:
        # FIX: apiUtils is a module - call its return_json helper rather than
        # the module itself (was a TypeError)
        return apiUtils.return_json(False, "Could not load json payload")

    try:
        if "name" not in json_body[0]:
            # FIX: previously fell through with no return value (None is not a
            # valid Django response)
            return apiUtils.return_json(False, "Missing required parameter: name")

        topology_name = json_body[0]["name"]

        # do we have a specified delay between starting domains?
        if 'start_delay' in json_body[0]:
            delay = int(json_body[0]['start_delay'])
        else:
            delay = 0.5

        # get the topology by name
        try:
            topology = Topology.objects.get(name=topology_name)
        except ObjectDoesNotExist:
            logger.error('Could not find topology with name: %s' % topology_name)
            # FIX: same module-call bug as above
            return apiUtils.return_json(
                False, "Could not find topology with name: %s" % topology_name)

        domain_list = libvirtUtils.get_domains_for_topology(
            "t" + str(topology.id) + "_")
        if len(domain_list) == 0:
            # it has not yet been deployed!
            logger.debug("not yet deployed!")
            # let's parse the json and convert to simple lists and dicts
            config = wistarUtils.load_config_from_topology_json(
                topology.json, topology.id)
            if config is None:
                # FIX: same module-call bug as above
                return apiUtils.return_json(
                    False,
                    "Could not load config for topology: %s" % topology.id)

            logger.debug("Deploying to hypervisor now")
            # FIXME - should this be pushed into another module?
            av.inline_deploy_topology(config)

        # now, topology should be deployed and ready to go!
        network_list = libvirtUtils.get_networks_for_topology(
            "t" + str(topology.id) + "_")
        domain_list = libvirtUtils.get_domains_for_topology(
            "t" + str(topology.id) + "_")

        for network in network_list:
            logger.debug("starting network: %s" % network["name"])
            libvirtUtils.start_network(network["name"])
            time.sleep(delay)

        for domain in domain_list:
            # only start domains that are not already running
            if domain["state"] != 'running':
                logger.debug("starting domain: %s" % domain["uuid"])
                libvirtUtils.start_domain(domain["uuid"])
                time.sleep(delay)

        return apiUtils.return_json(True, 'Topology started!',
                                    topology_id=topology.id)
    except Exception as ex:
        logger.debug(str(ex))
        return apiUtils.return_json(False, 'Could not start topology!')
def inline_deploy_topology(config):
    """
    takes the topology configuration object and deploys to the appropriate hypervisor

    Defines and starts libvirt networks (Linux only), then defines each
    device domain: creating disk instances, optional secondary/tertiary
    disks and cloud-init seed images, rendering the domain XML template,
    and reserving a dnsmasq management IP for newly created domains.

    :param config: output of the wistarUtils. (dict with "networks" and
        "devices" lists)
    :return: None; raises Exception on any failure
    """
    # this deployment path is kvm-only; other backends are handled elsewhere
    if configuration.deployment_backend != 'kvm':
        raise WistarException(
            'Cannot deploy to KVM configured deployment backend is %s'
            % configuration.deployment_backend)
    is_ovs = False
    is_linux = osUtils.check_is_linux()
    is_ubuntu = osUtils.check_is_ubuntu()
    if hasattr(configuration, "use_openvswitch") and configuration.use_openvswitch:
        is_ovs = True
    # only create networks on Linux/KVM
    logger.debug("Checking if we should create networks first!")
    if is_linux:
        for network in config["networks"]:
            network_xml_path = "ajax/kvm/network.xml"
            # Do we need openvswitch here?
            if is_ovs:
                # set the network_xml_path to point to a network configuration that defines the ovs type here
                network_xml_path = "ajax/kvm/network_ovs.xml"
                if not ovsUtils.create_bridge(network["name"]):
                    err = "Could not create ovs bridge"
                    logger.error(err)
                    raise Exception(err)
            try:
                if not libvirtUtils.network_exists(network["name"]):
                    logger.debug("Rendering networkXml for: %s" % network["name"])
                    network_xml = render_to_string(network_xml_path,
                                                   {'network': network})
                    logger.debug(network_xml)
                    libvirtUtils.define_network_from_xml(network_xml)
                    time.sleep(.5)
                # start the network whether it was just defined or pre-existing
                logger.debug("Starting network")
                libvirtUtils.start_network(network["name"])
            except Exception as e:
                raise Exception(str(e))
    # are we on linux? are we on Ubuntu linux? set kvm emulator accordingly
    vm_env = dict()
    vm_env["emulator"] = "/usr/libexec/qemu-kvm"
    vm_env["pcType"] = "rhel6.5.0"
    # possible values for 'cache' are 'none' (default) and 'writethrough'. Use writethrough if you want to
    # mount the instances directory on a glusterFs or tmpfs volume. This might make sense if you have tons of RAM
    # and want to alleviate IO issues. If in doubt, leave it as 'none'
    vm_env["cache"] = configuration.filesystem_cache_mode
    vm_env["io"] = configuration.filesystem_io_mode
    if is_linux and is_ubuntu:
        vm_env["emulator"] = "/usr/bin/kvm-spice"
        vm_env["pcType"] = "pc"
    # by default, we use kvm as the hypervisor
    domain_xml_path = "ajax/kvm/"
    if not is_linux:
        # if we're not on Linux, then let's try to use vbox instead
        domain_xml_path = "ajax/vbox/"
    for device in config["devices"]:
        domain_exists = False
        try:
            # capture the existing domain uuid (if any) so the template can
            # redefine the same domain in place
            if libvirtUtils.domain_exists(device['name']):
                domain_exists = True
                device_domain = libvirtUtils.get_domain_by_name(device['name'])
                device['domain_uuid'] = device_domain.UUIDString()
            else:
                device['domain_uuid'] = ''
            # if not libvirtUtils.domain_exists(device["name"]):
            logger.debug("Rendering deviceXml for: %s" % device["name"])
            configuration_file = device["configurationFile"]
            logger.debug("using config file: " + configuration_file)
            logger.debug(device)
            image = Image.objects.get(pk=device["imageId"])
            image_base_path = settings.MEDIA_ROOT + "/" + image.filePath.url
            instance_path = osUtils.get_instance_path_from_image(
                image_base_path, device["name"])
            secondary_disk = ""
            tertiary_disk = ""
            # only create the backing instance if it does not already exist
            if not osUtils.check_path(instance_path):
                if device["resizeImage"] > 0:
                    # a requested resize forces a thick-provisioned copy
                    logger.debug('resizing image')
                    if not osUtils.create_thick_provision_instance(
                            image_base_path, device["name"],
                            device["resizeImage"]):
                        raise Exception(
                            "Could not resize image instance for image: "
                            + device["name"])
                else:
                    if not osUtils.create_thin_provision_instance(
                            image_base_path, device["name"]):
                        raise Exception(
                            "Could not create image instance for image: "
                            + image_base_path)
            # extra disks are only created when their params declare a "type"
            if "type" in device["secondaryDiskParams"]:
                secondary_disk = wistarUtils.create_disk_instance(
                    device, device["secondaryDiskParams"])
            if "type" in device["tertiaryDiskParams"]:
                tertiary_disk = wistarUtils.create_disk_instance(
                    device, device["tertiaryDiskParams"])
            cloud_init_path = ''
            if device["cloudInitSupport"]:
                # grab the last interface
                management_interface = device["managementInterface"]
                # grab the prefix len from the management subnet which is in the form 192.168.122.0/24
                if '/' in configuration.management_subnet:
                    management_prefix_len = configuration.management_subnet.split(
                        '/')[1]
                else:
                    management_prefix_len = '24'
                management_ip = device['ip'] + '/' + management_prefix_len
                # domain_name, host_name, mgmt_ip, mgmt_interface
                script_string = ""
                script_param = ""
                roles = list()
                if 'roles' in device and type(device['roles']) is list:
                    roles = device['roles']
                if device["configScriptId"] != 0:
                    logger.debug("Passing script data!")
                    script = osUtils.get_cloud_init_template(
                        device['configScriptId'])
                    script_param = device["configScriptParam"]
                # NOTE(review): 'script' is only assigned in the branch above;
                # when configScriptId == 0 this call looks like it would raise
                # NameError ('script_string' is initialized but never used) -
                # confirm intended behavior
                logger.debug("Creating cloud init path for linux image")
                cloud_init_path = osUtils.create_cloud_init_img(
                    device["name"], device["label"], management_ip,
                    management_interface, device["password"], script,
                    script_param, roles)
                logger.debug(cloud_init_path)
            device_xml = render_to_string(
                domain_xml_path + configuration_file, {
                    'device': device,
                    'instancePath': instance_path,
                    'vm_env': vm_env,
                    'cloud_init_path': cloud_init_path,
                    'secondary_disk_path': secondary_disk,
                    'tertiary_disk_path': tertiary_disk,
                    'use_ovs': is_ovs
                })
            logger.debug(device_xml)
            libvirtUtils.define_domain_from_xml(device_xml)
            if not domain_exists:
                # first-time definition: pin the management IP to this
                # domain's mac via dnsmasq
                logger.debug("Reserving IP with dnsmasq")
                management_mac = libvirtUtils.get_management_interface_mac_for_domain(
                    device["name"])
                logger.debug('got management mac')
                logger.debug(management_mac)
                libvirtUtils.reserve_management_ip_for_mac(
                    management_mac, device["ip"], device["name"])
                logger.debug('management ip is reserved for mac')
        except Exception as ex:
            logger.warn("Raising exception")
            logger.error(ex)
            logger.error(traceback.format_exc())
            raise Exception(str(ex))
def manage_domain(request):
    """
    Start, stop, suspend, or undefine a single libvirt domain.

    POST parameters: domainId (libvirt uuid), action
    (start|stop|suspend|undefine), topologyId.

    :return: refreshed deployment status on success, ajax error page otherwise
    """
    required_fields = set(['domainId', 'action', 'topologyId'])
    if not required_fields.issubset(request.POST):
        return render(request, 'ajax/ajaxError.html',
                      {'error': "Invalid Parameters in POST"})

    domain_id = request.POST['domainId']
    action = request.POST['action']
    topology_id = request.POST["topologyId"]

    if action == "start":
        # force all networks to be up before we start a topology
        # potential minor optimization here to only start networks attached to domain
        # NOTE(review): other views pass a "t<id>_" prefix to
        # get_networks_for_topology; here the raw topologyId is passed -
        # confirm the caller supplies the prefixed form
        networks = libvirtUtils.get_networks_for_topology(topology_id)
        for network in networks:
            if network["state"] != "running":
                libvirtUtils.start_network(network["name"])

        # prevent start up errors by missing ISO files - i.e cloud-init seed files
        domain = libvirtUtils.get_domain_by_uuid(domain_id)
        iso = libvirtUtils.get_iso_for_domain(domain.name())
        # if we have an ISO file configured, and it doesn't actually exist
        # just remove it completely
        if iso is not None and not osUtils.check_path(iso):
            logger.debug("Removing non-existent ISO from domain")
            libvirtUtils.detach_iso_from_domain(domain.name())

        # now we should be able to safely start the domain
        if libvirtUtils.start_domain(domain_id):
            return refresh_deployment_status(request)
        else:
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not start domain!"})
    elif action == "stop":
        if libvirtUtils.stop_domain(domain_id):
            return refresh_deployment_status(request)
        else:
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not stop domain!"})
    elif action == "suspend":
        if libvirtUtils.suspend_domain(domain_id):
            return refresh_deployment_status(request)
        else:
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not suspend domain!"})
    elif action == "undefine":
        domain = libvirtUtils.get_domain_by_uuid(domain_id)
        domain_name = domain.name()
        source_file = libvirtUtils.get_image_for_domain(domain_id)
        if libvirtUtils.undefine_domain(domain_id):
            # clean up the instance disk and any cloud-init seed directory
            if source_file is not None:
                osUtils.remove_instance(source_file)
            osUtils.remove_cloud_init_seed_dir_for_domain(domain_name)
            return refresh_deployment_status(request)
        else:
            # FIX: message previously said "Could not stop domain!" - this
            # branch fails when the domain cannot be undefined
            return render(request, 'ajax/ajaxError.html',
                          {'error': "Could not undefine domain!"})
    else:
        return render(request, 'ajax/ajaxError.html',
                      {'error': "Unknown Parameters in POST!"})