def zoomChange(self):
    if self.image.isNull():
        LOG.warning('image is null.')
        return
    self.draw_area.scale = 0.01 * self.zoom_value.value()
    self.draw_area.adjustSize()
    self.draw_area.update()
def get_application(application_name, is_summary=False):
    if not is_summary:
        session = requests.session()
        url = 'http://{}:8080/api/v1/namespaces/{}/pods'.format(
            app.config['K8S_IP'], application_name)
        reply = session.get(url)
        pod_list_json = reply.json()['items']
        url = 'http://{}:8080/api/v1/namespaces/{}/replicationcontrollers'.format(
            app.config['K8S_IP'], application_name)
        reply = session.get(url)
        rc_list_json = reply.json()['items']
    try:
        stack = heat_client.get_stack(application_name)
        stack_json = stack.to_dict()
        if not is_summary:
            paas_app = PaasApplication(
                application_name, stack_json,
                heat_client.get_resource_list(application_name),
                rc_list_json, pod_list_json)
        else:
            paas_app = PaasApplication(application_name, stack_json,
                                       None, None, None)
        return paas_app
    except Exception as e:
        LOG.warning(type(e))
        LOG.warning(e)
        return None
def clean(self, clean_options):
    if self.clean_needed(clean_options):
        if self.servers:
            for server in self.servers:
                try:
                    LOG.info('Deleting instance %s...', server.name)
                    self.nova_client.servers.delete(server.id)
                except Exception:
                    LOG.exception("Instance %s deletion failed", server.name)
            LOG.info(' Waiting for %d instances to be fully deleted...',
                     len(self.servers))
            retry_count = 15 + len(self.servers) * 5
            while True:
                retry_count -= 1
                self.servers = [server for server in self.servers
                                if self.instance_exists(server)]
                if not self.servers:
                    break
                if retry_count:
                    LOG.info(' %d yet to be deleted by Nova, retries left=%d...',
                             len(self.servers), retry_count)
                    time.sleep(2)
                else:
                    LOG.warning(' instance deletion verification time-out: '
                                '%d still not deleted', len(self.servers))
                    break
def set_irq_affinity(set_bitmap, irqs, cpulist):
    """Set irq affinity to the specified cpulist for list of irqs.

    :param set_bitmap: True: set bitmap file, False: set list file
    :param irqs: irq list
    :param cpulist: cpu list
    """
    _irqs = set()
    if set_bitmap:
        filename = 'smp_affinity'
    else:
        filename = 'smp_affinity_list'
    for irq in irqs:
        irq_aff_path = "/proc/irq/%s/%s" % (irq, filename)
        try:
            with open(irq_aff_path, 'w') as f:
                f.write(cpulist)
            _irqs.update([irq])
        except Exception as e:
            LOG.warning("Failed to write pci affine file:%(F)s, irq:%(I)s, "
                        "error=%(E)s" % {"F": filename, "I": irq, "E": e})
    return _irqs
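A minimal usage sketch for set_irq_affinity; the IRQ numbers and CPU list below are assumed examples, not values from the source. With set_bitmap=False the kernel expects a CPU-list string written to smp_affinity_list (e.g. "0-3"); with set_bitmap=True it expects a hex mask written to smp_affinity (e.g. "f").

# Hypothetical call: pin IRQs 42 and 43 to CPUs 0-3 via smp_affinity_list
# (requires root and existing /proc/irq entries).
affined = set_irq_affinity(set_bitmap=False, irqs=['42', '43'], cpulist='0-3')
print('IRQs successfully affined:', affined)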
def get_application(application_name, is_summary=False):
    if not is_summary:
        session = requests.session()
        url = 'http://{}:8080/api/v1/namespaces/{}/pods'.format(
            app.config['K8S_IP'], application_name)
        reply = session.get(url)
        pod_list_json = reply.json()['items']
        url = 'http://{}:8080/api/v1/namespaces/{}/replicationcontrollers'.format(
            app.config['K8S_IP'], application_name)
        reply = session.get(url)
        rc_list_json = reply.json()['items']
    try:
        stack = heat_client.get_stack(application_name)
        # print stack
        stack_json = stack.to_dict()
        res = read_cache(application_name)
        if res is None:
            LOG.info(" cache is None ")
            res = heat_client.get_resource_list(application_name)
            res = write_cache(application_name, res)
        if not is_summary:
            # changed to PaasApplication2
            paas_app = PaasApplication2(application_name, stack_json, res,
                                        rc_list_json, pod_list_json)
        else:
            paas_app = PaasApplication2(application_name, stack_json,
                                        None, None, None)
        return paas_app
    except Exception as e:
        LOG.warning(type(e))
        LOG.warning(e)
        return None
def get_guest_domain_by_uuid(conn, uuid):
    try:
        dom = conn.lookupByUUIDString(uuid)
    except Exception as e:
        LOG.warning("Failed to get domain for uuid=%s! error=%s" % (uuid, e))
        return None
    domain = get_guest_domain_info(dom)
    return domain
def get_az_host_list(self):
    avail_list = []
    host_list = []
    try:
        host_list = self.novaclient.services.list()
    except novaclient.exceptions.Forbidden:
        LOG.warning('Operation Forbidden: could not retrieve list of hosts'
                    ' (likely no permission)')
    # the user has specified a list of 1 or 2 hypervisors to use
    if self.config.hypervisors:
        for hyp in self.config.hypervisors:
            hyp = self.sanitize_az_host(host_list, hyp)
            if hyp:
                avail_list.append(hyp)
            else:
                return []
            # if the user did not specify an az, insert the configured az
            if ':' not in hyp:
                if self.config.availability_zone:
                    hyp = self.normalize_az_host(None, hyp)
                else:
                    return []
            # pick first 2 matches at most
            if len(avail_list) == 2:
                break
        LOG.info('Using hypervisors ' + ', '.join(avail_list))
    else:
        for host in host_list:
            # this host must be a compute node
            if host.binary != 'nova-compute' or host.state != 'up':
                continue
            candidate = None
            if self.config.availability_zone:
                if host.zone == self.config.availability_zone:
                    candidate = self.normalize_az_host(None, host.host)
            else:
                candidate = self.normalize_az_host(host.zone, host.host)
            if candidate:
                avail_list.append(candidate)
                # pick first 2 matches at most
                if len(avail_list) == 2:
                    break
    # if empty we insert the configured az
    if not avail_list:
        if not self.config.availability_zone:
            LOG.error('Availability_zone must be configured')
        elif host_list:
            LOG.error('No host matching the selection for availability zone: '
                      + self.config.availability_zone)
            avail_list = []
        else:
            avail_list = [self.config.availability_zone]
    return avail_list
def delSelShapeCB(self):
    shape = self.draw_area.deleteSelected()
    if shape is None:
        LOG.warning('rm empty label')
        return
    item = self.shapes2items[shape]
    self.label_list.takeItem(self.label_list.row(item))
    del self.shapes2items[shape]
    del self.items2shapes[item]
def _collect_node_info(node, session):
    url = 'http://{}:12305/api/v1.0/machine'.format(node.name)
    try:
        res = session.get(url, timeout=TIMEOUT)
        if res.status_code != 200:
            LOG.warning('Can not connect to agent <{}>'.format(node.name))
            return
    except Exception as e:
        LOG.warning('Can not connect to agent <{}>'.format(e.message))
        return
def power_on(self, instance_name):
    """Power on z/VM instance."""
    try:
        self._power_state(instance_name, "PUT", "on")
    except Exception as err:
        err_str = err.format_message()
        if ("Return Code: 200" in err_str and
                "Reason Code: 8" in err_str):
            # Instance is already active
            LOG.warning("z/VM instance %s already active", instance_name)
            return
def switchPage(self, id):
    if self.current == id:
        return
    if id < 0 or id > 4:
        LOG.warning('invalid page id: %s', id)
        return
    self.state_label1.setText("")
    self.state_label2.setText("")
    self.current = id
    self.centre_layout.setCurrentIndex(self.current)
def check_vm_placement(self, vm_instance1, vm_instance2):
    try:
        server_instance_1 = self.novaclient.servers.get(vm_instance1)
        server_instance_2 = self.novaclient.servers.get(vm_instance2)
        if server_instance_1.hostId == server_instance_2.hostId:
            return True
        else:
            return False
    except novaclient.exceptions.ClientException:
        LOG.warning("Exception in retrieving the hostId of servers")
def _collect_master_info_v1_1(master_ip):
    svc = _read_svc_tmpl('svc_master.json', master_ip)
    url = 'http://{}:12305/api/v1.1/machine'.format(master_ip)
    try:
        resp, content = _post_dict(url, svc)
        if resp.status != 200:
            LOG.warning('Post failed <{}>'.format(master_ip))
            return
    except Exception as e:
        LOG.warning('Post failed <{}>'.format(e.message))
        return
def collect_cluster_resource_usage():
    fname = 'collect_cluster_resource_usage'
    while True:
        try:
            LOG.info('{}: start'.format(fname))
            # 1. sync master
            master_ip = settings.K8S_API_SERVER.split('/')[2].split(':')[0]
            _collect_master_info_v1_1(master_ip)
            # 2. sync nodes
            pod_list = kube_client.GetPods().Items
            assert pod_list is not None
            node_list = kube_client.GetNodes()
            assert node_list is not None
            for node in node_list:
                session = requests.Session()
                _collect_node_info(node, session)
                _collect_node_info_v1_1(node)
                if not node.is_ready():
                    continue
                # get pod container status
                url = 'http://{}:12305/api/v1.0/docker'.format(node.name)
                try:
                    res = session.get(url, timeout=TIMEOUT)
                    if res.status_code != 200:
                        LOG.warning('Can not connect to agent <{}>'.format(node.name))
                        continue
                except Exception as e:
                    LOG.warning('Can not connect to agent <{}>'.format(e.message))
                    continue
                pod_containers = res.json()
                assert pod_containers is not None
                for key, pod_container in pod_containers.items():
                    found = False
                    for pod in pod_list:
                        try:
                            if pod.Status.ContainerStatuses[0]['containerID'][9:] == key:
                                found = True
                                break
                        except Exception:
                            pass
                    if not found:
                        continue
                    push_pod_status(pod_container['namespace'],
                                    pod_container['pod_name'], pod_container)
            LOG.info("{}: done".format(fname))
        except Exception as e:
            LOG.error('{}: {}'.format(fname, e))
def delete(self, application_name):
    if not acl.check_acl('', __name__, 'd'):
        LOG.warning('DELETE application<{}> is rejected'.format(application_name))
        return make_status_response(403, 'Access denied')
    LOG.info('Deleting application <%s>' % (application_name))
    try:
        heat_client.delete_stack(application_name)
    except HTTPNotFound as e:
        LOG.warning(e)
        return make_status_response(404, str(e))
def get_resource_list(self, stack_name):
    heat_client = self.__get_heat_client()
    for i in range(5):
        try:
            return heat_client.resources.list(stack_name)
        except Exception as e:
            LOG.error('Failed to get resource list of stack <{}>'.format(stack_name))
            if 'Timed out waiting for a reply to message ID' in str(e):
                LOG.warning('Timed out getting resource list for stack <{}>'.format(stack_name))
                time.sleep(2)
            else:
                raise
def delete(self, application_name, pod_name):
    if not acl.check_acl('', __name__, 'd'):
        LOG.warning('DELETE pod<{}> of application<{}> is rejected'.format(
            pod_name, application_name))
        return make_status_response(403, 'Access denied')
    try:
        kube_client.DeletePods(pod_name, namespace=application_name)
    except KubernetesError as e:
        LOG.error("failed to delete pod <%s> of application <%s>" %
                  (pod_name, application_name))
        return make_status_response(404, e.message)
class Pod:
    def __init__(self, json_data):
        self.json_data = deepcopy(json_data)
        self.mem_usage = -1  # unit is MBytes
        self.mem_cache = -1  # unit is MBytes
        self.cpu_percentage = -1.0
        self.max_mem_limit = 0.0
        try:
            mem_limit_str = self.json_data['spec']['containers'][0][
                'resources']['limits']['memory']
            self.max_mem_limit = self.get_mem_from_str(mem_limit_str)
        except Exception as e:
            LOG.info(e)
        try:
            key = '/paas/applications/{}/pods/{}'.format(self.namespace, self.name)
            result = etcd_client.read(key).value
            tmp_json = json.loads(result)
            if int(time.time()) - tmp_json['timestamp'] <= 60:
                self.mem_usage = tmp_json['stats']['memory']['usage'] / 1024 / 1024
                self.mem_cache = tmp_json['stats']['memory']['cache'] / 1024 / 1024
                self.cpu_percentage = tmp_json['stats']['cpu']['cpu_percentage']
                if self.cpu_percentage is None:
                    self.cpu_percentage = -1.0
            else:
                LOG.warning('The record <{}>\'s timestamp <{}> is old'.format(
                    key, tmp_json['timestamp']))
        except Exception as e:
            LOG.error(e)
def _collect_node_info_v1_1(node):
    svc = _read_svc_tmpl('svc_node.json', node.name)
    url = 'http://{}:12305/api/v1.1/machine'.format(node.name)
    try:
        resp, content = _post_dict(url, svc)
        LOG.debug('{} {}'.format(resp, content))
        if resp.status != 200:
            LOG.warning('Post failed <{}>'.format(node.name))
            return
    except Exception as e:
        LOG.warning('Post failed <{}>'.format(e.message))
        LOG.exception(e)
        return
    node_json = json.loads(content)
    node_json['mem_request_used'] = node.mem_request_used
    push_node_status('nodes', node.name, node_json, version=1.1)
def get_stack(self, name):
    heat_client = self.__get_heat_client()
    for i in range(5):
        try:
            stack = heat_client.stacks.get(name)
            return stack
        except HTTPNotFound:
            raise
        except Exception as e:
            LOG.error('Failed to get stack <{}>'.format(name))
            LOG.error(type(e))
            if 'Timed out waiting for a reply to message ID' in str(e):
                LOG.warning('Timed out getting stack <{}>. retry {}'.format(name, i))
                time.sleep(2)
            else:
                raise
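The two Heat wrappers above (get_resource_list and get_stack) repeat the same retry-on-RPC-timeout loop. A minimal sketch of how that pattern could be factored out; retry_on_rpc_timeout is a hypothetical helper, not part of the source:

import time

def retry_on_rpc_timeout(call, retries=5, delay=2):
    # Retry only when the Heat RPC layer reports a reply timeout;
    # any other exception is re-raised immediately.
    for _ in range(retries - 1):
        try:
            return call()
        except Exception as e:
            if 'Timed out waiting for a reply to message ID' not in str(e):
                raise
            time.sleep(delay)
    return call()  # final attempt, exceptions propagate to the caller

# Hypothetical usage:
# stack = retry_on_rpc_timeout(lambda: heat_client.stacks.get(name))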
def _wait_for_msi_irqs(self, inst):
    """Check if each pci device has the expected number of msi irqs."""
    _prev = self._msi_irq_count.copy()
    addrs = set()
    for pci_dev in inst.pci_devices:
        addr = pci_dev.address
        addrs.update([addr])
        try:
            irqs, msi_irqs = pci_utils.get_irqs_by_pci_address(addr)
        except Exception as e:
            msi_irqs = set()
            LOG.error('_wait_for_msi_irqs: pci_addr=%(A)s, error=%(E)s' %
                      {'A': addr, 'E': e})
        self._msi_irq_count[addr] = len(msi_irqs)
        self._msi_irq_elapsed[addr] += CONF.msi_irq_check_interval
        if _prev[addr] == self._msi_irq_count[addr]:
            self._msi_irq_since[addr] += CONF.msi_irq_check_interval
        else:
            self._msi_irq_since[addr] = 0
    # Done when msi irq counts have not changed for some time
    if all((self._msi_irq_count[k] > 0) and
           (self._msi_irq_since[k] >= CONF.msi_irq_since)
           for k in addrs):
        raise loopingcall.LoopingCallDone()
    # Abort due to timeout
    if all(self._msi_irq_elapsed[k] >= CONF.msi_irq_timeout for k in addrs):
        msg = ("reached %(timeout)s seconds timeout, waiting for "
               "msi irqs of pci_addrs: %(addrs)s") % {
                   'timeout': CONF.msi_irq_timeout,
                   'addrs': list(addrs)}
        LOG.warning(msg)
        raise loopingcall.LoopingCallDone()
def check_app(app):
    kube_client = get_kube_client()
    app_status = app['stack_info']['stack_status']
    if app_status in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS',
                      'ROLLBACK_IN_PROGRESS']:
        pod_list = pykube_client.get_pods(app['name'])
        for pod in pod_list:
            if (not pod.is_ready() and pod.restart_count >= 1
                    and pod.container_state is not None
                    and pod.container_state.name == 'waiting'
                    and pod.container_state.reason == 'CrashLoopBackOff'
                    and pod.last_container_state is not None
                    and pod.last_container_state.name == 'terminated'
                    and (pod.last_container_state.reason == 'Error'
                         or pod.last_container_state.reason == 'ContainerCannotRun')):
                # reschedule the pod to another node
                NodeManager.close_node(pod.host_IP)
                kube_client.DeletePods(pod.name, pod.namespace)
                LOG.warning('pod <{}> on node <{}> is rescheduled'.format(
                    pod.name, pod.host_IP))
def __remove_router_interface(self):
    for int_net in self.vm_int_net:
        if int_net:
            # If ipv6 is enabled remove that subnet too
            if self.ipv6_enabled:
                body = {'subnet_id': int_net['subnets'][1]}
                self.neutron_client.remove_interface_router(
                    self.ext_router['id'], body)
            body = {'subnet_id': int_net['subnets'][0]}
            try:
                self.neutron_client.remove_interface_router(
                    self.ext_router['id'], body)
            except NeutronException:
                # May fail with neutronclient.common.exceptions.Conflict
                # if there are floating IP in use - just ignore
                LOG.warning('Router interface may have floating IP in use: '
                            'not deleted')
            except TypeError:
                # External router does not exist, so just continue
                pass
def post(self):
    ''' create an application '''
    template = request.get_json()
    if template is None:
        return make_status_response(400, 'Bad application template')
    namespace = None
    try:
        namespace = self.get_namespace_from_template(template)
    except BadAppTemplate as e:
        LOG.warning(e.message)
        return make_status_response(400, e.message)
    except Exception as e:
        LOG.warning(e.message)
        return make_status_response(400, 'Bad application template')
    finally:
        temp_file = dump_app_json_into_file(namespace,
                                            json.dumps(template, indent=2))
        LOG.info('dump template into ' + temp_file)
    try:
        # ops platform may attach a field 'token'
        template.pop('token')
    except KeyError:
        pass
    if not self.__is_sys_mem_enough(template):
        return make_status_response(403, 'Memory is not enough')
    LOG.info("Creating application <%s>" % (namespace))
    stack = None
    try:
        stack = heat_client.get_stack(namespace)
    except HTTPNotFound:
        # The stack does not exist
        pass
    except Exception as e:
        return make_status_response(500, 'Internal error')
async def exception_handler(request: Request, exception: Exception):
    if isinstance(exception, CustomException):
        _result = ResultBuilder.failed(code=exception.code, msg=exception.msg)
        LOG.debug("CustomException exception:{}", _result)
        return JSONResponse(
            status_code=exception.http_code,
            content=JsonUtil.toJsonString(_result)
        )
    elif isinstance(exception, HTTPException):
        LOG.warning("http exception:{}", exception)
        return JSONResponse(
            status_code=exception.status_code,
            content=exception.detail,
            headers=exception.headers
        )
    else:
        LOG.error("system exception:{}", exception)
        _result = ResultBuilder.failed(code=ResultCode.SERVER_ERROR.value,
                                       msg=ResultCode.SERVER_ERROR.msg)
        return JSONResponse(
            status_code=500,
            content=JsonUtil.toJsonString(_result)
        )
def start_traffic_generator(self):
    """Start the traffic generator process (traffic not started yet)."""
    self.gen.connect()
    # pick up the interface speed if it is not set from config
    intf_speeds = self.gen.get_port_speed_gbps()
    # convert Gbps unit into bps
    tg_if_speed = bitmath.parse_string(str(intf_speeds[0]) + 'Gb').bits
    if self.intf_speed:
        # interface speed is overridden from config
        if self.intf_speed != tg_if_speed:
            # Warn the user if the speed in the config is different
            LOG.warning('Interface speed provided is different from actual speed (%d Gbps)',
                        intf_speeds[0])
    else:
        # interface speed not provisioned by config
        self.intf_speed = tg_if_speed
        # also update the speed in the tg config
        self.generator_config.intf_speed = tg_if_speed
    # Save the traffic generator local MAC
    for mac, device in zip(self.gen.get_macs(), self.generator_config.devices):
        device.set_mac(mac)
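A quick worked check of the Gbps-to-bps conversion used above; the 10 Gbps port speed is an assumed example value:

import bitmath

# 'Gb' is parsed by bitmath as SI gigabits, so a 10 Gbps port speed
# becomes 10,000,000,000 bits per second.
tg_if_speed = bitmath.parse_string(str(10) + 'Gb').bits
assert tg_if_speed == 10000000000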
def teardown(self):
    ''' Clean up the floating ip and VMs '''
    LOG.info('Cleaning up...')
    if self.server:
        self.server.dispose()
    if self.client:
        self.client.dispose()
    if not self.config.reuse_existing_vm and self.net:
        self.net.dispose()
    # Remove the public key
    if self.comp:
        self.comp.remove_public_key(self.config.public_key_name)
    # Finally remove the security group
    try:
        if self.comp:
            self.comp.security_group_delete(self.sec_group)
    except ClientException:
        # May throw novaclient.exceptions.BadRequest if in use
        LOG.warning('Security group in use: not deleted')
    if self.image_uploaded and self.config.delete_image_after_run:
        self.comp.delete_image(self.glance_client, self.config.image_name)
def __init__(self, neutron_client, config):
    self.neutron_client = neutron_client
    self.networks = neutron_client.list_networks()['networks']
    self.ext_net = None
    self.ext_router = None
    self.ext_router_created = False
    self.config = config
    # mgmt/data network:
    # - first for same network
    # - second for network to network communication
    self.vm_int_net = []
    self.ext_router_name = None
    # Store state if the network is ipv4/ipv6 dual stack
    self.ipv6_enabled = False

    # If reusing existing management network just find this network
    if self.config.reuse_network_name:
        try:
            # An existing management network must be reused
            int_net = self.lookup_network(self.config.reuse_network_name)
            self.vm_int_net.append(int_net)
        except IndexError:
            raise vmtp.VmtpException("Unable to find the network to be reused.")
        return
    else:
        ##############################################
        # If a user provided ext_net_name is not available,
        # then find the first network that is external
        ##############################################
        for network in self.networks:
            if network['router:external']:
                try:
                    if network['name'] == config.ext_net_name:
                        self.ext_net = network
                        break
                    if not self.ext_net:
                        self.ext_net = network
                except AttributeError:
                    ###############################################
                    # An AttributeError indicates that no user-defined
                    # external network was given, so use the first one
                    ###############################################
                    self.ext_net = network
                    break
        if self.ext_net:
            LOG.info("Using external network: %s.", self.ext_net['name'])
            # Find or create the router to the external network
            ext_net_id = self.ext_net['id']
            routers = neutron_client.list_routers()['routers']
            for router in routers:
                external_gw_info = router['external_gateway_info']
                if external_gw_info:
                    if external_gw_info['network_id'] == ext_net_id:
                        self.ext_router = router
                        LOG.info('Found external router: %s',
                                 self.ext_router['name'])
                        break
            # create a new external router if none found and a name was given
            self.ext_router_name = config.router_name
            if (not self.ext_router) and self.ext_router_name:
                self.ext_router = self.create_router(self.ext_router_name,
                                                     self.ext_net['id'])
                LOG.info('Created ext router %s.', self.ext_router_name)
                self.ext_router_created = True
        else:
            LOG.warning("No external network found.")

        if config.ipv6_mode:
            self.ipv6_enabled = True

        # Create the networks and subnets depending on v4 or v6
        enable_dhcp = not config.no_dhcp
        if config.ipv6_mode:
            for (net, subnet, cidr, subnet_v6, cidr_v6) in zip(
                    config.internal_network_name,
                    config.internal_subnet_name,
                    config.internal_cidr,
                    config.internal_subnet_name_v6,
                    config.internal_cidr_v6):
                int_net = self.create_net(net, subnet, cidr,
                                          config.dns_nameservers,
                                          subnet_v6, cidr_v6, config.ipv6_mode,
                                          enable_dhcp=enable_dhcp)
                self.vm_int_net.append(int_net)
                if config.same_network_only:
                    break
        else:
            for (net, subnet, cidr) in zip(config.internal_network_name,
                                           config.internal_subnet_name,
                                           config.internal_cidr):
                int_net = self.create_net(net, subnet, cidr,
                                          config.dns_nameservers,
                                          enable_dhcp=enable_dhcp)
                self.vm_int_net.append(int_net)
                if config.same_network_only:
                    break

        # Add both internal networks to router interface to enable
        # network to network connectivity
        if self.ext_net:
            self.__add_router_interface()

        self.l2agent_type = self._get_l2agent_type()
        self.internal_iface_dict = self._get_internal_iface_dict()
def collect_cluster_resource_usage():
    fname = 'collect_cluster_resource_usage'
    kube_client = get_kube_client()
    while True:
        try:
            LOG.info('{}: start'.format(fname))
            pod_list = kube_client.GetPods().Items
            assert pod_list is not None
            node_list = kube_client.GetNodes()
            assert node_list is not None
            for node in node_list:
                if not node.is_ready():
                    continue
                session = requests.Session()
                #
                # get pod container status
                #
                url = 'http://{}:12305/api/v1.0/docker'.format(node.name)
                try:
                    res = session.get(url)
                    if res.status_code != 200:
                        LOG.warning('Can not connect to agent <{}>'.format(node.name))
                        continue
                except Exception as e:
                    LOG.warning('Can not connect to agent <{}>'.format(e.message))
                    continue
                pod_containers = res.json()
                assert pod_containers is not None
                for key, pod_container in pod_containers.items():
                    found = False
                    for pod in pod_list:
                        try:
                            if pod.Status.ContainerStatuses[0]['containerID'][9:] == key:
                                found = True
                                break
                        except Exception:
                            pass
                    if not found:
                        continue
                    push_pod_status(pod_container['namespace'],
                                    pod_container['pod_name'], pod_container)
                #
                # get node status
                #
                url = 'http://{}:12305/api/v1.0/machine'.format(node.name)
                try:
                    res = session.get(url)
                    if res.status_code != 200:
                        LOG.warning('Can not connect to agent <{}>'.format(node.name))
                        continue
                except Exception as e:
                    LOG.warning('Can not connect to agent <{}>'.format(e.message))
                    continue
                node_json = res.json()
                push_node_status(node.name, node_json)
            LOG.info("{}: done".format(fname))
        except Exception as e:
            LOG.error('{}: {}'.format(fname, e))
def merge_opts_to_configs(opts):
    default_cfg_file = resource_string(__name__, "cfg.default.yaml")
    # read the default configuration file and possibly an override config file
    # the precedence order is as follows:
    # $HOME/.vmtp.yaml if exists
    # -c <file> from command line if provided
    # cfg.default.yaml
    config = config_loads(default_cfg_file)
    local_cfg = os.path.expanduser('~/.vmtp.yaml')
    if os.path.isfile(local_cfg):
        config = config_load(local_cfg, config)
    if opts.config:
        config = config_load(opts.config, config)

    if opts.show_config:
        print(default_cfg_file)
        sys.exit(0)

    if opts.version:
        print(__version__)
        sys.exit(0)

    config.debug = opts.debug
    config.stop_on_error = opts.stop_on_error
    config.keep_first_flow_and_exit = opts.keep_first_flow_and_exit
    config.inter_node_only = opts.inter_node_only
    config.same_network_only = opts.same_network_only

    if config.public_key_file and not os.path.isfile(config.public_key_file):
        LOG.warning('Invalid public_key_file:' + config.public_key_file)
        config.public_key_file = None
    if config.private_key_file and not os.path.isfile(config.private_key_file):
        LOG.warning('Invalid private_key_file:' + config.private_key_file)
        config.private_key_file = None

    # direct: use SR-IOV ports for all the test VMs
    if opts.vnic_type not in [None, 'direct', 'macvtap', 'normal']:
        LOG.error('Invalid vnic-type: ' + opts.vnic_type)
        sys.exit(1)
    config.vnic_type = opts.vnic_type
    config.hypervisors = opts.hypervisors

    if opts.availability_zone:
        config.availability_zone = opts.availability_zone

    # time to run each perf test in seconds
    if opts.time:
        config.time = int(opts.time)
    else:
        config.time = 10

    if opts.json:
        config.json_file = opts.json
    else:
        config.json_file = None

    # Initialize the external host access
    config.ext_host = get_ssh_access('external-host', opts.ext_host, config)

    ###################################################
    # VM Image URL
    ###################################################
    if opts.vm_image_url:
        config.vm_image_url = opts.vm_image_url

    ###################################################
    # MongoDB Server connection info.
    ###################################################
    if opts.mongod_server:
        config.vmtp_mongod_ip = opts.mongod_server
    else:
        config.vmtp_mongod_ip = None

    if 'vmtp_mongod_port' not in config:
        # Set MongoDB default port if not set.
        config.vmtp_mongod_port = 27017

    # the bandwidth limit for VMs
    if opts.vm_bandwidth:
        opts.vm_bandwidth = opts.vm_bandwidth.upper().strip()
        ex_unit = 'KMG'.find(opts.vm_bandwidth[-1])
        try:
            if ex_unit == -1:
                raise ValueError
            val = int(opts.vm_bandwidth[0:-1])
        except ValueError:
            LOG.error('Invalid --bandwidth parameter. A valid input must '
                      'specify only one unit (K|M|G).')
            sys.exit(1)
        config.vm_bandwidth = int(val * (10 ** (ex_unit * 3)))

    # the pkt size for TCP, UDP and ICMP
    if opts.tcp_pkt_sizes:
        config.tcp_pkt_sizes = decode_size_list('--tcpbuf', opts.tcp_pkt_sizes)
    if opts.udp_pkt_sizes:
        config.udp_pkt_sizes = decode_size_list('--udpbuf', opts.udp_pkt_sizes)
    if opts.icmp_pkt_sizes:
        config.icmp_pkt_sizes = decode_size_list('--icmp_pkt_sizes',
                                                 opts.icmp_pkt_sizes)

    if opts.reuse_network_name:
        config.reuse_network_name = opts.reuse_network_name
    if opts.os_dataplane_network:
        config.os_dataplane_network = opts.os_dataplane_network

    config.config_drive = opts.config_drive
    config.no_floatingip = opts.no_floatingip
    config.no_dhcp = opts.no_dhcp
    config.delete_image_after_run = opts.delete_image_after_run

    #####################################################
    # Set Ganglia server ip and port if the monitoring (-m)
    # option is enabled.
    #####################################################
    config.gmond_svr_ip = None
    config.gmond_svr_port = None
    if opts.monitor:
        # Add the default gmond port if not present
        if ':' not in opts.monitor:
            opts.monitor += ':8649'
        mobj = re.match(r'(\d+\.\d+\.\d+\.\d+):(\d+)', opts.monitor)
        if mobj:
            config.gmond_svr_ip = mobj.group(1)
            config.gmond_svr_port = mobj.group(2)
            LOG.info("Ganglia monitoring enabled (%s:%s)",
                     config.gmond_svr_ip, config.gmond_svr_port)
            config.time = 30
        else:
            LOG.warning('Invalid --monitor syntax: ' + opts.monitor)

    ###################################################
    # Once we parse the config files, normalize
    # the paths so that all paths are absolute paths.
    ###################################################
    normalize_paths(config)

    # Check the tp-tool name
    config.protocols = opts.protocols.upper()
    if 'M' in config.protocols or opts.multicast_addr:
        # nuttcp required for multicast
        opts.tp_tool = 'nuttcp'
        config.tp_tool = nuttcp_tool.NuttcpTool
        # If M provided, but not multicast_addr, use default (231.1.1.1)
        config.multicast_addr = opts.multicast_addr if opts.multicast_addr else "231.1.1.1"
        # If --multicast_addr provided, ensure 'M' is in protocols.
        if 'M' not in config.protocols:
            config.protocols += 'M'
    elif 'T' in config.protocols or 'U' in config.protocols:
        if opts.tp_tool.lower() == 'nuttcp':
            config.tp_tool = nuttcp_tool.NuttcpTool
        elif opts.tp_tool.lower() == 'iperf':
            config.tp_tool = iperf_tool.IperfTool
        else:
            LOG.warning('Invalid transport tool: ' + opts.tp_tool)
            sys.exit(1)
    else:
        config.tp_tool = None

    return config
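A quick worked check of the K|M|G bandwidth arithmetic inside merge_opts_to_configs above; the '500M' value is an assumed example:

# '500M' -> ex_unit = 'KMG'.find('M') = 1, val = 500,
# so the stored value is 500 * 10**(1*3) = 500000
vm_bandwidth_arg = '500M'
ex_unit = 'KMG'.find(vm_bandwidth_arg[-1])
val = int(vm_bandwidth_arg[0:-1])
assert int(val * (10 ** (ex_unit * 3))) == 500000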
def __init__(self, openrc_file, pwd, no_env):
    self.rc_password = None
    self.rc_username = None
    self.rc_tenant_name = None
    self.rc_auth_url = None
    self.rc_cacert = None
    self.rc_region_name = None
    self.rc_project_name = None
    self.rc_project_domain_name = None
    self.rc_user_domain_name = None
    self.rc_identity_api_version = 2
    success = True

    if openrc_file:
        if os.path.exists(openrc_file):
            export_re = re.compile('export OS_([A-Z_]*)="?(.*)')
            for line in open(openrc_file):
                mstr = export_re.match(line.strip())
                if mstr:
                    # get rid of possible trailing double quote
                    # the first one was removed by the re
                    name, value = mstr.group(1), mstr.group(2)
                    if value.endswith('"'):
                        value = value[:-1]
                    # get rid of password assignment
                    # echo "Please enter your OpenStack Password: "
                    if name == "USERNAME":
                        self.rc_username = value
                    elif name == "AUTH_URL":
                        self.rc_auth_url = value
                    elif name == "TENANT_NAME":
                        self.rc_tenant_name = value
                    elif name == "CACERT":
                        self.rc_cacert = value
                    elif name == "REGION_NAME":
                        self.rc_region_name = value
                    elif name == "PASSWORD" and not pwd:
                        pwd = value
                    elif name == "USER_DOMAIN_NAME":
                        self.rc_user_domain_name = value
                    elif name == "PROJECT_NAME":
                        self.rc_project_name = value
                    elif name == "PROJECT_DOMAIN_NAME":
                        self.rc_project_domain_name = value
        else:
            LOG.error('Error: rc file does not exist %s' % (openrc_file))
            success = False
    elif not no_env:
        # no openrc file passed - we assume the variables have been
        # sourced by the calling shell
        # just check that they are present
        if 'OS_IDENTITY_API_VERSION' in os.environ:
            self.rc_identity_api_version = int(os.environ['OS_IDENTITY_API_VERSION'])
        if self.rc_identity_api_version == 2:
            for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_TENANT_NAME']:
                if varname not in os.environ:
                    LOG.warning('%s is missing', varname)
                    success = False
            if success:
                self.rc_username = os.environ['OS_USERNAME']
                self.rc_auth_url = os.environ['OS_AUTH_URL']
                self.rc_tenant_name = os.environ['OS_TENANT_NAME']
            if 'OS_REGION_NAME' in os.environ:
                self.rc_region_name = os.environ['OS_REGION_NAME']
        elif self.rc_identity_api_version == 3:
            for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_PROJECT_NAME',
                            'OS_PROJECT_DOMAIN_NAME', 'OS_USER_DOMAIN_NAME']:
                if varname not in os.environ:
                    LOG.warning('%s is missing', varname)
                    success = False
            if success:
                self.rc_username = os.environ['OS_USERNAME']
                self.rc_auth_url = os.environ['OS_AUTH_URL']
                self.rc_project_name = os.environ['OS_PROJECT_NAME']
                self.rc_project_domain_name = os.environ['OS_PROJECT_DOMAIN_NAME']
                self.rc_user_domain_name = os.environ['OS_USER_DOMAIN_NAME']
            if 'OS_CACERT' in os.environ:
                self.rc_cacert = os.environ['OS_CACERT']

    # always override with CLI argument if provided
    if pwd:
        self.rc_password = pwd
    # if password not known, check from env variable
    elif self.rc_auth_url and not self.rc_password and success:
        if 'OS_PASSWORD' in os.environ and not no_env:
            self.rc_password = os.environ['OS_PASSWORD']
        else:
            # interactively ask for password
            self.rc_password = getpass.getpass(
                'Please enter your OpenStack Password: ')
    if not self.rc_password:
        self.rc_password = ""
class ApplicationList(Resource):
    def is_heat_template_valid(self, template):
        ''' Check if a given template is valid or not.
        Currently it only checks that the namespace of all components is the same.
        '''
        return True

    def get(self):
        LOG.info('get ApplicationList')
        is_summary = False
        if 'summary' in request.args and request.args['summary'].upper() == 'Y':
            is_summary = True
        app_json_list = []
        try:
            names = get_application_name_list()
            for name in names:
                app_json_list.append(
                    get_application(name, is_summary).dump_as_dict())
        except Exception as e:
            LOG.error(e)
            return make_status_response(500, 'Internal error')
        response_json = {'kind': 'ApplicationList', 'items': app_json_list}
        response = flask.make_response(json.dumps(response_json))
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response

    def post(self):
        ''' create an application '''
        template = request.get_json()
        if template is None:
            return make_status_response(400, 'Bad application template')
        namespace = None
        try:
            namespace = self.get_namespace_from_template(template)
        except BadAppTemplate as e:
            LOG.warning(e.message)
            return make_status_response(400, e.message)
        except Exception as e:
            LOG.warning(e.message)
            return make_status_response(400, 'Bad application template')
        finally:
            temp_file = dump_app_json_into_file(namespace,
                                                json.dumps(template, indent=2))
            LOG.info('dump template into ' + temp_file)
        try:
            # ops platform may attach a field 'token'
            template.pop('token')
        except KeyError:
            pass
        LOG.info("Creating application <%s>" % (namespace))
        stack = None
        try:
            stack = heat_client.get_stack(namespace)
        except HTTPNotFound:
            # The stack does not exist
            pass
        except Exception as e:
            return make_status_response(500, 'Internal error')
        if stack is not None:
            if not acl.check_acl('', 'Application', 'u'):
                LOG.warning('UPDATE application<{}> is rejected'.format(namespace))
                return make_status_response(403, 'Access denied')
            elif stack.status != 'COMPLETE' and stack.status != 'FAILED':
                LOG.warning('UPDATE application <{}> is rejected'.format(namespace))
                return make_status_response(
                    403,
                    'UPDATE application <{}> is rejected because its status is {}_{}'
                    .format(namespace, stack.action, stack.status))
            try:
                heat_client.update_stack(namespace, template)
            except Exception as e:
                LOG.error('Failed to update stack <{}>. Error message is: {}'.format(
                    namespace, str(e)))
                message = ''
                if 'CircularDependencyException' in str(e):
                    message = 'found Circular Dependency'
                return make_status_response(400, message)
        else:
            try:
                heat_client.create_stack(namespace, template)
            except Exception as e:
                LOG.error('Failed to create stack <{}>. Error message is: {}'.format(
                    namespace, str(e)))
                message = ''
                if 'CircularDependencyException' in str(e):
                    message = 'found Circular Dependency'
                return make_status_response(400, message)
        return make_status_response(200)
def __init__(self, openrc_file, pwd, no_env):
    self.rc_password = None
    self.rc_username = None
    self.rc_tenant_name = None
    self.rc_auth_url = None
    self.rc_cacert = False
    self.rc_region_name = None
    self.rc_project_name = None
    self.rc_project_domain_id = None
    self.rc_user_domain_id = None
    self.rc_identity_api_version = 2
    success = True

    if openrc_file:
        if os.path.exists(openrc_file):
            export_re = re.compile('export OS_([A-Z_]*)="?(.*)')
            for line in open(openrc_file):
                mstr = export_re.match(line.strip())
                if mstr:
                    # get rid of possible trailing double quote
                    # the first one was removed by the re
                    name, value = mstr.group(1), mstr.group(2)
                    if value.endswith('"'):
                        value = value[:-1]
                    # get rid of password assignment
                    # echo "Please enter your OpenStack Password: "
                    if name == "USERNAME":
                        self.rc_username = value
                    elif name == "AUTH_URL":
                        self.rc_auth_url = value
                    elif name == "TENANT_NAME":
                        self.rc_tenant_name = value
                    elif name == "CACERT":
                        self.rc_cacert = value
                    elif name == "REGION_NAME":
                        self.rc_region_name = value
                    elif name == "PASSWORD" and not pwd:
                        pwd = value
                    elif name == "PROJECT_NAME":
                        self.rc_project_name = value
                    elif name == "PROJECT_DOMAIN_ID" or name == "PROJECT_DOMAIN_NAME":
                        self.rc_project_domain_id = value
                    elif name == "USER_DOMAIN_ID" or name == "USER_DOMAIN_NAME":
                        self.rc_user_domain_id = value
        else:
            LOG.error('Error: rc file does not exist %s', openrc_file)
            success = False
    elif not no_env:
        # no openrc file passed - we assume the variables have been
        # sourced by the calling shell
        # just check that they are present
        if 'OS_IDENTITY_API_VERSION' in os.environ:
            self.rc_identity_api_version = int(os.environ['OS_IDENTITY_API_VERSION'])
        if self.rc_identity_api_version == 2:
            for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_TENANT_NAME']:
                if varname not in os.environ:
                    LOG.warning('%s is missing', varname)
                    success = False
            if success:
                self.rc_username = os.environ['OS_USERNAME']
                self.rc_auth_url = os.environ['OS_AUTH_URL']
                self.rc_tenant_name = os.environ['OS_TENANT_NAME']
            if 'OS_REGION_NAME' in os.environ:
                self.rc_region_name = os.environ['OS_REGION_NAME']
        elif self.rc_identity_api_version == 3:
            for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_PROJECT_NAME',
                            'OS_PROJECT_DOMAIN_ID', 'OS_USER_DOMAIN_ID']:
                if varname not in os.environ:
                    LOG.warning('%s is missing', varname)
                    success = False
            if success:
                self.rc_username = os.environ['OS_USERNAME']
                self.rc_auth_url = os.environ['OS_AUTH_URL']
                self.rc_project_name = os.environ['OS_PROJECT_NAME']
                self.rc_project_domain_id = os.environ['OS_PROJECT_DOMAIN_ID']
                self.rc_user_domain_id = os.environ['OS_USER_DOMAIN_ID']
            if 'OS_CACERT' in os.environ:
                self.rc_cacert = os.environ['OS_CACERT']

    # always override with CLI argument if provided
    if pwd:
        self.rc_password = pwd
    # if password not known, check from env variable
    elif self.rc_auth_url and not self.rc_password and success:
        if 'OS_PASSWORD' in os.environ and not no_env:
            self.rc_password = os.environ['OS_PASSWORD']
        else:
            # interactively ask for password
            self.rc_password = getpass.getpass(
                'Please enter your OpenStack Password: ')
    if not self.rc_password:
        self.rc_password = ""
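A short illustration of the export_re pattern used by both credential parsers above; the auth URL value is an assumed example:

import re

export_re = re.compile('export OS_([A-Z_]*)="?(.*)')
mstr = export_re.match('export OS_AUTH_URL="http://192.168.1.10:5000/v3"')
name, value = mstr.group(1), mstr.group(2)
# the regex strips the leading quote; the trailing one is removed here
if value.endswith('"'):
    value = value[:-1]
assert name == 'AUTH_URL' and value == 'http://192.168.1.10:5000/v3'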