def update(manager, args):
    """Sync remote groups with the local configuration.

    Connects the manager, loads both local and remote group sets, then
    pushes updates honouring the dry-run/threshold/remove CLI options.
    """
    manager.connection = openstack.connect(config=args)
    manager.load_local_groups(args.config)
    manager.load_remote_groups()
    manager.update_remote_groups(
        dry_run=args.dry_run,
        threshold=args.threshold,
        remove=args.remove,
    )
def create_connection(auth_url, region, project_name, username, password):
    """Open an SDK connection with explicit password credentials."""
    params = {
        'auth_url': auth_url,
        'project_name': project_name,
        'username': username,
        'password': password,
        'region_name': region,
        'app_name': 'examples',
        'app_version': '1.0',
    }
    return openstack.connect(**params)
def dump(manager, args):
    """Validate and print group definitions, local or remote.

    With a config file, dump the local groups; without one, connect to
    the cloud and dump whatever it reports.
    """
    if args.config is not None:
        # Dump local groups
        manager.load_local_groups(args.config)
        groups = manager.local
    else:
        # Dump remote groups
        manager.connection = openstack.connect(config=args)
        manager.load_remote_groups()
        groups = manager.remote
    validate_groups(groups)
    print(dump_groups(groups, default_flow_style=False, width=-1))
def connect_openstack_admin(self, service, required_version=None):
    """Return an SDK connection authenticated as *service*'s admin user.

    :param service: object providing auth_url, admin_user, admin_password
    :param required_version: minimum openstacksdk version, or None to skip
        the version check
    :raises Exception: when the installed SDK is older than required_version
    """
    import openstack
    if required_version:
        installed = openstack.version.__version__
        if LooseVersion(installed) < LooseVersion(required_version):
            # Fixed: original read `openstack.version__version__` (missing
            # attribute dot), raising AttributeError instead of the intended
            # version error.
            # NOTE(review): the builtin Exception does not accept keyword
            # arguments -- this assumes a project-specific Exception class is
            # in scope; confirm.
            raise Exception("Insufficient OpenStack library version",
                            installed_version=installed,
                            required_version=required_version)
    conn = openstack.connect(auth_url=service.auth_url,
                             project_name="admin",
                             username=service.admin_user,
                             password=service.admin_password,
                             user_domain_name="Default",
                             project_domain_name="Default")
    return conn
def main():
    """Look up a server by name and print its DNS information."""
    parser = argparse.ArgumentParser()
    parser.add_argument("name", help="server name")
    args = parser.parse_args()

    import openstack
    cloud = openstack.connect()

    # The shade layer is needed so server.public_v4 / server.public_v6
    # attributes are present on the returned object.
    try:
        server = cloud.get_server(args.name)
    except AttributeError:
        print("Please update your version of shade/openstacksdk."
              " openstacksdk >= 0.12 is required")
        raise
    print_dns(cloud, server)
def delete(self, name=None):
    """Delete one volume and return the refreshed volume dictionary.

    :param name: Volume name
    :return: Dictionary of volumes
    :raises RuntimeError: if the deletion or listing fails
    """
    try:
        cloud = openstack.connect(**self.config)
        cloud.delete_volume(name_or_id=name)
        remaining = cloud.list_volumes()
        updated = self.update_dict(remaining)
    except Exception as e:
        Console.error("Problem deleting volume", traceflag=True)
        print(e)
        raise RuntimeError
    return updated
def process_stats(scn):
    """Compute and print transcoding statistics for *scn*.

    For every movie in ``scn.duration`` look up the compressed object's
    metadata in the 'CompressedVideos' container; missing objects count as
    errors.  Prints the completion ratio, the 95th-percentile transcoding
    duration and the average duration.
    """
    conn = openstack.connect()
    counter = 0
    counter_error = 0
    counter_completed = 0
    list_duration = []
    for movie in scn.duration:
        counter += 1
        try:
            metad = conn.object_store.get_object_metadata(
                movie, container='CompressedVideos')
            counter_completed += 1
            # duration = upload timestamp minus the recorded start time
            list_duration.append(float(metad['timestamp']) -
                                 scn.duration[movie])
        except openstack.exceptions.ResourceNotFound:
            # object absent -> the movie was never transcoded
            counter_error += 1
    # Removed: a large block of commented-out legacy code that iterated the
    # container instead of the scenario dict.
    if not counter_completed:
        percent = 0
        aver = 0
    else:
        percent = np.percentile(np.array(list_duration), 95)
        aver = np.average(np.array(list_duration))
    print("completed ratio: %s" % float(counter_completed / counter))
    print("duration 95th percentile: %s" % percent)
    print("average: %s" % aver)
def main():
    # Demo: create a VLAN routed network spanning two physical networks,
    # attach IPv4 + IPv6 subnets to each segment, then tear everything down.
    conn = openstack.connect(cloud=CLOUD)
    routed_network = conn.network.create_network(
        name=NETWORK_NAME,
        shared=True,
        provider_physical_network='physnet1',
        provider_network_type='vlan',
        provider_segmentation_id=SEGMENT_ID)
    # The network is created with one implicit segment; find and rename it.
    segment_1 = find_network_segment(conn, routed_network)
    conn.network.update_segment(segment_1.id,
                                name=('%s-%s' % (NETWORK_NAME, 'segment-1')))
    # Second segment lives on a different physical network.
    segment_2 = conn.network.create_segment(
        network_id=routed_network.id,
        name=('%s-%s' % (NETWORK_NAME, 'segment-2')),
        physical_network='physnet2',
        network_type='vlan',
        segmentation_id=SEGMENT_ID)
    # One IPv4 and one IPv6 subnet per segment.
    subnet_1 = conn.network.create_subnet(
        name=('%s-%s' % (NETWORK_NAME, 'subnet-segment-1')),
        network_id=routed_network.id,
        ip_version=4,
        cidr='10.2.0.0/24',
        segment_id=segment_1.id)
    subnet_1_ipv6 = conn.network.create_subnet(
        name=('%s-%s' % (NETWORK_NAME, 'subnet-segment-1-ipv6')),
        network_id=routed_network.id,
        ip_version=6,
        cidr='fd2a:d02c:d36b:9a::/64',
        segment_id=segment_1.id)
    subnet_2 = conn.network.create_subnet(
        name=('%s-%s' % (NETWORK_NAME, 'subnet-segment-2')),
        network_id=routed_network.id,
        ip_version=4,
        cidr='10.2.1.0/24',
        segment_id=segment_2.id)
    subnet_2_ipv6 = conn.network.create_subnet(
        name=('%s-%s' % (NETWORK_NAME, 'subnet-segment-2-ipv6')),
        network_id=routed_network.id,
        ip_version=6,
        cidr='fd2a:d02c:d36b:9b::/64',
        segment_id=segment_2.id)
    print(conn.network.find_network(routed_network.name))
    # Clean-up: subnets first, then the network itself.
    for subnet in [subnet_1, subnet_1_ipv6, subnet_2, subnet_2_ipv6]:
        conn.network.delete_subnet(subnet)
    conn.network.delete_network(routed_network)
def __init__(self, cloud='ops_work', name="", template_file="",
             dbg=logging.INFO):
    """Prepare a stack wrapper: logger, OpenStack config and connection.

    :param cloud: clouds.yaml profile name
    :param name: stack name
    :param template_file: path to the Heat template
    :param dbg: logging level for the colored logger
    """
    self.__logger = AkarLogging(dbg, "OS Stack").get_color_logger()
    self.dbg = dbg
    self.name = name
    self.template_file = template_file
    self.timeout = 3600
    self.cloud = cloud
    self.rollback = False
    # Fixed: removed unused local `parameters = dict(default={}, type='dict')`.
    self.__logger.info(
        f'Name stack: {self.name} Template file: {self.template_file}')
    self.config = loader.OpenStackConfig()
    self.conn = openstack.connect(cloud=self.cloud)
    self.__logger.info(f'Openstack connected successful')
def delete_neutron_port(network_name, domain_name, ip):
    """Delete the floating-ip port of *domain_name* on *network_name*.

    Looks up, within the authenticated project, the port named
    '<domain_name>_external_floating_ip' whose first fixed IP equals *ip*
    and deletes it if present.
    """
    # Init and enable debugging
    openstack.enable_logging(debug=False)
    conn = openstack.connect(cloud='default')
    # Take the project id from the authed session: mapping a name to a
    # project would need list_projects permission, which is locked down
    # for project admins.
    project_id = conn.config.get_session().get_project_id()
    # Fixed: py2 `print` statements converted to py3 print() calls.
    print("Using project id {} from auth session".format(project_id))
    # Find the Neutron network with exactly the requested name.
    ironic_network_id = None
    for network in conn.network.networks(name=network_name):
        if network_name == network['name']:
            ironic_network_id = network['id']
            print("Using matching neutron net id {}".format(ironic_network_id))
            break
    # Look for the matching floating-ip port belonging to this project.
    port_found = False
    for port in conn.network.ports(tenant_id=project_id):
        if (port['name'] == '{}_external_floating_ip'.format(domain_name)
                and port['fixed_ips'][0]['ip_address'] == ip):
            port_found = True
            break
    # Delete the port when found.  (The original comment said "Create a
    # port if not found", which contradicted the code.)
    if port_found:
        print("Deleting port for tenant_id {}, network {} and ip {}".format(
            project_id, ironic_network_id, ip))
        conn.network.delete_port(port)
    else:
        # Fixed typo: "matchint" -> "matching".
        print("No matching port for tenant_id {}, network {} and ip {}".format(
            project_id, ironic_network_id, ip))
def attach(self, names=None, vm=None):
    """Attach the first volume in *names* to the instance *vm*.

    :param names: Names of Volumes
    :param vm: Instance name
    :return: Dictionary of volumes
    :raises RuntimeError: if attaching fails
    """
    try:
        cloud = openstack.connect(**self.config)
        server = cloud.get_server(vm)
        volume = cloud.get_volume(name_or_id=names[0])
        cloud.attach_volume(server, volume,
                            device=None, wait=True, timeout=None)
    except Exception as e:
        Console.error("Problem attaching volume", traceflag=True)
        print(e)
        raise RuntimeError
    return self.list(NAME=names[0], refresh=True)
def authenticate(self, params: inputs.AuthenticateInput, **kwargs):
    """Authenticate and obtain an SDK connection (token-style auth).

    :return: outputs.AuthenticateOutput()
    :raises: AuthenticationFailed, Error
    """
    username = params.username
    password = params.password
    auth_url = self.endpoint_url + ':5000/v3/'
    region = 'RegionOne'
    project_name = 'admin'
    user_domain = 'default'
    project_domain = 'default'
    try:
        connect = openstack.connect(
            auth_url=auth_url,
            project_name=project_name,
            username=username,
            password=password,
            region_name=region,
            user_domain_name=user_domain,
            project_domain_name=project_domain,
            app_name='examples',
            app_version='1.0',
        )
        # No token is fetched here; the SDK connection itself is carried in
        # the output and a one-hour expiry is assumed.
        expire = (datetime.utcnow() + timedelta(hours=1)).timestamp()
        auth = outputs.AuthenticateOutput(
            style='token', token='', header=None, query=None,
            expire=int(expire), username=username, password=password,
            vmconnect=connect)
    except Exception as e:
        # Fixed: chain the original error so the real cause is not lost
        # (originally raised AuthenticationFailed() bare, discarding `e`).
        raise exceptions.AuthenticationFailed() from e
    self.auth = auth
    return auth
def set_openstack_client(self):
    """Build, authorize and store an OpenStack client from self.conf."""
    cloud_conf = self.conf['cloud']
    params = {
        "auth_url": cloud_conf['identityUrl'],
        "username": cloud_conf['username'],
        "password": cloud_conf['password'],
        "identity_api_version": "3",
        "project_id": self.tenant_id,
        "project_domain_id": "default",
        "user_domain_name": "Default",
        "region": cloud_conf['region'],
        "verify": False,
        "auth_type": "password",
    }
    logger.debug('---------- create openstack client ----------')
    client = openstack.connect(**params)
    client.authorize()
    logger.debug('---------- create openstack client done ----------')
    self.openstack_client = client
def do_delete_stack(secrets):
    # Extract the host part of the auth URL,
    # e.g. "https://du.example.com/keystone" -> "du.example.com".
    account_endpoint = re.search("(?:http.*://)?(?P<host>[^:/ ]+)",
                                 secrets['OS_AUTH_URL']).group('host')
    conn = openstack.connect(cloud='cloud')
    cluster_id = secrets['CLUSTER_ID']
    """
    TODO: Get list of nodes that are attached to this cluster from qbert.
    Then execute a DELETE in ResMgr for these hosts
    curl 'https://{DU_FQDN}/resmgr/v1/hosts/{HOST_ID}' -X DELETE -H 'Accept: application/json' -H 'X-Auth-Token: {TOKEN}'
    """
    # Delete the cluster record first.
    delete_cluster(account_endpoint, secrets['OS_USERNAME'],
                   secrets['OS_PASSWORD'], secrets['PACKET_PROJECT_ID'],
                   secrets['OS_REGION_NAME'], cluster_id)
    # TODO: We need to grab all users that are in the project that have
    # <cluster_id> in their username and delete them all
    project_deleted = delete_project(conn, account_endpoint,
                                     secrets['OS_USERNAME'],
                                     secrets['OS_PASSWORD'],
                                     secrets['PACKET_PROJECT_ID'],
                                     secrets['OS_REGION_NAME'])
    # Location of the terraform state for this cluster.
    dir_path = "{}/{}".format(os.path.dirname(os.path.realpath(__file__)),
                              "terraform")
    state_path = "{}/states/{}/{}".format(dir_path,
                                          secrets['PACKET_PROJECT_ID'],
                                          cluster_id)
    # Tear down the terraform stack asynchronously via celery.
    celery_task = delete_terraform_stack.delay(cluster_id,
                                               secrets['PACKET_PROJECT_ID'],
                                               dir_path, state_path,
                                               project_deleted)
    return ({'cluster_id': cluster_id,
             'task_status': celery_task.status,
             'task_id': celery_task.id})
def conn(os_secrets):
    """
    Attempt to connect to an OpenStack cloud and return the connection object
    """
    # Fixed: the "{x}".format(x=...) wrappers were pointless, and the
    # username/password ones had been reduced to literal "******" (the
    # .format() kwargs on a string with no placeholders are ignored), which
    # silently sent the wrong credentials.  Pass the secret values directly.
    return openstack.connect(
        auth_url=os_secrets["OS_AUTH_URL"],
        project_name=os_secrets["OS_PROJECT_NAME"],
        username=os_secrets["OS_USERNAME"],
        password=os_secrets["OS_PASSWORD"],
        project_domain_id=os_secrets["OS_PROJECT_DOMAIN_ID"],
        user_domain_id=os_secrets["OS_USER_DOMAIN_ID"],
        region_name=os_secrets["OS_REGION_NAME"],
        placement_api_version=os_secrets["OS_PLACEMENT_API_VERSION"],
        app_name="sst_bf_verification",
        app_version="1.0",
    )
def __init__(self, params, **kwargs):
    # Build an OpenStack-backed VM wrapper from a layered params object.
    # NOTE(review): the '*/Cloud/*'-style second arguments look like lookup
    # paths for a hierarchical params.get(), not default values -- confirm
    # against the params implementation.
    super(OpenstackVM, self).__init__(params)
    self._data = None
    # Openstack connection credentials
    auth_url = params.get('auth_url', '*/Cloud/*')
    project_name = params.get('project_name', '*/Cloud/*')
    project_domain_name = params.get('project_domain_name', '*/Cloud/*')
    user_domain_name = params.get('user_domain_name', '*/Cloud/*')
    username = params.get('username', '*/Credential/*')
    password = params.get('password', '*/Credential/*')
    self.conn = openstack.connect(auth_url=auth_url,
                                  project_name=project_name,
                                  project_domain_name=project_domain_name,
                                  user_domain_name=user_domain_name,
                                  username=username,
                                  password=password)
    # VM creation parameters
    self.vm_name = params.get('vm_name', '*/VM/*')
    self.image_name = params.get('image_name', '*/VM/*')
    self.network_name = params.get('network_name', '*/VM/*')
    self.network_id = params.get('network_id', '*/VM/*')
    self.floating_network_id = params.get('floating_network_id',
                                          '*/VM/*', '')
    self.flavor = params.get('name', '*/Flavor/*')
    self.flavor_id = params.get('id', '*/Flavor/*')
    self.size = params.get('size', '*/Flavor/*')
    self.keypair = params.get('keypair', '*/VM/*')
    self.user_data = None
    self.config_drive = None
    self.second_nic_id = None
    # VM creation timeout
    self.create_timeout = kwargs.get("create_timeout")
    # VM access parameters
    self.vm_username = params.get('username', '*/VM/*')
    self.vm_password = params.get('password', '*/VM/*', '')
    self.arch = 'x86_64'
def list(self, **kwargs):
    """
    List volumes.

    If NAME is given only that volume is returned; if vm is given only
    volumes attached to that instance are returned; with refresh=False the
    local cloudmesh database is consulted instead of the cloud.

    :param kwargs: may contain NAME, NAMES, vm, refresh
    :return: Dictionary of volumes
    :raises RuntimeError: when listing fails
    """
    try:
        if kwargs.get('refresh') is False:
            # Serve from the local cloudmesh database.
            result = self.cm.find(cloud=self.cloud, kind='volume')
            if kwargs.get('NAME'):
                result = self.cm.find_name(name=kwargs['NAME'])
            elif kwargs.get('NAMES'):
                result = self.cm.find_names(names=kwargs['NAMES'])
        else:
            con = openstack.connect(**self.config)
            results = con.list_volumes()
            # Fixed: direct kwargs['...'] lookups raised KeyError (then a
            # blanket RuntimeError) whenever a key was absent, e.g. when
            # called as list(NAME=..., refresh=True) without 'vm'.
            if kwargs.get('NAME'):
                volume = con.get_volume(name_or_id=kwargs["NAME"])
                result = self.update_dict([volume])
            elif kwargs.get('vm'):
                # Keep only volumes attached to the requested instance.
                server_id = con.get_server_id(name_or_id=kwargs['vm'])
                vol_list = []
                for entry in results:
                    attachments = entry['attachments']
                    if attachments and \
                            attachments[0]['server_id'] == server_id:
                        vol_list.append(entry)
                result = self.update_dict(vol_list)
            else:
                result = self.update_dict(results)
    except Exception as e:
        Console.error("Problem listing volumes", traceflag=True)
        print(e)
        raise RuntimeError
    return result
def wrapped_function(*args, **kwargs):
    """Run *function* up to *times* attempts, sleeping *interval* between.

    Returns a (status_ok, exc) tuple; exc is the last exception seen (or
    None on success).  When opts contains 'test_handler', the outcome is
    also written to the tests_events log.
    """
    status_ok = False
    exc = None
    counter = 0
    prev_msg_list = []
    opts = kwargs['opts']
    opts['conn'] = openstack.connect(cloud=opts['cloud'])
    start_time = datetime.datetime.now()
    try:
        while not status_ok and counter < times:
            _st_try_log_states(opts, prev_msg_list)
            try:
                function(*args, **kwargs)
                status_ok = True
                exc = None
            except StateMonitorException as state_exc:
                status_ok = False
                exc = state_exc
                time.sleep(interval)
            except openstack.exceptions.SDKException as sdk_exception:
                status_ok = False
                exc = sdk_exception
                time.sleep(interval)
            counter = counter + 1
    except ErrorStatusException as err_exc:
        # Fixed: original used `except ... as exc` followed by `exc = exc`;
        # Python implicitly deletes the `as` name at the end of the except
        # clause, so `exc` was lost (NameError at the final `repr(exc)`).
        status_ok = False
        exc = err_exc
        time.sleep(interval)
    except Exception as generic:
        # Fixed: original did `exc = exc`, dropping the caught exception.
        status_ok = False
        exc = generic
        time.sleep(interval)
    if 'test_handler' in opts:
        end_time = datetime.datetime.now()
        secs = (end_time - start_time).total_seconds()
        msg = 'test status: %s, time: %s, error: %s' % (
            'PASS' if status_ok else 'FAIL',
            '%s secs' % secs,
            repr(exc) if not status_ok else 'NO_ERROR')
        database.logs_add('tests_events', msg, opts['test_handler'].test_id)
    _st_try_log_states(opts, prev_msg_list)
    return status_ok, exc
def connect(os_auth_url, project_name, region):
    """Create an OpenStack connection for *project_name* in *region*.

    Credentials are read from OS_USERNAME / OS_PASSWORD in the environment.

    :param os_auth_url: keystone auth URL
    :param project_name: project to scope to
    :param region: region name
    :return: the connection, or None on failure
    """
    try:
        print("LOG: Creating Connection handle to OpenStack Project - %s"
              % project_name)
        return openstack.connect(auth_url=os_auth_url,
                                 project_name=project_name,
                                 username=env['OS_USERNAME'],
                                 password=env['OS_PASSWORD'],
                                 region_name=region)
    except Exception as e:
        print("ERROR: Connection failed with error => %s" % str(e))
        return None
def __init__(self, cloud='ops_work', name="", name_underline=True,
             dbg=logging.WARNING):
    """Collect server and IP information for stack *name* on *cloud*."""
    self.servers = []
    self.dbg = dbg
    # Optionally suffix the stack name with '_' for prefix matching.
    self.name = f'{name}_' if name_underline else name
    self.cloud = cloud
    self.config = loader.OpenStackConfig()
    self.conn = openstack.connect(cloud=self.cloud)
    self.__logger = AkarLogging(dbg, "OS Servers").get_color_logger()
    self.__get_info_servers()
    self.__logger.debug(f'Total found servers: {len(self.servers)}')
    self.__get_info_ips()
    self.__logger.info(f'Stack: {name} Total servers: {len(self.servers)}')
def perform_action(settings):
    # Dispatch a CLI action ('list', 'deploy', 'delete') against the cloud.
    # Initialize and turn on debug openstack logging
    openstack.enable_logging(debug=True)
    logging.info("Initialize and turn on debug openstack logging")
    # Connection
    credentials = get_credentials()
    logging.info("Got OpenStack credentials {0}".format(credentials))
    conn = openstack.connect(**credentials)
    logging.info("Connected to OpenStack")
    if settings.action == 'list':
        list_servers(conn)
        sys.exit(0)
    if settings.action == 'deploy':
        if settings.build_name == "":
            logging.info("Can't deploy a new VM: no build name is provided")
            sys.exit(-2)
        # find if there already exists a VM with the build name
        server = conn.compute.find_server(settings.build_name)
        # if a VM with the same build name already exists - delete it
        if server:
            logging.info(
                "Server for build %s exists, deleting server.........."
                % settings.build_name)
            conn.compute.delete_server(server, ignore_missing=True,
                                       force=True)
            conn.compute.wait_for_delete(server)
            logging.info("Server %s deleted" % settings.build_name)
    if settings.action == 'delete':
        # NOTE(review): 'delete' returns without removing anything -- the
        # delete-if-exists logic above only runs for 'deploy'.  Confirm this
        # is intended.
        return
    if settings.action == 'deploy':
        server = create_server(conn, settings)
        add_floatingip(conn, server)
        return
    logging.error('Error: unsuported action {0}'.format(settings.action))
    sys.exit(-2)
def __init__(self, uuid, auth_url, project_name, username, password,
             user_domain_name="Default", project_domain_name="default"):
    """Store credentials, open a connection and probe authorization.

    Sets self.auth_error to True when authorization fails, False otherwise.
    """
    self.uuid = uuid
    self.auth_url = auth_url
    self.project_name = project_name
    self.username = username
    self.password = password
    self.user_domain_name = user_domain_name
    self.project_domain_name = project_domain_name
    conn = openstack.connect(
        auth_url=self.auth_url,
        project_name=self.project_name,
        username=self.username,
        password=self.password,
        user_domain_name=self.user_domain_name,
        project_domain_name=self.project_domain_name,
    )
    self.auth_error = False
    try:
        conn.authorize()
    except AttributeError as err:
        logger.exception(err)
        logger.exception("AttributeError baby")
        self.auth_error = True
    except Exception as err:
        # Logged (not re-raised) so callers can inspect auth_error.
        logger.exception(err)
        logger.exception("Something went wrong")
        self.auth_error = True
def run(self):
    """Resolve the test function for this event and execute it.

    Returns the function's result, or None when no function is found.
    """
    result = None
    conn = openstack.connect(cloud=self._cloud)
    # Instance-bound functions take precedence over local ones.
    func = self._get_instance_func(conn, self._test.event)
    if not func:
        func = self._get_local_func(self._test.event)
    if not func:
        print('test-func %s not found' % self._test.event)
        return result
    print('test-func %s found' % self._test.event)
    call_args = self._parse_args()
    result = func(**call_args)
    if 'name' in call_args and result:
        self._test_handler.instances[call_args['name']] = result
    if self._json_print and result:
        printable = self._build_print_result(result)
        bw_json = json.dumps(printable, indent=2, sort_keys=True)
        colorful_json = highlight(bw_json, lexers.JsonLexer(),
                                  formatters.TerminalFormatter())
        print(colorful_json)
    return result
def build_sdk_connection():
    """Create a universal OpenStack SDK connection.

    Uses the configuration from /root/.config/openstack/clouds.yaml
    (deployed during maas-agent-setup.yml) and loads pre-existing
    credentials into the session.  If no credentials are found on disk,
    the connection re-authenticates automatically and pickles the
    authentication data to disk.
    """
    if os.path.exists(OPENRC) or os.path.exists(STACKRC):
        try:
            sdk_conn = connect(cloud='default', verify=False)
        except MissingRequiredOptions as e:
            raise e
        # Load pre-existing credentials and validate
        get_sdk_credentials(sdk_conn)
        return sdk_conn
def connect_openstack_slice(self, slice, required_version=None):
    """Return an SDK connection scoped to *slice*'s project/trust domain.

    :param slice: slice object carrying the trust_domain and its service
    :param required_version: minimum openstacksdk version, or None to skip
    :raises Exception: when the installed SDK is too old
    """
    import openstack
    trust_domain = slice.trust_domain
    service = trust_domain.owner.leaf_model
    if required_version:
        installed = openstack.version.__version__
        if LooseVersion(installed) < LooseVersion(required_version):
            # Fixed: original read `openstack.version__version__` (missing
            # attribute dot), which raised AttributeError here.
            # NOTE(review): builtin Exception rejects keyword arguments --
            # assumes a project-specific Exception is in scope; confirm.
            raise Exception("Insufficient OpenStack library version",
                            installed_version=installed,
                            required_version=required_version)
    # This is not working yet...
    conn = openstack.connect(auth_url=service.auth_url,
                             project_name=slice.name,
                             username=service.admin_user,
                             password=service.admin_password,
                             user_domain_name="Default",
                             project_domain_name=trust_domain.name)
    return conn
def take_action(self, parsed_args):
    """List bare-metal nodes across all (or the selected) clouds."""
    columns = ['Cloud', 'Region', 'UUID', 'Name', 'Instance UUID',
               'Power State', 'Provisioning State', 'Maintenance']
    data = []
    cloud_regions = openstack.config.loader.OpenStackConfig()\
        .get_all_clouds()
    if parsed_args.clouds:
        # Restrict to the clouds named on the command line.
        cloud_regions = [c for c in cloud_regions
                         if c.name in parsed_args.clouds]
    for c in cloud_regions:
        region = c.config['region_name']
        conn = openstack.connect(cloud=c.name, region=region)
        for n in conn.list_machines():
            data.append([c.name, region, n.uuid, n.name, n.instance_uuid,
                         n.power_state, n.provision_state, n.maintenance])
    return columns, data
def main():
    """Tear down every resource in the cloud named by OS_CLOUD/ENVIRONMENT."""
    try:
        cloud_name = os.environ['OS_CLOUD']
    except KeyError:
        try:
            cloud_name = os.environ['ENVIRONMENT']
        except KeyError as e:
            logging.error("Need to have OS_CLOUD or ENVIRONMENT set!")
            raise e
    conn = openstack.connect(cloud=cloud_name)
    # Order matters: servers must be gone before their ports/volumes, and
    # routers must be disconnected before subnets/networks can go.
    for step in (cleanup_servers,
                 cleanup_keypairs,
                 wait_servers_gone,
                 cleanup_ports,
                 cleanup_volumes,
                 disconnect_routers,
                 cleanup_subnets,
                 cleanup_networks,
                 cleanup_security_groups,
                 cleanup_routers,
                 cleanup_floating_ips):
        step(conn)
def main():
    """Validate the service catalog and report/record the results."""
    conn = openstack.connect()
    sc_config = read_sc_config('../../service_config.yaml')
    catalog = conn.config.get_service_catalog().catalog
    normalized = _normalize_catalog(catalog)
    regions = [region.id for region in conn.identity.regions()]
    validate_catalog_valid(catalog)
    validate_service_known_in_region(conn, normalized, sc_config, regions)
    validate_service_supports_version_discovery(conn, normalized, sc_config,
                                                regions)
    print(json.dumps(results, sort_keys=True, indent=True))
    # Optionally push the outcome to influx when a client is configured.
    influx_client = get_influx_client()
    if influx_client:
        write_result(influx_client, sc_config)
def connect_openstack_slice(self, slice, required_version=None):
    """Return an SDK connection scoped to *slice*'s project/trust domain.

    :param slice: slice object carrying the trust_domain and its service
    :param required_version: minimum openstacksdk version, or None to skip
    :raises Exception: when the installed SDK is too old
    """
    import openstack
    trust_domain = slice.trust_domain
    service = trust_domain.owner.leaf_model
    if required_version:
        installed = openstack.version.__version__
        if LooseVersion(installed) < LooseVersion(required_version):
            # Fixed: original read `openstack.version__version__` (missing
            # attribute dot), which raised AttributeError here.
            # NOTE(review): builtin Exception rejects keyword arguments --
            # assumes a project-specific Exception is in scope; confirm.
            raise Exception("Insufficient OpenStack library version",
                            installed_version=installed,
                            required_version=required_version)
    # This is not working yet...
    conn = openstack.connect(auth_url=service.auth_url,
                             project_name=slice.name,
                             username=service.admin_user,
                             password=service.admin_password,
                             user_domain_name="Default",
                             project_domain_name=trust_domain.name)
    return conn
def __init__(self, *args, **kwargs):
    # EOS share-driver setup: config options, gRPC channel to the EOS
    # service, and an OpenStack connection.
    super(EosDriver, self).__init__(False, *args, config_opts=[eos_opts],
                                    **kwargs)
    self.api = api.API()
    self.backend_name = self.configuration.safe_get(
        'share_backend_name') or 'EOS'
    self.configuration.append_config_values(eos_opts)
    # NOTE(review): the gRPC endpoint and the keystone URL/credentials are
    # hard-coded (credentials appear redacted to '******') -- these should
    # come from configuration; confirm before deploying.
    channel = grpc.insecure_channel('ajp.cern.ch:50051')
    #channel = grpc.insecure_channel('localhost:50051')
    self.grpc_client = eos_pb2_grpc.EosStub(channel)
    self.conn = openstack.connect(
        auth_url='http://188.185.71.204/identity/v3',
        username='******',
        password='******',
        project_name='admin',
        project_domain_id='default',
        user_domain_id='default')
def __init__(self, key_pair, instance_id, connection=None):
    """Set up the instance.

    Args:
        key_pair: A KeyPair for SSH interactions
        instance_id: The instance id representing the cloud instance
        connection: The connection used to create this instance. If
            None, a new connection is created.
    """
    super().__init__(key_pair)
    self.conn = connection if connection else openstack.connect()
    self.server = self.conn.compute.get_server(instance_id)
    # Reuse an already-attached floating IP when one exists; otherwise
    # create one and remember to delete it later.
    self.delete_floating_ip = False
    self.floating_ip = self._get_existing_floating_ip()
    if self.floating_ip is None:
        self.floating_ip = self._create_and_attach_floating_id()
        self.delete_floating_ip = True
def get_info(self):
    """Snapshot the selected resource collections into self.data."""
    conn = openstack.connect(cloud=self.cloud)  # pylint: disable=maybe-no-member
    if self.debug:
        openstack.enable_logging(debug=True)
    if conf.DUMP_NETWORKS:
        net = conn.network
        self.data['networks'] = list(net.networks())
        self.data['subnets'] = list(net.subnets())
        self.data['secgroups'] = list(net.security_groups())
        self.data['routers'] = list(net.routers())
        self.data['ports'] = list(net.ports())
    if conf.DUMP_STORAGE:
        self.data['images'] = list(conn.image.images())
        self.data['volumes'] = list(conn.volume.volumes())
    if conf.DUMP_SERVERS:
        compute = conn.compute
        self.data['servers'] = list(compute.servers())
        self.data['keypairs'] = list(compute.keypairs())
        self.data['flavors'] = list(compute.flavors())
    if conf.DUMP_IDENTITY:
        identity = conn.identity
        self.data['users'] = list(identity.users())
        self.data['domains'] = list(identity.domains())
        self.data['projects'] = list(identity.projects())
def main():
    """CLI entry: list stacks, or bring a named stack up/down/restart."""
    try:
        action = sys.argv[1]
    except IndexError:
        print("usage: %s [list|up|down] <stack>" % (sys.argv[0]))
        sys.exit(1)
    try:
        stack_name = sys.argv[2]
    except IndexError:
        stack_name = "main"
    # Fixed: removed unused local `update`; flag-setting if/elif chain
    # replaced by a dispatch table mapping action -> (down, up, list).
    flags = {
        "restart": (True, True, False),
        "up": (False, True, False),
        "down": (True, False, False),
        "list": (False, False, True),
    }
    if action not in flags:
        print("Unknown action `%s'" % (action))
        sys.exit(1)
    down, up, list_stacks = flags[action]
    openstack.enable_logging()
    conn = openstack.connect(**config_from_env())
    if list_stacks:
        stack_list(conn)
    if down:
        stack_down(conn, stack_name)
    if up:
        stack_up(conn, stack_name)
def __init__(self, name=None, configuration="~/.cloudmesh/cloudmesh.yaml"):
    """
    Initializes the provider. The default parameters are read from the
    configuration file that is defined in yaml format.

    :param name: The name of the provider as defined in the yaml file
    :param configuration: The location of the yaml configuration file
    """
    conf = Config(configuration)["cloudmesh"]
    super().__init__(name, conf)
    self.user = Config()["cloudmesh"]["profile"]["user"]
    self.spec = conf["cloud"][name]
    self.cloud = name
    self.default = self.spec["default"]
    self.cloudtype = self.spec["cm"]["kind"]
    self.cred = self.spec["credentials"]
    if self.cred["OS_PASSWORD"] == 'TBD':
        Console.error("The password TBD is not allowed")
    self.credential = self._get_credentials(self.cred)
    self.cloudman = openstack.connect(**self.credential)
    # self.default_image = deft["image"]
    # self.default_size = deft["size"]
    # self.default.location = cred["datacenter"]
    try:
        self.public_key_path = conf["profile"]["publickey"]
        self.key_path = path_expand(
            Config()["cloudmesh"]["profile"]["publickey"])
        # Fixed: the key file was opened without ever being closed; use a
        # context manager so the handle is released.
        with open(self.key_path, 'r') as f:
            self.key_val = f.read()
    except Exception as e:
        # Fixed: was a bare `except:` (also caught KeyboardInterrupt) and
        # discarded the original cause; narrowed and chained.
        raise ValueError("the public key location is not set in the "
                         "profile of the yaml file.") from e
def scan_existing_cloud(cloud, domain):
    """Look for servers in given cloud connection, sort them based on
    domain name (server name contains domain) and generate DNS records
    """
    records = []
    try:
        logger.debug('connecting to cloud %s' % cloud)
        conn = openstack.connect(cloud=cloud)
        filters = {'name': domain['zone']}
        servers = conn.list_servers(filters=filters)
        zone_details = {'zone': domain['zone'], 'server': domain['server']}
        # TSIG key settings only apply to non-designate DNS servers.
        if 'server_type' in domain and \
                domain['server_type'] != 'designate':
            for key in ('key_algorithm', 'key_name', 'key_secret'):
                if key in domain:
                    zone_details[key] = domain[key]
        suffix = '.' + zone_details['zone']
        for server in servers:
            server_name = server.name
            # Strip the zone suffix to get the bare record name.  (Removed
            # the dead `len(suffix) == 0` test: suffix always starts with
            # '.' so its length is at least 1.)
            a_name = server_name[:-len(suffix)] \
                if server_name.endswith(suffix) else server_name
            rec = copy.deepcopy(zone_details)
            rec['record'] = a_name
            rec['value'] = server.private_v4
            rec['type'] = 'A'
            rec['state'] = 'present'
            records.append(rec)
    except openstack.exceptions.OpenStackCloudException as e:
        # Fixed: `e.message` does not exist on Python 3 exceptions; use
        # str(e) instead.
        logger.error('%s\n' % str(e))
        sys.exit(1)
    return records
def run(self) -> bool:
    """
    Run the job. This method is automatically called by the :func:`Start()` \
    method inherited from :class:`Thread`).

    :return: False if an Error occurred during config, True otherwise
    """
    # create connection to OpenStack
    try:
        conn = openstack.connect(cloud=self.cloud)
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        logger.warning("%s ERROR connecting to OpenStack -> skip job"
                       % self.id_job)
        self.state = state.ERROR
        return False
    # ===================================================
    # Here you launch the action of transcoding the video.
    # Tips for the first part of the TP:
    #  - create a VM,
    #  - wait for the VM to be ready (in particular answer to ping and
    #    accept ssh),
    #  - copy the transcoding programs in the created VM,
    #  - configure necessary entries in /etc/hosts (for controller),
    #  - run the transcoding program with appropriate parameters,
    #  - and, once it is done, kill the VM.
    # If needed, you can also enrich the exception management above.
    # ===================================================
    self.state = state.STARTED
    return True
def test_get_auth_bogus(self):
    """Connecting with an unknown cloud name raises ConfigException."""
    with testtools.ExpectedException(exceptions.ConfigException):
        openstack.connect(cloud='_bogus_test_', config=self.config)
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='ovh', region_name='SBG1') cloud.create_object( container='my-container', name='my-object', filename='/home/mordred/briarcliff.sh3d', segment_size=1000000) cloud.delete_object('my-container', 'my-object') cloud.delete_container('my-container')
# NOTE(review): the next line is the tail of a template-render helper whose
# definition starts outside this chunk -- left untouched.
).get_template(filename).render(context)


def send_mail(to, payload, mailgunfrom, mailgunapi, mailgunkey):
    # Send a plain-text mail through the Mailgun HTTP API.
    logging.info("send mail to %s" % to)
    result = requests.post(
        mailgunapi,
        auth=("api", mailgunkey),
        data={"from": mailgunfrom,
              "to": to,
              "subject": payload["subject"],
              "text": payload["body"]})
    logging.debug(result.text)


if __name__ == '__main__':
    CONF(sys.argv[1:], project=PROJECT_NAME)
    cloud = openstack.connect(cloud=CONF.cloud)
    project = cloud.get_project(CONF.projectname, domain_id=CONF.domainid)
    utc = pytz.UTC
    now = utc.localize(datetime.now())
    threshold = timedelta(days=CONF.threshold)
    # Report ACTIVE instances that are older than the threshold.
    for instance in cloud.list_servers(filters={"project_id": project.id}):
        created_at = parse(instance.properties['created_at'])
        expiration = created_at + threshold
        if instance.status == "ACTIVE" and expiration < now:
            user = cloud.get_user(instance.user_id)
            logging.info("instance %s (%s) from %s: %s"
                         % (instance.name, instance.id, user.name,
                            created_at.strftime("%Y-%m-%d %H:%M")))
#!/usr/bin/env python
import openstack

# For every internal, admin-up network with DHCP enabled on at least one
# subnet: if fewer than two DHCP agents host the network, schedule every
# neutron-dhcp-agent onto it (DHCP high availability).
conn = openstack.connect(cloud='service')

for network in conn.network.networks():
    # Fixed: `== False` comparison to a literal replaced with `is False`,
    # which keeps the original semantics (None is not treated as down)
    # while following PEP 8.
    if network.is_admin_state_up is False or network.is_router_external:
        continue
    is_dhcp_enabled = False
    # Fixed: loop variable no longer shadows the fetched subnet object.
    for subnet_id in network.subnet_ids:
        subnet = conn.network.find_subnet(subnet_id)
        if subnet.is_dhcp_enabled:
            is_dhcp_enabled = True
    if is_dhcp_enabled:
        agents = conn.network.network_hosting_dhcp_agents(network)
        length = sum(1 for x in agents)
        if length < 2:
            print(network.name)
            for agent in conn.network.agents(binary="neutron-dhcp-agent"):
                conn.network.add_dhcp_agent_to_network(agent, network)
def main():
    """Parse CLI options, resolve flavor/image, boot a server, print DNS."""
    argp = argparse.ArgumentParser()
    argp.add_argument("name", help="server name")
    argp.add_argument("--cloud", dest="cloud", required=True,
                      help="cloud name")
    argp.add_argument("--region", dest="region",
                      help="cloud region")
    argp.add_argument("--flavor", dest="flavor", default='1GB',
                      help="name (or substring) of flavor")
    argp.add_argument(
        "--image", dest="image",
        default="Ubuntu 18.04 LTS (Bionic Beaver) (PVHVM)",
        help="image name")
    argp.add_argument("--environment", dest="environment",
                      help="Puppet environment to use", default=None)
    argp.add_argument("--volume", dest="volume",
                      help="UUID of volume to attach to the new server.",
                      default=None)
    argp.add_argument("--mount-path", dest="mount_path",
                      help="Path to mount cinder volume at.", default=None)
    argp.add_argument("--fs-label", dest="fs_label",
                      help="FS label to use when mounting cinder volume.",
                      default=None)
    argp.add_argument("--boot-from-volume", dest="boot_from_volume",
                      help="Create a boot volume for the server and use it.",
                      action='store_true', default=False)
    argp.add_argument("--volume-size", dest="volume_size",
                      help="Size of volume (GB) for --boot-from-volume",
                      default="50")
    argp.add_argument("--keep", dest="keep",
                      help="Don't clean up or delete the server on error.",
                      action='store_true', default=False)
    argp.add_argument("--verbose", dest="verbose", default=False,
                      action='store_true',
                      help="Be verbose about logging cloud actions")
    argp.add_argument("--network", dest="network", default=None,
                      help="network label to attach instance to")
    argp.add_argument("--config-drive", dest="config_drive",
                      help="Boot with config_drive attached.",
                      action='store_true', default=False)
    argp.add_argument("--timeout", dest="timeout",
                      help="Increase timeouts (default 600s)",
                      type=int, default=600)
    argp.add_argument("--az", dest="availability_zone", default=None,
                      help="AZ to boot in.")
    opts = argp.parse_args()

    openstack.enable_logging(debug=opts.verbose)

    # Only forward region_name when one was actually requested.
    conn_kwargs = {'region_name': opts.region} if opts.region else {}
    cloud = openstack.connect(cloud=opts.cloud, **conn_kwargs)

    flavor = cloud.get_flavor(opts.flavor)
    if not flavor:
        print("Unable to find matching flavor; flavor list:")
        for candidate in cloud.list_flavors():
            print(candidate.name)
        sys.exit(1)
    print("Found flavor", flavor.name)

    # Resolve the image name while skipping anything marked deprecated.
    image = cloud.get_image_exclude(opts.image, 'deprecated')
    if not image:
        print("Unable to find matching image; image list:")
        for candidate in cloud.list_images():
            print(candidate.name)
        sys.exit(1)
    print("Found image", image.name)

    server = build_server(cloud, opts.name, image, flavor,
                          opts.volume, opts.keep, opts.network,
                          opts.boot_from_volume, opts.config_drive,
                          opts.mount_path, opts.fs_label,
                          opts.availability_zone, opts.environment,
                          opts.volume_size, opts.timeout)
    dns.print_dns(cloud, server)
import argparse
import os
import sys

import openstack
from openstack.config import loader

# Send verbose SDK logging to stdout while the examples run.
openstack.enable_logging(True, stream=sys.stdout)

#: Name of the clouds.yaml entry (typically found under
#: $HOME/.config/openstack/clouds.yaml) the examples run against; it
#: determines both the target cloud and the resource defaults used.
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'devstack-admin')

config = loader.OpenStackConfig()
cloud = openstack.connect(cloud=TEST_CLOUD)


class Opts(object):
    """Small option holder mirroring the example command-line flags."""

    def __init__(self, cloud_name='devstack-admin', debug=False):
        self.cloud = cloud_name
        self.debug = debug
        # The examples talk to Keystone via the Identity v3 API.
        self.identity_api_version = '3'


def _get_resource_value(resource_key, default):
    """Return *resource_key* from the 'example' extra-config, or *default*."""
    example_cfg = config.get_extra_config('example')
    return example_cfg.get(resource_key, default)


SERVER_NAME = 'openstacksdk-example'
def create_connection_from_config():
    """Open a Connection to the cloud selected by ``TEST_CLOUD``.

    All connection details come from the matching clouds.yaml entry.
    """
    conn = openstack.connect(cloud=TEST_CLOUD)
    return conn
def create_connection_from_args():
    """Build a Connection from the process command line.

    Registers the standard OpenStack config argparse options, parses
    ``sys.argv``, and connects using the single matching cloud region.
    """
    arg_parser = argparse.ArgumentParser()
    os_config = loader.OpenStackConfig()
    os_config.register_argparse_arguments(arg_parser, sys.argv[1:])
    parsed = arg_parser.parse_args()
    cloud_region = os_config.get_one(argparse=parsed)
    return openstack.connect(config=cloud_region)
nova_api_enabled = 'true' in _run_command( ['hiera', 'nova_api_enabled']).lower() mistral_api_enabled = 'true' in _run_command( ['hiera','mistral_api_enabled']).lower() tripleo_validations_enabled = 'true' in _run_command( ['hiera', 'tripleo_validations_enabled']).lower() if not nova_api_enabled: print('WARNING: Undercloud Post - Nova API is disabled.') if not mistral_api_enabled: print('WARNING: Undercloud Post - Mistral API is disabled.') if not tripleo_validations_enabled: print('WARNING: Undercloud Post - Tripleo validations is disabled.') sdk = openstack.connect(CONF['cloud_name']) try: if nova_api_enabled: _configure_nova(sdk) _create_default_keypair(sdk) if mistral_api_enabled: mistral = mistralclient.client(mistral_url=sdk.workflow.get_endpoint(), session=sdk.session) _configure_wrokbooks_and_workflows(mistral) _create_logging_cron(mistral) _store_passwords_in_mistral_env(mistral) _create_default_plan(mistral) if tripleo_validations_enabled: _prepare_ssh_environment(mistral) _upload_validations_to_swift(mistral)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import openstack

# Log the full HTTP request/response traffic for debugging.
openstack.enable_logging(http_debug=True)

# Identify this example to the cloud via app name/version headers, then
# exercise the connection with a simple network listing.
cloud = openstack.connect(
    cloud='datacentred',
    app_name='AmazingApp',
    app_version='1.0')
cloud.list_networks()
enable_dhcp=True ) attach = True if attach: cloud.add_router_interface(router, subnet_id=subnet.id) # load configurations with open("etc/quotaclasses.yml", "r") as fp: quotaclasses = yaml.load(fp) # get connections cloud = openstack.connect(cloud=CLOUDNAME) neutron = os_client_config.make_client("network", cloud=CLOUDNAME) # check existence of project project = cloud.get_project(PROJECT) if not project: logging.error("project %s does not exist" % PROJECT) sys.exit(1) if project.domain_id == "default": logging.error("projects in the default domain are not managed") sys.exit(1) # prepare project
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import openstack

# Turn on verbose SDK debug logging for the example.
openstack.enable_logging(debug=True)

# Connect to the 'rax' cloud in the DFW region and report whether a
# standalone network (neutron) service is available there.
cloud = openstack.connect(cloud='rax', region_name='DFW')
print(cloud.has_service('network'))
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='my-citycloud', region_name='Buf1') try: server = cloud.create_server( 'my-server', image='Ubuntu 16.04 Xenial Xerus', flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'), wait=True, auto_ip=True) print("\n\nFull Server\n\n") cloud.pprint(server) print("\n\nTurn Detailed Off\n\n") cloud.pprint(cloud.get_server('my-server', detailed=False)) print("\n\nBare Server\n\n") cloud.pprint(cloud.get_server('my-server', bare=True))
def cleanup_task_resources(task_uuid, scenario, scenario_config, logger):
    """Remove stale OpenStack resources left behind by a Rally task.

    Connects to the 'default' cloud with the scenario's project-scoped
    credentials, then deletes every volume, server, image, port, and
    security group whose name carries the task's Rally resource tag.
    Only resource kinds listed in ``scenario_config['primary_resources']``
    are considered.

    :param task_uuid: UUID of the Rally task whose leftovers to remove.
    :param scenario: scenario identifier (unused here; kept for call parity).
    :param scenario_config: mapping with 'project', 'user_name',
        'user_password' and 'primary_resources' entries.
    :param logger: logger used for progress reporting.
    """
    # NOTE(review): the log prefixes read `args.task` from an enclosing /
    # module scope; this function presumes a global `args` exists.
    # Fixed: deprecated Logger.warn() replaced with Logger.warning().
    logger.warning("{} - preparing stale resource cleanup for "
                   "task {}".format(args.task, task_uuid))
    auth = {'project_name': scenario_config['project'],
            'username': scenario_config['user_name'],
            'password': scenario_config['user_password']}
    conn = openstack.connect(cloud='default', auth=auth)
    # Rally tags resources with the first segment of the task UUID -
    # e.g. AAA from AAA-BBB-CCC-DDD. split() also tolerates IDs without
    # a dash, where the original re.search(...).group(1) would crash.
    resource_tag = 's_rally_' + task_uuid.split('-')[0]
    primary = scenario_config['primary_resources']
    if 'volume' in primary:
        for volume in conn.block_storage.volumes():
            if resource_tag not in volume.name:
                continue
            if volume.status == 'available':
                logger.warning("{} - deleting task {}'s volume "
                               "{}".format(args.task, task_uuid,
                                           volume.name))
                conn.block_storage.delete_volume(volume.id,
                                                 ignore_missing=False)
            else:
                # Deleting a volume in any other state is likely to fail
                # or leave it stuck, so only report it.
                logger.warning("{} - found stale volume {} for task {}, but "
                               "it's in '{}' status so we won't try to delete"
                               " it directly".format(args.task, volume.name,
                                                     task_uuid,
                                                     volume.status))
    if 'compute' in primary:
        for server in conn.compute.servers():
            if resource_tag in server.name:
                logger.warning("{} - deleting task {}'s server "
                               "{}".format(args.task, task_uuid,
                                           server.name))
                target = conn.compute.find_server(server.name,
                                                  ignore_missing=False)
                conn.compute.delete_server(target)
    if 'image' in primary:
        for image in conn.image.images():
            if resource_tag in image.name:
                logger.warning("{} - deleting task {}'s image {} ({}). "
                               "it was in '{}' state.".format(
                                   args.task, task_uuid, image.name,
                                   image.id, image.status))
                conn.image.delete_image(image.id, ignore_missing=False)
    if 'port' in primary:
        for port in conn.network.ports():
            if resource_tag in port.name:
                # Fixed: log message had an unbalanced paren ("{} {}).").
                logger.warning("{} - deleting task {}'s port {} "
                               "({}).".format(args.task, task_uuid,
                                              port.name, port.id))
                conn.network.delete_port(port.id, ignore_missing=False)
    if 'secgroup' in primary:
        for secgroup in conn.network.security_groups():
            if resource_tag in secgroup.name:
                logger.warning("{} - deleting task {}'s secgroup {} "
                               "({}).".format(args.task, task_uuid,
                                              secgroup.name, secgroup.id))
                conn.network.delete_security_group(secgroup.id,
                                                   ignore_missing=False)
    logger.warning("{} - finished stale resource cleanup for "
                   "task {}".format(args.task, task_uuid))
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import openstack

openstack.enable_logging()

# strict=True asks the SDK to restrict resources to their documented
# attributes only.
cloud = openstack.connect(
    cloud='fuga',
    region_name='cystack',
    strict=True)

# Look up a Fuga base image by its full display name and pretty-print it.
image = cloud.get_image(
    'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image')
cloud.pprint(image)