def main():
    """Exercise Keystone token issuance, token-from-token chaining and revocation.

    Relies on helpers defined elsewhere in this file: get_token_from_password,
    get_token_from_token, dummy_get and revoke_token.
    """
    openstack.enable_logging(debug=True)
    conn = openstack.connect()
    # Issue two password-based tokens and derive a token-from-token from each.
    token1_id, rsp = get_token_from_password(conn)
    token1_from_token_id, rsp = get_token_from_token(conn, token1_id)
    token2_id, rsp = get_token_from_password(conn)
    token2_from_token_id, rsp = get_token_from_token(conn, token2_id)
    # Ensure all four tokens are usable for an authenticated request.
    dummy_get(conn, token1_id)
    dummy_get(conn, token2_id)
    dummy_get(conn, token1_from_token_id)
    dummy_get(conn, token2_from_token_id)
    # Ensure we can stil get another token from 1st token
    #token3_from_token_id, rsp = get_token_from_token(conn, token1_id)
    #revoke_token(conn, token3_from_token_id)
    revoke_token(conn, token2_from_token_id)
    revoke_token(conn, token1_from_token_id)
    revoke_token(conn, token2_id)
    revoke_token(conn, token1_id)
    # Give revocation time to propagate; this final request uses a revoked
    # token — presumably expected to fail. TODO confirm intended outcome.
    time.sleep(2)
    dummy_get(conn, token1_id)
def main():
    """Ansible dynamic-inventory entry point.

    Prints the host-group JSON for --list, a single host's vars for --host,
    and exits 1 on any OpenStack cloud error.
    """
    args = parse_args()
    try:
        config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
        sdk.enable_logging(debug=args.debug)
        inventory_args = dict(
            refresh=args.refresh,
            config_files=config_files,
            private=args.private,
            cloud=args.cloud,
        )
        # Newer SDK versions accept ansible-specific defaults via extra_config.
        if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
            inventory_args.update(dict(
                config_key='ansible',
                config_defaults={
                    'use_hostnames': False,
                    'expand_hostvars': False,
                    'fail_on_errors': True,
                }
            ))
        inventory = sdk_inventory.OpenStackInventory(**inventory_args)
        # BUG FIX: 'output' used to be undefined (NameError at print) when
        # neither --list nor --host was given; default to an empty JSON object.
        output = '{}'
        if args.list:
            output = get_host_groups(inventory, refresh=args.refresh,
                                     cloud=args.cloud)
        elif args.host:
            output = to_json(inventory.get_host(args.host))
        print(output)
    except sdk.exceptions.OpenStackCloudException as e:
        # BUG FIX: Exception.message is not reliable on Python 3; use str(e).
        sys.stderr.write('%s\n' % str(e))
        sys.exit(1)
    sys.exit(0)
def __init__(self):
    # Init and enable debugging (debug output is disabled by default)
    openstack.enable_logging(debug=False)
    # Connect using the 'default' cloud entry from clouds.yaml
    self.conn = openstack.connect(cloud='default')
def main():
    """Ensure a Keystone service exists and its endpoints match the SVC_* env vars.

    Creates the service if missing, then creates or updates the admin/internal/
    public endpoints in the region named by SVC_REGION.
    """
    svc_name = os.environ['SVC_NAME']
    svc_type = os.environ['SVC_TYPE']
    svc_region = os.environ['SVC_REGION']
    endpoints = {
        'admin': os.environ['SVC_ENDPOINT_ADMIN'],
        'internal': os.environ['SVC_ENDPOINT_INTERNAL'],
        'public': os.environ['SVC_ENDPOINT_PUBLIC'],
    }
    openstack.enable_logging(debug=True)
    conn = openstack.connect()
    service = conn.identity.find_service(svc_name)
    if not service:
        service = conn.identity.create_service(name=svc_name, type=svc_type)
    region = conn.identity.find_region(svc_region)
    # BUG FIX: endpoints() returns a one-shot generator; it was previously
    # exhausted by the first next() search, so the second and third interface
    # lookups always missed existing endpoints and created duplicates.
    current_endpoints = list(conn.identity.endpoints(service_id=service.id))
    for endpoint_type, endpoint_url in endpoints.items():
        endpoint = next(
            (x for x in current_endpoints
             if x.interface == endpoint_type and x.region_id == region.id),
            None)
        if endpoint:
            conn.identity.update_endpoint(endpoint, url=endpoint_url)
        else:
            conn.identity.create_endpoint(service_id=service.id,
                                          region_id=region.id,
                                          interface=endpoint_type,
                                          url=endpoint_url)
def main():
    """Create a demo project, group, roles and users via create_resources()."""
    logging.basicConfig(level=logging.DEBUG)
    openstack.enable_logging(debug=True, http_debug=True)
    # region = openstack.config.get_cloud_region('otc_demo_domain')
    # conn = openstack.connection.Connection(config=region)
    conn = openstack.connect()

    # Fixture data for the demo tenant.
    users = [{
        'name': 'test_user',
        'description': 'some test user',
        'mobile': '',
        'password': '******'
    }]
    create_resources(
        conn=conn,
        fake=False,
        project_name='eu-de_test',
        project_description='Test Project',
        group_name='test_group',
        group_description='Test group',
        role_list=['server_adm', 'te_admin'],
        user_list=users)
def main():
    """Prompt for a security-group name and write its rules as VyOS-like text.

    Output goes to result.txt, followed by the module-level config_version and
    vyos_version strings.
    """
    global fixed_name
    openstack.enable_logging(debug=False)
    try:
        conn = openstack.connect(cloud='openstack')
        print("Connected!")
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt.
        print("Error!")
        sys.exit(1)
    flag = False
    print('Please enter name of security group:')
    search_name = input()
    fixed_name = search_name
    firewall = process(conn, search_name)
    # Strip JSON punctuation so the dump resembles config syntax; the final
    # '\\' -> '"' restores quotes that were escaped inside the JSON string.
    line = 'firewall ' + str(json.dumps(firewall, indent=4))
    line = line.replace(':', '')
    line = line.replace('"', '')
    line = line.replace('\\', '"')
    line = line.replace(',', '')
    with open("result.txt", "w") as result_file:
        result_file.write(line)
        result_file.write(config_version)
        result_file.write(vyos_version)
    print('Done!')
def main(args):
    """Restore a volume from a snapshot (snapshot -> volume -> image -> volume)."""
    # Set up the connection to OpenStack -- this is read from clouds.yaml
    openstack.enable_logging(debug=False)
    api = openstack.connect(cloud=args.cloud)

    snapshot_id = args.snapshot
    # NOTE(review): the CLI arg is named 'volume' but is used as a server-name
    # prefix for the restored resources — confirm against the arg parser.
    server = args.volume

    # Create a snapshot object
    try:
        snapshot = Snapshot(
            api=api,
            snapshot=api.volume.get_snapshot(snapshot_id),
        )
    except openstack.exceptions.ResourceNotFound:
        print('Snapshot id {} not found.'.format(snapshot_id))
        sys.exit(1)

    today = time.strftime("%d-%m-%Y")

    # Convert the snapshot to a volume
    print('')
    print('Converting snapshot to volume..')
    volume = snapshot.to_volume('{}-restore-{}'.format(server, today))
    print('Converting volume to image..')
    image = volume.to_image('{}-restore-{}'.format(server, today))
    print('Converting image to volume..')
    image.to_volume(server, size=volume.volume.size)
    # Clean up the intermediate image and volume once the restore exists.
    image.delete()
    volume.delete()
    print('')
def test_none(self):
    """enable_logging(debug=True) with no stream/path installs one DEBUG console handler."""
    openstack.enable_logging(debug=True)
    self.fake_get_logger.assert_has_calls([])
    self.openstack_logger.setLevel.assert_called_with(logging.DEBUG)
    # Exactly one handler must be added, and it must be a StreamHandler.
    self.assertEqual(self.openstack_logger.addHandler.call_count, 1)
    self.assertIsInstance(
        self.openstack_logger.addHandler.call_args_list[0][0][0],
        logging.StreamHandler)
def main(args):
    """Rotate per-volume snapshots for every server (honouring include/exclude).

    For each attached volume: delete any pre-existing snapshot of that volume,
    then create a fresh one named after the server.
    """
    # Set up the connection to OpenStack -- this is read from clouds.yaml
    openstack.enable_logging(debug=False)
    api = openstack.connect(cloud=args.cloud)

    # Create a list of known snapshots -- we do this to limit the number
    # of API calls later on when detecting old snapshots
    current_snapshots = {}
    for snapshot in api.volume.snapshots():
        current_snapshots[snapshot['id']] = snapshot['volume_id']

    today = time.strftime("%d-%m-%Y")
    print('')

    exclude = args.exclude
    include = args.include

    for server in api.compute.servers():
        # Logic for include and exclude: skip excluded names; when an include
        # list is given, skip everything not on it.
        if len(exclude) > 0:
            if server.name in exclude:
                continue
        if len(include) > 0:
            if server.name not in include:
                continue
        print(server.name)
        if len(server.attached_volumes) >= 1:
            for volume in server.attached_volumes:
                # Detect and remove old snapshots of this volume
                for snapshot_id, volume_id in current_snapshots.items():
                    if volume_id == volume['id']:
                        print('Deleting old snapshot..')
                        snapshot = Snapshot(
                            api=api,
                            snapshot=api.volume.get_snapshot(snapshot_id),
                        )
                        snapshot.delete()
                # Create new snapshot (rebinds the loop variable on purpose)
                print('Creating new snapshot..')
                volume = Volume(
                    api=api,
                    volume=api.volume.get_volume(volume['id']),
                )
                volume.to_snapshot(
                    name=server.name,
                    description='Automated snapshot on {}'.format(today),
                )
    print('')
    print('Snapshots created.')
    print('')
def main():
    """Map a floating IP to its fixed IP and print firewall bindings from test.txt.

    Rewrites the VyOS-style test.txt into YAML (new.txt), loads it, then prints
    'set interfaces ...' lines for the interface carrying the fixed IP.
    """
    openstack.enable_logging(debug=False)
    try:
        conn = openstack.connect(cloud='openstack')
        print("Connected!")
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt.
        print("Error!")
        sys.exit(1)
    # Convert the config into YAML: '{' -> ':', C-style comments -> '#',
    # and the last space on each line becomes ' - ' (done via double reverse).
    with open("test.txt", "r") as f:
        with open("new.txt", "w") as wf:
            for line in f:
                line = line.replace('{', ':')
                line = line.replace('/*', '#')
                line = line.replace('*/', '')
                line = line[::-1]
                line = line.replace(' ', ' - ', 1)
                line = line[::-1]
                if line.find('}') == -1:
                    wf.write(line)
    with open('new.txt') as f:
        config = yaml.safe_load(f)
    #print('Please enter name of security group:')
    #security_group = input()
    print('Please enter floating ip:')
    floating_ip = input()
    # BUG FIX: res_port_id / fixed_ip used to be undefined (NameError) when
    # nothing matched; fail with a clear message instead.
    res_port_id = None
    for ip in conn.list_floating_ips():
        if ip.floating_ip_address == floating_ip:
            res_port_id = ip.port_id
            break
    if res_port_id is None:
        print('Floating ip not found!')
        sys.exit(1)
    fixed_ip = None
    for port in conn.network.ports():
        if port.id == res_port_id:
            fixed_ip = port.fixed_ips[0]['ip_address']
            print(fixed_ip)
            break
    if fixed_ip is None:
        print('Port not found!')
        sys.exit(1)
    for eth in config['interfaces']:
        for key, val in eth.items():
            if val is not None:
                if val[0].find(fixed_ip) != -1:
                    for name in config['firewall']:
                        for rule, val in name.items():
                            if rule.find('in_') != -1:
                                print('set interfaces ' + key + ' firewall in ' + rule)
                            if rule.find('out_') != -1:
                                print('set interfaces ' + key + ' firewall out ' + rule)
                    break
    line = str(json.dumps(config, indent=4))
    line = line.replace(':', '')
    line = line.replace('"', '')
    line = line.replace('\\', '"')
    line = line.replace(',', '')
    #print(line)
    print('Done!')
def _file_tests(self, level, debug):
    """Shared assertions for enable_logging() when a log-file path is given."""
    mock_handler = mock.Mock()
    self.useFixture(
        fixtures.MonkeyPatch('logging.FileHandler', mock_handler))
    log_path = "fake/path.log"
    openstack.enable_logging(debug=debug, path=log_path)
    # The FileHandler must be constructed with our path, installed exactly
    # once, and the logger set to the expected level.
    mock_handler.assert_called_with(log_path)
    self.assertEqual(self.openstack_logger.addHandler.call_count, 1)
    self.openstack_logger.setLevel.assert_called_with(level)
def setUp(self):
    """Allocate one dedicated host and remember it for the tests."""
    super(TestHost, self).setUp()
    openstack.enable_logging(debug=True, http_debug=True)
    self.client = self.conn.deh
    allocation = self.client.create_host(
        name=uuid.uuid4().hex,
        availability_zone='eu-de-01',
        host_type='general',
        quantity=1)
    # Exactly one host id is expected back for quantity=1.
    assert len(allocation.dedicated_host_ids) == 1
    self.host = self.client.get_host(allocation.dedicated_host_ids[0])
def up(inventory, image, image_type, server_type, verbose):
    """Spawn an OpenStack server and record its address in an Ansible inventory.

    The server's address is inserted under [all] and under a [<server_type>]
    section (created if missing) in the given inventory file.
    """
    if not image:
        image_name = 'Fedora-Cloud-Base-32'
    else:
        image_name = image
    if verbose:
        openstack.enable_logging(True, stream=sys.stdout)
    conn = create_connection(OS_AUTH_URL, OS_REGION_NAME, OS_PROJECT_NAME,
                             OS_USERNAME, OS_PASSWORD)
    print(image)
    image_list = list_images(conn, image)
    network = None
    if image_name not in image_list:
        print("ERROR: No image found.")
        sys.exit(1)
    print("Using Image: {}".format(image_name))
    server = spawn_server(conn, image_name, server_type=server_type)
    if not server:
        print("ERROR: Unable to provision machine")
        sys.exit(1)
    # Keep the last address found on any of the server's networks.
    for key in server.addresses.keys():
        network = server.addresses[key][0]
    print("OSP Machine name: {}".format(server.name))
    print("Server ip: {}".format(network['addr']))
    data = ["[all]\n"]
    if os.path.isfile(inventory):
        with open(inventory, 'r') as f:
            data = f.readlines()
    data = [i.strip() for i in data]
    index = data.index("[all]")
    data.insert(index + 1, "{} hostname={}".format(network['addr'],
                                                   network['addr']))
    if "[{}]".format(server_type) in data:
        index = data.index("[{}]".format(server_type))
        data.insert(index + 1, "{} hostname={}".format(network['addr'],
                                                       network['addr']))
    else:
        data.extend([
            "\n[{}]".format(server_type),
            # BUG FIX: the format string had three '{}' placeholders but only
            # two arguments, raising IndexError whenever this branch ran.
            "{} hostname={}".format(network['addr'], network['addr'])
        ])
    with open(inventory, 'w') as f:
        print(data)
        file_data = "\n".join(data)
        f.write(file_data)
def get_connect(self):
    """Return a cached OpenStack connection, creating it on first use."""
    if self.conn:
        return self.conn
    cloud = self.spec.get('cloud')
    # Mirror the tool's debug flag onto the SDK logger.
    if self.state.state.debug:
        openstack.enable_logging(debug=True)
    else:
        openstack.enable_logging(debug=False)
    # Paramiko is very chatty at INFO; keep it quiet.
    logging.getLogger("paramiko").setLevel(logging.WARNING)
    self.conn = openstack.connect(cloud)
    return self.conn
def get_openstack_connection(region, cloud_keys):
    """Load credentials from *cloud_keys* and return a connection, or None on error."""
    import openstack

    try:
        # Credentials come from the dotenv file; the SDK then reads OS_* vars.
        load_dotenv(dotenv_path=cloud_keys)
        openstack.enable_logging(debug=False)
        connection = openstack.connect(load_envvars=True)
    except Exception as e:
        util.message(str(e), "error")
        return None
    return connection
def __init__(self, data, agent_sock):
    """Set up source and destination cloud connections for a migration.

    data: dict carrying osp_* environment blocks, VM/converter IDs and
    optional 'source_disks' / 'uci_container_image' keys.
    agent_sock: SSH agent socket path used later for tunnels.
    Raises RuntimeError when the SDK is missing or either converter
    instance cannot be found.
    """
    try:
        import openstack
    except ImportError:
        raise RuntimeError('OpenStack SDK is not installed on this '
                           'conversion host!')

    # Create a connection to the source cloud.
    # Env keys look like 'OS_AUTH_URL'; strip the 'OS_' prefix and lowercase
    # to get openstack.connect() keyword arguments.
    osp_env = data['osp_source_environment']
    osp_args = {arg[3:].lower(): osp_env[arg] for arg in osp_env}
    osp_args['verify'] = not data.get('insecure_connection', False)
    self.source_converter = data['osp_source_conversion_vm_id']
    self.source_instance = data['osp_source_vm_id']
    self.conn = openstack.connect(**osp_args)

    # Create a connection to the destination cloud (same key convention).
    osp_env = data['osp_environment']
    osp_args = {arg[3:].lower(): osp_env[arg] for arg in osp_env}
    osp_args['verify'] = not data.get('insecure_connection', False)
    self.dest_converter = data['osp_server_id']
    self.dest_conn = openstack.connect(**osp_args)
    self.agent_sock = agent_sock
    openstack.enable_logging(debug=False, http_debug=False, stream=None)

    # Fail fast if either conversion appliance is missing.
    if self._converter() is None:
        raise RuntimeError('Cannot find source instance {}'.format(
            self.source_converter))
    if self._destination() is None:
        raise RuntimeError('Cannot find destination instance {}'.format(
            self.dest_converter))

    # Build up a list of VolumeMappings keyed by the original device path
    self.volume_map = {}
    # Temporary directory for logs on source conversion host
    self.tmpdir = None
    # SSH tunnel process
    self.forwarding_process = None
    # If there is a specific list of disks to transfer, remember them so
    # only those disks get transferred.
    self.source_disks = None
    if 'source_disks' in data:
        self.source_disks = data['source_disks']
    # Allow UCI container ID (or name) to be passed in input JSON
    self.uci_container = data.get('uci_container_image',
                                  'v2v-conversion-host')
def __init__(self, debug=False):
    """Configure logging, connect to the cloud and cache the admin user/project."""
    log_level = logging.DEBUG if (debug or Config.DEBUG) else logging.INFO
    logging.basicConfig(level=log_level,
                        format=f'%(levelname)s: %(message)s',
                        filename=Config.LOG_FILE)
    # SDK tracing is driven by the OS_DEBUG environment variable.
    openstack.enable_logging(
        debug=True if bool(os.getenv('OS_DEBUG')) else False)
    logging.info('Connecting to cloud')
    self._cloud, self._admin_project = self.login()
    self._admin_user = self._get_user(os.getenv('OS_USERNAME'))
    logging.info(f'Ready')
def get_neutron_port(network_name, domain_name):
    """Find or create the '<domain>_external_floating_ip' port on *network_name*.

    Returns the existing port when present, otherwise the newly created one.
    """
    # Init and enable debugging
    openstack.enable_logging(debug=False)
    # Connect
    conn = openstack.connect(cloud='default')
    # Get the project_id from the authed session. We are using this as to
    # map the name to project you need list_projects permissions which are
    # locked down for project admins.
    project_id = conn.config.get_session().get_project_id()
    # BUG FIX: converted Python 2 'print' statements to print() calls so this
    # module is valid Python 3.
    print("Using project id {} from auth session".format(project_id))
    # Get Neutron Networks; remember the id of the matching one.
    ironic_network_id = None
    networks = conn.network.networks(name=network_name)
    for network in networks:
        if network_name == network['name']:
            ironic_network_id = network['id']
            print("Using matching neutron net id {}".format(ironic_network_id))
            break
    # List ports for this project
    port_found = 0
    ports = conn.network.ports(tenant_id=project_id)
    for port in ports:
        if port['name'] == '{}_external_floating_ip'.format(domain_name):
            port_found = 1
            break
    # Create a port if not found
    if port_found == 0:
        print("Requesting new port for tenant_id {} and network {}".format(
            project_id, ironic_network_id))
        portdescription = "{} external_floating_ip".format(domain_name)
        portname = "{}_external_floating_ip".format(domain_name)
        portinfo = conn.network.create_port(project_id=project_id,
                                            description=portdescription,
                                            name=portname,
                                            network_id=ironic_network_id)
    else:
        print("Using existing port for tenant_id {} and network {}".format(
            project_id, ironic_network_id))
        portinfo = port
    return portinfo
def get_info(self):
    """Dump selected cloud resources to per-type files, and one big YAML if configured."""
    conn = openstack.connect(cloud=self.cloud)  # pylint: disable=maybe-no-member
    if self.debug:
        openstack.enable_logging(debug=True)
    # Map: data key -> (enabled flag, fetch callable, output file name).
    info_matrix = {
        'networks': (conf.DUMP_NETWORKS, conn.network.networks, const.FILE_NETWORKS),
        'subnets': (conf.DUMP_NETWORKS, conn.network.subnets, const.FILE_SUBNETS),
        'secgroups': (conf.DUMP_NETWORKS, conn.network.security_groups,
                      const.FILE_SECURITY_GROUPS),
        'routers': (conf.DUMP_NETWORKS, conn.network.routers, const.FILE_ROUTERS),
        'ports': (conf.DUMP_NETWORKS, conn.network.ports, const.FILE_PORTS),
        'images': (conf.DUMP_STORAGE, conn.image.images, const.FILE_IMAGES),
        'volumes': (conf.DUMP_STORAGE, conn.volume.volumes, const.FILE_VOLUMES),
        # 'volumes': (conf.DUMP_STORAGE, conn.block_storage.volumes, const.FILE_VOLUMES),
        # 'floating_ips': (conf.DUMP_NETWORKS, conn.network.floating_ips, const.FILE_FIPS),
        'keypairs': (conf.DUMP_SERVERS, conn.compute.keypairs, const.FILE_KEYPAIRS),
        'servers': (conf.DUMP_SERVERS, conn.compute.servers, const.FILE_SERVERS),
        'flavors': (conf.DUMP_SERVERS, conn.compute.flavors, const.FILE_FLAVORS),
        # BUG FIX: 'users' was written to const.FILE_FLAVORS, clobbering the
        # flavors dump. NOTE(review): confirm const.FILE_USERS exists.
        'users': (conf.DUMP_IDENTITY, conn.identity.users, const.FILE_USERS),
        'projects': (conf.DUMP_IDENTITY, conn.identity.projects, const.FILE_PROJECTS),
        'domains': (conf.DUMP_IDENTITY, conn.identity.domains, const.FILE_DOMAINS),
    }
    for data_type, (dump, func, file_name) in info_matrix.items():
        if dump:
            self.data[data_type] = list((i.to_dict() for i in func()))
            # Remove Munch objects from the dict; use a pop default so a
            # record without 'location' cannot abort the whole dump.
            for i in self.data[data_type]:
                i.pop('location', None)
            self.dump2file(file_name, data_type)
    if conf.DATA_DIR_TRANSIENT:
        write_yaml(
            self.data,
            os.path.join(conf.DATA_DIR_TRANSIENT, const.FILE_ALL_DATA))
def script(settings):
    """Drive a list/deploy/delete workflow against the cloud based on *settings*."""
    # Initialize and turn on debug openstack logging
    openstack.enable_logging(debug=True)
    logging.info("Initialize and turn on debug openstack logging")
    # Connection
    credentials = get_credentials()
    logging.info("Got OpenStack credentials {0}".format(credentials))
    conn = openstack.connect(**credentials)
    logging.info("Connected to OpenStack")
    if settings.action == 'list':
        list_servers(conn)
        sys.exit(0)
    if settings.action == 'deploy':
        # check if a custom build name should be set (only when deploying)
        all_default_branches = True
        settings.branch_names = {}
        for repo in PROJECTS[settings.project].keys():
            settings.branch_names[repo] = getattr(settings, repo + "_branch_name")
            if settings.branch_names[repo] != DEFAULT_BRANCH_NAME:
                all_default_branches = False
        # Compose a build name from the branch names when any is non-default.
        if settings.build_name == DEFAULT_BRANCH_NAME and not all_default_branches:
            settings.build_name = "_".join(settings.branch_names.values())
            logging.info("Setting build name to {0}".format(
                settings.build_name))
    # find if there already exists a VM with the build name
    server = conn.compute.find_server(settings.build_name)
    # if a VM with the same build name already exists - delete it
    if server:
        logging.info("Server for build %s exists, deleting server.........."
                     % settings.build_name)
        conn.compute.delete_server(server, ignore_missing=True, force=True)
        conn.compute.wait_for_delete(server)
        logging.info("Server %s deleted" % settings.build_name)
    if settings.action == 'delete':
        sys.exit(0)
    # NOTE(review): any action other than 'list'/'delete' falls through to a
    # deploy here — presumably argparse restricts the choices; verify.
    server = create_server(conn, settings)
    add_floatingip(conn, server)
def delete_neutron_port(network_name, domain_name, ip):
    """Delete the '<domain>_external_floating_ip' port matching *ip* on *network_name*."""
    # Init and enable debugging
    openstack.enable_logging(debug=False)
    # Connect
    conn = openstack.connect(cloud='default')
    # Get the project_id from the authed session. We are using this as to
    # map the name to project you need list_projects permissions which are
    # locked down for project admins.
    project_id = conn.config.get_session().get_project_id()
    # BUG FIX: converted Python 2 'print' statements to print() calls so this
    # module is valid Python 3.
    print("Using project id {} from auth session".format(project_id))
    # Get Neutron Network; remember the id of the matching one.
    ironic_network_id = None
    networks = conn.network.networks(name=network_name)
    for network in networks:
        if network_name == network['name']:
            ironic_network_id = network['id']
            print("Using matching neutron net id {}".format(ironic_network_id))
            break
    # List ports for this project
    port_found = 0
    ports = conn.network.ports(tenant_id=project_id)
    for port in ports:
        if (port['name'] == '{}_external_floating_ip'.format(domain_name)
                and port['fixed_ips'][0]['ip_address'] == ip):
            port_found = 1
            break
    # Delete the port if it was found (the old comment wrongly said "Create").
    if port_found == 1:
        print("Deleting port for tenant_id {}, network {} and ip {}".format(
            project_id, ironic_network_id, ip))
        portinfo = conn.network.delete_port(port)
    else:
        # BUG FIX: corrected 'matchint' -> 'matching' in the message.
        print("No matching port for tenant_id {}, network {} and ip {}".format(
            project_id, ironic_network_id, ip))
def setUp(self):
    """Create (or look up) the test queue and consumer group."""
    super(TestMessage, self).setUp()
    openstack.enable_logging(debug=True, http_debug=True)
    try:
        self.queue = self.conn.dms.create_queue(
            name=TestMessage.QUEUE_ALIAS)
    except openstack.exceptions.BadRequestException:
        # Queue already exists; reuse it.
        self.queue = self.conn.dms.find_queue(TestMessage.QUEUE_ALIAS)
    self.queues.append(self.queue)
    try:
        self.group = self.conn.dms.create_group(self.queue, "test_group")
    except openstack.exceptions.DuplicateResource:
        # BUG FIX: this used to clobber self.queue and leave self.group
        # undefined; the existing group lookup belongs in self.group.
        self.group = self.conn.dms.groups(self.queue)
    self.groups.append(self.group)
def perform_action(settings):
    """Dispatch the 'list', 'deploy' or 'delete' action; exit non-zero on errors."""
    # Initialize and turn on debug openstack logging
    openstack.enable_logging(debug=True)
    logging.info("Initialize and turn on debug openstack logging")
    # Connection
    credentials = get_credentials()
    logging.info("Got OpenStack credentials {0}".format(credentials))
    conn = openstack.connect(**credentials)
    logging.info("Connected to OpenStack")
    if settings.action == 'list':
        list_servers(conn)
        sys.exit(0)
    if settings.action == 'deploy':
        if settings.build_name == "":
            logging.info("Can't deploy a new VM: no build name is provided")
            sys.exit(-2)
    # find if there already exists a VM with the build name
    server = conn.compute.find_server(settings.build_name)
    # if a VM with the same build name already exists - delete it
    if server:
        logging.info("Server for build %s exists, deleting server.........."
                     % settings.build_name)
        conn.compute.delete_server(server, ignore_missing=True, force=True)
        conn.compute.wait_for_delete(server)
        logging.info("Server %s deleted" % settings.build_name)
    if settings.action == 'delete':
        return
    if settings.action == 'deploy':
        server = create_server(conn, settings)
        add_floatingip(conn, server)
        return
    # BUG FIX: corrected 'unsuported' -> 'unsupported' in the error message.
    logging.error('Error: unsupported action {0}'.format(settings.action))
    sys.exit(-2)
def main():
    """CLI entry point: `prog [list|up|down|restart] [stack]` (stack defaults to 'main')."""
    try:
        action = sys.argv[1]
    except IndexError:
        print("usage: %s [list|up|down] <stack>" % (sys.argv[0]))
        sys.exit(1)
    try:
        stack_name = sys.argv[2]
    except IndexError:
        stack_name = "main"
    # Translate the action word into the set of operations to run.
    down, up, update, list_stacks = False, False, False, False
    if action == "restart":
        down, up = True, True
    elif action == "up":
        up = True
    elif action == "down":
        down = True
    elif action == "list":
        list_stacks = True
    else:
        print("Unknown action `%s'" % (action))
        sys.exit(1)
    openstack.enable_logging()
    conn = openstack.connect(**config_from_env())
    # 'restart' runs down then up, in that order.
    if list_stacks:
        stack_list(conn)
    if down:
        stack_down(conn, stack_name)
    if up:
        stack_up(conn, stack_name)
def create_inventory(nodes, mgmt):
    """Build an Ansible-style inventory dict from the OpenStack cloud.

    Side effects: populates the module globals myservers, mgmt_nodes and
    node_list. Returns (myservers, manager_public) when args.list is set.
    NOTE(review): manager_public is not defined in this block — presumably a
    module global filled in by get_host_groups(); when args.list is false the
    function implicitly returns None. Verify both.
    """
    global myservers
    global mgmt_nodes
    global node_list
    mgmt_nodes = mgmt
    node_list = nodes
    myservers = collections.OrderedDict({'all': {'children': {}}})
    # Foo stands in for parsed CLI args (refresh, debug, private, cloud, list).
    args = Foo()
    try:
        # openstacksdk library may write to stdout, so redirect this
        sys.stdout = StringIO()
        config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
        sdk.enable_logging(debug=args.debug)
        inventory_args = dict(
            refresh=args.refresh,
            config_files=config_files,
            private=args.private,
            cloud=args.cloud,
        )
        # Newer SDK versions accept ansible-specific defaults via extra_config.
        if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
            inventory_args.update(
                dict(config_key='ansible',
                     config_defaults={
                         'use_hostnames': False,
                         'expand_hostvars': True,
                         'fail_on_errors': True,
                     }))
        inventory = sdk_inventory.OpenStackInventory(**inventory_args)
        # Restore stdout before producing our own output.
        sys.stdout = sys.__stdout__
        if args.list:
            output = get_host_groups(inventory, refresh=args.refresh,
                                     cloud=args.cloud)
            return myservers, manager_public
    except sdk.exceptions.OpenStackCloudException as e:
        sys.stderr.write('%s\n' % e.message)
        sys.exit(1)
def get_info(self):
    """Collect enabled resource categories from the cloud into self.data."""
    conn = openstack.connect(cloud=self.cloud)  # pylint: disable=maybe-no-member
    if self.debug:
        openstack.enable_logging(debug=True)
    # (enabled flag, ((data key, fetch callable), ...)) per category; the
    # ordering matches the original assignment order.
    sections = (
        (conf.DUMP_NETWORKS, (('networks', conn.network.networks),
                              ('subnets', conn.network.subnets),
                              ('secgroups', conn.network.security_groups),
                              ('routers', conn.network.routers),
                              ('ports', conn.network.ports))),
        (conf.DUMP_STORAGE, (('images', conn.image.images),
                             ('volumes', conn.volume.volumes))),
        (conf.DUMP_SERVERS, (('servers', conn.compute.servers),
                             ('keypairs', conn.compute.keypairs),
                             ('flavors', conn.compute.flavors))),
        (conf.DUMP_IDENTITY, (('users', conn.identity.users),
                              ('domains', conn.identity.domains),
                              ('projects', conn.identity.projects))),
    )
    for enabled, pairs in sections:
        if enabled:
            for key, fetch in pairs:
                self.data[key] = list(fetch())
def setUpClass(cls):
    """Create (or look up) the shared test queue and consumer group."""
    super(TestMessage, cls).setUpClass()
    openstack.enable_logging(debug=True, http_debug=True)
    try:
        cls.queue = cls.conn.dms.create_queue(
            name=TestMessage.QUEUE_ALIAS
        )
    except openstack.exceptions.BadRequestException:
        # Queue already exists; reuse it.
        cls.queue = cls.conn.dms.get_queue(TestMessage.QUEUE_ALIAS)
    cls.queues.append(cls.queue)
    try:
        cls.group = cls.conn.dms.create_group(
            cls.queue, {"name": "test_group"}
        )
    except openstack.exceptions.DuplicateResource:
        # BUG FIX: this used to overwrite cls.queue and leave cls.group
        # undefined; the existing group lookup belongs in cls.group.
        cls.group = cls.conn.dms.groups(cls.queue)
    cls.groups.append(cls.group)
def main():
    """Dynamic-inventory entry point; prints host groups or a single host as JSON."""
    args = parse_args()
    try:
        # openstacksdk library may write to stdout, so redirect this
        sys.stdout = StringIO()
        config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
        sdk.enable_logging(debug=args.debug)
        inventory_args = dict(
            refresh=args.refresh,
            config_files=config_files,
            private=args.private,
            cloud=args.cloud,
        )
        if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
            inventory_args.update(
                dict(
                    config_key='ansible',
                    config_defaults={
                        'use_hostnames': True,  #False, (If i leave this as False the quesry will display instance ID rather than Name)
                        'expand_hostvars': True,
                        'fail_on_errors': True,
                    }))
        inventory = sdk_inventory.OpenStackInventory(**inventory_args)
        # Restore stdout before producing our own output.
        sys.stdout = sys.__stdout__
        # BUG FIX: 'output' used to be undefined (NameError at print) when
        # neither --list nor --host was requested.
        output = '{}'
        if args.list:
            output = get_host_groups(inventory, refresh=args.refresh,
                                     cloud=args.cloud)
        elif args.host:
            output = to_json(inventory.get_host(args.host))
        print(output)
    except sdk.exceptions.OpenStackCloudException as e:
        # BUG FIX: Exception.message is not reliable on Python 3; use str(e).
        sys.stderr.write('%s\n' % str(e))
        sys.exit(1)
    finally:
        # BUG FIX: restore stdout even on the exception path so later output
        # is not swallowed by the StringIO buffer.
        sys.stdout = sys.__stdout__
    sys.exit(0)
def cli(verbose, debug, openstack_debug):
    """Configure logging verbosity for p9admin and the OpenStack SDK."""
    if debug or verbose:
        set_up_logging(logging.INFO)
        openstack.enable_logging()
        # Only --debug additionally lowers the p9admin logger to DEBUG.
        if debug:
            logging.getLogger("p9admin").setLevel(logging.DEBUG)
    else:
        set_up_logging(logging.WARNING)
    # Full HTTP tracing is orthogonal to the verbosity flags above.
    if openstack_debug:
        openstack.enable_logging(debug=True, http_debug=True)
def _console_tests(self, level, debug, stream):
    """Shared assertions for enable_logging() writing to a console *stream*."""
    openstack.enable_logging(debug=debug, stream=stream)
    # Exactly one handler is installed and the logger level matches.
    self.assertEqual(self.openstack_logger.addHandler.call_count, 1)
    self.openstack_logger.setLevel.assert_called_with(level)
# under the License. """ Connect to an OpenStack cloud. For a full guide see TODO(etoews):link to docs on developer.openstack.org """ import argparse import os import openstack from openstack.config import loader import sys openstack.enable_logging(True, stream=sys.stdout) #: Defines the OpenStack Config cloud key in your config file, #: typically in $HOME/.config/openstack/clouds.yaml. That configuration #: will determine where the examples will be run and what resource defaults #: will be used to run the examples. TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'devstack-admin') config = loader.OpenStackConfig() cloud = openstack.connect(cloud=TEST_CLOUD) class Opts(object): def __init__(self, cloud_name='devstack-admin', debug=False): self.cloud = cloud_name self.debug = debug # Use identity v3 API for examples.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import openstack

# Trace every HTTP request/response the SDK makes.
openstack.enable_logging(http_debug=True)

# app_name/app_version identify this application in User-Agent headers.
cloud = openstack.connect(cloud='datacentred',
                          app_name='AmazingApp',
                          app_version='1.0')
cloud.list_networks()
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import openstack

openstack.enable_logging()

# strict=True makes the SDK validate returned resources.
cloud = openstack.connect(cloud='fuga',
                          region_name='cystack',
                          strict=True)

# Look up an image by its full display name and pretty-print it.
image = cloud.get_image(
    'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image')
cloud.pprint(image)
def main():
    """Parse CLI options, boot a server via build_server() and print its DNS records."""
    parser = argparse.ArgumentParser()
    parser.add_argument("name", help="server name")
    parser.add_argument("--cloud", dest="cloud", required=True,
                        help="cloud name")
    parser.add_argument("--region", dest="region",
                        help="cloud region")
    parser.add_argument("--flavor", dest="flavor", default='1GB',
                        help="name (or substring) of flavor")
    parser.add_argument("--image", dest="image",
                        default="Ubuntu 18.04 LTS (Bionic Beaver) (PVHVM)",
                        help="image name")
    parser.add_argument("--environment", dest="environment",
                        help="Puppet environment to use",
                        default=None)
    parser.add_argument("--volume", dest="volume",
                        help="UUID of volume to attach to the new server.",
                        default=None)
    parser.add_argument("--mount-path", dest="mount_path",
                        help="Path to mount cinder volume at.",
                        default=None)
    parser.add_argument("--fs-label", dest="fs_label",
                        help="FS label to use when mounting cinder volume.",
                        default=None)
    parser.add_argument("--boot-from-volume", dest="boot_from_volume",
                        help="Create a boot volume for the server and use it.",
                        action='store_true', default=False)
    parser.add_argument("--volume-size", dest="volume_size",
                        help="Size of volume (GB) for --boot-from-volume",
                        default="50")
    parser.add_argument("--keep", dest="keep",
                        help="Don't clean up or delete the server on error.",
                        action='store_true', default=False)
    parser.add_argument("--verbose", dest="verbose", default=False,
                        action='store_true',
                        help="Be verbose about logging cloud actions")
    parser.add_argument("--network", dest="network", default=None,
                        help="network label to attach instance to")
    parser.add_argument("--config-drive", dest="config_drive",
                        help="Boot with config_drive attached.",
                        action='store_true', default=False)
    parser.add_argument("--timeout", dest="timeout",
                        help="Increase timeouts (default 600s)",
                        type=int, default=600)
    parser.add_argument("--az", dest="availability_zone", default=None,
                        help="AZ to boot in.")
    options = parser.parse_args()

    openstack.enable_logging(debug=options.verbose)

    # Only pass region_name through when the user supplied one.
    cloud_kwargs = {}
    if options.region:
        cloud_kwargs['region_name'] = options.region
    cloud = openstack.connect(cloud=options.cloud, **cloud_kwargs)

    flavor = cloud.get_flavor(options.flavor)
    if flavor:
        print("Found flavor", flavor.name)
    else:
        print("Unable to find matching flavor; flavor list:")
        for i in cloud.list_flavors():
            print(i.name)
        sys.exit(1)

    # NOTE(review): get_image_exclude appears to be a helper that skips images
    # matching 'deprecated' — confirm its definition elsewhere in the project.
    image = cloud.get_image_exclude(options.image, 'deprecated')
    if image:
        print("Found image", image.name)
    else:
        print("Unable to find matching image; image list:")
        for i in cloud.list_images():
            print(i.name)
        sys.exit(1)

    server = build_server(cloud, options.name, image, flavor,
                          options.volume, options.keep,
                          options.network, options.boot_from_volume,
                          options.config_drive,
                          options.mount_path, options.fs_label,
                          options.availability_zone, options.environment,
                          options.volume_size, options.timeout)
    dns.print_dns(cloud, server)
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Find CloudEye alarm rule by name or ID
"""
import openstack

openstack.enable_logging(True)
conn = openstack.connect(cloud='otc')

# find_alarm() accepts either the alarm's name or its ID.
alarm_name_or_id = 'alarm_id_or_name'
alarm = conn.ces.find_alarm(alarm_name_or_id)
print(alarm)
from openstack.cloud import inventory parser = argparse.ArgumentParser( description='Generate a static inventory via a query of cloud providers') parser.add_argument("--debug", help="enable some debugging output", action="store_true") parser.add_argument("--output", help="output to file", default='openstack.yaml') parser.add_argument("--force", help="overwrite output file if exists", action="store_true") args = parser.parse_args() logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) if args.debug: openstack.enable_logging(debug=True) if os.path.exists(args.output) and not args.force: logging.error("Refusing to overwrite output: %s" % args.output) sys.exit(1) filtered_inv = {} logging.info("Querying inventory ...") inv = inventory.OpenStackInventory() for host in inv.list_hosts(expand=False): logging.info("Found %s" % host['name']) filtered_inv[host['name']] = dict( ansible_host=host['interface_ip'], public_v4=host['public_v4'], private_v4=host['private_v4'], public_v6=host['public_v6'],