def create_table(username, apikey):
    """Build a PrettyTable listing every cloud server with its networks."""
    pyrax.set_credentials(username, apikey)
    servers = pyrax.cloudservers.list()
    networks = pyrax.cloud_networks.list()
    flavors = pyrax.cloudservers.flavors.list()

    # Map flavor id -> human-readable name for the per-server rows.
    flavor_dict = {flavor.id: flavor.name for flavor in flavors}

    # One extra column per cloud network, after the fixed columns.
    network_list = [net.label for net in networks]
    headers = ['UUID', 'name', 'RackConnect status', 'flavor', 'accessIPv4']
    headers += network_list

    output = prettytable.PrettyTable(headers)
    for server in servers:
        output.add_row(server_data(server, network_list, flavor_dict))
    output.align = 'l'
    output.sortby = 'name'
    return output
def authenticate_credentials(self):
    """This method try to authenticate with available credentials

    :returns: True or False (Boolean) -- the value of
        ``pyrax.identity.authenticated``, or False on auth failure.
    """
    logger = logging.getLogger(__name__)
    # Lazy %-style logging args (consistent with the sibling implementation
    # in this file) so messages are only formatted when the level is enabled.
    logger.debug(
        'authenticating with credentials '
        '(identity_type:%s, username:%s, api-key:%s, region=%s)',
        self._identity_type, self._username, self._apikey, self._region)
    try:
        pyrax.set_setting("identity_type", self._identity_type)
        pyrax.set_credentials(self._username, self._apikey,
                              region=self._region)
        logger.info("authenticated with credentials, username:%s, "
                    "api-key:%s, region:%s, identity_type:%s",
                    self._username, self._apikey, self._region,
                    self._identity_type)
        logger.debug("user authenticated: %s", pyrax.identity.authenticated)
        if pyrax.identity.authenticated:
            # Cache token/tenant so later requests can reuse them.
            self._token = pyrax.identity.auth_token
            self._tenant_id = pyrax.identity.tenant_id
            self.save_token()
        return pyrax.identity.authenticated
    except pyrax.exceptions.AuthenticationFailed:
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning("cannot authenticate with credentials")
        return False
def setup(): username = os.environ.get('OS_USERNAME') api_key = os.environ.get('OS_PASSWORD') credentials = os.environ.get('RAX_CREDENTIALS') or os.environ.get('RAX_CREDS_FILE') region = os.environ.get('OS_REGION_NAME') if credentials is None: credentails = os.path.expanduser('~/.rackspace_cloud_credentials') try: pyrax.set_setting('identity_type', 'rackspace') if api_key and username: pyrax.set_credentials(username, api_key=api_key) elif credentials: credentials = os.path.expanduser(credentials) pyrax.set_credential_file(credentials) else: sys.stderr.write('No value in environment variable %s and/or no ' 'credentials file at %s\n' % (e.message, default_creds_file)) sys.exit(1) except Exception, e: sys.stderr.write("%s: %s\n" % (e, e.message)) sys.exit(1)
def connect_to_rackspace(region, access_key_id, secret_access_key):
    """ returns a connection object to Rackspace """
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_default_region(region)
    pyrax.set_credentials(access_key_id, secret_access_key)
    return pyrax.connect_to_cloudservers(region=region)
def upload(self, local_dir, cf_prefix, container_name=None): pyrax.set_setting('identity_type', 'rackspace') try: pyrax.set_credentials(Configuration().SWIFT_USERNAME, Configuration().SWIFT_API_KEY) except pyrax.exceptions.AuthenticationFailed, e: self.logger.exception(e) raise
def __init__(self, name):
    """Open the Cloud Files container backing a remote store.

    :param name: a store locator of the form "<region>://<path>".
    """
    self.region, name = name.split('://')
    self.basename = os.path.basename(name)

    pyrax.set_setting('identity_type', 'rackspace')
    with open(os.path.expanduser('~/.cloudfiles'), 'r') as f:
        self.conf = json.loads(f.read())
    pyrax.set_credentials(self.conf['access_key'],
                          self.conf['secret_key'],
                          region=self.region)
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())

    # Historical layout: dfw containers are unprefixed; other regions are
    # prefixed with the region name.
    if self.region == 'dfw':
        self.container_name = remote_filename(name)
    else:
        self.container_name = remote_filename('%s/%s' % (self.region, name))
    container = conn.create_container(self.container_name)

    # Enabling log retention is best-effort: retry a few times but never
    # fail construction because of it.
    for _ in range(3):
        try:
            container.log_retention(True)
            break
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit and KeyboardInterrupt.
            pass

    for info in conn.list_containers_info():
        if info['name'] == self.container_name:
            remote_total = info['bytes']
            print(
                '%s Remote store %s contains %s in %d objects'
                % (datetime.datetime.now(), self.region,
                   utility.DisplayFriendlySize(remote_total),
                   info['count']))
def connectToClouds():
    """
    Open connections to S3 and Cloud Files
    """
    s3Conn = None
    cfConn = None
    try:
        ## boto reads from /etc/boto.cfg (or ~/boto.cfg)
        s3Conn = boto.connect_s3()
        ## the cloud files library doesn't automatically read from a file, so we handle that here:
        cfConfig = configparser.ConfigParser()
        cfConfig.read('/etc/cloudfiles.cfg')
        pyrax.set_setting("identity_type", "rackspace")
        pyrax.set_default_region(cfConfig.get('Credentials', 'region'))
        pyrax.set_credentials(cfConfig.get('Credentials', 'username'),
                              cfConfig.get('Credentials', 'api_key'))
        cfConn = pyrax.connect_to_cloudfiles(
            cfConfig.get('Credentials', 'region'))
    # Each failure class is mapped to a distinct, user-readable
    # MultiCloudMirrorException message.
    except (NoSectionError, NoOptionError, MissingSectionHeaderError,
            ParsingError) as err:
        # Problems parsing /etc/cloudfiles.cfg itself.
        raise MultiCloudMirrorException(
            "Error in reading Cloud Files configuration file (/etc/cloudfiles.cfg): %s"
            % (err))
    except (S3ResponseError, S3PermissionsError) as err:
        # S3-side failures carry an HTTP status and reason.
        raise MultiCloudMirrorException("Error in connecting to S3: [%d] %s"
                                        % (err.status, err.reason))
    except (ClientException, AuthenticationFailed) as err:
        # Cloud Files / identity failures.
        raise MultiCloudMirrorException("Error in connecting to CF: %s"
                                        % str(err))
    return (s3Conn, cfConn)
def main(): '''Script execution''' parser = argparse.ArgumentParser(description='get percent of api limit ' 'of ram used') parser.add_argument('-u', '--username', help='Rackspace Username', required=True) parser.add_argument('-a', '--apikey', help='Rackspace API Key', required=True) parser.add_argument('-m', '--maxthreshold', help='API Percent Used Threshold, integer between ' '1-99', required=True) parser.add_argument('-r', '--region', help='Rackspace Regional Datacenter', required=True) parser.add_argument('--human', help='Format output for humans, not Cloud Monitoring', action='store_true') args = parser.parse_args() if int(args.maxthreshold) < 1 or int(args.maxthreshold) > 99: print "You must enter a valid integer from 1-99 for maxthreshold" sys.exit(2) pyrax.set_setting("identity_type", "rackspace") pyrax.set_credentials(args.username, args.apikey) (ram_used, ram_allowed) = getlimits(args.region) display_usage(ram_used, ram_allowed, args.maxthreshold, args.human)
def umount(volume_name, server_name, etcd):
    """Detach the named volume from the named server, if attached to it."""
    payload = requests.get(build_url(etcd, 'rackspace', 'credentials')).json()
    creds = json.loads(payload['node']['value'])

    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(creds['username'], creds['apiKey'],
                          region=creds['region'])

    volume = pyrax.cloud_blockstorage.find(display_name=volume_name)
    server = pyrax.cloudservers.servers.find(name=server_name)

    # Only detach when the first attachment belongs to this server; wait
    # until the volume reports itself available again.
    if volume.attachments and volume.attachments[0]['server_id'] == server.id:
        volume.detach()
        pyrax.utils.wait_until(volume, 'status', 'available',
                               interval=3, attempts=0)
def get_queryset(self):
    """List the cloud servers on the requesting user's Rackspace account."""
    pyrax.set_setting("identity_type", "rackspace")
    profile = UserProfile.get_profile(self.request.user)
    pyrax.set_credentials(profile.rackspace_username,
                          profile.rackspace_api_key)
    return pyrax.cloudservers.list()
def handle(self, *args, **options):
    """Management command: create a swift container and, unless --private
    was given, make it world-readable (via pyrax/CDN or a swift ACL,
    depending on CUMULUS["USE_PYRAX"]).
    """
    if len(args) != 1:
        raise CommandError(
            "Pass one and only one [container_name] as an argument")
    self.connect()
    container_name = args[0]
    print("Creating container: {0}".format(container_name))
    self.conn.put_container(container_name)
    if not options.get("private"):
        print("Publish container: {0}".format(container_name))
        if CUMULUS["USE_PYRAX"]:
            if CUMULUS["PYRAX_IDENTITY_TYPE"]:
                pyrax.set_setting("identity_type",
                                  CUMULUS["PYRAX_IDENTITY_TYPE"])
            pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
            # ServiceNet-only deployments cannot reach the public endpoint.
            public = not CUMULUS["SERVICENET"]
            connection = pyrax.connect_to_cloudfiles(
                region=CUMULUS["REGION"],
                public=public)
            container = connection.get_container(container_name)
            # CDN-enable once; re-publishing an enabled container is skipped.
            if not container.cdn_enabled:
                container.make_public(ttl=CUMULUS["TTL"])
        else:
            # Without pyrax, grant world-read via a swift container ACL.
            headers = {"X-Container-Read": ".r:*"}
            self.conn.post_container(container_name, headers=headers)
    print("Done")
def mount(volume_name, server_name, etcd):
    """Attach the named block-storage volume to the named server and record
    the resulting device path in etcd.

    Credentials come from the etcd key rackspace/credentials, a JSON blob
    with "username", "apiKey" and "region" fields.
    """
    resp = requests.get(
        build_url(etcd, 'rackspace', 'credentials')
    ).json()
    credentials = json.loads(resp['node']['value'])
    username = credentials['username']
    api_key = credentials['apiKey']
    region = credentials['region']
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(username, api_key, region=region)
    cs = pyrax.cloudservers
    cbs = pyrax.cloud_blockstorage
    volume = cbs.find(display_name=volume_name)
    server = cs.servers.find(name=server_name)
    # If the volume is attached to a *different* server, detach it first
    # and wait for it to become available (attempts=0: wait indefinitely).
    if volume.attachments and volume.attachments[0]['server_id'] != server.id:
        volume.detach()
        pyrax.utils.wait_until(volume, 'status', 'available', interval=3,
                               attempts=0)
    if not volume.attachments:
        # NOTE(review): mountpoint='' presumably lets the API pick the
        # device path -- confirm pyrax accepts an empty mountpoint.
        volume.attach_to_instance(server, mountpoint='')
        pyrax.utils.wait_until(volume, 'status', 'in-use', interval=3,
                               attempts=0)
    # Publish the device path so other tooling can find the attachment.
    resp = requests.put(
        build_url(etcd, 'rackspace', 'cbs', volume_name),
        data={"value": volume.attachments[0]['device']}
    )
def cred_prompt(): print """Before we can proceed, you will need to enter your username and API key. Protip: In the future you can authenticate with the following methods: Authenticate with ~/.rackspace_cloud_credentials File. [rackspace_cloud] username = my_username api_key = 01234567890abcdef Authenticate by passing arguments to this script. python /path/to/this/script my_username 01234567890abcdef NOTE: This method trumps the ~/.rackspace_cloud_credentials file! """ while True: username = raw_input("Rackspace Username%s" % prompt) api_key = raw_input("Rackspace API Key%s" % prompt) try: pyrax.set_credentials(username, api_key) break except pexc.AuthenticationFailed: print "The credentials provided are not valid. Please try again." continue cred_save = raw_input("Would you like for me to store these credentials in ~/.rackspace_cloud_credentials for you?%s" % ynprompt) if cred_save.lower().startswith("y"): cred_save_file = open(os.path.join(os.path.expanduser("~"), ".rackspace_cloud_credentials"), "w") cred_save_file.write("[rackspace_cloud]\n" + "username = "******"\napi_key = " + api_key + "\n") cred_save_file.close()
def url_for(endpoint, **values):
    """
    Generates a URL to the given endpoint.

    If the endpoint is for a static resource then a Rackspace Cloud File URL
    is generated, otherwise the call is passed on to `flask.url_for`.

    Because this function is set as a jinja environment variable when
    `FlaskRSF.init_app` is invoked, this function replaces `flask.url_for`
    in templates automatically. It is unlikely that this function will need
    to be directly called from within your application code, unless you need
    to refer to static assets outside of your templates.
    """
    app = current_app
    if "RSF_CONTAINER_NAME" not in app.config:
        raise ValueError("RSF_CONTAINER_NAME not found in app configuration.")
    if app.debug and not app.config["USE_RSF_DEBUG"]:
        return flask_url_for(endpoint, **values)
    if endpoint == "static" or endpoint.endswith(".static"):
        # BUG FIX: the original passed the literal lists ["RSF_USERNAME"]
        # and ["RSF_API_KEY"] to set_credentials instead of the configured
        # credential values.
        pyrax.set_credentials(app.config["RSF_USERNAME"],
                              app.config["RSF_API_KEY"])
        cf = pyrax.cloudfiles
        cont = cf.create_container(app.config["RSF_CONTAINER_NAME"])
        scheme = "http"
        bucket_path = cont.cdn_uri
        if app.config["RSF_USE_HTTPS"]:
            scheme = "https"
            bucket_path = cont.cdn_ssl_uri
        # Strip the scheme: url_map.bind expects a bare host/path.
        bucket_path = re.sub(r"(http[s]*://)", r"", bucket_path)
        urls = app.url_map.bind(bucket_path, url_scheme=scheme)
        return urls.build(endpoint, values=values, force_external=True)
    return flask_url_for(endpoint, **values)
def main():
    """Sync a local folder to a Cloud Files container using .netrc creds."""
    # option parsing
    parser = OptionParser(usage="%prog --local /var/backup --remote backup")
    parser.add_option("-i", "--identity", default="rackspace",
                      help="Pyrax identity class")
    parser.add_option("-l", "--local", help="local path to backup")
    parser.add_option("-r", "--remote", help="remote container to backup to")
    parser.add_option("-v", "--verbose", action="count",
                      help="Increase verbosity", default=0)
    options, _args = parser.parse_args()

    # Both --local and --remote are mandatory.
    if options.local is None or options.remote is None:
        parser.print_help()
        sys.exit(1)

    # Get login details from .netrc.
    login, _account, password = netrc().authenticators(
        'pyrax.%s' % options.identity)

    # Configure logging: each -v steps the level down by 10 (one severity).
    logging.basicConfig(level=max(4 - options.verbose, 1) * 10)

    logging.info("Logging on to %s", options.identity)
    pyrax.set_setting("identity_type", options.identity)
    pyrax.set_credentials(login, password)

    logging.info("Synchronising")
    pyrax.cloudfiles.sync_folder_to_container(
        options.local, options.remote, delete=True, ignore_timestamps=True)
def login(self):
    """
    Logs into cloud files. Note that this is on the main thread. init_thread
    is responsible for initializing individual threads.

    :return: True on success, false on failure
    """
    try:
        pyrax.set_credentials(username=self.username, api_key=self.api_key)
        self.rax = pyrax.connect_to_cloudfiles(self.region, True)
    except pyrax.exceptions.AuthenticationFailed as e:
        ThreadedDeleter.output('Authentication failed: {msg}'.format(
            msg=str(e)))
        return False
    except pyrax.exceptions.PyraxException as e:
        ThreadedDeleter.output('Unknown error occurred: {msg}'.format(
            msg=str(e)))
        return False
    # connect_to_cloudfiles returned without raising but gave us nothing.
    if self.rax is None:
        ThreadedDeleter.output('Unknown error occured while connecting'
                               ' to CloudFiles.')
        return False
    return True
def setup():
    """Authenticate and return the list of Rackspace regions to operate on."""
    rax_username = get_config(p, 'rax', 'username', 'RAX_USERNAME', None)
    rax_api_key = get_config(p, 'rax', 'api_key', 'RAX_API_KEY', None)

    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(rax_username, rax_api_key)

    # An explicitly configured region wins outright.
    configured = pyrax.get_setting('region')
    if configured:
        return [configured]

    regions = []
    region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                             islist=True)
    for candidate in region_list:
        candidate = candidate.strip().upper()
        if candidate == 'ALL':
            regions = pyrax.regions
            break
        elif candidate not in pyrax.regions:
            sys.stderr.write('Unsupported region %s' % candidate)
            sys.exit(1)
        elif candidate not in regions:
            regions.append(candidate)
    return regions
def main(username, project, list): pyrax.set_setting('identity_type', 'rackspace') with open(os.path.expanduser('~/.bugminion'), 'r') as f: conf = json.loads(f.read()) pyrax.set_credentials(conf['access_key'], conf['secret_key'], region=conf['region'].upper()) conn = pyrax.connect_to_cloudfiles(region=conf['region'].upper()) container = conn.create_container(conf['container']) # Prioritize a list of bugs from an input file now = datetime.datetime.now() datestamp = '%04d%02d%02d' %(now.year, now.month, now.day) with open(list) as f: for bug in f.readlines(): bug = bug.rstrip() triage = {'reviewer': username, 'osic': 'y'} common.clobber_object(container, '%s-bug/%s-%s' %(project, bug, datestamp), json.dumps(triage, indent=4, sort_keys=True)) print 'Done!'
def __init__(self, name):
    """Open the Cloud Files container backing a remote store.

    :param name: a store locator of the form "<region>://<path>".
    """
    self.region, name = name.split('://')
    self.basename = os.path.basename(name)

    pyrax.set_setting('identity_type', 'rackspace')
    with open(os.path.expanduser('~/.cloudfiles'), 'r') as f:
        self.conf = json.loads(f.read())
    pyrax.set_credentials(self.conf['access_key'],
                          self.conf['secret_key'],
                          region=self.region)
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())

    # Historical layout: dfw containers are unprefixed; other regions are
    # prefixed with the region name.
    if self.region == 'dfw':
        self.container_name = remote_filename(name)
    else:
        self.container_name = remote_filename('%s/%s' %(self.region, name))
    container = conn.create_container(self.container_name)

    # Enabling log retention is best-effort: retry a few times but never
    # fail construction because of it.
    for _ in range(3):
        try:
            container.log_retention(True)
            break
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit and KeyboardInterrupt.
            pass

    for info in conn.list_containers_info():
        if info['name'] == self.container_name:
            remote_total = info['bytes']
            print ('%s Remote store %s contains %s in %d objects'
                   %(datetime.datetime.now(), self.region,
                     utility.DisplayFriendlySize(remote_total),
                     info['count']))
def setUp(self): pyrax.connect_to_cloudservers = Mock() pyrax.connect_to_cloud_loadbalancers = Mock() pyrax.connect_to_cloud_databases = Mock() pyrax.connect_to_cloud_blockstorage = Mock() pyrax.clear_credentials() pyrax.identity = FakeIdentity() pyrax.set_credentials("fakeuser", "fakeapikey") pyrax.connect_to_cloudfiles() self.client = pyrax.cloudfiles self.container = FakeContainer(self.client, self.container_name, 0, 0) self.container.name = self.container_name self.client.get_container = Mock(return_value=self.container) self.client.connection.get_container = Mock() self.client.connection.head_object = Mock() objs = [{ "name": self.obj_name, "content_type": "test/test", "bytes": 444, "hash": "abcdef0123456789" }] self.client.connection.head_object.return_value = ({}, objs) self.client.connection.get_container.return_value = ({}, objs) self.storage_object = self.client.get_object(self.container, "testobj") self.client._container_cache = {} self.container.object_cache = {}
def pyrax_auth(username, apikey): try: pyrax.set_credentials(str(username), str(apikey)) return True except exc.AuthenticationFailed: print "Authentication was not successful, please enter your username and API key from your Rackspace Control Panel" return False
def content_store_url(quiet=False):
    """
    Access the public content store URL.

    Respect the environment variable CONTENT_STORE_URL if it is populated.
    Otherwise, find the content store load balancer and derive its public IP
    via the Rackspace API.

    Prints the derived URL to stdout as a side-effect unless "quiet" is set
    to True.
    """
    content_store_url = os.environ.get("CONTENT_STORE_URL")
    domain = get("domain")

    # 1) Explicit override via the environment (single trailing slash
    #    trimmed, as before).
    if content_store_url:
        if content_store_url.endswith("/"):
            content_store_url = content_store_url[:-1]
        if not quiet:
            print("Using content store URL: {}".format(content_store_url))
        return content_store_url

    # 2) A configured domain implies the standard port-9000 endpoint.
    if domain:
        content_store_url = "https://{}:9000".format(domain)
        if not quiet:
            print("Using content store URL: {}".format(content_store_url))
        return content_store_url

    # 3) Fall back to discovering the load balancer through the API.
    rackspace_username = get("rackspace_username")
    rackspace_apikey = get("rackspace_api_key")
    rackspace_region = get("rackspace_region")
    instance_name = get("instance")

    pyrax.set_setting("identity_type", "rackspace")
    pyrax.set_setting("region", rackspace_region)
    pyrax.set_credentials(rackspace_username, rackspace_apikey)

    content_lb_name = "deconst-{}-content".format(instance_name)
    the_lb = None
    for lb in pyrax.cloud_loadbalancers.list():
        if lb.name == content_lb_name:
            the_lb = lb
    if not the_lb:
        raise Exception("Content service load balancer not found")

    content_store_url = "https://{}:{}".format(
        the_lb.virtual_ips[0].address, the_lb.port)
    if not quiet:
        print("Derived content store URL: {}".format(content_store_url))
        print("If this is incorrect, set CONTENT_STORE_URL to the correct value.")
    return content_store_url
def authenticate(self, api_creds):
    """Authenticate against Rackspace identity and persist a token file.

    :param api_creds: two-item sequence of (username, password/api-key).

    Writes a keystone-style "access" JSON document to self.token_file,
    readable and writable only by the current user.
    """
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(api_creds[0], password=api_creds[1],
                          authenticate=True)
    # Construct the authentication response body and store it as JSON.
    # NOTE(review): expires is converted to the local zone and then to UTC
    # in two steps -- presumably because pyrax.identity.expires is a naive
    # local-time datetime; confirm before collapsing into one astimezone().
    local_tz = get_localzone()
    expiry_in_utc = pyrax.identity.expires.astimezone(local_tz)
    expiry_in_utc = expiry_in_utc.astimezone(pytz.utc)
    json_out = {
        "access": {
            "token": {
                "id": pyrax.identity.token,
                "tenant": {
                    "name": pyrax.identity.tenant_name,
                    "id": pyrax.identity.tenant_id,
                },
                "expires": expiry_in_utc.strftime('%Y-%m-%dT%H:%M:%S.000Z')
            },
            "user": {
                "id": pyrax.identity.user['id'],
                "name": pyrax.identity.username,
                "roles": pyrax.identity.user['roles']
            },
            "serviceCatalog": pyrax.identity.service_catalog
        }
    }
    # The file is rw- only for the current user.
    with open(os.open(self.token_file, os.O_CREAT | os.O_WRONLY, 0o600),
              'w') as fp:
        json.dump(json_out, fp)
def api_initialization():
    """Authenticate pyrax, trying credential sources in priority order:
    command-line argv, then ~/.rackspace_cloud_credentials, and finally an
    interactive prompt (cred_prompt) as the last resort.
    """
    pyrax.set_setting("identity_type", "rackspace")
    try:
        # Expect exactly: script name, username, api key.
        progname, username, api_key = argv
        pyrax.set_credentials(username, api_key)
    except ValueError:
        # Wrong number of argv items -- fall back to the credentials file.
        if os.path.isfile(os.path.expanduser("~/.rackspace_cloud_credentials")):
            creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
            try:
                pyrax.set_credential_file(creds_file)
            except pexc.AuthenticationFailed:
                print "The credentials located in ~/.rackspace_cloud_credentials are not valid. Please provide the correct Username and API Key below.\n"
                cred_prompt()
        else:
            cred_prompt()
    except pexc.AuthenticationFailed:
        # argv credentials were present but rejected by the API.
        if os.path.isfile(os.path.expanduser("~/.rackspace_cloud_credentials")):
            print "The provided credentials are not valid; reverting to the ~/.rackspace_cloud_credentials file."
            creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
            try:
                pyrax.set_credential_file(creds_file)
            except pexc.AuthenticationFailed:
                print "The credentials located in ~/.rackspace_cloud_credentials are not valid. Please provide the correct Username and API Key below.\n"
                cred_prompt()
        else:
            print "The provided credentials are not valid; please enter them below.\n"
            cred_prompt()
def __init__(self, options=None, *args, **kwargs): """ Delegates to super, then attaches a pyrax cloud files connection. """ # Transparent delegation to super super(CloudFilesStorage, self).__init__(*args, **kwargs) # Get cloudfiles settings, if options were not provided if options is None: try: options = settings.CLOUDFILES except AttributeError: raise ImproperlyConfigured( u'Provide options or use settings.CLOUDFILES') # Set pyrax authentication type to "rackspace" which is the one used # for the Rackspace "public cloud" pyrax.set_setting('identity_type', 'rackspace') # Set the encoding to utf-8 (default, but in the name explicitness) pyrax.set_setting('encoding', 'utf-8') # Get credentials try: username, api_key = options['USERNAME'], options['API_KEY'] except KeyError: raise ImproperlyConfigured( u'USERNAME and API_KEY are both required options') # Authenticate (accesses network) try: pyrax.set_credentials(username, api_key) except AuthenticationFailed: raise ImproperlyConfigured( u'Rackspace Cloudfiles API authentication failed - check ' 'username and api_key') # Get the region try: region = options['REGION'] except KeyError: raise ImproperlyConfigured(u'REGION is a required option') # Attach a cloudfiles connection for the selected region self.cloudfiles = pyrax.connect_to_cloudfiles(region=region, public=options.get( 'PUBLIC', True)) # Get the container name try: container = options['CONTAINER'] except KeyError: raise ImproperlyConfigured(u'CONTAINER is a required option') # Attach the container try: self.container = self.cloudfiles.get_container(container) except NoSuchContainer: raise ImproperlyConfigured( u'No such container named "{c}"'.format(c=container))
def authenticate_credentials(self):
    """
    This method try to authenticate with available credentials

    :returns: True or False (Boolean) -- the value of
        ``pyrax.identity.authenticated``, or False on auth failure.
    """
    logger = logging.getLogger(__name__)
    logger.debug('authenticating with credentials '
                 '(identity_type:%s, username:%s, api-key:%s, region=%s)',
                 self._identity_type, self._username, self._apikey,
                 self._region)
    try:
        pyrax.set_setting("identity_type", self._identity_type)
        pyrax.set_credentials(self._username, self._apikey,
                              region=self._region)
        logger.info("authenticated with credentials, username: %s, "
                    "api-key: %s, region: %s, identity_type: %s",
                    self._username, self._apikey, self._region,
                    self._identity_type)
        logger.debug("user authenticated: %s", pyrax.identity.authenticated)
        if pyrax.identity.authenticated:
            # Cache token/tenant so later requests can reuse them.
            self._token = pyrax.identity.auth_token
            self._tenant_id = pyrax.identity.tenant_id
            self.save_token()
        return pyrax.identity.authenticated
    except AuthenticationFailed:
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning("cannot authenticate with credentials")
        return False
def main(username, project, list): pyrax.set_setting('identity_type', 'rackspace') with open(os.path.expanduser('~/.bugminion'), 'r') as f: conf = json.loads(f.read()) pyrax.set_credentials(conf['access_key'], conf['secret_key'], region=conf['region'].upper()) conn = pyrax.connect_to_cloudfiles(region=conf['region'].upper()) container = conn.create_container(conf['container']) now = datetime.datetime.now() datestamp = '%04d%02d%02d' %(now.year, now.month, now.day) with open(list) as f: with open('%s.csv' % list, 'w') as csvfile: csvwriter = csv.writer(csvfile, dialect='excel') for bug in f.readlines(): bug = bug.rstrip() try: data = json.loads(container.get_object( '%s-bug/%s' %(project, bug)).get()) except pyrax.exceptions.NoSuchObject: data = {} csvwriter.writerow([ 'https://bugs.launchpad.net/nova/+bug/%s' % bug, data.get('title', 'unknown'), data.get('status', 'unknown'), username]) print 'Done!'
def connection(self):
    """Lazily authenticate and cache a Cloud Files connection (EAFP)."""
    try:
        return self._connection
    except AttributeError:
        pass
    pyrax.set_credentials(self.username, self.api_key)
    # For some reason pyrax.encoding doesn't get set by default.
    pyrax.encoding = "utf-8"
    self._connection = pyrax.connect_to_cloudfiles(region=self.region)
    return self._connection
def main():
    """Create or delete Rackspace cloud servers from the command line."""
    parser = argparse.ArgumentParser(
        description="Rackspace server creation/deletion")
    parser.add_argument("action", choices=['create', 'delete'],
                        help='Action to be perfromed')
    parser.add_argument("-n", "--count", help='Number of machines')
    parser.add_argument("--region", help='Region to launch the machines',
                        default='ORD')
    args = parser.parse_args()

    # configuration of cloud service provider
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_default_region(args.region)
    pyrax.set_credentials(os.environ.get('USERNAME'),
                          os.environ.get('PASSWORD'))
    nova_obj = pyrax.cloudservers

    if args.action == 'create':
        create_node(nova_obj, args.count)
    elif args.action == 'delete':
        delete_node(nova_obj)
def conn(self):
    """Lazily authenticate and return the pyrax module as the connection.

    Credentials come from the RACKSPACE_USERNAME / RACKSPACE_API_KEY
    environment variables.
    """
    if self._conn is None:
        pyrax.set_setting("identity_type", "rackspace")
        # BUG FIX: verify_ssl must be configured *before* set_credentials(),
        # otherwise the authentication request itself still verifies SSL.
        pyrax.set_setting("verify_ssl", False)
        pyrax.set_credentials(os.environ["RACKSPACE_USERNAME"],
                              os.environ["RACKSPACE_API_KEY"])
        self._conn = pyrax  # We love globals, don't we?!
    return self._conn
def __init__(self, task_kwargs=None, json_config_file=None):
    """Load deployment settings and open Rackspace connections.

    Each key in SETTINGS_KEYS is resolved in priority order: task kwargs,
    then environment variables, then the JSON settings file; keys found in
    none of them are simply left unset.
    """
    json_config_file = json_config_file or './deploy_settings.json'
    self.cloudservers = None
    self.settings = AttributeDict({})
    self.fabric_env_servers = []
    self.created_servers = []
    task_kwargs = task_kwargs or {}
    settings = self.read_settings_file(json_config_file)
    for key in self.SETTINGS_KEYS:
        # kwargs > environment > settings file.
        try:
            self.settings[key] = task_kwargs[key]
        except KeyError:
            try:
                self.settings[key] = environ[key]
            except KeyError:
                try:
                    self.settings[key] = settings[key]
                except KeyError:
                    pass
    self.settings.server_count = int(self.settings.server_count)
    self.settings.setdefault('ssh_user', 'root')
    self.settings.setdefault('git_branch', 'master')
    # Credentials are mandatory; fail fast when either is missing.
    self.ensure_settings('rackspace_username', 'rackspace_apikey')
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(
        self.settings.rackspace_username, self.settings.rackspace_apikey)
    self.cloudservers = pyrax.connect_to_cloudservers()
    self.loadbalancers = pyrax.connect_to_cloud_loadbalancers()
def create_node(counts, name, node_request, pubkey):
    ''' Create a node in the cloud provider

    Boots `counts` servers named softserve-<name>.<i>, waits for each to
    become active, and records its public IPv4 address as a Vm row tied to
    the given NodeRequest.
    '''
    pyrax.set_setting('identity_type', app.config['AUTH_SYSTEM'])
    pyrax.set_default_region(app.config['AUTH_SYSTEM_REGION'])
    pyrax.set_credentials(app.config['USERNAME'], app.config['API_KEY'])
    nova = pyrax.cloudservers
    flavor = nova.flavors.find(name='1 GB General Purpose v1')
    image = nova.images.find(name='CentOS 7 (PVHVM)')
    node_request = NodeRequest.query.get(node_request)
    keypair = nova.keypairs.create(name, pubkey)
    # create the nodes
    for count in range(int(counts)):
        vm_name = 'softserve-' + name + '.' + str(count + 1)
        node = nova.servers.create(name=vm_name,
                                   flavor=flavor.id,
                                   image=image.id,
                                   key_name=keypair.name)
        # wait for server to get active
        # NOTE(review): polls every 5s with no timeout -- a build that
        # never leaves BUILD state loops forever.
        while node.status == 'BUILD':
            time.sleep(5)
            node = nova.servers.get(node.id)
        # get ip_address of the active node
        for network in node.networks['public']:
            # The public list mixes IPv4 and IPv6; keep the IPv4 address.
            if re.match(r'\d+\.\d+\.\d+\.\d+', network):
                machine = Vm(ip_address=network, vm_name=vm_name,
                             state=node.status)
                machine.details = node_request
                db.session.add(machine)
                db.session.commit()
def update_records(self, ip_addresses):
    """Create or update Rackspace Cloud DNS A/AAAA records.

    :param ip_addresses: dict keyed 'ipv4'/'ipv6'; None values are skipped.
    :raises exc.NoUsername: when no username is configured.
    :raises exc.NoApiKey: when no API key is configured.
    """
    record_types = {
        'ipv4': 'A',
        'ipv6': 'AAAA'
    }
    username = self.config_get('username')
    if username is None:
        raise exc.NoUsername('A username is not configured in %s'
                             % self.config_file)
    apikey = self.config_get('apikey')
    if apikey is None:
        raise exc.NoApiKey('An API key is not configured in %s'
                           % self.config_file)
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(username, apikey)
    self.dns = pyrax.cloud_dns
    dns_info = self.find_dns()
    for ip_type, ip in ip_addresses.iteritems():
        if ip is None:
            continue
        if dns_info[ip_type] is None:
            # No existing record of this type: create one with a 5-minute TTL.
            self.logger.info('Creating %s record for %s'
                             % (record_types[ip_type], dns_info['host']))
            records = dns_info['domain'].add_records([
                {
                    'type': record_types[ip_type],
                    'name': dns_info['host'],
                    'data': ip,
                    'ttl': 300
                }
            ])
        else:
            # Record exists: just point it at the new address.
            self.logger.info('Updating %s record for %s'
                             % (record_types[ip_type], dns_info['host']))
            dns_info[ip_type].update(data=ip)
def __init__(self, parsed_url): try: import pyrax except ImportError: raise BackendException("This backend requires the pyrax " "library available from Rackspace.") # Inform Pyrax that we're talking to Rackspace # per Jesus Monzon (gsusmonzon) pyrax.set_setting("identity_type", "rackspace") conn_kwargs = {} if not os.environ.has_key('CLOUDFILES_USERNAME'): raise BackendException('CLOUDFILES_USERNAME environment variable' 'not set.') if not os.environ.has_key('CLOUDFILES_APIKEY'): raise BackendException('CLOUDFILES_APIKEY environment variable not set.') conn_kwargs['username'] = os.environ['CLOUDFILES_USERNAME'] conn_kwargs['api_key'] = os.environ['CLOUDFILES_APIKEY'] if os.environ.has_key('CLOUDFILES_REGION'): conn_kwargs['region'] = os.environ['CLOUDFILES_REGION'] container = parsed_url.path.lstrip('/') try: pyrax.set_credentials(**conn_kwargs) except Exception, e: log.FatalError("Connection failed, please check your credentials: %s %s" % (e.__class__.__name__, str(e)), log.ErrorCode.connection_failed)
def __init__(self, username=None, api_key=None, container=None, timeout=None,
             max_retries=None, container_uri=None):
    """
    Initialize connection settings and the target container, then
    authenticate with pyrax.
    """
    # Keep the class-level defaults unless the caller supplied an override.
    overrides = (
        ('username', username),
        ('api_key', api_key),
        ('container_name', container),
        ('timeout', timeout),
        ('max_retries', max_retries),
    )
    for attr, value in overrides:
        if value is not None:
            setattr(self, attr, value)
    if container_uri is not None:
        self._container_public_uri = container_uri
    elif 'CONTAINER_URI' in CUMULUS:
        self._container_public_uri = CUMULUS['CONTAINER_URI']
    pyrax.set_setting("identity_type", self.pyrax_identity_type)
    pyrax.set_credentials(self.username, self.api_key)
def connect_container(self):
    """
    Connects to a container using the swiftclient api.

    The container will be created and/or made public using the pyrax api
    if not already so.
    """
    self.conn = swiftclient.Connection(authurl=CUMULUS["AUTH_URL"],
                                       user=CUMULUS["USERNAME"],
                                       key=CUMULUS["API_KEY"],
                                       snet=CUMULUS["SERVICENET"],
                                       auth_version=CUMULUS["AUTH_VERSION"],
                                       tenant_name=CUMULUS["AUTH_TENANT_NAME"])
    try:
        self.conn.head_container(self.container_name)
    except swiftclient.client.ClientException as exception:
        if exception.msg == "Container HEAD failed":
            # Container does not exist yet -- delegate creation to the
            # management command so creation logic lives in one place.
            call_command("container_create", self.container_name)
        else:
            raise
    if CUMULUS["USE_PYRAX"]:
        # ServiceNet-only deployments cannot reach the public endpoint.
        public = not CUMULUS["SERVICENET"]
        pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
        connection = pyrax.connect_to_cloudfiles(region=CUMULUS["REGION"],
                                                 public=public)
        container = connection.get_container(self.container_name)
        # CDN-enable the container so assets are publicly reachable.
        if not container.cdn_enabled:
            container.make_public(ttl=CUMULUS["TTL"])
    else:
        # Without pyrax, grant world-read via a swift container ACL.
        headers = {"X-Container-Read": ".r:*"}
        self.conn.post_container(self.container_name, headers=headers)
    self.container = self.conn.get_container(self.container_name)
def impl(context, operating_system, ram_size):
    """Behave step: authenticate to Rackspace for the server under test.

    BUG FIX: removed a leftover ``import code; code.interact(local=locals())``
    debugging hook, which halted every run at an interactive prompt.
    """
    server_id = context.output.strip()
    pyrax.set_setting("identity_type", "rackspace")
    pyrax.set_credentials(os.environ['OS_USERNAME'],
                          os.environ['OS_PASSWORD'])
    cs = pyrax.cloudservers
    # NOTE(review): server_id and cs are currently unused -- presumably an
    # assertion on the server's OS/RAM was intended here; TODO confirm.
def do_main_program() :
    """Claim one queued document, convert it to PDF with govify, upload the
    result, and notify the author by mail.

    Queue, container names and lifetimes all come from config.rackspace.
    """
    pyrax.set_credentials(config.rackspace['API_USERNAME'],
                          config.rackspace['API_KEY'])
    # Each worker needs a unique client id to claim queue messages.
    my_client_id = str(uuid.uuid4())
    pq = pyrax.queues
    pq.client_id = my_client_id
    cf = pyrax.cloudfiles
    in_container = cf.get_container(config.rackspace['API_FILES_IN'])
    out_container = cf.get_container(config.rackspace['API_FILES_OUT'])
    # We set the ttl and grace period to their minimum, 60 seconds.
    # Get 1 at a time.
    claim = pq.claim_messages(config.rackspace['API_QUEUE'], 60, 60, 1)
    if claim and len(claim.messages) :
        for msg in claim.messages:
            #print 'Claimed {0}'.format([msg.body])
            in_obj = in_container.get_object(msg.body['Tempname'])
            # Generate a safe filename.
            new_filename = '/tmp/' + msg.body['Tempname']
            # Insert our data into that file.
            f = open(new_filename, 'w')
            f.write(in_obj.get())
            f.close()
            # If we successfully govify'd the document
            try:
                subprocess.check_call(["/usr/bin/govify", new_filename])
                f = open(new_filename + '.pdf', 'r')
                # Upload the new file
                obj = out_container.store_object(
                    msg.body['Tempname'] + '.pdf', f.read(),
                    content_type='application/pdf',
                    ttl=config.rackspace['API_FILE_LIFETIME'])
                f.close()
                os.remove(new_filename + '.pdf')
                # Remove the item from the inbox
                # Do this via the container so the cache is cleared!
                in_container.delete_object(msg.body['Tempname'])
                # Remove the item from the queue
                pq.delete_message(config.rackspace['API_QUEUE'], msg.id,
                                  claim.id)
                # Notify the user
                do_mail(msg.body['Author'],
                        obj.get_temp_url(config.rackspace['API_FILE_LIFETIME']));
            except subprocess.CalledProcessError:
                print 'Something went wrong!'
            # Remove our temp files.
            os.remove(new_filename)
def cache_clean(folder, extension):
    """Purge cached objects under *folder* with the given *extension* from
    the Cloud Files cache container, using a gevent pool for concurrency.

    :param folder: object-name prefix to enumerate (cache subfolder).
    :param extension: only objects whose name ends with this suffix are
        deleted.
    """
    # NOTE: Manually install gevent & pyrax, no need for it to be depenency just for this method.
    from gevent import monkey
    from gevent.pool import Pool
    from gevent import Timeout
    # Patch before importing anything that does network I/O so sockets
    # become cooperative.
    monkey.patch_all()
    import six
    import pyrax
    import logging
    from mfr.server import settings

    # Monkey patch pyrax for python 3 compatibility.
    def _add_details(self, info):
        """
        Takes the dict returned by the API call and sets the
        corresponding attributes on the object.
        """
        for (key, val) in six.iteritems(info):
            if six.PY2 and isinstance(key, six.text_type):
                key = key.encode(pyrax.get_encoding())
            elif isinstance(key, bytes):
                # Under py3 pyrax may hand back bytes keys; decode so
                # setattr gets a str attribute name.
                key = key.decode("utf-8")
            setattr(self, key, val)
    pyrax.resource.BaseResource._add_details = _add_details

    # WARNING: We are using provider specific functions to enumerate files to quickly
    # purge the cache, which can contain hundreds of thousands of objects. Thus
    # asserting the provider, we will need to update if we move providers.
    assert settings.CACHE_PROVIDER_NAME == 'cloudfiles'
    logging.captureWarnings(True)
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_setting('verify_ssl', True)
    pyrax.set_credentials(settings.CACHE_PROVIDER_CREDENTIALS['username'],
                          settings.CACHE_PROVIDER_CREDENTIALS['token'])
    cf = pyrax.connect_to_cloudfiles(region=settings.CACHE_PROVIDER_CREDENTIALS['region'].upper(),
                                     public=True)
    container = cf.get_container(settings.CACHE_PROVIDER_SETTINGS['container'])

    def delete_object(obj):
        # added timeout of 5 seconds just in case
        # Timeout(..., False) makes the timeout silent rather than raising.
        with Timeout(5, False):
            try:
                print(obj)
                obj.delete()
            except Exception as ex:
                # Best-effort purge: report and keep going.
                print(ex)

    pool = Pool(100)
    # Page through the container 5000 objects at a time, using the last
    # object name of each page as the marker for the next request.
    objects = container.get_objects(prefix=folder, limit=5000, marker='')
    while objects:
        for obj in objects:
            if obj.name.endswith(extension):
                pool.spawn(delete_object, obj)
        objects = container.get_objects(prefix=folder, limit=5000,
                                        marker=objects[-1].name)
    # Wait for all in-flight deletions to finish.
    pool.join()
def connection(self):
    """Return a Cloud Files connection, creating and caching it on first use."""
    if hasattr(self, '_connection'):
        return self._connection
    pyrax.set_credentials(self.username, self.api_key)
    # pyrax leaves its encoding unset by default; force UTF-8 explicitly.
    pyrax.encoding = "utf-8"
    self._connection = pyrax.connect_to_cloudfiles(region=self.region)
    return self._connection
def __init__(self, username, api_key):
    """Store the account credentials and authenticate against Rackspace identity."""
    super(DNSClient, self).__init__()
    self.username, self.api_key = username, api_key
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(self.username, self.api_key)
def authenticate(username='', passwd='', path=''): if username or passwd: pyrax.set_credentials(username,passwd) elif path: pyrax.set_credential_file(os.path.expanduser(path)) else: print "Authentication Failed... please use username/password or file to authenticate" sys.exit()
def rax_auth(user, api): global debug if debug: print "*** Username: %s, API_Key: %s" % (user, api) pyrax.set_setting("identity_type", "rackspace") pyrax.set_credentials(user, api)
def _connect(self):
    """Authenticate to Rackspace and cache a cloudservers client.

    Idempotent: returns immediately when a connection was already made.
    """
    if self.connected:
        return
    cfg = self.config
    pyrax.set_credentials(
        username=cfg['rackspace_username'],
        api_key=cfg['rackspace_password'],
        tenant_id=cfg['rackspace_project_id'],
        region=cfg['rackspace_region'],
    )
    self.nova = pyrax.cloudservers
    self.connected = True
def __init__(self, username, api_key):
    """Record the credentials and log in to the Rackspace identity service."""
    super(DNSClient, self).__init__()
    pyrax.set_setting("identity_type", "rackspace")
    pyrax.set_credentials(username, api_key)
    self.username = username
    self.api_key = api_key
def _connect_to_rackspace(self):
    """ returns a connection object to Rackspace """
    region = self.state.region
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_default_region(region)
    pyrax.set_credentials(self.config.access_key_id,
                          self.config.secret_access_key)
    # Hand back a cloudservers (nova) client scoped to the state's region.
    return pyrax.connect_to_cloudservers(region=region)
def __init__(self, user_name, api_key, test_domain, dns_api_timeout=15):
    """Authenticate and bind this client to an existing test DNS domain.

    :param user_name: Rackspace account username.
    :param api_key: Rackspace API key.
    :param test_domain: name of the domain to operate on; it must already
        exist (``find`` raises otherwise).
    :param dns_api_timeout: seconds before Cloud DNS calls time out
        (default 15).
    """
    super(RackspaceDNSClient, self).__init__()
    # CONSISTENCY FIX: use the public pyrax.set_setting() helper (as the
    # rest of this codebase does) instead of poking pyrax.settings directly.
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(user_name, api_key)
    self.dns_client = pyrax.cloud_dns
    self.dns_client.set_timeout(dns_api_timeout)
    self.test_domain = test_domain
    self.domain = self.dns_client.find(name=test_domain)
def main(num_of_workers=0, worker_id=0, glacier=True, parity=True, dry_run=True):
    """Run the storage audit for one worker: connect to Rackspace Cloud
    Files and AWS Glacier, then audit glacier and/or parity targets.

    :param num_of_workers: total number of parallel audit workers.
    :param worker_id: this worker's index (used for log suffix and temp dir).
    :param glacier: when True, audit the glacier targets.
    :param parity: when True, audit the parity targets.
    :param dry_run: when True, skip file logging and temp-dir creation.
    """
    # Populated here and read by the audit helpers elsewhere in the module.
    global container_primary
    global container_parity
    global vault
    global audit_temp_path

    # Set up storage backends
    init_app(set_backends=True, routes=False)

    try:
        # Authenticate to Rackspace
        pyrax.settings.set('identity_type', 'rackspace')
        pyrax.set_credentials(storage_settings.USERNAME,
                              storage_settings.API_KEY,
                              region=storage_settings.REGION)
        container_primary = pyrax.cloudfiles.get_container(
            storage_settings.PRIMARY_CONTAINER_NAME)
        container_parity = pyrax.cloudfiles.get_container(
            storage_settings.PARITY_CONTAINER_NAME)

        # Connect to AWS
        layer2 = Layer2(
            aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
            aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
        )
        vault = layer2.get_vault(storage_settings.GLACIER_VAULT)

        # Log to file
        if not dry_run:
            scripts_utils.add_file_logger(logger, __file__, suffix=worker_id)

        # Each worker gets its own temp dir so parallel runs don't collide.
        audit_temp_path = os.path.join(storage_settings.AUDIT_TEMP_PATH,
                                       str(worker_id))
        if not dry_run:
            try:
                os.makedirs(audit_temp_path)
            except OSError:
                # Directory already exists (or is unwritable) — proceed;
                # NOTE(review): this also swallows permission errors.
                pass

        if glacier:
            logger.info('glacier audit start')
            audit(glacier_targets(), num_of_workers, worker_id, dry_run)
            logger.info('glacier audit complete')

        if parity:
            logger.info('parity audit start')
            audit(parity_targets(), num_of_workers, worker_id, dry_run)
            logger.info('parity audit complete')
    except Exception as err:
        # Log full traceback, then re-raise so the caller sees the failure.
        logger.error('=== Unexpected Error ===')
        logger.exception(err)
        raise err
def main():
    """Parse arguments, take snap, and trigger deletion of oldest autosnap."""
    parser = argparse.ArgumentParser(description='auto-rotate image snapshots')
    parser.add_argument('user', metavar='username', type=str,
                        help='Username for account')
    parser.add_argument('key', metavar='apikey', type=str,
                        help='API key for account')
    parser.add_argument('server', metavar='server', type=str,
                        help='UUID of server to autosnap')
    parser.add_argument('retention', metavar='retention', type=int,
                        help='Minimum number of autosnaps to retain')
    parser.add_argument('region', metavar='region', type=str,
                        help='Region of Server (DFW, ORD, LON)',
                        choices=['DFW', 'ORD', 'LON'])
    args = parser.parse_args()
    pyrax.set_setting("identity_type", "rackspace")
    pyrax.set_credentials(args.user, args.key)
    #Set up a cloud object based on region specified by user
    # LON lives on a separate endpoint, reachable via the default client.
    if args.region == "LON":
        cloud = pyrax.cloudservers
    else:
        cloud = pyrax.connect_to_cloudservers(region=args.region)
    server = cloud.servers.get(args.server)
    images = cloud.images.list()
    snap_list = []
    # New snapshot name: <servername>-autosnap-<uuid from CLI>.
    snap_name = server.name + "-autosnap-" + args.server
    #Search for previous autosnaps of server and append to a list
    # NOTE(review): search uses server.id while snap_name uses args.server —
    # these should be the same UUID, but confirm casing/format match.
    for i in images:
        search_name = server.name + "-autosnap-" + server.id
        if i.name == search_name:
            snap_list.append(i)
    #Delete oldest snapshot(s) if retention < num images
    while args.retention < len(snap_list):
        snap_list = delete_old(snap_list, cloud)
    #Create snapshot if retention >= number of previous autosnaps
    if args.retention >= len(snap_list):
        server.create_image(snap_name)
def push(topdir, region, container_name):
    """Upload *topdir* to the named Cloud Files container over ServiceNet.

    Credentials are read from the JSON file at ``~/.cloudfiles``
    (keys ``access_key`` and ``secret_key``).
    """
    pyrax.set_setting('identity_type', 'rackspace')
    creds_path = os.path.expanduser('~/.cloudfiles')
    with open(creds_path, 'r') as creds_file:
        conf = json.load(creds_file)
    pyrax.set_credentials(conf['access_key'], conf['secret_key'],
                          region=region)
    # public=False keeps traffic on the internal (ServiceNet) interface.
    conn = pyrax.connect_to_cloudfiles(region=region.upper(), public=False)
    container = conn.create_container(container_name)
    copy_dir(topdir, topdir, container)
def __init__(self, username, password, region, **kwargs):
    """Configure pyrax identity settings and authenticate.

    Optional ``identity_type`` and ``auth_endpoint`` overrides may be
    supplied via keyword arguments; the configured pyrax module itself is
    kept as the connection object.
    """
    identity_type = kwargs.get('identity_type', 'rackspace')
    auth_endpoint = kwargs.get('auth_endpoint', 'https://identity.api.rackspacecloud.com/v2.0')
    pyrax.settings.set('identity_type', identity_type)
    pyrax.settings.set('auth_endpoint', auth_endpoint)
    creds = {'password': password, 'region': region}
    pyrax.set_credentials(username, **creds)
    self.conn = pyrax
def send_sitemap_to_rackspace(sitemap_files_path): global CONFIG RS_USERNAME = CONFIG.get("Rackspace", "Username") RS_APIKEY = CONFIG.get("Rackspace", "ApiKey") RS_CONTAINER_NAME = CONFIG.get("Rackspace", "SitemapContainer") print "Loading files from %s to Rackspace CDN." % sitemap_files_path pyrax.set_setting("identity_type", "rackspace") pyrax.set_credentials(RS_USERNAME, RS_APIKEY) pyrax.cloudfiles.sync_folder_to_container(sitemap_files_path, RS_CONTAINER_NAME)