def setUp(self):
    pyrax.connect_to_cloudservers = Mock()
    pyrax.connect_to_cloud_loadbalancers = Mock()
    pyrax.connect_to_cloud_databases = Mock()
    pyrax.connect_to_cloud_blockstorage = Mock()
    pyrax.clear_credentials()
    pyrax.identity = FakeIdentity()
    pyrax.set_credentials("fakeuser", "fakeapikey")
    pyrax.connect_to_cloudfiles()
    self.client = pyrax.cloudfiles
    self.container = FakeContainer(self.client, self.container_name, 0, 0)
    self.container.name = self.container_name
    self.client.get_container = Mock(return_value=self.container)
    self.client.connection.get_container = Mock()
    self.client.connection.head_object = Mock()
    objs = [{"name": self.obj_name, "content_type": "test/test",
             "bytes": 444, "hash": "abcdef0123456789"}]
    self.client.connection.head_object.return_value = ({}, objs)
    self.client.connection.get_container.return_value = ({}, objs)
    self.storage_object = self.client.get_object(self.container, "testobj")
    self.client._container_cache = {}
    self.container.object_cache = {}

def setUp(self):
    pyrax.connect_to_cloudservers = Mock()
    pyrax.connect_to_cloud_loadbalancers = Mock()
    pyrax.connect_to_cloud_databases = Mock()
    pyrax.connect_to_cloud_blockstorage = Mock()
    pyrax.identity = FakeIdentity()
    pyrax.set_credentials("fakeuser", "fakeapikey", region="FAKE")
    pyrax.connect_to_cloudfiles(region="FAKE")
    self.client = pyrax.cloudfiles
    self.client._container_cache = {}
    self.cont_name = utils.random_name()
    self.obj_name = utils.random_name()
    self.fake_object = FakeStorageObject(self.client, self.cont_name,
                                         self.obj_name)

def setUp(self):
    pyrax.connect_to_cloudservers = Mock()
    pyrax.connect_to_cloud_loadbalancers = Mock()
    pyrax.connect_to_cloud_databases = Mock()
    pyrax.connect_to_cloud_blockstorage = Mock()
    pyrax.identity = FakeIdentity()
    pyrax.set_credentials("fakeuser", "fakeapikey")
    pyrax.connect_to_cloudfiles()
    self.client = pyrax.cloudfiles
    self.client._container_cache = {}
    self.cont_name = utils.random_name()
    self.obj_name = utils.random_name()
    self.fake_object = FakeStorageObject(self.client, self.cont_name,
                                         self.obj_name)

def setUp(self):
    pyrax.connect_to_cloudservers = Mock()
    pyrax.connect_to_cloud_loadbalancers = Mock()
    pyrax.connect_to_cloud_databases = Mock()
    pyrax.connect_to_cloud_blockstorage = Mock()
    pyrax.connect_to_cloudfiles()
    self.client = pyrax.cloudfiles
    self.client.connection.head_container = Mock()
    self.cont_name = utils.random_name()
    self.container = self.client.get_container(self.cont_name)
    self.obj_name = utils.random_name()
    self.fake_object = FakeStorageObject(self.client, self.cont_name,
                                         self.obj_name)
    self.client._container_cache = {}
    self.container.object_cache = {}

def setUp(self):
    pyrax.connect_to_cloudservers = Mock()
    pyrax.connect_to_cloud_loadbalancers = Mock()
    pyrax.connect_to_cloud_databases = Mock()
    pyrax.connect_to_cloud_blockstorage = Mock()
    pyrax.connect_to_cloudfiles()
    self.client = pyrax.cloudfiles
    self.client.connection.head_container = Mock()
    self.cont_name = utils.random_name(ascii_only=True)
    self.container = self.client.get_container(self.cont_name)
    self.obj_name = utils.random_name(ascii_only=True)
    self.fake_object = FakeStorageObject(self.client, self.cont_name,
                                         self.obj_name)
    self.client._container_cache = {}
    self.container.object_cache = {}

def load(context, path, callback):
    key = (context.config.RACKSPACE_PYRAX_REGION,
           context.config.get('RACKSPACE_PYRAX_IDENTITY_TYPE', 'rackspace'),
           context.config.RACKSPACE_PYRAX_CFG,
           context.config.RACKSPACE_PYRAX_PUBLIC,
           context.config.RACKSPACE_LOADER_CONTAINER)
    if key not in CONNECTIONS:
        if context.config.RACKSPACE_PYRAX_REGION:
            pyrax.set_default_region(context.config.RACKSPACE_PYRAX_REGION)
        pyrax.set_setting('identity_type',
                          context.config.get('RACKSPACE_PYRAX_IDENTITY_TYPE',
                                             'rackspace'))
        pyrax.set_credential_file(
            expanduser(context.config.RACKSPACE_PYRAX_CFG))
        cf = pyrax.connect_to_cloudfiles(
            public=context.config.RACKSPACE_PYRAX_PUBLIC)
        CONNECTIONS[key] = cf.get_container(
            context.config.RACKSPACE_LOADER_CONTAINER)
    cont = CONNECTIONS[key]
    file_abspath = normalize_path(context, path)
    logger.debug("[LOADER] getting from %s/%s" % (
        context.config.RACKSPACE_LOADER_CONTAINER, file_abspath))
    try:
        obj = cont.get_object(file_abspath)
        if obj:
            logger.debug("[LOADER] Found object at %s/%s" % (
                context.config.RACKSPACE_LOADER_CONTAINER, file_abspath))
        else:
            logger.warning("[LOADER] Unable to find object %s/%s" % (
                context.config.RACKSPACE_LOADER_CONTAINER, file_abspath))
    except:
        callback(None)
    else:
        callback(obj.get())

def listdir(self):
    for ent in self.shalist.keys():
        if ent in self.remote_files:
            yield ent

    # Directories don't appear in shalists
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)
    dirs = {}
    marker = None
    prefix = remote_filename(self.path) + '~'
    while True:
        results = container.get_objects(prefix=prefix, marker=marker)
        if not results:
            break
        for f in results:
            marker = f.name
            if f.name.endswith('.shalist'):
                subdir = f.name[len(prefix):]
                if subdir and subdir != '.shalist':
                    dirs[subdir.split('~')[0]] = True

    for d in dirs:
        yield d

def login(self):
    """
    Logs into cloud files.

    Note that this is on the main thread. init_thread is responsible for
    initializing individual threads.

    :return: True on success, False on failure
    """
    try:
        pyrax.set_credentials(username=self.username, api_key=self.api_key)
        self.rax = pyrax.connect_to_cloudfiles(self.region, True)
        if self.rax is None:
            ThreadedDeleter.output('Unknown error occurred while connecting'
                                   ' to CloudFiles.')
            return False
    except pyrax.exceptions.AuthenticationFailed as e:
        ThreadedDeleter.output('Authentication failed: {msg}'.format(
            msg=str(e)))
        return False
    except pyrax.exceptions.PyraxException as e:
        ThreadedDeleter.output('Unknown error occurred: {msg}'.format(
            msg=str(e)))
        return False
    return True

def connect_container(self):
    """
    Connects to a container using the swiftclient api.

    The container will be created and/or made public using the pyrax api
    if not already so.
    """
    self.conn = swiftclient.Connection(authurl=CUMULUS["AUTH_URL"],
                                       user=CUMULUS["USERNAME"],
                                       key=CUMULUS["API_KEY"],
                                       snet=CUMULUS["SERVICENET"],
                                       auth_version=CUMULUS["AUTH_VERSION"],
                                       tenant_name=CUMULUS["AUTH_TENANT_NAME"])
    try:
        self.conn.head_container(self.container_name)
    except swiftclient.client.ClientException as exception:
        if exception.msg == "Container HEAD failed":
            call_command("container_create", self.container_name)
        else:
            raise
    if CUMULUS["USE_PYRAX"]:
        public = not CUMULUS["SERVICENET"]
        pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
        connection = pyrax.connect_to_cloudfiles(region=CUMULUS["REGION"],
                                                 public=public)
        container = connection.get_container(self.container_name)
        if not container.cdn_enabled:
            container.make_public(ttl=CUMULUS["TTL"])
    else:
        headers = {"X-Container-Read": ".r:*"}
        self.conn.post_container(self.container_name, headers=headers)
    self.container = self.conn.get_container(self.container_name)

def uploadFile(username, userid, userkey, userenabled, lid, lname, fp, date,
               **kw):
    upenabled = kw.pop('upenabled', 'true')
    cleardirs = kw.pop('cleardirs', 'true')
    if upenabled == 'false':
        print("Not uploading files, option disabled!")
        print("Files will not be uploaded: %s for userId: %s" % (fp, userid))
        return
    if userenabled:
        print("Access CloudFiles for user id %s : user name %s" %
              (pyrax.identity.user['id'], pyrax.identity.user['name']))
        cf = pyrax.connect_to_cloudfiles(region=getRegion())
        try:
            cf.create_container(genContName(lid, lname, date))
            chksum = pyrax.utils.get_checksum(fp)
            filename = genRemoteFileName(lid, lname, date)
            gencname = genContName(lid, lname, date)
            print("Uploading...\n Remote File Name: %s size(%i) as %s, "
                  "Container Name: %s" %
                  (fp, os.stat(fp).st_size, filename, gencname))
            ucont = cf.upload_file(gencname, fp, obj_name=filename,
                                   etag=chksum)
            print("Chksum valid: %s" % (chksum == ucont.etag))
            print("Successfully uploaded file for: LBID: %s" % lid)
            print("DELETING UPLOADED FILE: %s" % fp)
            removeLocalFile(fp)
        except exc.UploadFailed as e:
            print("Upload failed for %s %s. Exception: %s" %
                  (userid, username, e))
            return
        except KeyboardInterrupt:
            print("Skipping this entry")
            time.sleep(1.0)

def connectToClouds():
    """
    Open connections to S3 and Cloud Files.
    """
    s3Conn = None
    cfConn = None
    try:
        # boto reads from /etc/boto.cfg (or ~/boto.cfg)
        s3Conn = boto.connect_s3()
        # The cloud files library doesn't automatically read from a file,
        # so we handle that here:
        cfConfig = configparser.ConfigParser()
        cfConfig.read('/etc/cloudfiles.cfg')
        pyrax.set_setting("identity_type", "rackspace")
        pyrax.set_default_region(cfConfig.get('Credentials', 'region'))
        pyrax.set_credentials(cfConfig.get('Credentials', 'username'),
                              cfConfig.get('Credentials', 'api_key'))
        cfConn = pyrax.connect_to_cloudfiles(
            cfConfig.get('Credentials', 'region'))
    except (NoSectionError, NoOptionError, MissingSectionHeaderError,
            ParsingError) as err:
        raise MultiCloudMirrorException(
            "Error in reading Cloud Files configuration file "
            "(/etc/cloudfiles.cfg): %s" % (err))
    except (S3ResponseError, S3PermissionsError) as err:
        raise MultiCloudMirrorException(
            "Error in connecting to S3: [%d] %s" % (err.status, err.reason))
    except (ClientException, AuthenticationFailed) as err:
        raise MultiCloudMirrorException(
            "Error in connecting to CF: %s" % str(err))
    return (s3Conn, cfConn)

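# A sketch of the /etc/cloudfiles.cfg file that connectToClouds() reads: an
# INI file with a [Credentials] section holding region, username, and api_key.
# The section and option names are inferred from the cfConfig.get() calls
# above; the values here are placeholders, not real credentials.
import configparser

sample = configparser.ConfigParser()
sample["Credentials"] = {
    "region": "DFW",
    "username": "<rackspace username>",
    "api_key": "<rackspace api key>",
}
with open("/etc/cloudfiles.cfg", "w") as f:
    sample.write(f)
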
def handle(self, *args, **options):
    if len(args) != 1:
        raise CommandError(
            "Pass one and only one [container_name] as an argument")
    self.connect()
    container_name = args[0]
    print("Creating container: {0}".format(container_name))
    self.conn.put_container(container_name)
    if not options.get("private"):
        print("Publish container: {0}".format(container_name))
        if CUMULUS["USE_PYRAX"]:
            if CUMULUS["PYRAX_IDENTITY_TYPE"]:
                pyrax.set_setting("identity_type",
                                  CUMULUS["PYRAX_IDENTITY_TYPE"])
            pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
            public = not CUMULUS["SERVICENET"]
            connection = pyrax.connect_to_cloudfiles(
                region=CUMULUS["REGION"], public=public)
            container = connection.get_container(container_name)
            if not container.cdn_enabled:
                container.make_public(ttl=CUMULUS["TTL"])
        else:
            headers = {"X-Container-Read": ".r:*"}
            self.conn.post_container(container_name, headers=headers)
    print("Done")

def _get_connection(self):
    if not hasattr(self.local_cache, "connection"):
        public = not self.use_snet  # invert
        connection = pyrax.connect_to_cloudfiles(region=self.region,
                                                 public=public)
        self.local_cache.connection = connection
    return self.local_cache.connection

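# The _get_connection() above assumes self.local_cache is a threading.local()
# instance, so each worker thread lazily builds and caches its own pyrax
# connection rather than sharing one. A minimal sketch of that setup; the
# class name and defaults are hypothetical, only local_cache, region, and
# use_snet are implied by the snippet.
import threading

class CloudFilesBackend(object):
    def __init__(self, region="DFW", use_snet=False):
        self.region = region
        self.use_snet = use_snet
        # Attributes set on a threading.local() are only visible to the
        # thread that set them, giving one connection per thread.
        self.local_cache = threading.local()
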
def get_link(jsonresp):
    foo = jsonresp["access"]["serviceCatalog"]
    for i in foo:
        for value in i.values():
            if value == "cloudFiles":
                bar = i
    regions = [
        {str(bar["endpoints"][0]["region"]): str(bar["endpoints"][0]["publicURL"])},
        {str(bar["endpoints"][1]["region"]): str(bar["endpoints"][1]["publicURL"])},
        {str(bar["endpoints"][2]["region"]): str(bar["endpoints"][2]["publicURL"])},
        {str(bar["endpoints"][3]["region"]): str(bar["endpoints"][3]["publicURL"])},
    ]
    sys.stdout.write("\x1b[2J\x1b[H")
    print("Regions/URLs:")
    for i, item in enumerate(regions):
        for value in item.values():
            j = str(i + 1)
            print("%s) %s" % (j, value))
    # Re-prompt until the user enters a valid index.
    while True:
        try:
            value = int(raw_input("Please enter choice: ")) - 1
            link = regions[value].values()[0] + "/"
            region = regions[value].keys()[0]
            break
        except (IndexError, ValueError):
            print("Wrong value!")
    cf = pyrax.connect_to_cloudfiles(region=region)
    return cf, link

def connect_container(self):
    """
    Connects to a container using the swiftclient api.

    The container will be created and/or made public using the pyrax api
    if not already so.
    """
    if CUMULUS["USE_PYRAX"]:
        public = not self.use_snet  # invert
        self.conn = pyrax.connect_to_cloudfiles(region=self.region,
                                                public=public)
    else:
        self.conn = swiftclient.Connection(
            authurl=CUMULUS["AUTH_URL"],
            user=CUMULUS["USERNAME"],
            key=CUMULUS["API_KEY"],
            snet=CUMULUS["SERVICENET"],
            auth_version=CUMULUS["AUTH_VERSION"],
            tenant_name=CUMULUS["AUTH_TENANT_NAME"])
    #try:
    #    self.conn.head_container(self.container_name)
    #except swiftclient.client.ClientException as exception:
    #    if exception.msg == "Container HEAD failed":
    #        call_command("container_create", self.container_name)
    #    else:
    #        raise
    self.container = self.conn.get_container(self.container_name)

def __init__(self, name):
    self.region, name = name.split('://')
    self.basename = os.path.basename(name)

    pyrax.set_setting('identity_type', 'rackspace')
    with open(os.path.expanduser('~/.cloudfiles'), 'r') as f:
        self.conf = json.loads(f.read())
    pyrax.set_credentials(self.conf['access_key'],
                          self.conf['secret_key'],
                          region=self.region)
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())

    if self.region == 'dfw':
        self.container_name = remote_filename(name)
    else:
        self.container_name = remote_filename('%s/%s' % (self.region, name))
    container = conn.create_container(self.container_name)

    for i in range(3):
        try:
            container.log_retention(True)
            break
        except:
            pass

    for info in conn.list_containers_info():
        if info['name'] == self.container_name:
            remote_total = info['bytes']
            print('%s Remote store %s contains %s in %d objects'
                  % (datetime.datetime.now(), self.region,
                     utility.DisplayFriendlySize(remote_total),
                     info['count']))

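# A sketch of the ~/.cloudfiles credentials file the class above reads. The
# JSON key names are taken from the self.conf['access_key'] and
# self.conf['secret_key'] lookups; the values are placeholders.
import json
import os

conf = {"access_key": "<rackspace username>",
        "secret_key": "<rackspace api key>"}
with open(os.path.expanduser("~/.cloudfiles"), "w") as f:
    f.write(json.dumps(conf, indent=4))
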
def main(username, project, list):
    pyrax.set_setting('identity_type', 'rackspace')
    with open(os.path.expanduser('~/.bugminion'), 'r') as f:
        conf = json.loads(f.read())

    pyrax.set_credentials(conf['access_key'], conf['secret_key'],
                          region=conf['region'].upper())
    conn = pyrax.connect_to_cloudfiles(region=conf['region'].upper())
    container = conn.create_container(conf['container'])

    # Prioritize a list of bugs from an input file
    now = datetime.datetime.now()
    datestamp = '%04d%02d%02d' % (now.year, now.month, now.day)
    with open(list) as f:
        for bug in f.readlines():
            bug = bug.rstrip()
            triage = {'reviewer': username, 'osic': 'y'}
            common.clobber_object(
                container,
                '%s-bug/%s-%s' % (project, bug, datestamp),
                json.dumps(triage, indent=4, sort_keys=True))

    print('Done!')

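# The bugminion snippets here and below read a ~/.bugminion JSON file. This
# sketch shows its apparent shape: the key names (access_key, secret_key,
# region, container) are inferred from the conf[...] lookups above, and the
# values are placeholders.
import json
import os

conf = {"access_key": "<rackspace username>",
        "secret_key": "<rackspace api key>",
        "region": "dfw",
        "container": "bugminion"}
with open(os.path.expanduser("~/.bugminion"), "w") as f:
    f.write(json.dumps(conf, indent=4))
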
def connection(self):
    if not hasattr(self, '_connection'):
        pyrax.set_credentials(self.username, self.api_key)
        # For some reason pyrax.encoding doesn't get set by default.
        pyrax.encoding = "utf-8"
        self._connection = pyrax.connect_to_cloudfiles(region=self.region)
    return self._connection

def write_checksum(self, checksum):
    self.shalist[self.path] = checksum
    self.cache['checksum'] = checksum

    shafile = remote_filename(os.path.join(self.container_path, '.shalist'))
    print('%s Updating %s with %s'
          % (datetime.datetime.now(), shafile, self.path))

    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)
    for i in range(3):
        try:
            try:
                obj = container.delete_object(shafile)
            except:
                pass
            obj = container.store_object(
                shafile, json.dumps(self.shalist, sort_keys=True, indent=4))
            break
        except Exception as e:
            print('%s Upload FAILED TO UPLOAD CHECKSUM (%s)'
                  % (datetime.datetime.now(), e))

def checksum(self):
    if 'checksum' in self.cache:
        return self.cache['checksum']

    write_remote_checksum = False
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)
    try:
        self.cache['checksum'] = container.get_object(
            remote_filename(self.path + '.sha512')).fetch()
        container.delete_object(remote_filename(self.path + '.sha512'))
        write_remote_checksum = True
        print('%s Found old style checksum for %s'
              % (datetime.datetime.now(), self.path))
    except:
        print('%s Missing checksum for %s'
              % (datetime.datetime.now(), self.path))
        local_file = self.fetch()
        h = hashlib.sha512()
        # Hash the fetched file in 1 MB chunks.
        with open(local_file, 'rb') as f:
            d = f.read(1024 * 1024)
            while d:
                h.update(d)
                d = f.read(1024 * 1024)
        os.remove(local_file)
        self.cache['checksum'] = h.hexdigest()
        write_remote_checksum = True

    if write_remote_checksum:
        self.write_checksum(self.cache['checksum'])

    return self.cache['checksum']

def main(username, project, list):
    pyrax.set_setting('identity_type', 'rackspace')
    with open(os.path.expanduser('~/.bugminion'), 'r') as f:
        conf = json.loads(f.read())

    pyrax.set_credentials(conf['access_key'], conf['secret_key'],
                          region=conf['region'].upper())
    conn = pyrax.connect_to_cloudfiles(region=conf['region'].upper())
    container = conn.create_container(conf['container'])

    now = datetime.datetime.now()
    datestamp = '%04d%02d%02d' % (now.year, now.month, now.day)

    with open(list) as f:
        with open('%s.csv' % list, 'w') as csvfile:
            csvwriter = csv.writer(csvfile, dialect='excel')
            for bug in f.readlines():
                bug = bug.rstrip()
                try:
                    data = json.loads(container.get_object(
                        '%s-bug/%s' % (project, bug)).get())
                except pyrax.exceptions.NoSuchObject:
                    data = {}
                csvwriter.writerow([
                    'https://bugs.launchpad.net/nova/+bug/%s' % bug,
                    data.get('title', 'unknown'),
                    data.get('status', 'unknown'),
                    username])

    print('Done!')

def __init__(self, options=None, *args, **kwargs):
    """
    Delegates to super, then attaches a pyrax cloud files connection.
    """
    # Transparent delegation to super
    super(CloudFilesStorage, self).__init__(*args, **kwargs)

    # Get cloudfiles settings, if options were not provided
    if options is None:
        try:
            options = settings.CLOUDFILES
        except AttributeError:
            raise ImproperlyConfigured(
                u'Provide options or use settings.CLOUDFILES')

    # Set pyrax authentication type to "rackspace" which is the one used
    # for the Rackspace "public cloud"
    pyrax.set_setting('identity_type', 'rackspace')

    # Set the encoding to utf-8 (the default, but in the name of
    # explicitness)
    pyrax.set_setting('encoding', 'utf-8')

    # Get credentials
    try:
        username, api_key = options['USERNAME'], options['API_KEY']
    except KeyError:
        raise ImproperlyConfigured(
            u'USERNAME and API_KEY are both required options')

    # Authenticate (accesses network)
    try:
        pyrax.set_credentials(username, api_key)
    except AuthenticationFailed:
        raise ImproperlyConfigured(
            u'Rackspace Cloudfiles API authentication failed - check '
            u'username and api_key')

    # Get the region
    try:
        region = options['REGION']
    except KeyError:
        raise ImproperlyConfigured(u'REGION is a required option')

    # Attach a cloudfiles connection for the selected region
    self.cloudfiles = pyrax.connect_to_cloudfiles(
        region=region, public=options.get('PUBLIC', True))

    # Get the container name
    try:
        container = options['CONTAINER']
    except KeyError:
        raise ImproperlyConfigured(u'CONTAINER is a required option')

    # Attach the container
    try:
        self.container = self.cloudfiles.get_container(container)
    except NoSuchContainer:
        raise ImproperlyConfigured(
            u'No such container named "{c}"'.format(c=container))

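# The options read above imply a Django settings block like the following.
# This is a sketch only: USERNAME, API_KEY, REGION, and CONTAINER are required
# (each raises ImproperlyConfigured when missing), PUBLIC is optional and
# defaults to True; the values are placeholders.
CLOUDFILES = {
    "USERNAME": "<rackspace username>",
    "API_KEY": "<rackspace api key>",
    "REGION": "DFW",
    "CONTAINER": "media",
    "PUBLIC": True,
}
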
def deleteFile(username, userid, userkey, userenabled, lid, lname, fp, date,
               **kw):
    if userenabled:
        print("Access CloudFiles for user id %s : user name %s" %
              (pyrax.identity.user['id'], pyrax.identity.user['name']))
        filename = genRemoteFileName(lid, lname, date)
        gencname = genContName(lid, lname, date)
        if gencname in deleted_containers:
            print("container %s already deleted" % gencname)
            return
        cf = pyrax.connect_to_cloudfiles(region=getRegion())
        try:
            print("Deleting...\n Remote File Name: %s size(%i) as %s, "
                  "Container Name: %s" %
                  (fp, os.stat(fp).st_size, filename, gencname))
            print("WARNING DELETING Container and files from %s" % gencname)
            delcont = cf.get_container(gencname)
            print("Retrieved container to delete is %s" % delcont.name)
            cf.delete_container(gencname, del_objects=True)
            deleted_containers.add(gencname)
            print("Successfully deleted file/container for: LBID: %s" % lid)
        except Exception as e:
            print("DELETING failed for %s %s. Exception: %s" %
                  (userid, username, e))
            return
        except KeyboardInterrupt:
            print("Skipping this entry")
            time.sleep(1.0)

def cache_clean(folder, extension):
    # NOTE: Manually install gevent & pyrax; no need for them to be
    # dependencies just for this method.
    from gevent import monkey
    from gevent.pool import Pool
    from gevent import Timeout
    monkey.patch_all()

    import six
    import pyrax
    import logging
    from mfr.server import settings

    # Monkey patch pyrax for python 3 compatibility.
    def _add_details(self, info):
        """
        Takes the dict returned by the API call and sets the corresponding
        attributes on the object.
        """
        for (key, val) in six.iteritems(info):
            if six.PY2 and isinstance(key, six.text_type):
                key = key.encode(pyrax.get_encoding())
            elif isinstance(key, bytes):
                key = key.decode("utf-8")
            setattr(self, key, val)
    pyrax.resource.BaseResource._add_details = _add_details

    # WARNING: We are using provider specific functions to enumerate files
    # to quickly purge the cache, which can contain hundreds of thousands of
    # objects. Thus we assert the provider; this will need updating if we
    # move providers.
    assert settings.CACHE_PROVIDER_NAME == 'cloudfiles'

    logging.captureWarnings(True)
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_setting('verify_ssl', True)
    pyrax.set_credentials(settings.CACHE_PROVIDER_CREDENTIALS['username'],
                          settings.CACHE_PROVIDER_CREDENTIALS['token'])
    cf = pyrax.connect_to_cloudfiles(
        region=settings.CACHE_PROVIDER_CREDENTIALS['region'].upper(),
        public=True)
    container = cf.get_container(settings.CACHE_PROVIDER_SETTINGS['container'])

    def delete_object(obj):
        # Added timeout of 5 seconds just in case.
        with Timeout(5, False):
            try:
                print(obj)
                obj.delete()
            except Exception as ex:
                print(ex)

    pool = Pool(100)
    objects = container.get_objects(prefix=folder, limit=5000, marker='')
    while objects:
        for obj in objects:
            if obj.name.endswith(extension):
                pool.spawn(delete_object, obj)
        objects = container.get_objects(prefix=folder, limit=5000,
                                        marker=objects[-1].name)
    pool.join()

def __init__(self, context):
    self.context = context
    if self.context.config.RACKSPACE_PYRAX_REGION:
        pyrax.set_default_region(self.context.config.RACKSPACE_PYRAX_REGION)
    pyrax.set_credential_file(
        expanduser(self.context.config.RACKSPACE_PYRAX_CFG))
    self.cloudfiles = pyrax.connect_to_cloudfiles(
        public=self.context.config.RACKSPACE_PYRAX_PUBLIC)

def main(username, project):
    pyrax.set_setting('identity_type', 'rackspace')
    with open(os.path.expanduser('~/.bugminion'), 'r') as f:
        conf = json.loads(f.read())

    pyrax.set_credentials(conf['access_key'], conf['secret_key'],
                          region=conf['region'].upper())
    conn = pyrax.connect_to_cloudfiles(region=conf['region'].upper())
    container = conn.create_container(conf['container'])

    # Read the most recent bug dump
    most_recent = common.get_most_recent_dump(container, project)
    most_recent_datestamp = most_recent.split('/')[1]
    print('Using the dump from %s' % most_recent)

    bug_list = json.loads(container.get_objects(prefix=most_recent)[0].get())
    for priority in common.PRIORITIES:
        targets = bug_list.get(priority, [])
        random.shuffle(targets)

        for bug in targets:
            triages = common.triages(container, project, bug)
            if not common.recently_triaged(triages):
                print('Bug %s (%s) is not triaged' % (bug, priority))
                print('Triages: %s' % triages)

                data = json.loads(container.get_object(
                    '%s-bug/%s' % (project, bug)).get())
                for field in common.DISPLAY_ORDER:
                    print('%s: %s' % (field, data.get(field, '')))
                print('tags: %s' % ' '.join(data.get('tags', [])))
                print()
                print('Description:')
                print()
                print(data.get('description'))
                print()

                triage = {'reviewer': username}
                sys.stdout.write('OSIC (y/n)? ')
                triage['osic'] = sys.stdin.readline().rstrip()

                if triage['osic'] == 'y':
                    for question in common.QUESTIONS:
                        sys.stdout.write('%s? ' % question)
                        answer = sys.stdin.readline().rstrip()
                        triage[question] = answer

                common.clobber_object(
                    container,
                    '%s-bug/%s-%s' % (project, bug, most_recent_datestamp),
                    json.dumps(triage, indent=4, sort_keys=True))
                print()
                print()

    print('Done!')

def cloudfiles_func(settings, filename, results):
    """
    Uploads files to Rackspace Cloud Files.
    """
    name = threading.currentThread().getName()
    logger = logging.getLogger(__name__ + "." + name)

    creds_file = settings['credential_file']
    pyrax.set_credential_file(creds_file)
    pyrax.set_setting('use_servicenet', settings['use_snet'])
    region = settings['region']
    container_name = settings['container_name']
    nest_by_timestamp = settings.get('nest_by_timestamp', False)
    obj_ttl = settings.get('set_ttl', None)

    try:
        cf = pyrax.connect_to_cloudfiles(region=region)
        container = cf.get_container(container_name)
    except:
        logger.error("Unable to connect to cloudfiles. Transfer for {0} "
                     "aborted, failing gracefully.".format(filename))
        results.append(name)
        return

    if os.path.getsize(filename) >= 5368709120:
        logger.error("{0} is too large. Files over 5GB are not currently "
                     "supported.".format(filename))
        results.append(name)
        return

    obj_name = os.path.basename(filename)
    # Create new obj_name for nested directory
    if nest_by_timestamp:
        t = os.path.getmtime(filename)
        d = dt.fromtimestamp(t)
        obj_name = "{year}/{month}/{day}/{filename}".format(
            year=d.strftime("%Y"),
            month=d.strftime("%m"),
            day=d.strftime("%d"),
            filename=obj_name)

    chksum = pyrax.utils.get_checksum(filename)
    for i in range(MAX_RETRIES):
        try:
            start = time.time()
            # Used for testing the retry:
            #raise pyrax.exceptions.UploadFailed()
            obj = container.upload_file(filename, obj_name=obj_name,
                                        etag=chksum, ttl=obj_ttl)
            end = time.time()
            logger.debug("%s transferred to %s in %.2f secs." %
                         (filename, container_name, (end - start)))
            break
        except pyrax.exceptions.UploadFailed:
            logger.warning("Upload to container:%s in %s failed, retry %d" %
                           (container_name, region, i + 1))
            time.sleep(2)
    else:
        logger.error("Upload to container:%s in %s failed!" %
                     (container_name, region))
    results.append(name)

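# A sketch of how cloudfiles_func() might be driven, one thread per file.
# The settings keys match the reads above; the credential file path and the
# file list are hypothetical. results collects a thread name on every exit
# path, so it doubles as a completion list.
import threading

settings = {"credential_file": "~/.rackspace_credentials",
            "use_snet": False,
            "region": "DFW",
            "container_name": "backups"}
results = []
threads = [threading.Thread(target=cloudfiles_func,
                            args=(settings, f, results))
           for f in ["/tmp/a.tar.gz", "/tmp/b.tar.gz"]]
for t in threads:
    t.start()
for t in threads:
    t.join()
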
def isdir(self):
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)
    prefix = remote_filename(self.path) + '~'
    results = container.get_objects(prefix=prefix)
    if not results:
        return False
    return True

def _get_connection(self):
    if not hasattr(self._local_cache, "connection"):
        public = not self.use_snet  # invert
        pyrax.set_credentials(self.username, self.api_key, authenticate=True)
        connection = pyrax.connect_to_cloudfiles(region=self.region,
                                                 public=public)
        self._local_cache.connection = connection
    return self._local_cache.connection

def init_thread(self, local):
    """
    Initialize thread-specific RAX connection & data list.

    :param local: The Local object
    :return: None
    """
    local.rax = pyrax.connect_to_cloudfiles(self.region, True)
    local.data = dict()
    local.size = 0

def setUp(self):
    pyrax.connect_to_cloudservers = Mock()
    pyrax.connect_to_cloud_loadbalancers = Mock()
    pyrax.connect_to_cloud_databases = Mock()
    pyrax.connect_to_cloud_blockstorage = Mock()
    pyrax.clear_credentials()
    pyrax.identity = FakeIdentity()
    pyrax.set_credentials("fakeuser", "fakeapikey")
    pyrax.connect_to_cloudfiles()
    self.client = pyrax.cloudfiles
    self.container = FakeContainer(self.client, self.container_name, 0, 0)
    self.container.name = self.container_name
    self.client.get_container = Mock(return_value=self.container)
    self.client.connection.get_container = Mock()
    self.client.connection.head_object = Mock(return_value=fake_attdict)
    self.storage_object = self.client.get_object(self.container, "testobj")
    self.client._container_cache = {}
    self.container.object_cache = {}

def dlcont(c, dc):
    datacenter = dc
    cf = pyrax.connect_to_cloudfiles(datacenter)
    cont = cf.get_container(c)
    objs = cont.get_objects()
    for i in objs:
        print("Downloading %s" % i)
        f_gen = cont.fetch_object(i, chunk_size=1024)
        # Write in binary mode; the fetched chunks are raw bytes.
        with open(i.name, "wb") as dl_file:
            dl_file.write("".join(f_gen))

def test_custom_region(self):
    "Ensure that the region option works properly"
    self.ord_connection = pyrax.connect_to_cloudfiles(region="ORD",
                                                      public=self.public)
    name = "{0}-ORD".format(CUMULUS["CONTAINER"])
    self.ord_container = self.ord_connection.create_container(name)
    self.assertTrue(self.ord_connection.get_container(name))
    self.ord_connection.delete_container(name)
    self.assertFalse([c.name
                      for c in self.ord_connection.get_all_containers()
                      if c.name == name])

def push(topdir, region, container_name):
    pyrax.set_setting('identity_type', 'rackspace')
    with open(os.path.expanduser('~/.cloudfiles'), 'r') as f:
        conf = json.loads(f.read())
    pyrax.set_credentials(conf['access_key'], conf['secret_key'],
                          region=region)
    conn = pyrax.connect_to_cloudfiles(region=region.upper(), public=False)
    container = conn.create_container(container_name)
    copy_dir(topdir, topdir, container)

def setUp(self):
    pyrax.connect_to_cloudservers = Mock()
    pyrax.connect_to_cloud_loadbalancers = Mock()
    pyrax.connect_to_cloud_databases = Mock()
    pyrax.connect_to_cloud_blockstorage = Mock()
    pyrax.clear_credentials()
    pyrax.identity = FakeIdentity()
    pyrax.set_setting("region", None)
    pyrax.set_credentials("fakeuser", "fakeapikey")
    pyrax.connect_to_cloudfiles()
    self.client = pyrax.cloudfiles
    self.container = FakeContainer(self.client, self.container_name, 0, 0)
    self.container.name = self.container_name
    self.client.get_container = Mock(return_value=self.container)
    self.client.connection.get_container = Mock()
    self.client.connection.head_object = Mock(return_value=fake_attdict)
    self.storage_object = self.client.get_object(self.container, "testobj")
    self.client._container_cache = {}
    self.container.object_cache = {}

def _get_driver(driver_type, region='ORD'):
    """
    Returns the appropriate driver for the specified rackspace product.

    Available options include::

        lb: Cloud Load Balancers
        db: Cloud Databases
        dns: Cloud DNS
        bs: Cloud Block Storage
        mon: Cloud Monitoring
        net: Cloud Networks
        cf: Cloud Files
        cs: Cloud Servers

    :param driver_type: A str or unicode object for the appropriate type of
        driver above.
    :param region: A str or unicode object specifying which region the
        driver should be initialized for.
    :return: A driver object initialized to the specified region
    :raise TypeError:
    :raise KeyError: If no valid drivers are found
    """
    _auth()
    if not isinstance(driver_type, six.string_types):
        raise TypeError("driver_type must be str or unicode object")
    if not isinstance(region, six.string_types):
        raise TypeError("region must be str or unicode object")
    region = region.upper()
    if driver_type == "lb":
        return pyrax.connect_to_cloud_loadbalancers(region)
    if driver_type == "db":
        return pyrax.connect_to_cloud_databases(region)
    if driver_type == "dns":
        return pyrax.connect_to_cloud_dns()
    if driver_type == "bs":
        return pyrax.connect_to_cloud_blockstorage(region)
    if driver_type == "mon":
        return pyrax.connect_to_cloud_monitoring(region)
    if driver_type == "net":
        return pyrax.connect_to_cloud_networks(region)
    if driver_type == 'cf':
        return pyrax.connect_to_cloudfiles(region)
    if driver_type == 'cs':
        return pyrax.connect_to_cloudservers(region)
    raise KeyError(u"No Driver found by: {}".format(driver_type))

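# Hedged usage of the dispatcher above, using the driver keys listed in its
# docstring; region strings are normalized to upper case internally.
cf = _get_driver("cf", region="dfw")   # Cloud Files client for DFW
lb = _get_driver("lb", region="ord")   # Cloud Load Balancers client for ORD
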
def handle(self, *args, **options):
    self.connect()
    account = self.conn.get_account()
    if args:
        container_names = args
    else:
        container_names = [c["name"] for c in account[1]]
    containers = {}
    for container_name in container_names:
        containers[container_name] = self.conn.head_container(container_name)
    if not containers:
        print("No containers found.")
        return
    if not args:
        print("{0}, {1}, {2}\n".format(
            account[0]["x-account-container-count"],
            account[0]["x-account-object-count"],
            account[0]["x-account-bytes-used"],
        ))
    opts = ["name", "count", "size", "uri"]
    for container_name, values in containers.iteritems():
        if CUMULUS["USE_PYRAX"]:
            if CUMULUS["PYRAX_IDENTITY_TYPE"]:
                pyrax.set_setting("identity_type",
                                  CUMULUS["PYRAX_IDENTITY_TYPE"])
            pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
            public = not CUMULUS["SERVICENET"]
            connection = pyrax.connect_to_cloudfiles(
                region=CUMULUS["REGION"], public=public)
            metadata = connection.get_container_cdn_metadata(container_name)
            if ("x-cdn-enabled" not in metadata or
                    metadata["x-cdn-enabled"] == "False"):
                uri = "NOT PUBLIC"
            else:
                uri = metadata["x-cdn-uri"]
            info = {
                "name": container_name,
                "count": values["x-container-object-count"],
                "size": values["x-container-bytes-used"],
                "uri": uri,
            }
            output = [str(info[o]) for o in opts if options.get(o)]
            if not output:
                output = [str(info[o]) for o in opts]
            print(", ".join(output))
        else:
            headers, data = self.conn.get_container(container_name)
            print(headers)
            print(data)

def size(self):
    if 'size' in self.cache:
        return self.cache['size']

    print('%s Querying the size of %s in %s'
          % (datetime.datetime.now(), self.path, self.region))
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)
    obj = container.get_object(remote_filename(self.path))
    self.cache['size'] = obj.total_bytes
    return self.cache['size']

def __init__(self, *args, **kwargs):
    pyrax.set_setting('identity_type',
                      self.setting('PYRAX_IDENTITY_TYPE', 'rackspace'))
    pyrax_password = (self.setting('PYRAX_PASSWORD', None) or
                      self.setting('PYRAX_APIKEY', None))
    if pyrax_password is None:
        raise EnvironmentError(
            'Backuper: Settings `PYRAX_PASSWORD` and `PYRAX_APIKEY` '
            'are not defined in the environment. Backuper needs at '
            'least one of them.')
    pyrax.set_credentials(self.setting('PYRAX_USERNAME'), pyrax_password)
    self.cloudfiles = pyrax.connect_to_cloudfiles()
    self.settings = self.read_config()

def get_rackspace_container(self):
    # TODO ensure we can have concurrent pyraxes for multiple apps
    pyrax.set_setting("identity_type", "rackspace")
    pyrax.set_credentials(self.app.config['CDN_RACKSPACE_USERNAME'],
                          self.app.config['CDN_RACKSPACE_KEY'])
    cf = pyrax.connect_to_cloudfiles(self.app.config['CDN_RACKSPACE_REGION'])
    container = cf.create_container(
        self.app.config['CDN_RACKSPACE_CONTAINER'])
    container.make_public()
    return container

def test_connect_to_cloudfiles_ServiceNet(self):
    orig = pyrax.get_setting("use_servicenet")
    pyrax.set_setting("use_servicenet", True)
    pyrax.cloudfiles = None
    pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
    sav = pyrax._create_client
    pyrax._create_client = Mock()
    cf = pyrax.connect_to_cloudfiles(public=False)
    pyrax._create_client.assert_called_once_with(ep_name="object_store",
                                                 region=None, public=False)
    pyrax.set_setting("use_servicenet", orig)
    pyrax._create_client = sav

def _get_connection(self):
    if not hasattr(self, "_connection"):
        if CUMULUS["USE_PYRAX"]:
            public = not self.use_snet  # invert
            self.ord_connection = pyrax.connect_to_cloudfiles(
                region="ORD", public=public)
            self.dfw_connection = pyrax.connect_to_cloudfiles(
                region="DFW", public=public)
            if CUMULUS["REGION"] == "ORD":
                self._connection = self.ord_connection
            else:
                self._connection = self.dfw_connection
        else:
            self._connection = swiftclient.Connection(
                authurl=CUMULUS["AUTH_URL"],
                user=CUMULUS["USERNAME"],
                key=CUMULUS["API_KEY"],
                snet=CUMULUS["SERVICENET"],
                auth_version=CUMULUS["AUTH_VERSION"],
                tenant_name=CUMULUS["AUTH_TENANT_NAME"],
            )
    return self._connection

def store(self, local_path):
    # Uploads sometimes time out. Retry three times.
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)
    for i in range(3):
        try:
            obj = container.upload_file(
                local_path, obj_name=remote_filename(self.path))
            break
        except Exception as e:
            print('%s Upload FAILED (%s)' % (datetime.datetime.now(), e))

def test_connect_to_cloudfiles_ServiceNet(self, client):
    orig = pyrax.get_setting("use_servicenet")
    pyrax.set_setting("use_servicenet", True)
    pyrax.cloudfiles = None
    pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
    cf = pyrax.connect_to_cloudfiles(public=False)
    # Check the call arguments to see that our setting stuck and we're
    # sending internalURL on to CFClient.
    _, kwargs = client.call_args
    opts = kwargs["os_options"]
    self.assertEqual(opts["endpoint_type"], "internalURL")
    self.assertIsNotNone(cf)
    pyrax.set_setting("use_servicenet", orig)

def __init__(self, region, container_name, path):
    self.region = region
    self.container_name = container_name
    self.path = path
    self.shalist = {}
    self.remote_files = {}

    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)

    if not self.path:
        self.shalist_path = '.shalist'
        prefix = None
    else:
        self.shalist_path = remote_filename(
            os.path.join(self.path, '.shalist'))
        prefix = remote_filename(self.path)

    for i in range(3):
        try:
            self.shalist = json.loads(
                container.get_object(remote_filename(
                    self.shalist_path)).fetch())
            break
        except:
            pass

    print('%s Finding existing remote files' % datetime.datetime.now())
    try:
        marker = None
        while True:
            results = container.get_objects(prefix=prefix, marker=marker)
            print('%s ... %d results, marker %s'
                  % (datetime.datetime.now(), len(results), marker))
            if not results:
                break
            for f in results:
                marker = f.name
                if f.name.endswith('.sha512'):
                    pass
                elif f.name.endswith('.shalist'):
                    pass
                else:
                    self.remote_files[f.name.replace('~', '/')] = True
    except pyrax.exceptions.NoSuchObject:
        pass

    print('%s Found %d existing files in %s'
          % (datetime.datetime.now(), len(self.remote_files), self.region))

def fetch(self):
    conn = pyrax.connect_to_cloudfiles(region=self.region.upper())
    container = conn.create_container(self.container_name)

    (local_fd, local_file) = tempfile.mkstemp()
    os.close(local_fd)

    url = container.get_object(
        remote_filename(self.path)).get_temp_url(3600)
    url = url.replace(' ', '%20')
    print('%s Fetch URL is %s' % (datetime.datetime.now(), url))

    maxval = self.size()
    if maxval == 0:
        # Special case for zero length remote files
        with open(local_file, 'w') as f:
            pass
        return local_file

    if has_progressbar:
        widgets = ['Fetching: ', ' ', progressbar.Percentage(), ' ',
                   progressbar.Bar(marker=progressbar.RotatingMarker()),
                   ' ', progressbar.ETA(), ' ',
                   progressbar.FileTransferSpeed()]
        pbar = progressbar.ProgressBar(widgets=widgets,
                                       maxval=maxval).start()

    r = urllib2.urlopen(url)
    count = 0
    try:
        with open(local_file, 'w') as f:
            d = r.read(409600)
            count += len(d)
            while d:
                f.write(d)
                d = r.read(14096)
                count += len(d)
                if has_progressbar:
                    pbar.update(count)
    finally:
        if has_progressbar:
            pbar.finish()

    print('%s Fetch finished' % datetime.datetime.now())
    r.close()
    return local_file
