    def __init__(self, conf):
        # DONE, CLOUDFILES COMPLIANT
        if conf is None:
            try:
                auth_user = config.get("cf", "apiuser")
                auth_key = config.get("cf", "apikey")
                self.container = config.get("cf", "container")

                try:
                    region_name = config.get("cf", "region_name")
                except ConfigParser.NoOptionError:
                    region_name = DEFAULT_RACKSPACE_LOCATION
            except ConfigParser.NoOptionError:
                log.error("Configuration file not available or incomplete.")
                log.info("Use 'bakthat configure' to create one.")
                return
        else:
            auth_user = conf.get("apiuser")
            auth_key = conf.get("apikey")
            self.container = conf.get("container")
            region_name = conf.get("region_name", DEFAULT_RACKSPACE_LOCATION)

        if region_name in ("dfw", "ord"):
            self.con = cloudfiles.get_connection(auth_user, auth_key,
                                                 authurl="https://identity.api.rackspacecloud.com/v1.0/")
        else:
            self.con = cloudfiles.get_connection(auth_user, auth_key,
                                                 authurl="https://lon.identity.api.rackspacecloud.com/v1.0/")
Example #2
def open_connection_with_credentials(username, api_key, auth_url, verbose=0):
    # Map the "uk" shorthand to the London auth endpoint; default to the US one.
    if auth_url in ("uk", "UK"):
        auth_url = "https://lon.auth.api.rackspacecloud.com/v1.0"
    else:
        auth_url = "https://auth.api.rackspacecloud.com/v1.0"
    return cloudfiles.get_connection(username, api_key, authurl=auth_url)
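A quick usage sketch for the helper above; the credentials are placeholders, and get_all_containers() is the standard python-cloudfiles listing call.

# Hypothetical usage; "myuser" and "myapikey" are placeholders.
conn = open_connection_with_credentials("myuser", "myapikey", "uk")
for container in conn.get_all_containers():
    print container.name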
Example #3
    def __init__(self, queue, config, opt, state, l, i):
        """Call parent __init__ and set queue to read work from"""
        threading.Thread.__init__(self)
        self.queue = queue
        self.l = l
        self.i = i
        self.opt = opt
        self.config = config
        Statsd.config = config
        self.state = state
        self.source = cloudfiles.get_connection(
            config.get(opt.source, "user"),
            config.get(opt.source, "password"),
            authurl=config.get(opt.source, "authurl"),
            timeout=30)
        self.destination = cloudfiles.get_connection(
            config.get(opt.destination, "user"),
            config.get(opt.destination, "password"),
            authurl=config.get(opt.destination, "authurl"),
            timeout=30)
        self.quit = threading.Event()
Example #4
def __get_container__():
    username = os.environ['RACKSPACE_USERNAME']
    api_key = os.environ['RACKSPACE_KEY']
    conn = cloudfiles.get_connection(username, api_key)
    container = conn.create_container(os.environ['RACKSPACE_CONTAINER'])
    container.make_public()
    return container
Example #5
    def __init__(self, username, api_key,
                 region=None, servicenet=False, timeout=4):
        """Initialize connection to Rackspace Cloud Files.

        @param username:   Rackspace Cloud username
        @param api_key:    Rackspace Cloud api_key
        @param region:     Try passing "us" for the US Auth Service or "uk" for
                           the UK Auth Service; omit the parameter to use the
                           library default.
        @param servicenet: If True, the Rackspace ServiceNet network will be
                           used to access Cloud Files.
        @param timeout:    Connection timeout in seconds. (Default: 4)

        """
        self._connParams = {}
        self._connParams['username'] = username
        self._connParams['api_key'] = api_key
        if region is not None:
            try:
                # cloudfiles exposes module-level us_authurl / uk_authurl constants.
                authurl = getattr(cloudfiles, '%s_authurl' % str(region))
                self._connParams['authurl'] = authurl
            except AttributeError:
                raise Exception("Invalid region code: %s" % str(region))
        if servicenet:
            self._connParams['servicenet'] = True
        self._connParams['timeout'] = timeout
        self._conn = cloudfiles.get_connection(**self._connParams)
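Only __init__ is shown, so the enclosing class name is unknown. Assuming it were called, say, CloudFilesInfo (a placeholder, not from the source), construction would look like this:

# Hypothetical usage; CloudFilesInfo stands in for the unshown class name.
cf = CloudFilesInfo("myuser", "myapikey", region="uk",
                    servicenet=False, timeout=10)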
Example #6
File: sync.py Project: lowks/cfsync
def main(argv):
  user = None
  key = None
  delete = False
  try:
    opts, args = getopt.getopt(argv, "hu:k:c:d", ["help", "user=", "key=", "container=", "delete"])
  except getopt.GetoptError:
    Sync.usage()
    sys.exit(2)
  for opt, arg in opts:
    logging.debug("Processing opt: " + opt)
    if opt in ("-h", "--help"):
      Sync.usage(sys.argv[0], args)
      sys.exit()
    elif opt in ("-u", "--user"):
      user = arg
    elif opt in ("-k", "--key"):
      key = arg
    elif opt in ("-d", "--delete"):
      delete = True
  print "Authenticating..."
  conn = cloudfiles.get_connection(user, key)
  cfsync = Sync(conn)
  # cfsync.list_files(args[1])
  if delete:
    cfsync.clear(args[0])
  cfsync.upload(args[0], args[1])
Example #7
def upload_to_dho(dho_user, dho_key, backup_loc):
    conn = cloudfiles.get_connection(
        username=dho_user,
        api_key=dho_key,
        authurl='https://objects-us-west-1.dream.io/auth',
    )

    container = create_container(conn)

    for category in os.listdir(backup_loc):
        category_path = os.path.join(backup_loc, category)
        for section in os.listdir(category_path):
            section_path = os.path.join(category_path, section)
            for file_name in os.listdir(section_path):
                file_path = os.path.join(section_path, file_name)
                print "uploading " + file_path
                obj = container.create_object(file_path)
                uploaded = False
                i = 0
                while i <= 4 and not uploaded:
                    try:
                        obj.load_from_filename(file_path)
                        uploaded = True

                    except ssl.SSLError:
                        if i < 4:
                            print "Failed to upload " + file_path + ", trying again"
                            i = i + 1

                        else:
                            print "Failed to upload 5 times, aborting"
                            sys.exit(1)
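The retry loop above is tied to ssl.SSLError and a fixed five attempts. Factoring it into a small helper makes the intent easier to reuse; this is a sketch under the same assumptions, not part of the original script.

import ssl
import time

def upload_with_retries(obj, file_path, attempts=5, delay=1):
    # Retry load_from_filename() on SSL errors, mirroring the loop above.
    for attempt in range(attempts):
        try:
            obj.load_from_filename(file_path)
            return
        except ssl.SSLError:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)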
Example #8
    def sync_files(self):
        self.conn = cloudfiles.get_connection(self.USERNAME,
                                              self.API_KEY,
                                              servicenet=self.USE_SERVICENET)

        try:
            self.container = self.conn.get_container(self.STATIC_CONTAINER)
        except cloudfiles.errors.NoSuchContainer:
            self.container = self.conn.create_container(self.STATIC_CONTAINER)

        if not self.container.is_public():
            self.container.make_public()

        # if -w option is provided, wipe out the contents of the container
        if self.wipe:
            if self.test_run:
                print "Wipe would delete %d objects." % self.container.object_count
            else:
                print "Deleting %d objects..." % self.container.object_count
                for cloud_obj in self.container.get_objects():
                    self.container.delete_object(cloud_obj.name)

        # walk through the directory, creating or updating files on the cloud
        os.path.walk(self.DIRECTORY, self.upload_files, "foo")

        # remove any files on remote that don't exist locally
        self.delete_files()

        # print out the final tally to the cmd line
        self.update_count = self.upload_count - self.create_count
        print
        if self.test_run:
            print "Test run complete with the following results:"
        print "Skipped %d. Created %d. Updated %d. Deleted %d." % (
            self.skip_count, self.create_count, self.update_count, self.delete_count)
Example #9
def main():

    user = "******"
    key = "Rn1qOp0IQkxcT1FCDezaqxmpGmIUbw7bkpXeF31J"
    authurl = "http://127.0.0.1:80/auth"
    authurl = "http://192.168.59.103:80/auth"

    conn = cloudfiles.get_connection(username=user,
                                     api_key=key,
                                     authurl=authurl)

    print("Create test_container")
    test_container = conn.create_container('test_container')

    print("List all containers")
    print(conn.get_all_containers())

    print("Put test_object in test_container")
    test_object = test_container.create_object("test_object")
    test_object.content_type = "text/plain"
    test_object.load_from_filename("/etc/ceph/ceph.conf")

    print("List all objects in test_container")
    print(test_container.get_objects())

    print("Download test_object")
    test_object = test_container.get_object("test_object")
    test_object.save_to_filename('./test_object')

    print("Delete test_object")
    test_container.delete_object("test_object")

    print("Delete test_container")
    conn.delete_container("test_container")
Example #10
def collect(user, key, rule, container='', dryrun=False):
    """
    Connects to rackspace with the user and the key and crawls every container
    applying the rule to each cloudfile object. If the rule applies, i.e.
    returns True, the object is deleted.

    If a container name is passed to the parameter 'container', only that
    container will be crawled.

    If dryrun is True, the objects will not be deleted.

    The function returns a list with the names of the objects to which the
    rule was successfully applied.
    """
    conn = cloudfiles.get_connection(user, key)
    if container:
        containers = [conn.get_container(container)]
    else:
        containers = conn.get_all_containers()

    deleted = []
    for cont in containers:
        for obj in progress.bar(cont.get_objects(), label="Removing Objects"):
            if rule.apply(obj):
                if not dryrun:
                    cont.delete_object(obj.name)
                deleted.append(obj.name)

    return deleted
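collect() only requires that the rule object expose an apply(obj) method returning True when the object should be removed. A minimal hypothetical rule, matching on a name prefix (PrefixRule is illustrative, not from the source):

# Hypothetical rule object for collect() above.
class PrefixRule(object):
    def __init__(self, prefix):
        self.prefix = prefix

    def apply(self, obj):
        # Delete objects whose names start with the given prefix.
        return obj.name.startswith(self.prefix)

deleted = collect("myuser", "myapikey", PrefixRule("tmp/"), dryrun=True)
print deleted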
Example #11
def get_rackspace_connection():
    """Returns a new Rackspace connection.

    Returns:
        new Rackspace connection
    """
    return cloudfiles.get_connection(RS_USERNAME, RS_APIKEY)
Example #12
    def _get_connection(self):
        if not hasattr(self.local_cache, 'connection'):
            connection = cloudfiles.get_connection(self.username,
                                    self.api_key, **self.connection_kwargs)
            self.local_cache.connection = connection

        return self.local_cache.connection
Example #13
File: api.py Project: dais/colony
def swift_api(request):
    LOG.debug('object store connection created using token "%s"'
                ' and url "%s"' %
                (request.session['token'], url_for(request, 'object-store')))
    auth = SwiftAuthentication(url_for(request, 'object-store'),
                               request.session['token'])
    return cloudfiles.get_connection(auth=auth)
Example #14
    def __init__(self, queue, config, opt, state, l):
        """Call parent __init__ and set queue to read work from"""
        threading.Thread.__init__(self)
        self.queue = queue
        self.l = l
        self.opt = opt
        self.config = config
        self.state = state
        if opt.source:
            # source may not be set if we're deleting
            self.source = cloudfiles.get_connection(
                config.get(opt.source, "user"),
                config.get(opt.source, "password"),
                authurl=config.get(opt.source, "authurl"),
                timeout=30)
        else:
            self.source = None

        self.destination = cloudfiles.get_connection(
            config.get(opt.destination, "user"),
            config.get(opt.destination, "password"),
            authurl=config.get(opt.destination, "authurl"),
            timeout=30)
        self.quit = threading.Event()
Example #15
def swift_api(request):
    LOG.debug('object store connection created using token "%s"'
                ' and url "%s"' %
                (request.session['token'], url_for(request, 'swift')))
    auth = SwiftAuthentication(url_for(request, 'swift'),
                               request.session['token'])
    return cloudfiles.get_connection(auth=auth)
Example #16
    def handle(self, *args, **options):
        conn = cloudfiles.get_connection(
                username=settings.CUMULUS['USERNAME'],
                api_key=settings.CUMULUS['API_KEY'],
                authurl=settings.CUMULUS['AUTH_URL'])

        if args:
            containers = []
            for container_name in args:
                try:
                    container = conn.get_container(container_name)
                except cloudfiles.errors.NoSuchContainer:
                    raise CommandError("Container does not exist: %s" % container_name)
                containers.append(container)
        else:
            containers = conn.get_all_containers()

        opts = ['name', 'count', 'size', 'uri']

        for container in containers:
            info = {
                'name': container.name,
                'count': container.object_count,
                'size': container.size_used,
                'uri': container.public_uri() if container.is_public() else "NOT PUBLIC",
            }
            output = [str(info[o]) for o in opts if options.get(o)]
            if not output:
                output = [str(info[o]) for o in opts]
            print ', '.join(output)

        if not containers:
            print 'No containers found.'
Example #17
    def __init__(self, *args, **kwargs):
        "Set up the CloudFiles connection and grab the container."
        super(Client, self).__init__(*args, **kwargs)

        container_name = msettings['CLOUDFILES_CONTAINER']
        username = msettings['CLOUDFILES_USERNAME']
        key = msettings['CLOUDFILES_API_KEY']

        if not container_name:
            raise ImproperlyConfigured(
                "CLOUDFILES_CONTAINER is a required setting.")

        if not username:
            raise ImproperlyConfigured(
                "CLOUDFILES_USERNAME is a required setting.")

        if not key:
            raise ImproperlyConfigured(
                "CLOUDFILES_API_KEY is a required setting.")

        self.conn = cloudfiles.get_connection(username, key)
        self.container = self.conn.create_container(container_name)

        if not self.container.is_public():
            self.container.make_public()
Example #18
def application(environ, start_response):
    from datetime import datetime
    our = pytz.timezone('Asia/Novosibirsk')
    i = datetime.now(our)
    # Parse the query string once and reuse the result.
    qs = urlparse.parse_qs(environ["QUERY_STRING"])

    username = qs['ceph_api_user'][0]
    api_key = qs['ceph_api_key'][0]
    api_key = urllib.unquote(api_key).decode('utf8')

    container_name = "records"

    record = qs['recording'][0]
    record = record[1:]  # drop the leading character
    record = i.strftime('%Y%m%d-%H%M%S') + "_" + record

    conn = cloudfiles.get_connection(
        username=username,
        api_key=api_key,
        authurl=authurl,
    )

    length = int(environ["CONTENT_LENGTH"])
    body = "Hello world"

    buf = environ['wsgi.input'].read(length)

    testuser = conn.create_container(container_name)
    obj = testuser.create_object(record)
    obj.write(buf)

    start_response("200 Ok", [("Content-Type", "text/html"),
                              ("Content-Length", str(len(body)))])

    return [body]
Example #19
    def __init__(self, **kwargs):
        self.param_container = kwargs.get('cloudfiles_container')
        self.param_user = kwargs.get('cloudfiles_user')
        self.param_api_key = kwargs.get('cloudfiles_api_key')
        self.param_host = kwargs.get('cloudfiles_host')
        self.param_use_servicenet = kwargs.get('cloudfiles_use_servicenet')

        # the webm MIME type isn't registered by default, so add it
        mimetypes.add_type("video/webm", "webm")

        if not self.param_host:
            _log.info('No CloudFiles host URL specified, '
                      'defaulting to Rackspace US')

        self.connection = cloudfiles.get_connection(
            username=self.param_user,
            api_key=self.param_api_key,
            servicenet=self.param_use_servicenet in ('true', True))

        _log.debug('Connected to {0} (auth: {1})'.format(
            self.connection.connection.host, self.connection.auth.host))

        # get_container() raises NoSuchContainer when the container is missing,
        # so use that rather than comparing a name against a Container object.
        try:
            self.container = self.connection.get_container(
                self.param_container)
        except cloudfiles.errors.NoSuchContainer:
            self.container = self.connection.create_container(
                self.param_container)
            self.container.make_public(ttl=60 * 60 * 2)

        _log.debug('Container: {0}'.format(self.container.name))

        self.container_uri = self.container.public_ssl_uri()
Example #20
def backup_database():
    # First, generate the filename we will be using from the current time in UTC
    timestamp = datetime.utcnow().isoformat()
    timestamp = timestamp.replace(":", "-")
    chdir("/tmp")
    filename = "newdjangosite-prod-db-{0}.dump".format(timestamp)
    dump_file = open(filename, "wb")

    # Now create the backup
    call(["pg_dump", "--format=custom", "newdjangosite_prod"], stdout=dump_file)
    dump_file.close()

    # Now connection to Cloud Files
    connection = get_connection(username="******", api_key=API_KEY, servicenet=True)
    backup_container = connection.get_container("database-backups")

    # Now upload the new backup
    current_backup = backup_container.create_object(filename)
    current_backup.load_from_filename(filename)

    # Now delete the backup file we created locally
    remove(filename)

    # Now prune the backups if needed
    backup_objects = backup_container.get_objects()
    while len(backup_objects) > 168:  # Keep one backup an hour for a week (168 = 24 * 7)
        # We have too many backups, delete the oldest one. The oldest backup
        # has a name that sorts before all others (names embed the timestamp).
        oldest = backup_objects[0]
        for backup in backup_objects:
            if backup.name < oldest.name:
                oldest = backup

        backup_container.delete_object(oldest.name)
        backup_objects = backup_container.get_objects()
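Because the backup names embed ISO timestamps, the linear scan above is just a minimum-by-name search; min() with a key says the same thing more directly. A sketch of the equivalent pruning step:

# Equivalent pruning using min(); the lexicographically smallest name is
# the oldest backup under this naming scheme.
while len(backup_objects) > 168:
    oldest = min(backup_objects, key=lambda o: o.name)
    backup_container.delete_object(oldest.name)
    backup_objects = backup_container.get_objects()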
Example #21
    def __init__(self,
                 username,
                 api_key,
                 region=None,
                 servicenet=False,
                 timeout=4):
        """Initialize connection to Rackspace Cloud Files.

        @param username:   Rackspace Cloud username
        @param api_key:    Rackspace Cloud api_key
        @param region:     Try passing "us" for the US Auth Service or "uk" for
                           the UK Auth Service; omit the parameter to use the
                           library default.
        @param servicenet: If True, the Rackspace ServiceNet network will be
                           used to access Cloud Files.
        @param timeout:    Connection timeout in seconds. (Default: 4)

        """
        self._connParams = {}
        self._connParams['username'] = username
        self._connParams['api_key'] = api_key
        if region is not None:
            try:
                # cloudfiles exposes module-level us_authurl / uk_authurl constants.
                authurl = getattr(cloudfiles, '%s_authurl' % str(region))
                self._connParams['authurl'] = authurl
            except AttributeError:
                raise Exception("Invalid region code: %s" % str(region))
        if servicenet:
            self._connParams['servicenet'] = True
        self._connParams['timeout'] = timeout
        self._conn = cloudfiles.get_connection(**self._connParams)
Example #22
    def _get_connection(self):
        if not hasattr(self.local_cache, 'connection'):
            connection = cloudfiles.get_connection(self.username, self.api_key,
                                                   **self.connection_kwargs)
            self.local_cache.connection = connection

        return self.local_cache.connection
Example #23
    def do_login_uk(self, user, token):
        try:
            self.conn = cloudfiles.get_connection(
                user, token,
                authurl=cloudfiles.uk_authurl)
        except Exception as e:
            print("Login failed: %s" % e)
Example #24
    def __init__(self, **kwargs):
        self.param_container = kwargs.get("cloudfiles_container")
        self.param_user = kwargs.get("cloudfiles_user")
        self.param_api_key = kwargs.get("cloudfiles_api_key")
        self.param_host = kwargs.get("cloudfiles_host")
        self.param_use_servicenet = kwargs.get("cloudfiles_use_servicenet")

        # the webm MIME type isn't registered by default, so add it
        mimetypes.add_type("video/webm", "webm")

        if not self.param_host:
            _log.info("No CloudFiles host URL specified, " "defaulting to Rackspace US")

        self.connection = cloudfiles.get_connection(
            username=self.param_user,
            api_key=self.param_api_key,
            servicenet=self.param_use_servicenet in ("true", True),
        )

        _log.debug("Connected to {0} (auth: {1})".format(self.connection.connection.host, self.connection.auth.host))

        # get_container() raises NoSuchContainer when the container is missing,
        # so use that rather than comparing a name against a Container object.
        try:
            self.container = self.connection.get_container(self.param_container)
        except cloudfiles.errors.NoSuchContainer:
            self.container = self.connection.create_container(self.param_container)
            self.container.make_public(ttl=60 * 60 * 2)

        _log.debug("Container: {0}".format(self.container.name))

        self.container_uri = self.container.public_ssl_uri()
Example #25
    def check_auth_password(self, username, password):
        try:
            self._set_conn(cloudfiles.get_connection(username, password))
            # authurl='https://auth.stg.swift.racklabs.com/auth'))
            return paramiko.AUTH_SUCCESSFUL
        except Exception:
            return paramiko.AUTH_FAILED
Example #26
def erase(args):
    "erase a remote file from a container"
    connection = cloudfiles.get_connection(
        RACKSPACE_USERNAME,
        RACKSPACE_API_KEY,
    )
    m = '\033[1;31mAre you \033[1;37m100%\033[1;31m ' \
        'sure you wanna delete \033[1;32m"{0} ?"\033[0m (y/n)'

    container = connection.create_container(args.from_container)
    try:
        obj = container.get_object(args.object)
        question = m.format('%s (%s)' % (
            obj.name, convert_bytes(obj.size)))

        agreed = decided = bool(args.imsure)
        while not decided and not agreed:
            i = raw_input(question).strip().lower()
            decided = i in ['y', 'n']
            agreed = i == 'y'

        if agreed:
            obj.purge_from_cdn(email=OUR_EMAIL)
            container.delete_object(obj.name)
        else:
            print
            print "Ok then, never mind"

    except NoSuchObject:
        print 'There is no such object "{0}" in the container "{1}"'.format(
            args.object,
            args.from_container,
        )
Example #27
def main(argv):
    user = None
    key = None
    delete = False
    try:
        opts, args = getopt.getopt(
            argv, "hu:k:c:d", ["help", "user=", "key=", "container=", "delete"])
    except getopt.GetoptError:
        Sync.usage()
        sys.exit(2)
    for opt, arg in opts:
        logging.debug("Processing opt: " + opt)
        if opt in ("-h", "--help"):
            Sync.usage(sys.argv[0], args)
            sys.exit()
        elif opt in ("-u", "--user"):
            user = arg
        elif opt in ("-k", "--key"):
            key = arg
        elif opt in ("-d", "--delete"):
            delete = True
    print "Authenticating..."
    conn = cloudfiles.get_connection(user, key)
    cfsync = Sync(conn)
    # cfsync.list_files(args[1])
    if delete:
        cfsync.clear(args[0])
    cfsync.upload(args[0], args[1])
Example #28
def fetch_newest_object(args):
    "retrieves the most recent backup file"
    fullpath = abspath(expanduser(args.to or '.'))
    connection = cloudfiles.get_connection(
        RACKSPACE_USERNAME,
        RACKSPACE_API_KEY,
    )
    container = connection.create_container(args.from_container)

    remote_backup_items = container.list_objects_info()
    remote_backup_items.sort(key=lambda x: x['last_modified'])
    print "found %d items in the container '%s'" % (
        len(remote_backup_items),
        args.from_container,
    )
    # now we have the newest being the latest

    newest = remote_backup_items[-1]
    backup = container.get_object(newest['name'])
    print "the latest object is %s" % newest['name']
    save_to = args.to and fullpath or newest['name']
    print "saving to %s" % save_to

    progress_callback = progress_for('downloading')
    progress_callback(0, 0, False)
    backup.save_to_filename(save_to, progress_callback)
Example #29
def upload_files(username, key, service_net, auth_url, container_name, files, recursive, prefix, quiet):
	try:
		# Attempt a connection
		conn = cloudfiles.get_connection(username = username, api_key = key, servicenet = service_net, authurl = auth_url)
	except cloudfiles.errors.AuthenticationFailed:
		try:
			# Switch to the other authurl and try again
			if auth_url == cloudfiles.us_authurl:
				auth_url = cloudfiles.uk_authurl
			else:
				auth_url = cloudfiles.us_authurl
			conn = cloudfiles.get_connection(username = username, api_key = key, servicenet = service_net, authurl = auth_url)
		except cloudfiles.errors.AuthenticationFailed:
			# Still didn't work
			sys.stderr.write('Authentication failed!\n')
			return
	except Exception:
		sys.stderr.write('API connection failed!\n')
		return

	try:
		cont = conn.get_container(container_name)
	except cloudfiles.errors.NoSuchContainer:
		cont = conn.create_container(container_name)
	except Exception:
		sys.stderr.write('Failed to create a container called "%s"\n' % (container_name))
	else:
		while len(files) > 0:
			f = files.pop(0)
			if os.path.isfile(f):
				destname = f
				if not recursive:
					destname = os.path.basename(f)
				try:
					if not quiet:
						sys.stdout.write('Uploading "%s"...\n' % (destname))
					obj = cont.create_object('%s%s' % (prefix, destname))
					obj.load_from_filename(f)
				except Exception:
					sys.stderr.write('  Upload of "%s" failed!\n' % (f))
			elif f not in ('.', '..'):
				if recursive:
					for filename in glob.glob("%s/*" % (f)):
						files.append(filename)
				else:
					sys.stderr.write('Ignoring "%s" because it\'s a directory\n' % (f))
Example #30
    def handle(self, *args, **options):
        """Main"""

        if len(args) != 1:
            raise CommandError('Usage: %s' % USAGE)

        container_name = args[0]

        is_yes = options.get('is_yes')
        num_workers = int(options.get('workers'))
        self.batch_size = int(options.get('batch_size'))

        print('Connecting')
        conn = cloudfiles.get_connection(username=settings.CUMULUS['USERNAME'],
                                         api_key=settings.CUMULUS['API_KEY'])
        container = conn.get_container(container_name)

        if not is_yes:
            is_ok = raw_input('Permanently delete container %s? [y|N]' %
                              container)
            if not is_ok == 'y':
                raise CommandError('Aborted')

        queue = multiprocessing.Queue()

        print('Listing objects in container')
        while not self.is_fetch_done and queue.qsize() < (num_workers * 2):
            sys.stdout.write('\rQueued batches: %d. Need: %d ' %
                             (queue.qsize(), (num_workers * 2)))
            sys.stdout.flush()
            self.fetch_more(container, queue)

        print('Deleting objects from container')

        procs = [
            multiprocessing.Process(target=delete, args=(container, queue))
            for _ in range(num_workers)
        ]

        for proc in procs:
            proc.start()

        print('')
        while not queue.empty():
            sys.stdout.write('\rItems remaining: %d                 ' %
                             (queue.qsize() * self.batch_size))
            sys.stdout.flush()
            time.sleep(1)

            if not self.is_fetch_done:
                self.fetch_more(container, queue)

        print('Container empty. Waiting for final threads to finish.')
        for proc in procs:
            proc.join()

        print('Deleting container')
        conn.delete_container(container)
Example #31
    def __init__(self, username, api_key, container):
        """
        Here we set up the connection and select the user-supplied container.
        If the container isn't public (available on Limelight CDN), we make
        it a publicly available container.
        """
        self.connection = cloudfiles.get_connection(username, api_key)
        self.container = self.connection.get_container(container)
        if not self.container.is_public():
            self.container.make_public()
Example #32
def connectCloud(setting, nodeid):
    '''Establish connection.'''
    try:
        user = setting.nodeInfo[nodeid].accesskey
        key = setting.nodeInfo[nodeid].secretkey
        return cloudfiles.get_connection(username=user,
                                         api_key=key,
                                         timeout=666)
    except:
        return False
Example #33
    def _get_connection(self):
        if not hasattr(self, '_connection'):
            self._connection = cloudfiles.get_connection(
                username=self.username,
                api_key=self.api_key,
                authurl=self.auth_url,
                timeout=self.timeout,
                servicenet=self.use_servicenet,
                **self.connection_kwargs)
        return self._connection
Example #34
    def __init__(self):
        """
        Here we set up the connection and select the user-supplied container.
        If the container isn't public (available on Limelight CDN), we make
        it a publicly available container.
        """
        self.connection = cloudfiles.get_connection(CLOUDFILES_USERNAME,
                                                    CLOUDFILES_API_KEY)
        self.container = self.connection.get_container(CLOUDFILES_CONTAINER)
        if not self.container.is_public():
            self.container.make_public()
Example #35
def open_connection_with_configfile(config_file='~/.pycflogin',verbose=0):
    if verbose >= 2: print '-Debug- open_connection_with_configfile - START'
    if verbose >= 1: print '-Debug- config_file:', config_file
    config_file = os.path.expanduser(config_file)
    if verbose >= 2: print '-Debug- expanded config_file:', config_file

    # Read the config file
    config = ConfigParser.ConfigParser()
    if os.path.exists(config_file):
        config.read(config_file)
        # get the username and API
        username = config.get('account', 'username')
        apikey = config.get('account', 'apikey')
        location = config.get('account', 'location')
        if location == "uk" or location == "UK":
            auth_url="https://lon.auth.api.rackspacecloud.com/v1.0"
        else:
            auth_url="https://auth.api.rackspacecloud.com/v1.0"
        if verbose >= 1: print '-Info- Logging in as', username
        if verbose >= 2: print '-Debug- Using AUTH-URL as', auth_url
    else:
        if verbose >= 1: print '-Info- %s file does not exist' % config_file
        print '-Info- No existing configuration file found at %s, would you like to create one?\n' % config_file
        answer = raw_input('Please enter yes to provide new credentials or no to exit. [yes/no]:')
        if answer == 'yes' or answer == 'YES':
            username = raw_input('username:')
            apikey = raw_input('api_key:')
            location = raw_input('location [us/uk]:')
            if (location == 'uk' or location == 'UK'):
                auth_url="https://lon.auth.api.rackspacecloud.com/v1.0"
            else:
                auth_url="https://auth.api.rackspacecloud.com/v1.0"
            if verbose >= 1: print '-Info- Logging in as', username
            if verbose >= 2: print '-Debug- Using AUTH-URL as', auth_url
            if verbose >= 1: print '-Info- Saving credentials in %s file for future use' % config_file
            fo = open(config_file, "wb")
            fo.write("[account]\n")
            fo.write("username=")
            fo.write(username)
            fo.write("\n")
            fo.write("apikey=")
            fo.write(apikey)
            fo.write("\n")
            fo.write("location=")
            fo.write(location)
            fo.write("\n")
            fo.close()
            if verbose >= 1: print '-Debug- Credentials saved to the %s file' % config_file
        else:
            exit()

    connection = cloudfiles.get_connection(username,apikey,authurl=auth_url)
    return connection
Example #36
    def getStats(self):
        """
        Query Rackspace and return stats.

        @return: Dictionary of stats.
        """
        conn = cloudfiles.get_connection(self.username, self.api_key)
        bucket = conn.get_container(self.container)
        return {
            'rackspace_containercount': bucket.object_count,
            'rackspace_containersize': bucket.size_used
        }
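Only getStats() itself appears here; the enclosing class and its constructor are not shown. A hypothetical call, assuming an instance with username, api_key, and container attributes already set:

# Hypothetical usage; "monitor" stands in for an instance of the unshown class.
stats = monitor.getStats()
print "objects: %d, bytes used: %d" % (
    stats['rackspace_containercount'], stats['rackspace_containersize'])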
Example #37
    def _get_connection(self):
        """Return native connection object."""
        kwargs = {"username": self.account, "api_key": self.secret_key}

        # Only add kwarg for servicenet if True because user could set
        # environment variable 'RACKSPACE_SERVICENET' separately.
        if self.servicenet:
            kwargs["servicenet"] = True

        if self.authurl:
            kwargs["authurl"] = self.authurl

        return cloudfiles.get_connection(**kwargs)
Example #38
    def sendImage(self, filename):
        fname = os.path.split(filename)[1]
        conn = cloudfiles.get_connection(self.username,
                                         self.apikey,
                                         servicenet=False,
                                         authurl=self.authurl,
                                         timeout=15)
        container = conn.get_container('public')
        my_dog = container.create_object('***/*****/' + fname)
        my_dog.load_from_filename(filename)
        print "OK"
        #print my_dog
        return "http://******************************" + my_dog.name
Example #39
    def sync_files(self):
        self.conn = cloudfiles.get_connection(username = self.USERNAME,
                                              api_key = self.API_KEY,
                                              authurl = self.AUTH_URL,
                                              servicenet=self.USE_SERVICENET)

        try:
            self.container = self.conn.get_container(self.STATIC_CONTAINER)
        except cloudfiles.errors.NoSuchContainer:
            self.container = self.conn.create_container(self.STATIC_CONTAINER)

        if not self.container.is_public():
            self.container.make_public()

        # if -w option is provided, wipe out the contents of the container
        if self.wipe:
            if self.test_run:
                print "Wipe would delete %d objects." % self.container.object_count
            else:
                print "Deleting %d objects..." % self.container.object_count
                for cloud_obj in self.container.get_objects():
                    self.container.delete_object(cloud_obj.name)
                    if self.verbosity > 1:
                        print "Deleted %s" % cloud_obj.name

        if self.verbosity > 1:
            print "Retreiving cloud file metadata"
        self.cloudfile_info_list = self.container.list_objects_info()

        # walk through the directory, creating or updating files on the cloud
        os.path.walk(self.DIRECTORY, self.upload_files, "foo")

        # remove any files on remote that don't exist locally
        if (not self.add_only) and (not self.no_delete):
            self.delete_files()

        # print out the final tally to the cmd line
        self.update_count = self.upload_count - self.create_count
        print
        if self.test_run:
            print "Test run complete with the following results:"
        print "Skipped %d. Created %d. Updated %d. Deleted %d." % (
            self.skip_count, self.create_count, self.update_count, self.delete_count)

        if self.verbosity > 1:
            print "CDN public uri:           %s" % self.container.public_uri()
            print "CDN public ssl uri:       %s" % self.container.public_ssl_uri()
            print "CDN public streaming uri: %s" % self.container.public_streaming_uri()
            print "CDN TTL: %s" % self.container.cdn_ttl
            print "CDN Size: %s" % self.container.size_used
Example #40
def connectToClouds():
    """
    Open connections to S3 and Cloud Files
    """
    s3Conn = None
    cfConn = None
    try:
        ## boto reads from /etc/boto.cfg (or ~/boto.cfg)
        s3Conn = boto.connect_s3()
        ## the cloud files library doesn't automatically read from a file, so we handle that here:
        cfConfig = ConfigParser.ConfigParser()
        cfConfig.read('/etc/cloudfiles.cfg')
        cfConn = cloudfiles.get_connection(cfConfig.get('Credentials', 'username'),
                                           cfConfig.get('Credentials', 'api_key'))
    except (NoSectionError, NoOptionError, MissingSectionHeaderError, ParsingError), err:
        raise MultiCloudMirrorException("Error in reading Cloud Files configuration file (/etc/cloudfiles.cfg): %s" % (err))
Example #41
    def _get_connection(self):
        """Return native connection object."""
        kwargs = {
            'username': self.account,
            'api_key': self.secret_key,
        }

        # Only add kwarg for servicenet if True because user could set
        # environment variable 'RACKSPACE_SERVICENET' separately.
        if self.servicenet:
            kwargs['servicenet'] = True

        if self.authurl:
            kwargs['authurl'] = self.authurl

        return cloudfiles.get_connection(**kwargs)  # pylint: disable=W0142
Example #42
    def handle(self, *args, **options):
        """Main"""

        if len(sys.argv) != 3:
            raise CommandError('Usage: %s' % USAGE)

        container_name = sys.argv[2]
        print('Creating container: %s' % container_name)

        conn = cloudfiles.get_connection(username=settings.CUMULUS['USERNAME'],
                                         api_key=settings.CUMULUS['API_KEY'])

        container = conn.create_container(container_name)
        container.make_public()

        print('Done')
Example #43
def upload_to_dho(dho_user, dho_key, backup_loc):
    conn = cloudfiles.get_connection(
        username=dho_user,
        api_key=dho_key,
        authurl='https://objects.dreamhost.com/auth',
    )
    container = create_container(conn)

    for category in os.listdir(backup_loc):
        category_path = os.path.join(backup_loc, category)
        for section in os.listdir(category_path):
            section_path = os.path.join(category_path, section)
            for file_name in os.listdir(section_path):
                file_path = os.path.join(section_path, file_name)
                print "uploading " + file_path
                obj = container.create_object(file_path)
                obj.load_from_filename(file_path)
Example #44
    def __init__(self,
                 container_name,
                 verbose,
                 dry_run=False,
                 forward=0,
                 max_attempts=5):
        self.verbose = verbose
        self.dry_run = dry_run
        self.rp_conn = cloudfiles.get_connection(settings.RACKSPACE_USER,
                                                 settings.RACKSPACE_API_KEY)
        self.container = self.rp_conn.get_container(container_name)
        self.max_attempts = max_attempts

        if forward == 0:
            self.start_time = datetime.datetime.now()
        else:
            self.start_time = datetime.datetime.now() + datetime.timedelta(
                days=forward)
        super(RackspaceUtil, self).__init__()
Example #45
    def sync_files(self):
        self.conn = cloudfiles.get_connection(username=self.USERNAME,
                                              api_key=self.API_KEY,
                                              authurl=self.AUTH_URL,
                                              servicenet=self.USE_SERVICENET)

        try:
            self.container = self.conn.get_container(self.STATIC_CONTAINER)
        except cloudfiles.errors.NoSuchContainer:
            self.container = self.conn.create_container(self.STATIC_CONTAINER)

        if not self.container.is_public():
            self.container.make_public()

        # if -w option is provided, wipe out the contents of the container
        if self.wipe:
            if self.test_run:
                print "Wipe would delete %d objects." % self.container.object_count
            else:
                print "Deleting %d objects..." % self.container.object_count
                for cloud_obj in self.container.get_objects():
                    self.container.delete_object(cloud_obj.name)

        # walk through the directory, creating or updating files on the cloud
        os.path.walk(self.DIRECTORY, self.upload_files, "foo")

        # remove any files on remote that don't exist locally
        self.delete_files()

        # print out the final tally to the cmd line
        self.update_count = self.upload_count - self.create_count

        if self.CONTAINER_PURGE:
            self.container.purge_from_cdn(','.join(
                self.PURGE_NOTIFICATION_LIST))

        if self.test_run:
            print "Test run complete with the following results:"
        print "Skipped %d. Created %d. Updated %d. Deleted %d." % (
            self.skip_count, self.create_count, self.update_count,
            self.delete_count)
Example #46
    def setUp(self):
        if not hasattr(self, 'username'):
            cls = self.__class__
            if not ('RCLOUD_API_KEY' in os.environ and
                    'RCLOUD_API_USER' in os.environ):
                print "env RCLOUD_API_USER or RCLOUD_API_KEY not found."
                sys.exit(1)
            cls.username = os.environ['RCLOUD_API_USER']
            cls.api_key = os.environ['RCLOUD_API_KEY']
            cls.auth_url = os.environ.get('RCLOUD_AUTH_URL')
            cls.cnx = CloudFilesFS(self.username,
                                   self.api_key,
                                   authurl=self.auth_url)
            cls.conn = cloudfiles.get_connection(self.username,
                                                 self.api_key,
                                                 authurl=self.auth_url)
        self.cnx.mkdir("/ftpcloudfs_testing")
        self.cnx.chdir("/ftpcloudfs_testing")
        self.container = self.conn.get_container('ftpcloudfs_testing')
Example #47
def connectToClouds():
    """
    Open connections to S3 and Cloud Files
    """
    s3Conn = None
    cfConn = None
    try:
        ## boto reads from ~/.aws/credentials profiles
        session = boto3.Session(profile_name='jwp-webteam')
        s3Conn = session.resource('s3')
        ## the cloud files library doesn't automatically read from a file, so we handle that here:
        cfConfig = ConfigParser.ConfigParser()
        cfConfig.read('/Users/sarahgray/cloudfiles.cfg')
        cfConn = cloudfiles.get_connection(
            cfConfig.get('Credentials', 'username'),
            cfConfig.get('Credentials', 'api_key'))
    except (NoSectionError, NoOptionError, MissingSectionHeaderError,
            ParsingError), err:
        raise MultiCloudMirrorException(
            "Error in reading Cloud Files configuration file (/Users/sarahgray/cloudfiles.cfg): %s"
            % (err))