Example #1
import moto
from fs.osfs import OSFS
from fs.s3fs import S3FS

def prep_filesystem(fs_name):
    if fs_name == 'OSFS':
        # make_temp_dir is a local test helper that yields a temporary
        # directory path and removes it on exit.
        with make_temp_dir() as tmp:
            local = OSFS(tmp)
            yield local
            local.close()

    elif fs_name == 'S3FS':
        # Stand up an in-process S3 mock so no real AWS calls are made.
        m = moto.mock_s3()
        m.start()
        try:
            s3 = S3FS('test-bucket',
                      aws_access_key='MY_KEY',
                      aws_secret_key='MY_SECRET_KEY')
            yield s3
            s3.close()
        finally:
            m.stop()
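This generator is written to be driven externally for setup and teardown. A minimal sketch of one way to consume it, assuming pytest is available and prep_filesystem is importable; the fixture and test names are illustrative:

import pytest

@pytest.fixture(params=['OSFS', 'S3FS'])
def filesystem(request):
    # Delegate setup and teardown to the generator above; after the test,
    # pytest resumes the generator, which closes the fs and stops the mock.
    yield from prep_filesystem(request.param)

def test_roundtrip(filesystem):
    # Both OSFS and S3FS implement the common PyFilesystem interface.
    with filesystem.open('hello.txt', 'w') as f:
        f.write('hello world')
    with filesystem.open('hello.txt', 'r') as f:
        assert f.read() == 'hello world'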
Example #2
def get_s3fs(namespace):
    '''Helper for get_filesystem: build a file system backed by S3.'''
    fullpath = namespace
    if 'prefix' in djfs_settings:
        fullpath = os.path.join(djfs_settings['prefix'], fullpath)
    s3fs = S3FS(djfs_settings['bucket'],
                fullpath,
                aws_access_key=key_id,
                aws_secret_key=key_secret)

    def get_s3_url(self, filename, timeout=60):
        global s3conn
        try:
            if not s3conn:
                s3conn = S3Connection(aws_access_key_id=key_id,
                                      aws_secret_access_key=key_secret)
            return s3conn.generate_url(timeout,
                                       'GET',
                                       bucket=djfs_settings['bucket'],
                                       key=os.path.join(fullpath, filename))
        except Exception:
            # Retry once with a fresh connection; the typical failure is a
            # timed-out connection, but any error triggers the retry.
            s3conn = S3Connection(aws_access_key_id=key_id,
                                  aws_secret_access_key=key_secret)
            return s3conn.generate_url(timeout,
                                       'GET',
                                       bucket=djfs_settings['bucket'],
                                       key=os.path.join(fullpath, filename))

    s3fs = patch_fs(s3fs, namespace, get_s3_url)
    return s3fs
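A hedged usage sketch, assuming patch_fs exposes the passed-in function as a get_url method on the returned filesystem (as django-pyfs does) and that djfs_settings, key_id, and key_secret are already configured; the namespace and filename are placeholders:

# Hypothetical usage; 'uploads' is an example namespace.
fs = get_s3fs('uploads')
with fs.open('report.csv', 'wb') as f:
    f.write(b'a,b,c\n')
# Signed, time-limited URL for the stored object (default timeout 60 s).
url = fs.get_url('report.csv', timeout=300)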
Example #3
    def __init__(self, AWS_ACCESS_KEY, AWS_SECRET_KEY, *args, **kwargs):
        super(MyAPI, self).__init__(*args, **kwargs)

        # pre-configure the API with your organization's setup

        manager = DynamoDBManager(table_name='project_data',
                                  session_args={
                                      'aws_access_key_id': AWS_ACCESS_KEY,
                                      'aws_secret_access_key': AWS_SECRET_KEY
                                  },
                                  resource_args={
                                      'endpoint_url': 'http://localhost:8000/',
                                      'region_name': 'us-east-1'
                                  })

        if 'project_data' in manager.table_names:
            manager.delete_table('project_data')

        manager.create_archive_table('project_data', raise_on_err=False)

        self.attach_manager(manager)

        # Prevent changes to the manager configuration
        self.lock_manager()

        s3_bucket1 = S3FS('org-bucket-1',
                          aws_access_key=AWS_ACCESS_KEY,
                          aws_secret_key=AWS_SECRET_KEY)

        s3_bucket2 = S3FS('org-bucket-2',
                          aws_access_key=AWS_ACCESS_KEY,
                          aws_secret_key=AWS_SECRET_KEY)

        # `tmpdir` is assumed to be defined elsewhere (a network-mounted
        # directory path in this setup).
        network_storage = OSFS(tmpdir)

        self.attach_authority('s3-1', s3_bucket1)
        self.attach_authority('s3-2', s3_bucket2)
        self.attach_authority('NAT-1', network_storage)

        # Prevent changes to the set of authorities
        self.lock_authorities()
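For orientation, a minimal sketch of constructing the subclass; the class name MyAPI is taken from the super() call above, the credential strings are dummies, and a local DynamoDB instance is assumed to be listening on the endpoint_url configured in __init__:

# Hypothetical instantiation; the key strings are placeholders.
api = MyAPI('MY_KEY', 'MY_SECRET_KEY')
# Data can now be routed to any attached authority, e.g. 's3-1' or 'NAT-1'.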
Example #4
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        from fs.s3fs import S3FS
        from fs.path import pathsplit

        # The first path segment is the bucket; the rest is a path inside it.
        bucket = fs_path
        path = ''
        if '/' in fs_path:
            bucket, path = fs_path.split('/', 1)

        fs = S3FS(bucket)

        if path:
            dirpath, resourcepath = pathsplit(path)
            if dirpath:
                fs = fs.opendir(dirpath)
            path = resourcepath

        return fs, path
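A rough sketch of exercising this hook directly; the class name S3Opener is an assumption (only the method body is shown), and in normal use the opener registry calls get_fs when a URL such as 's3://my-bucket/data/file.txt' is opened:

# Hypothetical direct call to the classmethod above.
fs, path = S3Opener.get_fs(None, 's3', None,
                           'my-bucket/data/file.txt',
                           writeable=True, create_dir=False)
# fs is an S3FS opened on the 'data' subdirectory of 'my-bucket';
# path is the remaining resource name, 'file.txt'.
print(path)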
Example #5
def get_s3fs(namespace):
    '''Helper for get_filesystem: build a file system backed by S3.'''
    fullpath = namespace
    if 'prefix' in settings.DJFS:
        fullpath = os.path.join(settings.DJFS['prefix'], fullpath)
    s3fs = S3FS(settings.DJFS['bucket'], fullpath)

    def get_s3_url(self, filename, timeout=60):
        global s3conn
        try:
            return s3conn.generate_url(timeout, 'GET',
                                       bucket=settings.DJFS['bucket'],
                                       key=filename)
        except Exception:
            # Reconnect and retry once if the connection has timed out
            # (or was never established).
            s3conn = S3Connection()
            return s3conn.generate_url(timeout, 'GET',
                                       bucket=settings.DJFS['bucket'],
                                       key=filename)

    s3fs = patch_fs(s3fs, namespace, get_s3_url)
    return s3fs
Example #6
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        from fs.s3fs import S3FS

        username, password, bucket = _parse_credentials(fs_path)
        path = ''
        if '/' in bucket:
            # Split the bucket string itself, not fs_path: fs_path may still
            # carry the 'user:password@' credential prefix.
            bucket, path = bucket.split('/', 1)

        fs = S3FS(bucket,
                  aws_access_key=username or None,
                  aws_secret_key=password or None)

        if path:
            dirpath, resourcepath = pathsplit(path)
            if dirpath:
                fs = fs.opendir(dirpath)
            path = resourcepath

        return fs, path
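Example #6 relies on a _parse_credentials helper that is not shown. A plausible sketch, assuming the conventional user:password@bucket/path form used by PyFilesystem openers; the real helper's behavior may differ:

def _parse_credentials(fs_path):
    # Hypothetical helper: split an optional 'user:password@' prefix
    # off the front of the opener path.
    username = password = ''
    if '@' in fs_path:
        credentials, fs_path = fs_path.split('@', 1)
        if ':' in credentials:
            username, password = credentials.split(':', 1)
        else:
            username = credentials
    return username, password, fs_path

Returning empty strings when no credentials are embedded is what makes the `username or None` / `password or None` fallbacks at the call site work.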