def __init__(self, uri): (unused, regionPath) = uri.split(':') (region, tableName) = regionPath.split('/') self.log = logging.getLogger("dynamo-fuse-oper ") self.tableName = tableName self.region = region for reg in boto.dynamodb2.regions(): if reg.name == region: self.regionv2 = reg break provider = Provider('aws') self.conn = boto.dynamodb.connect_to_region(region, aws_access_key_id=provider.get_access_key(), aws_secret_access_key=provider.get_secret_key()) connection = DynamoDBConnection(aws_access_key_id=provider.get_access_key(), aws_secret_access_key=provider.get_secret_key(), region=self.regionv2) try: self.table = self.conn.get_table(tableName) self.tablev2 = Table(tableName, connection=connection) self.blockTable = self.conn.get_table(self.tableName + "Blocks") self.blockTablev2 = Table(self.tableName + "Blocks", connection=connection) except: self.createTable() self.counter = itertools.count() self.counter.next() # start from 1 self.__createRoot() print "Ready"
def has_google_credentials():
    """Return True if Google storage credentials are configured.

    The result is computed once from the boto 'google' Provider and cached
    in the module-level _HAS_GOOGLE_CREDENTIALS flag; subsequent calls
    reuse the cached value.
    """
    global _HAS_GOOGLE_CREDENTIALS
    if _HAS_GOOGLE_CREDENTIALS is None:
        provider = Provider('google')
        # Collapse the four-branch if/else into a single boolean expression.
        _HAS_GOOGLE_CREDENTIALS = (provider.get_access_key() is not None
                                   and provider.get_secret_key() is not None)
    return _HAS_GOOGLE_CREDENTIALS
def cleanup(uri):
    """Delete every item from the table addressed by *uri*, preserving the
    root entry ("/", "/"), the global counter ("global", "counter") and the
    deleted-links record ("/", DELETED_LINKS).
    """
    (unused, regionPath) = uri.split(':')
    (region, tableName) = regionPath.split('/')
    provider = Provider('aws')
    conn = boto.dynamodb.connect_to_region(
        region,
        aws_access_key_id=provider.get_access_key(),
        aws_secret_access_key=provider.get_secret_key())
    table = conn.get_table(tableName)
    # Bookkeeping rows that must survive a cleanup.
    keep = [("/", "/"), ("global", "counter"), ("/", DELETED_LINKS)]
    for item in table.scan(attributes_to_get=["name", "path"]):
        if (item["path"], item["name"]) in keep:
            continue
        item.delete()
def createTable(self): provider = Provider('aws') connection = DynamoDBConnection(aws_access_key_id=provider.get_access_key(), aws_secret_access_key=provider.get_secret_key(), region=self.regionv2) self.blockTablev2 = Table.create(self.tableName + "Blocks", schema=[ HashKey('blockId'), RangeKey('blockNum', data_type=NUMBER) ], throughput={'read': 30, 'write': 10}, connection=connection ) self.tablev2 = Table.create(self.tableName, schema=[ HashKey('path'), RangeKey('name') ], throughput={'read': 30, 'write': 10}, indexes=[ KeysOnlyIndex("Links", parts=[ HashKey('path'), RangeKey('link') ]) ], connection=connection ) description = connection.describe_table(self.tableName) iter = 0 while description["Table"]["TableStatus"] != "ACTIVE": print "Waiting for %s to create %d..." % (self.tableName, iter) iter += 1 sleep(1) description = connection.describe_table(self.tableName) self.table = self.conn.get_table(self.tableName) self.blockTable = self.conn.get_table(self.tableName + "Blocks")
from boto.provider import Provider
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore
from shove import Shove
import logging

# Bucket holding the persisted scheduler jobs.
SHOVE_BUCKET = 'my-bucket'


class ShoveJobStore(ShelveJobStore):
    """APScheduler job store persisted through a Shove store at *path*."""

    def __init__(self, path):
        # Deliberately does not call ShelveJobStore.__init__: the shelve
        # backend is replaced by a Shove store exposing the same interface.
        self.jobs = []
        self.path = path
        self.store = Shove(path, optimize=False)


class S3JobStore(ShoveJobStore):
    """Shove-backed job store stored in an S3 bucket."""

    def __init__(self, access_key, secret_key, bucket, prefix='job_'):
        # Ensure the key prefix ends with '/'. endswith() also handles an
        # empty prefix, which previously raised IndexError on prefix[-1].
        self.prefix = prefix if prefix.endswith('/') else (prefix + '/')
        path = 's3://{}:{}@{}'.format(access_key, secret_key, bucket)
        super(S3JobStore, self).__init__(path)


logging.basicConfig()
PROVIDER = Provider('aws')
JOB_STORE = S3JobStore(PROVIDER.get_access_key(), PROVIDER.get_secret_key(),
                       SHOVE_BUCKET)
SCHEDULER = Scheduler(misfire_grace_time=1000)
SCHEDULER.add_jobstore(JOB_STORE, 's3')