def __init__(self, config):
    '''*config* is a dictionary with the keys id (access_key_id), secret (secret_access_key), and bucket_name. For instance::

        config['id'] = 'FDS54548SDF8D2S311DF'
        config['secret'] = 'D370JKD=564++873ZHFD9FDKDD'
        config['bucket_name'] = 'cloudfusion'

    The bucket will be created if it does not exist. A bucket is similar to a subfolder,
    to which access with CloudFusion is restricted.
    Id and secret can be obtained from console.aws.amazon.com/s3/home:

    * Click on your name on the top left and select Security Credentials from the drop-down menu.
    * Go to Access Keys and click Generate New Access Keys to generate the new key pair.

    :param config: dictionary with key value pairs'''
    super(AmazonStore, self).__init__()
    self.name = 'amazon'
    self._logging_handler = self.name
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.logger.info("creating %s store", self.name)
    self.bucket_name = config['bucket_name']
    id_key = get_id_key(config)
    secret_key = get_secret_key(config)
    self.access_key_id = config[id_key]
    self.secret_access_key = config[secret_key]
    try:
        boto.config.add_section('Boto')
    except DuplicateSectionError:
        pass
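# A minimal usage sketch for the store above. The import path is an
# assumption, and the credential values are the placeholder examples
# from the docstring, not working keys.
from cloudfusion.store.s3.amazon_store import AmazonStore  # path assumed

config = {'id': 'FDS54548SDF8D2S311DF',            # placeholder access_key_id
          'secret': 'D370JKD=564++873ZHFD9FDKDD',  # placeholder secret_access_key
          'bucket_name': 'cloudfusion'}
store = AmazonStore(config)  # creates the bucket if it does not exist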
def __init__(self, config):
    '''*config* can be obtained from the function :func:`cloudfusion.store.sugarsync.sugarsync_store.SugarsyncStore.get_config`,
    but you need to add user and password::

        config = SugarsyncStore.get_config()
        config['user'] = '******' #your account username/e-mail address
        config['password'] = '******' #your account password

    Or you can use a configuration file that already has password and username set by specifying a path::

        path_to_my_config_file = '/home/joe/MySugarsync.ini'
        config = SugarsyncStore.get_config(path_to_my_config_file)

    :param config: dictionary with key value pairs'''
    #self.dir_listing_cache = {}
    self._logging_handler = 'sugarsync'
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    manager = Manager()
    self.path_cache = manager.dict()
    # use a lock for synchronized appends
    self._dir_listing_cache = manager.dict()
    self._dir_listing_cache_lock = RLock()
    self._last_partial_cache = manager.list()
    self._time_of_last_partial_cache = 0
    # error handling for authorization error
    self.root = config["root"]
    try:
        self.client = SugarsyncClient(config)
    except Exception, e:
        raise StoreAutorizationError(repr(e), 0)
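# A minimal usage sketch, following the docstring above; the account
# values are placeholders. StoreAutorizationError is raised if the
# client cannot authenticate.
from cloudfusion.store.sugarsync.sugarsync_store import SugarsyncStore

config = SugarsyncStore.get_config()
config['user'] = 'joe@example.com'  # placeholder username/e-mail address
config['password'] = 'secret'       # placeholder password
store = SugarsyncStore(config)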
def run(self):
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    remaining_loops = 2000
    while remaining_loops != 0:
        remaining_loops -= 1
        time.sleep(random())
        self.logger.debug("Process %s says hello!", os.getpid())
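# A minimal, self-contained sketch of the pattern above: a Process subclass
# that re-binds its logger in the child process via db_logging_thread, then
# logs from the child. The class name DemoProcess and the import path of
# db_logging_thread are assumptions for illustration.
import logging
import os
import time
from multiprocessing import Process
from random import random

from cloudfusion.mylogging import db_logging_thread  # path assumed

class DemoProcess(Process):
    def __init__(self):
        super(DemoProcess, self).__init__()
        self.logger = logging.getLogger('demo')

    def run(self):
        # handlers do not survive the fork intact; make a multiprocessing-safe logger
        self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
        for _ in range(3):
            time.sleep(random())
            self.logger.debug("Process %s says hello!", os.getpid())

if __name__ == '__main__':
    p = DemoProcess()
    p.start()  # executes run() in a child process
    p.join()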
def __init__(self, config):
    '''*config* can be obtained from the function :func:`cloudfusion.store.gdrive.google_drive.GoogleDrive.get_config`,
    but you need to add the id and secret, which can be obtained by creating an id and secret
    for an "Installed Application" in the developer console: https://console.developers.google.com/project,
    as described in https://developers.google.com/drive/web/quickstart/quickstart-python::

        config = GoogleDrive.get_config()
        config['id'] = '4523154788555-kjsdfj87sdfjh44dfsdfj45kjj.apps.googleusercontent.com' #your id
        config['secret'] = 'sdfjk3h5j444jnjfo0' #your secret

    You may add a cache id, so that you can continue previous sessions. If you use the same cache id
    in a later session, the store will remember some metadata and won't need the id and secret
    for authentication (just use empty strings in this case)::

        config['cache_id'] = 'gdrive_db'

    Or you can use a configuration file that already has id and secret set by specifying a path::

        path_to_my_config_file = '/home/joe/gdrive.ini'
        config = GoogleDrive.get_config(path_to_my_config_file)

    :param config: dictionary with key value pairs'''
    super(GoogleDrive, self).__init__()
    self.name = 'google_drive'
    self._logging_handler = self.name
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.logger.info("creating %s store", self.name)
    id_key = get_id_key(config)
    secret_key = get_secret_key(config)
    client_auth = self.CLIENT_AUTH_TEMPLATE.substitute(SECRET=config[secret_key], ID=config[id_key])
    # workaround for possible side effect in fuse when called without foreground option
    self._settings_yaml = self._get_cachedir_name(config) + '/settings.yaml'
    self._client_secrets = self._get_cachedir_name(config) + '/client_secrets.json'
    credentials = self._get_cachedir_name(config) + '/credentials.json'
    settings_yaml = self.SETTINGS_YAML_TEMPLATE.substitute(CLIENT_SECRETS=self._client_secrets,
                                                           CREDENTIALS_DB=credentials)
    with open(self._settings_yaml, 'w') as fh:
        fh.write(settings_yaml)
    with open(self._client_secrets, 'w') as client_secrets:
        client_secrets.write(client_auth)
    self.gauth = GoogleAuth(settings_file=self._settings_yaml)
    try:
        self.gauth.Authorize()
    except AuthenticationError, e:
        self.logger.info("Authentication error: %s", e)
        # The call to LocalWebserverAuth raises RefreshError if credentials are out of date.
        # Thus, remove credentials and reinitialize gauth:
        if os.path.exists(credentials):
            os.remove(credentials)
        self.gauth = GoogleAuth(settings_file=self._settings_yaml)
        self.gauth.LocalWebserverAuth()
        self.gauth.Authorize()
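# A minimal usage sketch, following the docstring above; the id and secret
# values are the docstring's placeholders. On first use, authorization may
# open a local webserver flow in the browser.
from cloudfusion.store.gdrive.google_drive import GoogleDrive

config = GoogleDrive.get_config()
config['id'] = '4523154788555-kjsdfj87sdfjh44dfsdfj45kjj.apps.googleusercontent.com'  # placeholder
config['secret'] = 'sdfjk3h5j444jnjfo0'  # placeholder
config['cache_id'] = 'gdrive_db'  # optional: reuse metadata from a previous session
store = GoogleDrive(config)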
def __init__(self, config):
    '''*config* can be obtained from the function :func:`cloudfusion.store.dropbox.dropbox_store.DropboxStore.get_config`,
    but you need to add user and password::

        config = DropboxStore.get_config()
        config['user'] = '******' #your account username/e-mail address
        config['password'] = '******' #your account password

    You may add a cache id, so that you can continue previous sessions. If you use the same cache id
    in a later session, the store will remember some metadata and does not need to rely on auto-login
    (the auto-login feature often breaks because Dropbox changes their interface)::

        config['cache_id'] = 'dropbox_db'

    You can also choose between full access to dropbox or to a single subfolder by setting the value for 'root'::

        config['root'] = 'dropbox' #for full dropbox access (this is the default) or
        config['root'] = 'app_folder' #for restricted access to one subfolder

    Or you can use a configuration file that already has password and username set by specifying a path::

        path_to_my_config_file = '/home/joe/MyDropbox.ini'
        config = DropboxStore.get_config(path_to_my_config_file)

    :param config: dictionary with key value pairs'''
    self._logging_handler = 'dropbox'  #TODO: check if it is a filehandler
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.dir_listing_cache = {}
    self.logger.debug("get Dropbox session")
    if not config['root'] in ['dropbox', 'app_folder']:
        raise StoreAccessError("Configuration error: root must be one of dropbox or app_folder, check your configuration file", 0)
    self._cache_dir = self._get_cachedir_name(config)
    self.create_session(config, self._cache_dir)
    self.logger.debug("get DropboxClient")
    self.client = client.DropboxClient(self.sess)
    self.root = config['root']
    self.time_difference = self._get_time_difference()
    self.logger.info("api initialized")
    manager = Manager()
    self._revisions = manager.dict()
    self._revision_db_path = self._cache_dir + "/Dropbox_revisions.db"
    try:
        last_session_revisions = shelve.open(self._revision_db_path)
        self._revisions.update(last_session_revisions)
    except Exception:
        self.logger.debug("Revision database from last session could not be loaded.")
    self._is_copy = False
    atexit.register(lambda: self._close())
    super(DropboxStore, self).__init__()
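# A minimal usage sketch, following the docstring above; user and password
# are placeholders, and 'root' selects full or restricted access.
from cloudfusion.store.dropbox.dropbox_store import DropboxStore

config = DropboxStore.get_config()
config['user'] = 'joe@example.com'  # placeholder username/e-mail address
config['password'] = 'secret'       # placeholder password
config['root'] = 'dropbox'          # or 'app_folder' for a single subfolder
store = DropboxStore(config)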
def _run(self, result_queue, end_time):
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.logger.debug("Starting ReadWorker process %s to read %s", os.getpid(), self.path)
    try:
        content = self.store.get_file(self.path)
        end_time.value = time.time()
        result_queue.put(content)
    except Exception, e:
        self.logger.exception("Error on reading %s in ReadWorker", self.path)
        try:
            pickle.loads(pickle.dumps(e))  # check if exception can be de/serialized
            result_queue.put(e)
        except Exception:
            self.logger.error("Error on serializing exception in ReadWorker: %s", repr(e))
            result_queue.put(Exception(repr(e)))
def _run(self, result_queue, interrupt_event, end_time):
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.logger.debug("Start WriteWorker process %s to write %s", os.getpid(), self.path)
    try:
        update_time = self.store.store_file(self._filename, os.path.dirname(self.path),
                                            os.path.basename(self.path), interrupt_event)
        end_time.value = time.time()
        if not update_time:
            update_time = end_time.value
        result_queue.put(update_time)
    except Exception, e:
        self.logger.exception("Error on storing %s in WriteWorker", self.path)
        try:
            pickle.loads(pickle.dumps(e))  # check if exception can be de/serialized
            result_queue.put(e)
        except Exception:
            self.logger.error("Error on serializing exception in WriteWorker: %s", repr(e))
            result_queue.put(Exception(repr(e)))
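# Both workers above use the same pattern: before putting an exception on
# the result queue, round-trip it through pickle, because multiprocessing
# queues can only transport picklable objects. A standalone sketch of that
# check (the helper name make_picklable is hypothetical):
import pickle

def make_picklable(exc):
    """Return exc if it survives a pickle round-trip, else a plain Exception
    carrying its repr, so the parent process still sees the error text."""
    try:
        pickle.loads(pickle.dumps(exc))
        return exc
    except Exception:
        return Exception(repr(exc))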
def __init__(self, config):
    super(LocalHDStore, self).__init__()
    self.name = 'harddrive'
    self._logging_handler = self.name
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.logger.info("creating %s store", self.name)
    if 'root' not in config or config['root'] == '/':  # prevent writing to the actual file system root
        self.logger.error("Error: specify a root directory with root=/my_root_folder in the configuration ini file; root must be a subdirectory - using /tmp/harddriveroot instead")
        self.root = '/tmp/harddriveroot'
    else:
        self.root = config['root']
    if not os.path.exists(self.root):
        os.makedirs(self.root)
    self.logger.debug("root: %s", self.root)
    self.logger.info("api initialized")
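# A minimal usage sketch; the import path is an assumption. Note that
# omitting 'root' (or setting it to '/') falls back to /tmp/harddriveroot.
from cloudfusion.store.local.local_hd_store import LocalHDStore  # path assumed

config = {'root': '/tmp/my_test_root'}  # any writable subdirectory
store = LocalHDStore(config)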
def __init__(self, config):
    '''*config* is a dictionary with the keys user, password, and url. For instance::

        #url can also contain an existing subfolder to access, e.g. https://webdav.mediencenter.t-online.de/myfolder
        #url can also contain the port of the WebDAV server, e.g. https://webdav.mediencenter.t-online.de:443
        config['url'] = 'https://webdav.mediencenter.t-online.de'
        config['user'] = '******' #your account username/e-mail address
        config['password'] = '******' #your account password

    :param config: dictionary with key value pairs'''
    super(WebdavStore, self).__init__()
    self.name = 'webdav'
    self._logging_handler = self.name
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.logger.info("creating %s store", self.name)
    self.tinyclient = TinyDAVClient(config['url'], config['user'], config['password'])
    self.logger.info("api initialized")
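# A minimal usage sketch for the store above; the import path is an
# assumption and the account values are placeholders.
from cloudfusion.store.webdav.webdav_store import WebdavStore  # path assumed

config = {'url': 'https://webdav.mediencenter.t-online.de',
          'user': 'joe@example.com',  # placeholder username/e-mail address
          'password': 'secret'}       # placeholder password
store = WebdavStore(config)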
def __init__(self, config):
    '''*config* is a dictionary with the keys id (access_key_id), secret (secret_access_key), and bucket_name. For instance::

        config['id'] = 'FDS54548SDF8D2S311DF'
        config['secret'] = 'D370JKD=564++873ZHFD9FDKDD'
        config['bucket_name'] = 'cloudfusion'

    The bucket will be created if it does not exist. A bucket is similar to a subfolder,
    to which access with CloudFusion is restricted.
    Id and secret can be obtained from the developer's console:

    * Go to console.developers.google.com/project
    * Create a new project
    * Select Project dashboard on the left, which opens a new tab
    * Go to the new tab
    * Select Billing on the left to set up billing
    * Select Google Cloud Storage on the left
    * Click on the button labeled "Make this my default project for interoperable storage access"
    * Click on Interoperable Access on the left
    * Click Generate new key, to generate the new key pair

    :param config: dictionary with key value pairs'''
    super(GoogleStore, self).__init__()
    self.name = 'google'
    self._logging_handler = self.name
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    self.logger.info("creating %s store", self.name)
    self.bucket_name = config['bucket_name']
    id_key = get_id_key(config)
    secret_key = get_secret_key(config)
    self.access_key_id = config[id_key]
    self.secret_access_key = config[secret_key]
    self.write_gsutil_config()
    try:
        boto.config.add_section('Boto')
    except DuplicateSectionError:
        pass
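# A minimal usage sketch for the store above; the import path is an
# assumption and the key values are the docstring's placeholders.
from cloudfusion.store.gs.google_store import GoogleStore  # path assumed

config = {'id': 'FDS54548SDF8D2S311DF',            # placeholder access_key_id
          'secret': 'D370JKD=564++873ZHFD9FDKDD',  # placeholder secret_access_key
          'bucket_name': 'cloudfusion'}
store = GoogleStore(config)  # creates the bucket if it does not exist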