def file_by_name(self, filename):
    from pydrive.files import ApiRequestError
    if filename in self.id_cache:
        # It might since have been locally moved, renamed or deleted, so we
        # need to validate the entry.
        file_id = self.id_cache[filename]
        drive_file = self.drive.CreateFile({'id': file_id})
        try:
            if drive_file['title'] == filename and not drive_file['labels']['trashed']:
                for parent in drive_file['parents']:
                    if parent['id'] == self.folder:
                        log.Info("PyDrive backend: found file '%s' with id %s in ID cache" %
                                 (filename, file_id))
                        return drive_file
        except ApiRequestError as error:
            # A 404 occurs if the ID is no longer valid
            if error.args[0].resp.status != 404:
                raise
        # If we get here, the cache entry is invalid
        log.Info("PyDrive backend: invalidating '%s' (previously ID %s) from ID cache" %
                 (filename, file_id))
        del self.id_cache[filename]

    # Not found in the cache, so use directory listing. This is less
    # reliable because there is no strong consistency.
    q = "title='%s' and '%s' in parents and trashed=false" % (filename, self.folder)
    fields = 'items(title,id,fileSize,downloadUrl,exportLinks),nextPageToken'
    flist = self.drive.ListFile({'q': q, 'fields': fields}).GetList()
    if len(flist) > 1:
        log.FatalError(_("PyDrive backend: multiple files called '%s'.") % (filename,))
    elif flist:
        file_id = flist[0]['id']
        self.id_cache[filename] = flist[0]['id']
        log.Info("PyDrive backend: found file '%s' with id %s on server, adding to cache" %
                 (filename, file_id))
        return flist[0]
    log.Info("PyDrive backend: file '%s' not found in cache or on server" % (filename,))
    return None
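# Illustration of the Drive v2 list query assembled above (the file name
# and folder ID here are fabricated for the example):
q = "title='%s' and '%s' in parents and trashed=false" % (
    'duplicity-full.20240101T000000Z.manifest.gpg', '0Bexamplefolderid')
# -> "title='duplicity-full.20240101T000000Z.manifest.gpg' and
#     '0Bexamplefolderid' in parents and trashed=false"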
def list(self):
    """List files in directory"""
    document = None
    for n in range(1, globals.num_retries + 1):
        log.Info("Listing directory %s on WebDAV server" % (self.directory,))
        self.headers['Depth'] = "1"
        response = self.request("PROPFIND", self.directory, self.listbody)
        del self.headers['Depth']
        # if the target collection does not exist, create it.
        if response.status == 404:
            log.Info("Directory '%s' being created." % self.directory)
            res = self.request("MKCOL", self.directory)
            log.Info("WebDAV MKCOL status: %s %s" % (res.status, res.reason))
            continue
        if response.status == 207:
            document = response.read()
            break
        log.Info("WebDAV PROPFIND attempt #%d failed: %s %s" %
                 (n, response.status, response.reason))
    if document is None:
        # Every attempt ended in a failed PROPFIND or a MKCOL retry
        # (range() never yields num_retries + 1, so this must be
        # checked after the loop, not inside it); give up.
        log.Warn("WebDAV backend giving up after %d attempts to PROPFIND %s" %
                 (globals.num_retries, self.directory))
        raise BackendException((response.status, response.reason))

    log.Info("%s" % (document,))
    dom = xml.dom.minidom.parseString(document)
    result = []
    for href in dom.getElementsByTagName('d:href') + dom.getElementsByTagName('D:href'):
        filename = self.__taste_href(href)
        if filename:
            result.append(filename)
    return result
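# A minimal, standalone sketch of what the href harvesting above consumes:
# a 207 Multi-Status body returned by a Depth: 1 PROPFIND. The collection
# path and member name are invented for the example.
import xml.dom.minidom

sample = (b'<?xml version="1.0"?>'
          b'<d:multistatus xmlns:d="DAV:">'
          b'<d:response><d:href>/backups/</d:href></d:response>'
          b'<d:response><d:href>/backups/duplicity-full.vol1.difftar.gpg</d:href></d:response>'
          b'</d:multistatus>')
dom = xml.dom.minidom.parseString(sample)
hrefs = dom.getElementsByTagName('d:href') + dom.getElementsByTagName('D:href')
assert [h.firstChild.data for h in hrefs] == \
    ['/backups/', '/backups/duplicity-full.vol1.difftar.gpg']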
def __init__(self, parsed_url):
    duplicity.backend.Backend.__init__(self, parsed_url)
    self.headers = {'Connection': 'keep-alive'}
    self.parsed_url = parsed_url
    self.digest_challenge = None
    self.digest_auth_handler = None

    if parsed_url.path:
        foldpath = re.compile('/+')
        self.directory = foldpath.sub('/', parsed_url.path + '/')
    else:
        self.directory = '/'

    log.Info("Using WebDAV host %s" % (parsed_url.hostname,))
    log.Info("Using WebDAV port %s" % (parsed_url.port,))
    log.Info("Using WebDAV directory %s" % (self.directory,))
    log.Info("Using WebDAV protocol %s" % (globals.webdav_proto,))

    if parsed_url.scheme == 'webdav':
        self.conn = httplib.HTTPConnection(parsed_url.hostname, parsed_url.port)
    elif parsed_url.scheme == 'webdavs':
        self.conn = httplib.HTTPSConnection(parsed_url.hostname, parsed_url.port)
    else:
        raise BackendException("Unknown URI scheme: %s" % (parsed_url.scheme))
def add_inc(self, incset):
    u"""Add incset to self. Return False if incset does not match"""
    if self.end_time == incset.start_time:
        self.incset_list.append(incset)
    else:
        if (self.incset_list and
                incset.start_time == self.incset_list[-1].start_time and
                incset.end_time > self.incset_list[-1].end_time):
            log.Info(_(u"Preferring Backupset over previous one!"))
            self.incset_list[-1] = incset
        else:
            log.Info(_(u"Ignoring incremental Backupset (start_time: %s; needed: %s)") %
                     (dup_time.timetopretty(incset.start_time),
                      dup_time.timetopretty(self.end_time)))
            return False
    self.end_time = incset.end_time
    log.Info(_(u"Added incremental Backupset (start_time: %s / end_time: %s)") %
             (dup_time.timetopretty(incset.start_time),
              dup_time.timetopretty(incset.end_time)))
    assert self.end_time
    return True
def resetConnection(self):
    parsed_url = self.url
    try:
        imap_server = os.environ[u'IMAP_SERVER']
    except KeyError:
        imap_server = parsed_url.hostname

    # Try to close the connection cleanly
    try:
        self.conn.close()
    except Exception:
        pass

    if parsed_url.scheme == u"imap":
        cl = imaplib.IMAP4
        self.conn = cl(imap_server, 143)
    elif parsed_url.scheme == u"imaps":
        cl = imaplib.IMAP4_SSL
        self.conn = cl(imap_server, 993)
    log.Debug(u"Type of imap class: %s" % (cl.__name__,))

    self.remote_dir = re.sub(r'^/', r'', parsed_url.path, 1)

    # Login; with imap_full_address the full "user@host" form is used.
    if not globals.imap_full_address:
        self.conn.login(self.username, self.password)
    else:
        self.conn.login(self.username + u"@" + parsed_url.hostname, self.password)
    self.conn.select(globals.imap_mailbox)
    log.Info(u"IMAP connected")
def _put(self, source_path, remote_filename):
    f = source_path.open(u"rb")
    allowedTimeout = globals.timeout
    if allowedTimeout == 0:
        # Allow a total timeout of 1 day
        allowedTimeout = 2880
    while allowedTimeout > 0:
        try:
            self.conn.select(remote_filename)
            body = self.prepareBody(f, remote_filename)
            # If we don't select the IMAP folder before
            # append, the message goes into the INBOX.
            self.conn.select(globals.imap_mailbox)
            self.conn.append(globals.imap_mailbox, None, None, body.encode())
            break
        except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
            allowedTimeout -= 1
            log.Info(u"Error saving '%s', retrying in 30s " % remote_filename)
            time.sleep(30)
            while allowedTimeout > 0:
                try:
                    self.resetConnection()
                    break
                except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
                    allowedTimeout -= 1
                    log.Info(u"Error reconnecting, retrying in 30s ")
                    time.sleep(30)

    log.Info(u"IMAP mail with '%s' subject stored" % remote_filename)
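# A worked check of the retry budget above (arithmetic only, no new
# behavior): 2880 attempts x 30 s sleep per attempt = 86400 s = 1 day.
assert 2880 * 30 == 86400 == 24 * 60 * 60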
def makedir(self):
    """Make (nested) directories on the server."""
    dirs = self.directory.split("/")
    # url causes directory to start with /, but it might be given
    # with or without trailing / (which is required)
    if dirs[-1] == '':
        dirs = dirs[0:-1]
    for i in range(1, len(dirs)):
        d = "/".join(dirs[0:i + 1]) + "/"

        self.close()  # or we get previous request's data or exception
        self.headers['Depth'] = "1"
        response = self.request("PROPFIND", d)
        del self.headers['Depth']

        log.Info("Checking existence dir %s: %d" % (d, response.status))
        if response.status == 404:
            log.Info("Creating missing directory %s" % d)

            self.close()  # or we get previous request's data or exception
            res = self.request("MKCOL", d)
            if res.status != 201:
                raise BackendException("WebDAV MKCOL %s failed: %s %s" %
                                       (d, res.status, res.reason))
            self.close()
def test_basic(self):
    """Check get/parse cycle"""
    dup_time.setprevtime(10)
    dup_time.setcurtime(20)

    filename = file_naming.get("inc", volume_number=23)
    log.Info("Inc filename: " + filename)
    pr = file_naming.parse(filename)
    assert pr and pr.type == "inc", pr
    assert pr.start_time == 10
    assert pr.end_time == 20
    assert pr.volume_number == 23
    assert not pr.partial

    filename = file_naming.get("full-sig")
    log.Info("Full sig filename: " + filename)
    pr = file_naming.parse(filename)
    assert pr.type == "full-sig"
    assert pr.time == 20
    assert not pr.partial

    filename = file_naming.get("new-sig")
    pr = file_naming.parse(filename)
    assert pr.type == "new-sig"
    assert pr.start_time == 10
    assert pr.end_time == 20
    assert not pr.partial
def patch_diff_tarfile(base_path, diff_tarfile, restrict_index=()):
    u"""Patch given Path object using delta tarfile (as in tarfile.TarFile)

    If restrict_index is set, ignore any deltas in diff_tarfile that
    don't start with restrict_index.
    """
    if base_path.exists():
        path_iter = selection.Select(base_path).set_iter()
    else:
        path_iter = empty_iter()  # probably untarring full backup

    diff_path_iter = difftar2path_iter(diff_tarfile)
    if restrict_index:
        diff_path_iter = filter_path_iter(diff_path_iter, restrict_index)
    collated = diffdir.collate2iters(path_iter, diff_path_iter)

    ITR = IterTreeReducer(PathPatcher, [base_path])
    for basis_path, diff_ropath in collated:
        if basis_path:
            log.Info(_(u"Patching %s") % (util.fsdecode(basis_path.get_relative_path())),
                     log.InfoCode.patch_file_patching,
                     util.escape(basis_path.get_relative_path()))
            ITR(basis_path.index, basis_path, diff_ropath)
        else:
            log.Info(_(u"Patching %s") % (util.fsdecode(diff_ropath.get_relative_path())),
                     log.InfoCode.patch_file_patching,
                     util.escape(diff_ropath.get_relative_path()))
            ITR(diff_ropath.index, basis_path, diff_ropath)
    ITR.Finish()
    base_path.setdata()
def test_basic(self):
    u"""Check get/parse cycle"""
    dup_time.setprevtime(10)
    dup_time.setcurtime(20)
    file_naming.prepare_regex(force=True)

    filename = file_naming.get(u"inc", volume_number=23)
    log.Info(u"Inc filename: " + util.fsdecode(filename))
    pr = file_naming.parse(filename)
    assert pr and pr.type == u"inc", pr
    assert pr.start_time == 10
    assert pr.end_time == 20
    assert pr.volume_number == 23
    assert not pr.partial

    filename = file_naming.get(u"full-sig")
    log.Info(u"Full sig filename: " + util.fsdecode(filename))
    pr = file_naming.parse(filename)
    assert pr.type == u"full-sig"
    assert pr.time == 20
    assert not pr.partial

    filename = file_naming.get(u"new-sig")
    pr = file_naming.parse(filename)
    assert pr.type == u"new-sig"
    assert pr.start_time == 10
    assert pr.end_time == 20
    assert not pr.partial
def login(self):
    if self.load_access_token() is None:
        self.obtain_access_token()

    self.api_client = Dropbox(self.load_access_token())
    self.api_account = None
    try:
        log.Debug('dpbx,users_get_current_account([token])')
        self.api_account = self.api_client.users_get_current_account()
        log.Debug("dpbx,%s" % self.api_account)
    except (BadInputError, AuthError) as e:
        log.Debug('dpbx,exception: %s' % e)
        log.Info("dpbx: Authentication failed. Trying to obtain new access token")
        self.obtain_access_token()
        # We're assuming obtain_access_token will throw exception.
        # So this line should not be reached
        raise BackendException("dpbx: Please update DPBX_ACCESS_TOKEN and try again")

    log.Info("dpbx: Successfully authenticated as %s" %
             self.api_account.name.display_name)
def obtain_access_token(self):
    log.Info("dpbx: trying to obtain access token")
    for env_var in ['DPBX_APP_KEY', 'DPBX_APP_SECRET']:
        if env_var not in os.environ:
            raise BackendException('dpbx: %s environment variable not set' % env_var)
    app_key = os.environ['DPBX_APP_KEY']
    app_secret = os.environ['DPBX_APP_SECRET']

    if not sys.stdout.isatty() or not sys.stdin.isatty():
        log.FatalError('dpbx error: cannot interact, but need human attention',
                       log.ErrorCode.backend_command_error)

    auth_flow = DropboxOAuth2FlowNoRedirect(app_key, app_secret)
    log.Debug('dpbx,auth_flow.start()')
    authorize_url = auth_flow.start()
    print
    print '-' * 72
    print "1. Go to: " + authorize_url
    print "2. Click \"Allow\" (you might have to log in first)."
    print "3. Copy the authorization code."
    print '-' * 72
    auth_code = raw_input("Enter the authorization code here: ").strip()
    try:
        log.Debug('dpbx,auth_flow.finish(%s)' % auth_code)
        authresult = auth_flow.finish(auth_code)
    except Exception as e:
        raise BackendException('dpbx: Unable to obtain access token: %s' % e)
    log.Info("dpbx: Authentication successful")
    self.save_access_token(authresult.access_token)
def _put(self, source_path, remote_filename):
    remote_filename = util.fsdecode(remote_filename)
    drive_file = self.file_by_name(remote_filename)
    if drive_file is None:
        # No existing file, make a new one
        create_file_args = {u'title': remote_filename,
                            u'parents': [{u"kind": u"drive#fileLink",
                                          u"id": self.folder}]}
        create_file_args[u'parents'][0].update(self.api_params)
        drive_file = self.drive.CreateFile(create_file_args)
        log.Info(u"PyDrive backend: creating new file '%s'" % (remote_filename,))
    else:
        log.Info(u"PyDrive backend: replacing existing file '%s' with id '%s'" %
                 (remote_filename, drive_file[u'id']))
    drive_file.SetContentFile(util.fsdecode(source_path.name))
    if self.shared_drive_id:
        drive_file.Upload(param={u'supportsTeamDrives': True})
    else:
        drive_file.Upload()
    self.id_cache[remote_filename] = drive_file[u'id']
def file_by_name(self, filename):
    from googleapiclient.errors import HttpError

    filename = util.fsdecode(filename)

    if filename in self.id_cache:
        # It might since have been locally moved, renamed or deleted, so we
        # need to validate the entry.
        file_id = self.id_cache[filename]
        try:
            drive_file = self.drive.files().get(
                fileId=file_id,
                fields=u'id,size,name,parents,trashed',
                **self.shared_drive_flags_support).execute()
            if drive_file[u'name'] == filename and not drive_file[u'trashed']:
                for parent in drive_file[u'parents']:
                    if parent == self.folder:
                        log.Info(u"GDrive backend: found file '%s' with id %s in ID cache" %
                                 (filename, file_id))
                        return drive_file
        except HttpError as error:
            # A 404 occurs if the ID is no longer valid
            if error.resp.status != 404:
                raise
        # If we get here, the cache entry is invalid
        log.Info(u"GDrive backend: invalidating '%s' (previously ID %s) from ID cache" %
                 (filename, file_id))
        del self.id_cache[filename]

    # Not found in the cache, so use directory listing. This is less
    # reliable because there is no strong consistency.
    q = u"name = '%s' and '%s' in parents and trashed = false" % (filename, self.folder)
    results = self.drive.files().list(
        q=q,
        fields=u'files(name,id,size),nextPageToken',
        pageSize=2,
        **self.shared_drive_corpora,
        **self.shared_drive_id,
        **self.shared_drive_flags_include,
        **self.shared_drive_flags_support).execute()
    file_list = results.get(u'files', [])
    if len(file_list) > 1:
        log.FatalError(u"GDrive backend: multiple files called '%s'." % (filename,))
    elif len(file_list) > 0:
        file_id = file_list[0][u'id']
        self.id_cache[filename] = file_list[0][u'id']
        log.Info(u"GDrive backend: found file '%s' with id %s on server, "
                 u"adding to cache" % (filename, file_id))
        return file_list[0]

    log.Info(u"GDrive backend: file '%s' not found in cache or on server" % (filename,))
    return None
def pre_process_download_batch(self, remote_filenames):
    log.Info(u"Starting batch unfreezing from Glacier")
    # Used primarily to move all necessary files in Glacier to S3 at once
    with ThreadPoolExecutor(thread_name_prefix=u's3-unfreeze-glacier') as executor:
        for remote_filename in remote_filenames:
            remote_filename = util.fsdecode(remote_filename)
            executor.submit(self.pre_process_download, remote_filename, False)
    log.Info(u"Batch unfreezing from Glacier finished")
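# Minimal standalone sketch of the fan-out/join pattern used above:
# leaving the `with` block calls shutdown(wait=True), so every submitted
# task has finished before the final log line runs. File names invented.
from concurrent.futures import ThreadPoolExecutor

def unfreeze(name):
    print('would unfreeze', name)

with ThreadPoolExecutor(thread_name_prefix='s3-unfreeze-glacier') as ex:
    for name in ['vol1.difftar.gpg', 'vol2.difftar.gpg']:
        ex.submit(unfreeze, name)
print('all unfreeze requests completed')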
def request(self, url, method="GET", body=None, headers={}, ignore=None):
    url, oauth_header = self._get_oauth_request_header(url, method)
    headers.update(oauth_header)

    for n in range(1, globals.num_retries + 1):
        log.Info("making %s request to %s (attempt %d)" % (method, url, n))
        try:
            resp, content = self.client.request(url, method, headers=headers, body=body)
        except Exception, e:
            log.Info("request failed, exception %s" % e)
            log.Debug("Backtrace of previous error: %s" %
                      duplicity.util.exception_traceback())
            if n == globals.num_retries:
                log.FatalError("Giving up on request after %d attempts, "
                               "last exception %s" % (n, e))
            if isinstance(body, file):
                body.seek(0)  # Go to the beginning of the file for the retry
            time.sleep(30)
            continue

        log.Info("completed request with status %s %s" % (resp.status, resp.reason))
        oops_id = resp.get('x-oops-id', None)
        if oops_id:
            log.Debug("Server Error: method %s url %s Oops-ID %s" % (method, url, oops_id))

        if resp['content-type'] == 'application/json':
            content = loads(content)

        # were we successful? status either 2xx or code we're told to ignore
        numcode = int(resp.status)
        if (numcode >= 200 and numcode < 300) or (ignore and numcode in ignore):
            return resp, content

        ecode = log.ErrorCode.backend_error
        if numcode == 402:  # Payment Required
            ecode = log.ErrorCode.backend_no_space
        elif numcode == 404:
            ecode = log.ErrorCode.backend_not_found

        if isinstance(body, file):
            body.seek(0)  # Go to the beginning of the file for the retry

        if n < globals.num_retries:
            time.sleep(30)
def _put(self, source_path, remote_filename):
    drive_file = self.file_by_name(remote_filename)
    if drive_file is None:
        # No existing file, make a new one
        drive_file = self.drive.CreateFile({'title': remote_filename,
                                            'parents': [{"kind": "drive#fileLink",
                                                         "id": self.folder}]})
        log.Info("PyDrive backend: creating new file '%s'" % (remote_filename,))
    else:
        log.Info("PyDrive backend: replacing existing file '%s' with id '%s'" %
                 (remote_filename, drive_file['id']))
    drive_file.SetContentFile(source_path.name)
    drive_file.Upload()
    self.id_cache[remote_filename] = drive_file['id']
def schedule_task(self, fn, params):
    """
    Schedule the given task (callable, typically function) for
    execution. Pass the given parameters to the function when calling
    it. Returns a callable which can optionally be used to wait for
    the task to complete, either by returning its return value or by
    propagating any exception raised by said task.

    This method may block or return immediately, depending on the
    configuration and state of the scheduler.

    This method may also raise an exception in order to trigger
    failures early, if the task (if run synchronously) or a previous
    task has already failed.

    NOTE: Pay particular attention to the scope in which this is
    called. In particular, since it will execute concurrently in the
    background, assuming fn is a closure, any variables used must be
    properly bound in the closure. This is the reason for the
    convenience feature of being able to give parameters to the call,
    to avoid having to wrap the call itself in a function in order to
    "fixate" variables in, for example, an enclosing loop.
    """
    assert fn is not None

    # Note: It is on purpose that we keep track of concurrency in
    # the front end and launch threads for each task, rather than
    # keep a pool of workers. The overhead is not relevant in the
    # situation this will be used, and it removes complexity in
    # terms of ensuring the scheduler is garbage collected/shut
    # down properly when no longer referenced/needed by calling
    # code.
    if self.__concurrency == 0:
        # special case this to not require any platform support for
        # threading at all
        log.Info("%s: %s" % (self.__class__.__name__,
                             _("running task synchronously (asynchronicity disabled)")),
                 log.InfoCode.synchronous_upload_begin)
        return self.__run_synchronously(fn, params)
    else:
        log.Info("%s: %s" % (self.__class__.__name__,
                             _("scheduling task for asynchronous execution")),
                 log.InfoCode.asynchronous_upload_begin)
        return self.__run_asynchronously(fn, params)
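# A runnable sketch of the closure caveat in the NOTE above, assuming
# the surrounding class is instantiable as AsyncScheduler(concurrency)
# and offers a wait() that joins outstanding tasks; the volume names
# are made up for illustration.
results = []
sched = AsyncScheduler(2)
for vol in ['vol1', 'vol2', 'vol3']:
    # Buggy form: sched.schedule_task(lambda: results.append(vol), [])
    # would read `vol` only when the task runs, so late tasks may all
    # see 'vol3'. Passing the value through params binds it now:
    sched.schedule_task(lambda v: results.append(v), [vol])
sched.wait()
assert sorted(results) == ['vol1', 'vol2', 'vol3']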
def _get(self, remote_filename, local_path):
    allowedTimeout = globals.timeout
    if allowedTimeout == 0:
        # Allow a total timeout of 1 day
        allowedTimeout = 2880
    while allowedTimeout > 0:
        try:
            self.conn.select(globals.imap_mailbox)
            (result, list) = self.conn.search(None, u'Subject', remote_filename)
            if result != u"OK":
                raise Exception(list[0])

            # check if there is any result
            if list[0] == u'':
                raise Exception(u"no mail with subject %s" % remote_filename)

            (result, list) = self.conn.fetch(list[0], u"(RFC822)")
            if result != u"OK":
                raise Exception(list[0])
            rawbody = list[0][1]

            p = Parser()
            m = p.parsestr(rawbody.decode())
            mp = m.get_payload(0)
            body = mp.get_payload(decode=True)
            break
        except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
            allowedTimeout -= 1
            log.Info(u"Error loading '%s', retrying in 30s " % remote_filename)
            time.sleep(30)
            while allowedTimeout > 0:
                try:
                    self.resetConnection()
                    break
                except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
                    allowedTimeout -= 1
                    log.Info(u"Error reconnecting, retrying in 30s ")
                    time.sleep(30)

    tfile = local_path.open(u"wb")
    tfile.write(body)
    tfile.close()
    local_path.setdata()
    log.Info(u"IMAP mail with '%s' subject fetched" % remote_filename)
def can_fast_process(self, index, ropath):
    """Can fast process (no recursion) if ropath isn't a directory"""
    log.Info(_("Writing %s of type %s") % (ropath.get_relative_path(), ropath.type),
             log.InfoCode.patch_file_writing,
             "%s %s" % (util.escape(ropath.get_relative_path()), ropath.type))
    return not ropath.isdir()
def unseal_status(self, u_remote_filenames):
    u"""Shows unsealing status for input volumes"""
    one_object_not_unsealed = False
    objs = self.__list_objs(ffilter=lambda x: util.fsdecode(x[u'name']) in u_remote_filenames)
    max_duration = 0
    for o in objs:
        policy_retrieval_state = o[u'policy_retrieval_state']
        filename = util.fsdecode(o[u'name'])
        if policy_retrieval_state == u'sealed':
            log.Notice(u"Error: volume is still in sealed state: %s." % (filename))
            log.Notice(u"Launching unseal of volume %s." % (filename))
            self.unseal(o[u'name'])
            one_object_not_unsealed = True
        elif policy_retrieval_state == u"unsealing":
            duration = int(o[u'policy_retrieval_delay'])
            log.Info(u"%s available in %d seconds." % (filename, duration))
            if duration > max_duration:
                max_duration = duration
            one_object_not_unsealed = True

    m, s = divmod(max_duration, 60)
    h, m = divmod(m, 60)
    max_duration_eta = u"%dh%02dm%02ds" % (h, m, s)
    log.Notice(u"Need to wait %s before all volumes are unsealed." % (max_duration_eta))
    return one_object_not_unsealed
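# Worked example of the ETA formatting above: for a 3725-second delay,
# divmod(3725, 60) -> (62, 5) and divmod(62, 60) -> (1, 2), so the
# notice reads "1h02m05s".
m, s = divmod(3725, 60)
h, m = divmod(m, 60)
assert u"%dh%02dm%02ds" % (h, m, s) == u"1h02m05s"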
def subprocess_popen(self, commandline):
    """
    Execute the given command line with error check.
    Returns int Exitcode, string StdOut, string StdErr

    Raise a BackendException on failure.
    """
    import shlex

    if isinstance(commandline, (types.ListType, types.TupleType)):
        logstr = ' '.join(commandline)
        args = commandline
    else:
        logstr = commandline
        args = shlex.split(commandline)

    logstr = self.munge_password(logstr)
    log.Info(_("Reading results of '%s'") % logstr)

    result, stdout, stderr = self.__subprocess_popen(args)
    if result != 0:
        try:
            ignores = self.popen_breaks[args[0]]
            ignores.index(result)
            # ignore a predefined set of error codes
            return 0, '', ''
        except (KeyError, ValueError):
            raise BackendException("Error running '%s': returned %d, with output:\n%s" %
                                   (logstr, result, stdout + '\n' + stderr))
    return result, stdout, stderr
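# Illustration of the two accepted commandline forms above; shlex.split
# is the standard-library call used for the string form, and the scp
# invocation here is made up.
import shlex
assert shlex.split("scp -p vol1.difftar.gpg 'backup host:/dup'") == \
    ['scp', '-p', 'vol1.difftar.gpg', 'backup host:/dup']
# A list/tuple is passed through unchanged, so pre-split arguments with
# embedded spaces need no shell quoting at all.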
def connect(self, forced=False):
    """
    Connect or re-connect to the server, updates self.conn

    # reconnect on errors as a precaution, there are errors e.g.
    # "[Errno 32] Broken pipe" or SSL errors that render the connection unusable
    """
    if not forced and self.conn \
            and self.conn.host == self.parsed_url.hostname:
        return

    log.Info(_("WebDAV create connection on '%s'") % (self.parsed_url.hostname))
    self._close()
    # http schemes needed for redirect urls from servers
    if self.parsed_url.scheme in ['webdav', 'http']:
        self.conn = httplib.HTTPConnection(self.parsed_url.hostname, self.parsed_url.port)
    elif self.parsed_url.scheme in ['webdavs', 'https']:
        if globals.ssl_no_check_certificate:
            self.conn = httplib.HTTPSConnection(self.parsed_url.hostname, self.parsed_url.port)
        else:
            self.conn = VerifiedHTTPSConnection(self.parsed_url.hostname, self.parsed_url.port)
    else:
        raise FatalBackendException(_("WebDAV Unknown URI scheme: %s") %
                                    (self.parsed_url.scheme))
def __init__(self, concurrency):
    """
    Create an asynchronous scheduler that executes jobs with the
    given level of concurrency.
    """
    log.Info("%s: %s" % (self.__class__.__name__,
                         _("instantiating at concurrency %d") % (concurrency)))
    assert concurrency >= 0, "%s concurrency level must be >= 0" % (self.__class__.__name__,)

    self.__failed = False  # has at least one task failed so far?
    self.__failed_waiter = None  # when __failed, the waiter of the first task that failed
    self.__concurrency = concurrency
    self.__worker_count = 0  # number of active workers
    self.__waiter_count = 0  # number of threads waiting to submit work
    self.__barrier = False  # barrier currently in effect?
    self.__cv = threading.Condition()  # for simplicity, we use a single cv with its lock
    #                                  # for everything, even if the resulting notifyAll():s
    #                                  # are not technically efficient.

    if concurrency > 0:
        require_threading("concurrency > 0 (%d)" % (concurrency,))
def transfer(self, method, source_path, remote_filename):
    u"""create Par2 files and transfer the given file and the Par2 files
    with the wrapped backend.

    Par2 must run on the real filename or it would restore the
    temp-filename later on. So first of all create a tempdir and symlink
    the source_path with remote_filename into this.
    """
    par2temp = source_path.get_temp_in_same_dir()
    par2temp.mkdir()
    source_symlink = par2temp.append(remote_filename)
    source_target = source_path.get_canonical()
    if not os.path.isabs(source_target):
        source_target = os.path.join(util.fsencode(os.getcwd()), source_target)
    os.symlink(source_target, source_symlink.get_canonical())
    source_symlink.setdata()

    log.Info(u"Create Par2 recovery files")
    par2create = u'par2 c -r%d -n1 %s %s' % (self.redundancy, self.common_options,
                                             util.fsdecode(source_symlink.get_canonical()))
    out, returncode = pexpect.run(par2create, None, True)
    source_symlink.delete()

    files_to_transfer = []
    if not returncode:
        for file in par2temp.listdir():
            files_to_transfer.append(par2temp.append(file))

    method(source_path, remote_filename)
    for file in files_to_transfer:
        method(file, file.get_filename())

    par2temp.deltree()
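# For orientation, the command string built above expands like this
# (redundancy 10 and '-q' as the common options are assumed values for
# the sketch, and the path is made up):
cmd = u'par2 c -r%d -n1 %s %s' % (10, u'-q', u'/tmp/par2tmp/vol1.difftar.gpg')
assert cmd == u'par2 c -r10 -n1 -q /tmp/par2tmp/vol1.difftar.gpg'
# par2 then writes vol1.difftar.gpg.par2 plus vol1.difftar.gpg.volNNN+NN.par2
# recovery files next to the symlink; those are what get uploaded alongside
# the volume itself.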
def _list(self):
    ret = []
    (result, list) = self.conn.select(globals.imap_mailbox)
    if result != "OK":
        raise BackendException(list[0])

    # Going to find all the archives which have remote_dir in the From
    # address

    # Search returns an error if you haven't selected an IMAP folder.
    (result, list) = self.conn.search(None, 'FROM', self.remote_dir)
    if result != "OK":
        raise Exception(list[0])
    if list[0] == '':
        return ret
    nums = list[0].strip().split(" ")
    set = "%s:%s" % (nums[0], nums[-1])
    (result, list) = self.conn.fetch(set, "(BODY[HEADER])")
    if result != "OK":
        raise Exception(list[0])

    for msg in list:
        if len(msg) == 1:
            continue
        io = StringIO.StringIO(msg[1])  # pylint: disable=unsubscriptable-object
        m = rfc822.Message(io)
        subj = m.getheader("subject")
        header_from = m.getheader("from")

        # Catch messages with empty headers which cause an exception.
        if not (header_from is None):
            if re.compile("^" + self.remote_dir + "$").match(header_from):
                ret.append(subj)
                log.Info("IMAP LIST: %s %s" % (subj, header_from))
    return ret
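# Illustration of the FETCH range built above: SEARCH returns message
# numbers as one space-separated string, e.g. "1 2 5 9"; taking the
# first and last yields the IMAP sequence set "1:9", which fetches the
# whole span (including 3-4 and 6-8) in a single round trip.
nums = "1 2 5 9".strip().split(" ")
assert "%s:%s" % (nums[0], nums[-1]) == "1:9"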
def __init__(self, parsed_url):
    duplicity.backend.Backend.__init__(self, parsed_url)

    # The URL form "file:MyFile" is not a valid duplicity target.
    log.Info("init: %s" % (parsed_url.path[1:]))
    if not parsed_url.path.startswith('//'):
        raise BackendException("Bad file:// path syntax.")
    self.remote_pathdir = parsed_url.path[1:]
def __init__(self, temproot=None):
    """
    Create a new TemporaryDirectory backed by a unique and
    securely created file system directory.

    temproot - The temp root directory, or None to use system
    default (recommended).
    """
    if temproot is None:
        if globals.temproot:
            temproot = globals.temproot
        else:
            global _initialSystemTempRoot
            temproot = _initialSystemTempRoot
    self.__dir = tempfile.mkdtemp("-tempdir", "duplicity-", temproot)

    log.Info(_("Using temporary directory %s") % util.ufn(self.__dir))

    # number of mktemp()/mkstemp() calls served so far
    self.__tempcount = 0
    # dict of paths pending deletion; use dict even though we are
    # not concerned with association, because it is unclear whether
    # sets are O(1), while dictionaries are.
    self.__pending = {}

    self.__lock = threading.Lock()  # protect private resources *AND* mktemp/mkstemp calls
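# For reference, tempfile.mkdtemp(suffix, prefix, dir) is the standard
# library call used above, so the directory created is named like
# /tmp/duplicity-k3q9x2-tempdir (random middle part). A quick check:
import os, tempfile
d = tempfile.mkdtemp("-tempdir", "duplicity-")
assert os.path.basename(d).startswith("duplicity-") and d.endswith("-tempdir")
os.rmdir(d)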
def cleanup(self):
    """
    Cleanup any files created in the temporary directory (that have
    not been forgotten), and clean up the temporary directory
    itself.

    On failure they are logged, but this method will not raise an
    exception.
    """
    self.__lock.acquire()
    try:
        if self.__dir is not None:
            for file in self.__pending.keys():
                try:
                    log.Debug(_("Removing still remembered temporary file %s") %
                              util.ufn(file))
                    util.ignore_missing(os.unlink, file)
                except Exception:
                    log.Info(_("Cleanup of temporary file %s failed") % util.ufn(file))
            try:
                os.rmdir(self.__dir)
            except Exception:
                log.Warn(_("Cleanup of temporary directory %s failed - "
                           "this is probably a bug.") % util.ufn(self.__dir))
            self.__pending = None
            self.__dir = None
    finally:
        self.__lock.release()
def put(self, source_path, remote_filename=None):
    """Copy file to remote"""
    if not remote_filename:
        remote_filename = source_path.get_filename()
    remote_full = self.meta_base + self.quote(remote_filename)
    # check if it exists already, returns existing content_path
    resp, content = self.client.request(remote_full, ignore=[404])
    if resp['status'] == '404':
        # put with path returns new content_path
        resp, content = self.client.request(remote_full,
                                            method="PUT",
                                            headers={'content-type': 'application/json'},
                                            body=dumps({"kind": "file"}))
    elif resp['status'] != '200':
        raise BackendException("access to %s failed, code %s" %
                               (remote_filename, resp['status']))

    assert content['content_path'] is not None
    # content_path allows put of the actual material
    remote_full = self.content_base + self.quote(content['content_path'])
    log.Info("uploading file %s to location %s" % (remote_filename, remote_full))

    size = os.path.getsize(source_path.name)
    fh = open(source_path.name, 'rb')

    content_type = 'application/octet-stream'
    headers = {"Content-Length": str(size),
               "Content-Type": content_type}
    resp, content = self.client.request(remote_full,
                                        method="PUT",
                                        body=fh,
                                        headers=headers)
    fh.close()