Example No. 1
def upload_large_dropbox_file(client, source_full_path, destination_full_path):
    """
    Uploads a large (>CHUNK_SIZE) single file to Dropbox.
    """
    file_size = os.path.getsize(source_full_path)
    with open(source_full_path, 'rb') as f:
        try:
            upload_session_start_result = client.files_upload_session_start(
                f.read(CHUNK_SIZE))
            session_id = upload_session_start_result.session_id
            cursor = UploadSessionCursor(session_id=session_id,
                                         offset=f.tell())
            commit = CommitInfo(path=destination_full_path)

            while f.tell() < file_size:
                if ((file_size - f.tell()) <= CHUNK_SIZE):
                    print(
                        client.files_upload_session_finish(
                            f.read(CHUNK_SIZE), cursor, commit))
                else:
                    client.files_upload_session_append(f.read(CHUNK_SIZE),
                                                       cursor.session_id,
                                                       cursor.offset)
                    cursor.offset = f.tell()
        except ApiError as e:
            print(f'Failed to upload file {source_full_path}: {e}')
            return

    print(f'{source_full_path} successfully uploaded to '
          f'{destination_full_path}')
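
A minimal sketch of how the helper above might be wired up. The client construction, the access token, and the 4 MiB CHUNK_SIZE value are assumptions for illustration, not part of the original snippet:

import os
from dropbox import Dropbox
from dropbox.exceptions import ApiError
from dropbox.files import UploadSessionCursor, CommitInfo

# Assumed chunk size; Dropbox accepts at most ~150 MB per upload-session
# request, so any value comfortably below that works.
CHUNK_SIZE = 4 * 1024 * 1024

client = Dropbox('YOUR_ACCESS_TOKEN')  # hypothetical token
upload_large_dropbox_file(client, 'backup.tar.gz', '/backups/backup.tar.gz')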
Example No. 2
 def make_backup_dropbox(self, ts, name, dump_stream, info_file, info_file_content, cloud_params):
     # Upload two backup objects to Dropbox
     DropboxService = self.env['ir.config_parameter'].get_dropbox_service()
     folder_path = self.env['ir.config_parameter'].get_param("odoo_backup_sh_dropbox.dropbox_folder_path")
     info_file_size = info_file.tell()
     dump_stream.seek(0)
     info_file.seek(0)
     for obj, obj_name, file_size in \
             [[dump_stream, compute_backup_filename(name, ts, info_file_content.get('encrypted')), info_file_content.get("backup_size") * 1024 * 1024],
              [info_file, compute_backup_info_filename(name, ts), info_file_size]]:
         # The full path to upload the file to, including the file name
         full_path = "{folder_path}/{file_name}".format(
             folder_path=folder_path,
             file_name=obj_name,
         )
         # from here: https://www.dropboxforum.com/t5/API-Support-Feedback/python-upload-big-file-example/m-p/166627/highlight/true#M6013
         if file_size <= CHUNK_SIZE:
             DropboxService.files_upload(obj.read(), full_path)
         else:
             upload_session_start_result = DropboxService.files_upload_session_start(obj.read(CHUNK_SIZE))
             cursor = UploadSessionCursor(session_id=upload_session_start_result.session_id, offset=obj.tell())
             commit = CommitInfo(path=full_path)
             while obj.tell() < file_size:
                 if ((file_size - obj.tell()) <= CHUNK_SIZE):
                     DropboxService.files_upload_session_finish(obj.read(CHUNK_SIZE), cursor, commit)
                 else:
                     DropboxService.files_upload_session_append(obj.read(CHUNK_SIZE), cursor.session_id,
                                                                cursor.offset)
                     cursor.offset = obj.tell()
Example No. 3
    def _upload_large_file(self, handle, upload_size, remote_file_path):
        """It uploads a large source files to the dropbox.

        Arguments:
            handle {A File handle} -- The source file handle.
            upload_size {int} -- The number of bytes to be uploaded.
            remote_file_path {string} -- The destination path of the file.
        """
        #Upload session
        session = self._client.files_upload_session_start(
            handle.read(constants.DROPBOX_CHUNK_SIZE))
        cursor = Dropbox_UploadSessionCursor(session_id=session.session_id,
                                             offset=handle.tell())

        #Upload loop
        with tqdm(desc='Uploading: {}'.format(remote_file_path),
                  total=upload_size) as pbar:
            #Update the progress bar for the session start reads
            pbar.update(handle.tell())

            while handle.tell() < upload_size:
                #Calculate remaining bytes
                remaining_bytes = upload_size - handle.tell()

                #If it is the last chunk, finalize the upload
                if remaining_bytes <= constants.DROPBOX_CHUNK_SIZE:
                    #Commit info
                    commit = Dropbox_CommitInfo(
                        path=remote_file_path,
                        mode=Dropbox_WriteMode.overwrite)

                    #Finish upload
                    self._client.files_upload_session_finish(
                        handle.read(remaining_bytes), cursor, commit)

                    #Update progress
                    pbar.update(remaining_bytes)
                #More than chunk size remaining to upload
                else:
                    self._client.files_upload_session_append_v2(
                        handle.read(constants.DROPBOX_CHUNK_SIZE), cursor)

                    #Update the cursor
                    cursor.offset = handle.tell()

                    #Update the progress
                    pbar.update(constants.DROPBOX_CHUNK_SIZE)

                #Refresh the progress bar
                pbar.refresh()
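
A hedged sketch of how a caller might drive _upload_large_file; the wrapper method name and local path are hypothetical, while constants.DROPBOX_CHUNK_SIZE and self._client are assumed to be defined elsewhere in the class:

    def upload(self, local_path, remote_file_path):
        """Hypothetical caller: open the source file and delegate to the chunked upload."""
        upload_size = os.path.getsize(local_path)  # assumes `import os` at module level
        with open(local_path, 'rb') as handle:
            self._upload_large_file(handle, upload_size, remote_file_path)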
Example No. 4
    def _chunked_upload(self, content, dest_path):
        upload_session = self.client.files_upload_session_start(
            content.read(self.CHUNK_SIZE))
        cursor = UploadSessionCursor(session_id=upload_session.session_id,
                                     offset=content.tell())
        commit = CommitInfo(path=dest_path)

        while content.tell() < content.size:
            if (content.size - content.tell()) <= self.CHUNK_SIZE:
                self.client.files_upload_session_finish(
                    content.read(self.CHUNK_SIZE), cursor, commit)
            else:
                self.client.files_upload_session_append_v2(
                    content.read(self.CHUNK_SIZE), cursor)
                cursor.offset = content.tell()
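
_chunked_upload reads content.size, which plain file objects do not expose (file-like objects such as Django's UploadedFile do). A small adapter, assumed here only for illustration, could feed it an ordinary file from disk:

    def chunked_upload_from_path(self, path, dest_path):
        # Hypothetical wrapper: give a plain binary file the .size/.read/.tell
        # interface that _chunked_upload expects (assumes `import os`).
        class _SizedFile:
            def __init__(self, fh, size):
                self._fh = fh
                self.size = size

            def read(self, n=-1):
                return self._fh.read(n)

            def tell(self):
                return self._fh.tell()

        with open(path, 'rb') as fh:
            self._chunked_upload(_SizedFile(fh, os.path.getsize(path)), dest_path)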
Example No. 5
    def put(self, source, dest, retry=True):
        dest = self._fix_slashes(dest)

        if (self.client is not None):
            # open the file and get its size
            f = open(source, 'rb')
            f_size = os.path.getsize(source)

            try:
                if (f_size < self.MAX_CHUNK):
                    # use the regular upload
                    self.client.files_upload(f.read(),
                                             dest,
                                             mode=WriteMode('overwrite'))
                else:
                    # start the upload session
                    upload_session = self.client.files_upload_session_start(
                        f.read(self.MAX_CHUNK))
                    upload_cursor = UploadSessionCursor(
                        upload_session.session_id, f.tell())

                    while (f.tell() < f_size):
                        # check if we should finish the upload
                        if ((f_size - f.tell()) <= self.MAX_CHUNK):
                            # upload and close
                            self.client.files_upload_session_finish(
                                f.read(self.MAX_CHUNK), upload_cursor,
                                CommitInfo(dest, mode=WriteMode('overwrite')))
                        else:
                            # upload a part and store the offset
                            self.client.files_upload_session_append_v2(
                                f.read(self.MAX_CHUNK), upload_cursor)
                            upload_cursor.offset = f.tell()

                # if no errors we're good!
                return True
            except Exception as anError:
                utils.log(str(anError))

                # if we have an exception retry
                if (retry):
                    return self.put(source, dest, False)
                else:
                    # tried once already, just quit
                    return False
        else:
            return False
Example No. 6
    def sync(self, file_path: Path) -> None:
        """Sync a file to Dropbox."""
        # Check if Dropbox token is valid.
        if self.valid is False:
            error = "Dropbox token is invalid!"
            self.success = False
            self.output = error
            log.error(error)
            return None

        # This is the maximum size that can be uploaded as a single
        # chunk. Files bigger than this are uploaded in multiple
        # parts.
        chunk_size = 4 * 1024 * 1024

        temp_file, recompressed = self.compress(file_path)
        upload_path = f"{self.upload_base}{file_path.name}{'.gz' if recompressed else ''}"

        try:
            with temp_file as f:
                file_size = os.stat(f.name).st_size
                log.debug(file_size)
                if file_size <= chunk_size:
                    self.client.files_upload(f.read(), upload_path,
                                             WriteMode.overwrite)
                else:
                    session_start = self.client.files_upload_session_start(
                        f.read(chunk_size))
                    cursor = UploadSessionCursor(session_start.session_id,
                                                 offset=f.tell())
                    # The commit contains the Dropbox path and the write mode for the file
                    commit = CommitInfo(upload_path, WriteMode.overwrite)

                    while f.tell() < file_size:
                        if (file_size - f.tell()) <= chunk_size:
                            self.client.files_upload_session_finish(
                                f.read(chunk_size), cursor, commit)
                        else:
                            self.client.files_upload_session_append(
                                f.read(chunk_size), cursor.session_id,
                                cursor.offset)
                            cursor.offset = f.tell()
            self.success = True
        except (ApiError, HttpError) as e:
            log.error(e)
            self.success = False
            self.output = str(e)
Example No. 7
def upload_file(dbx, LOCALFILE):
    """Uploads a local file to Dropbox in chunks <=4MiB each"""
    with open(LOCALFILE, 'rb') as f:
        BACKUPPATH = '/' + os.path.split(LOCALFILE)[1]
        file_size = os.path.getsize(LOCALFILE)
        if file_size <= CHUNK_SIZE:
            dbx.files_upload(f.read(), BACKUPPATH, mode=WriteMode('overwrite'))
        else:
            upload_session_start_result = dbx.files_upload_session_start(f.read(CHUNK_SIZE))
            cursor = UploadSessionCursor(session_id=upload_session_start_result.session_id, offset=f.tell())
            commit = CommitInfo(path=BACKUPPATH)
            while f.tell() < file_size:
                if ((file_size - f.tell()) <= CHUNK_SIZE):
                    dbx.files_upload_session_finish(f.read(CHUNK_SIZE), cursor, commit)
                else:
                    dbx.files_upload_session_append(f.read(CHUNK_SIZE), cursor.session_id, cursor.offset)
                    cursor.offset = f.tell()
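
The loop above uses files_upload_session_append, which the Dropbox Python SDK documents as deprecated in favor of files_upload_session_append_v2. An equivalent inner loop with the newer call, keeping the same names, might look like this:

            while f.tell() < file_size:
                if (file_size - f.tell()) <= CHUNK_SIZE:
                    dbx.files_upload_session_finish(f.read(CHUNK_SIZE), cursor, commit)
                else:
                    # append_v2 takes the cursor object itself instead of
                    # separate session_id and offset arguments
                    dbx.files_upload_session_append_v2(f.read(CHUNK_SIZE), cursor)
                    cursor.offset = f.tell()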
Example No. 8
 def put(self,source,dest,retry=True):
     dest = self._fix_slashes(dest)
     
     if(self.client != None):
         #open the file and get its size
         f = open(source,'rb')
         f_size = os.path.getsize(source)
         
         try:
             if(f_size < self.MAX_CHUNK):
                 #use the regular upload
                 response = self.client.files_upload(f.read(),dest,mode=WriteMode('overwrite'))
             else:
                 #start the upload session
                 upload_session = self.client.files_upload_session_start(f.read(self.MAX_CHUNK))
                 upload_cursor = UploadSessionCursor(upload_session.session_id,f.tell())
                 
                 while(f.tell() < f_size):
                     #check if we should finish the upload
                     if((f_size - f.tell()) <= self.MAX_CHUNK):
                         #upload and close
                         self.client.files_upload_session_finish(f.read(self.MAX_CHUNK),upload_cursor,CommitInfo(dest,mode=WriteMode('overwrite')))
                     else:
                         #upload a part and store the offset
                         self.client.files_upload_session_append_v2(f.read(self.MAX_CHUNK),upload_cursor)
                         upload_cursor.offset = f.tell()
                 
             #if no errors we're good!
             return True
         except Exception as anError:
             utils.log(str(anError))
             
             #if we have an exception retry
             if(retry):
                 return self.put(source,dest,False)
             else:
                 #tried once already, just quit
                 return False
     else:
         return False
Example No. 9
    def put_file_chunked(self, source_path, remote_path):
        if not self.user_authenticated():
            self.login()

        file_size = os.path.getsize(source_path.name)
        f = source_path.open('rb')
        try:
            buf = f.read(DPBX_UPLOAD_CHUNK_SIZE)
            log.Debug(
                'dpbx,files_upload_session_start([%d bytes]), total: %d' %
                (len(buf), file_size))
            upload_sid = self.api_client.files_upload_session_start(buf)
            log.Debug('dpbx,files_upload_session_start(): %s' % upload_sid)
            upload_cursor = UploadSessionCursor(upload_sid.session_id,
                                                f.tell())
            commit_info = CommitInfo(remote_path,
                                     mode=WriteMode.overwrite,
                                     autorename=False,
                                     client_modified=None,
                                     mute=True)
            res_metadata = None
            progress.report_transfer(f.tell(), file_size)

            requested_offset = None
            current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE
            retry_number = globals.num_retries
            is_eof = False

            # We're doing our own error handling and retry logic because
            # with Dropbox chunked uploads we can retry only the failed
            # chunk
            while not is_eof or not res_metadata:
                try:
                    if requested_offset is not None:
                        upload_cursor.offset = requested_offset

                    if f.tell() != upload_cursor.offset:
                        f.seek(upload_cursor.offset)
                    buf = f.read(current_chunk_size)

                    is_eof = f.tell() >= file_size
                    if not is_eof and len(buf) == 0:
                        continue

                    # reset temporary status variables
                    requested_offset = None
                    current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE
                    retry_number = globals.num_retries

                    if not is_eof:
                        assert len(buf) != 0
                        log.Debug(
                            'dpbx,files_upload_session_append([%d bytes], offset=%d)'
                            % (len(buf), upload_cursor.offset))
                        self.api_client.files_upload_session_append(
                            buf, upload_cursor.session_id,
                            upload_cursor.offset)
                    else:
                        log.Debug(
                            'dpbx,files_upload_session_finish([%d bytes], offset=%d)'
                            % (len(buf), upload_cursor.offset))
                        res_metadata = self.api_client.files_upload_session_finish(
                            buf, upload_cursor, commit_info)

                    upload_cursor.offset = f.tell()
                    log.Debug('progress: %d of %d' %
                              (upload_cursor.offset, file_size))
                    progress.report_transfer(upload_cursor.offset, file_size)
                except ApiError as e:
                    error = e.error
                    if isinstance(error, UploadSessionLookupError
                                  ) and error.is_incorrect_offset():
                        # The server reports that we should send another chunk.
                        # Most likely this is caused by a network error during
                        # the previous upload attempt. In that case we get the
                        # expected offset from the server and it's enough to
                        # just seek() and retry
                        new_offset = error.get_incorrect_offset(
                        ).correct_offset
                        log.Debug(
                            'dpbx,files_upload_session_append: incorrect offset: %d (expected: %s)'
                            % (upload_cursor.offset, new_offset))
                        if requested_offset is not None:
                            # the chunk failed even after a seek attempt;
                            # something is wrong and there is no safe way to
                            # recover
                            raise BackendException(
                                "dpbx: unable to chunk upload")
                        else:
                            # will seek and retry
                            requested_offset = new_offset
                        continue
                    raise
                except ConnectionError as e:
                    log.Debug('dpbx,files_upload_session_append: %s' % e)

                    retry_number -= 1

                    if not self.user_authenticated():
                        self.login()

                    if retry_number == 0:
                        raise

                    # We don't know for sure whether the partial upload
                    # succeeded, so it's better to retry a smaller amount
                    # to avoid an unnecessary re-upload
                    log.Info('dpbx: sleeping a bit before chunk retry')
                    time.sleep(30)
                    current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE // 5  # integer division keeps the read() size an int
                    requested_offset = None
                    continue

            if f.tell() != file_size:
                raise BackendException('dpbx: something wrong')

            log.Debug('dpbx,files_upload_session_finish(): %s' % res_metadata)
            progress.report_transfer(f.tell(), file_size)

            return res_metadata

        finally:
            f.close()
Example No. 10
    def _put(self, source_path, remote_filename):
        remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/'))
        remote_path = '/' + os.path.join(remote_dir, remote_filename).rstrip()

        file_size = os.path.getsize(source_path.name)
        f = source_path.open('rb')
        try:
            progress.report_transfer(0, file_size)
            buf = f.read(DPBX_UPLOAD_CHUNK_SIZE)
            log.Debug('dpbx,files_upload_session_start([%d bytes]), total: %d' % (len(buf), file_size))
            upload_sid = self.api_client.files_upload_session_start(buf)
            log.Debug('dpbx,files_upload_session_start(): %s' % upload_sid)
            upload_cursor = UploadSessionCursor(upload_sid.session_id, f.tell())
            commit_info = CommitInfo(remote_path, mode=WriteMode.overwrite, autorename=False, client_modified=None, mute=True)
            res_metadata = None
            progress.report_transfer(f.tell(), file_size)

            requested_offset = None
            current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE
            retry_number = globals.num_retries

            # We're doing our own error handling and retry logic because
            # with Dropbox chunked uploads we can retry only the failed chunk
            while (f.tell() < file_size) or not res_metadata:
                try:
                    if requested_offset is not None:
                        upload_cursor.offset = requested_offset

                    if f.tell() != upload_cursor.offset:
                        f.seek(upload_cursor.offset)
                    buf = f.read(current_chunk_size)

                    # reset temporary status variables
                    requested_offset = None
                    current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE
                    retry_number = globals.num_retries

                    if len(buf) != 0:
                        log.Debug('dpbx,files_upload_session_append([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset))
                        self.api_client.files_upload_session_append(buf, upload_cursor.session_id, upload_cursor.offset)
                    else:
                        log.Debug('dpbx,files_upload_session_finish([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset))
                        res_metadata = self.api_client.files_upload_session_finish(buf, upload_cursor, commit_info)

                    upload_cursor.offset = f.tell()
                    log.Debug('progress: %d of %d' % (upload_cursor.offset, file_size))
                    progress.report_transfer(upload_cursor.offset, file_size)
                except ApiError as e:
                    error = e.error
                    if isinstance(error, UploadSessionLookupError) and error.is_incorrect_offset():
                        # The server reports that we should send another chunk. Most likely this is caused by
                        # a network error during the previous upload attempt. In that case we get the expected
                        # offset from the server and it's enough to just seek() and retry
                        new_offset = error.get_incorrect_offset().correct_offset
                        log.Debug('dpbx,files_upload_session_append: incorrect offset: %d (expected: %s)' % (upload_cursor.offset, new_offset))
                        if requested_offset is not None:
                            # the chunk failed even after a seek attempt; something is wrong and there is no safe way to recover
                            raise BackendException("dpbx: unable to chunk upload")
                        else:
                            # will seek and retry
                            requested_offset = new_offset
                        continue
                    raise
                except ConnectionError as e:
                    log.Debug('dpbx,files_upload_session_append: %s' % e)

                    retry_number -= 1
                    if retry_number == 0:
                        raise

                    # We don't know for sure whether the partial upload succeeded, so it's better to retry a smaller amount to avoid an unnecessary re-upload
                    log.Info('dpbx: sleeping a bit before chunk retry')
                    time.sleep(30)
                    current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE // 5  # integer division keeps the read() size an int
                    requested_offset = None
                    continue

            if f.tell() != file_size:
                raise BackendException('dpbx: something wrong')

            log.Debug('dpbx,files_upload_session_finish(): %s' % res_metadata)
            progress.report_transfer(f.tell(), file_size)

            # A few sanity checks
            if res_metadata.path_display != remote_path:
                raise BackendException('dpbx: result path mismatch: %s (expected: %s)' % (res_metadata.path_display, remote_path))
            if res_metadata.size != file_size:
                raise BackendException('dpbx: result size mismatch: %s (expected: %s)' % (res_metadata.size, file_size))

        finally:
            f.close()