def download_from_drive(service, file_id, file_title, output_file):
    """Download a file from Google Drive by file ID.

    Args:
        service: authenticated Google Drive API service instance.
        file_id: ID of the file to download.
        file_title: human-readable name, used only for progress messages.
        output_file: local path the downloaded bytes are written to.
            (The original docstring described a non-existent
            ``output_directory`` parameter.)

    Returns:
        None.  The file content is written to ``output_file``.

    Preconditions:
        Files containing imagery tiles must have been exported to Google
        Drive from Google Earth Engine.
    """
    # Local imports keep the module importable without googleapiclient.
    from googleapiclient.http import MediaIoBaseDownload
    import io

    print(f'\tSaving {file_title}...')
    request = service.files().get_media(fileId=file_id)
    # Context manager guarantees the handle is closed even when a chunk
    # request raises (the original leaked the handle on error).
    with io.FileIO(output_file, 'wb') as file_handler:
        downloader = MediaIoBaseDownload(file_handler, request,
                                         chunksize=16384 * 16384)
        done = False
        while not done:
            status, done = downloader.next_chunk()
            print('\t\tDownload {0}%...'.format(int(status.progress() * 100)))
def download_google_drive_file(service, blob, destination_file_name=None):
    """Download a file from Google Drive into the current working directory.

    Args:
        service: authenticated Drive API service.
        blob: dict describing the Drive file; must contain 'id' and 'name'.
        destination_file_name: path, relative to the CWD, to save to.
    """
    local_path = os.path.normpath(os.path.join(os.getcwd(), destination_file_name))
    name = blob['name']
    # os.path.dirname is portable; the original rsplit('/', 1) broke on
    # Windows because normpath converts separators to backslashes there.
    parent_dir = os.path.dirname(local_path)
    if parent_dir and not os.path.exists(parent_dir):
        # makedirs handles nested directories; the original os.mkdir could not.
        os.makedirs(parent_dir)
    fh = io.FileIO(local_path, 'wb+')
    try:
        request = service.files().get_media(fileId=blob['id'])
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
    except Exception:
        print(f'{name} failed to download')  # fixed 'downoad' typo
        raise  # bare raise preserves the original traceback (raise (e) did not)
    finally:
        fh.close()  # the original leaked the handle
    print(f'{name} successfully downloaded to {local_path}')
def download_file_from_gdrive(file_path, drive_file, service):
    """Download a file from Google Drive.

    Google-native documents are exported with the corresponding
    non-Google mimetype from GOOGLE_MIME_TYPES (and the Drive copy is
    renamed to carry the export extension); everything else is
    downloaded as-is.

    Args:
        file_path: directory string, where the file will be saved.
        drive_file: file information dict with 'name', 'id' and 'mimeType'.
        service: Google Drive service instance.
    """
    file_id = drive_file['id']
    file_name = drive_file['name']
    if drive_file['mimeType'] in GOOGLE_MIME_TYPES:  # .keys() was redundant
        export_mime, export_ext = GOOGLE_MIME_TYPES[drive_file['mimeType']]
        if not file_name.endswith(export_ext):
            # Append the export extension and rename the Drive copy to match.
            file_name = '{}{}'.format(drive_file['name'], export_ext)
            service.files().update(
                fileId=file_id, body={'name': file_name}).execute()
        request = service.files().export(
            fileId=file_id, mimeType=export_mime).execute()
        with io.FileIO(os.path.join(file_path, file_name), 'wb') as file_write:
            file_write.write(request)
    else:
        request = service.files().get_media(fileId=file_id)
        # Context manager fixes the handle leak in the original.
        with io.FileIO(os.path.join(file_path, drive_file['name']),
                       'wb') as file_io:
            downloader = MediaIoBaseDownload(file_io, request)
            done = False
            while not done:
                _, done = downloader.next_chunk()
def download_file(DRIVE, file_obj, print_status=True):
    '''Download a file from your personal Google Drive via the Drive API.

    Does NOT remove the locally downloaded file.  Prints a status report
    of the download if ``print_status`` is True.

    :param DRIVE: the drive api service.
    :param file_obj: a file object, must have keys `id` and `name`.
    :param print_status: bool, whether or not to print the download status.
    :return: None.
    '''
    if 'id' not in file_obj or 'name' not in file_obj:
        print('`file_obj` needs `id` and `name`.')
        return None
    request = DRIVE.files().get_media(fileId=file_obj['id'])
    # `with` closes the handle the original never closed; 'wb' for clarity
    # (FileIO is always binary).
    with io.FileIO(file_obj['name'], mode='wb') as fh:
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
            if print_status:
                print("Download %d%%." % int(status.progress() * 100))
    return None
def download_file(service, file_id, download_file_path, download_drive_service_name):
    """Download a file from Google Drive to local storage.

    Args:
        service: authenticated Drive API service.
        file_id: Drive file id (e.g. obtained via update_file());
            when None, a failure message is printed instead.
        download_file_path: local directory (or path prefix) to download
            into; it is concatenated directly with the file name, so a
            trailing separator must be included by the caller if needed.
        download_drive_service_name: local file name to save as.
    """
    if file_id is not None:
        request = service.files().get_media(fileId=file_id)
        # Plain concatenation kept for backward compatibility with callers
        # that already embed the separator in download_file_path.
        local_download_path = download_file_path + download_drive_service_name
        # `with` guarantees the handle is closed (the original leaked it).
        with io.FileIO(local_download_path, 'wb') as fh:
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while not done:
                status, done = downloader.next_chunk()
    else:
        # No file id found — report failure (runtime message kept verbatim).
        print("下載檔案失敗,未找到檔案")
def download_file(file_id):
    """Export a Google Sheet as .xlsx and save it as 'downloaded_excel.xlsx'.

    :param file_id: Drive file id of the spreadsheet to export.
    :return: True when the file was written to disk.
    :raises TypeError: if ``file_id`` is not a string.
    :raises IOError: if the download or the local write fails.
    """
    # `unicode` no longer exists in Python 3; str covers both cases.
    if not isinstance(file_id, str):
        raise TypeError("Invalid File ID")
    result = False
    request = drive_service.files().export_media(
        fileId=file_id,
        mimeType='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    try:
        while not done:
            status, done = downloader.next_chunk()
            # %-format the percentage: the original passed it as a second
            # print argument, leaving the %d placeholder unexpanded.
            print("Download %d%%." % int(status.progress() * 100))
    except Exception as exc:  # narrowed from bare except; keep the cause
        raise IOError("Unable to download files") from exc
    try:
        with open('downloaded_excel.xlsx', 'wb') as f:
            f.write(fh.getvalue())
        result = True
    except OSError as exc:
        raise IOError("unable to write file to disk") from exc
    return result
def load_model(m):
    """Fetch the pretrained encoder weights from Google Drive (when not
    cached locally) and load them into ``m.encoder``.

    :param m: model whose ``encoder`` sub-module receives the parameters.
    :return: the same model with its encoder initialised.
    """
    weights_path = 'NLST-Tri2DNet_True_0.0001_16-00700-encoder.ptm'
    if not osp.isfile(weights_path):
        # Not cached yet: authenticate and pull the checkpoint from Drive.
        print('Please login to download the model parameters.')
        auth.authenticate_user()
        drive_service = build('drive', 'v3')
        print('Downloading the model parameters...')
        weights_file_id = '1H2PFQ_PxXa5ryKmwNivvwGR-hZf5yyGJ'
        media_request = drive_service.files().get_media(fileId=weights_file_id)
        buffer = io.BytesIO()
        downloader = MediaIoBaseDownload(buffer, media_request)
        finished = False
        while not finished:
            _, finished = downloader.next_chunk()
        buffer.seek(0)
        # Persist the checkpoint so subsequent calls skip the download.
        with open(weights_path, 'wb') as f:
            f.write(buffer.read())
    print('Loading model parameters...')
    m.encoder.load_state_dict(torch.load(weights_path))
    print('Model initialized.')
    return m
def test_media_io_base_download_empty_file(self):
    # Simulate the server response for a zero-byte file: one 200 reply
    # whose content-range reports a total size of 0, with an empty body.
    self.request.http = HttpMockSequence([
        ({'status': '200', 'content-range': '0-0/0'}, b''),
    ])
    dl = MediaIoBaseDownload(fd=self.fd, request=self.request, chunksize=3)

    # Freshly constructed downloader: nothing fetched yet, size unknown.
    self.assertEqual(self.fd, dl._fd)
    self.assertEqual(0, dl._progress)
    self.assertEqual(None, dl._total_size)
    self.assertEqual(False, dl._done)
    self.assertEqual(self.request.uri, dl._uri)

    # A single chunk must complete the (empty) download immediately,
    # with zero bytes transferred and progress reported as 0.
    status, done = dl.next_chunk()
    self.assertEqual(True, done)
    self.assertEqual(0, dl._progress)
    self.assertEqual(0, dl._total_size)
    self.assertEqual(0, status.progress())
def download_latest_revision(service, drive_file_name, destination_path):
    """Download the latest revision of a uniquely-named Drive backup file.

    Args:
        service: authenticated Drive API service.
        drive_file_name: name of the file inside the Drive backup folder;
            must match exactly one non-deleted item.
        destination_path: local path the content is written to.

    Raises:
        SystemExit: if the name matches zero or more than one Drive file.
    """
    # Check if the file exists (and is unique) among non-deleted items.
    non_deleted_drive_items = get_non_deleted_items(service)
    parent_folder_id = get_drive_backup_folder(service, non_deleted_drive_items)['id']
    matching_drive_files = get_matching_drive_files(
        non_deleted_drive_items, parent_folder_id, drive_file_name)
    if len(matching_drive_files) > 1:
        logger.error("Drive Filename '%s' is not unique!", drive_file_name)
        # exit() is injected by the site module and may be absent;
        # SystemExit is the reliable equivalent.
        raise SystemExit(1)
    if len(matching_drive_files) == 0:
        logger.error("Drive Filename '%s' does not exist!", drive_file_name)
        raise SystemExit(1)
    drive_file = matching_drive_files[0]
    logger.info("Downloading file '%s' to '%s'", drive_file_name, destination_path)
    with open(destination_path, "wb") as file_handle:
        request = service.files().get_media(fileId=drive_file['id'])
        downloader = MediaIoBaseDownload(file_handle, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
            logger.debug("Download %d%%.", int(status.progress() * 100))
def image2text(self, imgfile):
    """OCR an image by uploading it to Drive as a Google Doc and exporting
    the recognised text.

    :param imgfile: path of the local image file to recognise.
    :return: the recognised text (str).
    """
    result = io.BytesIO()
    mime = 'application/vnd.google-apps.document'
    # Uploading with a Google-Doc mimeType makes Drive run OCR on the image.
    res = self.service.files().create(
        body={'name': imgfile, 'mimeType': mime},
        media_body=MediaFileUpload(imgfile, mimetype=mime,
                                   resumable=True)).execute()
    try:
        downloader = MediaIoBaseDownload(
            result,
            self.service.files().export_media(fileId=res['id'],
                                              mimeType="text/plain"))
        done = False
        while not done:
            status, done = downloader.next_chunk()
    finally:
        # Always remove the temporary Doc, even when the export fails
        # (the original leaked the uploaded Doc on error).
        self.service.files().delete(fileId=res['id']).execute()
    return result.getvalue().decode('utf-8')
def download_file(self, file_id, path, filename, mime_type):
    """Download a Drive file to ``path + filename`` in 64 MiB chunks.

    On Drive quota errors this optionally rotates to the next service
    account and restarts the download; otherwise it marks the transfer
    cancelled and re-raises.  An external cancel request is honored via
    ``self.is_cancelled``.
    """
    request = self.__service.files().get_media(fileId=file_id)
    # Strip path separators so the name cannot escape the target directory.
    filename = filename.replace('/', '')
    fh = io.FileIO('{}{}'.format(path, filename), 'wb')
    downloader = MediaIoBaseDownload(fh, request, chunksize=65 * 1024 * 1024)
    done = False
    while not done:
        if self.is_cancelled:
            # External cancellation: close the partial file and stop.
            fh.close()
            break
        try:
            # next_chunk() returns (status, done); the status object is
            # stashed on self for external progress reporting.
            self.dstatus, done = downloader.next_chunk()
        except HttpError as err:
            if err.resp.get('content-type', '').startswith('application/json'):
                reason = json.loads(err.content).get('error').get(
                    'errors')[0].get('reason')
                if reason == 'downloadQuotaExceeded' or reason == 'dailyLimitExceeded':
                    if USE_SERVICE_ACCOUNTS:
                        if self.sa_count == self.service_account_count:
                            # Every service account is exhausted: give up.
                            self.is_cancelled = True
                            raise err
                        else:
                            # Rotate credentials and restart from scratch.
                            self.switchServiceAccount()
                            LOGGER.info(f"Got: {reason}, Trying Again...")
                            return self.download_file(
                                file_id, path, filename, mime_type)
                else:
                    # Non-quota API error: cancel and propagate.
                    self.is_cancelled = True
                    LOGGER.info(f"Got: {reason}")
                    raise err
            else:
                raise err
    # Reset the byte counter once the transfer loop has ended.
    self._file_downloaded_bytes = 0
def run(self):
    """Thread entry point: download the Drive file into memory and build a
    Flask attachment response for it, tracking progress in self.progress
    (0-100 while downloading, 101 once the response is fully prepared).
    """
    # NOTE(review): `responses` is declared global but never used here —
    # looks like dead code; confirm before removing.
    global responses
    request = self.drive.files().get_media(fileId=self.file_id)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    response = False
    while not response:
        chunk = downloader.next_chunk()
        if chunk:
            status, response = chunk
            if status:
                # progress() is 0..1; exposed as an integer percentage.
                self.progress = int(status.progress() * 100)
    fh.seek(0)
    # Build the response inside an app context — this runs on a worker
    # thread, outside any request context.
    with self.app.app_context():
        self.resp = make_response(fh.read())
    fh.close()
    self.resp.headers.set('Content-Type', self.type)
    db_sess = db_session.create_session()
    # Look up the original (unsecured) file name for the attachment header.
    item = db_sess.query(Items).filter(
        (Items.uploaded_file_secured_name == self.name)).first()
    self.resp.headers.set('Content-Disposition', 'attachment',
                          filename=item.uploaded_file_name)
    # Sentinel: 101 signals the download/response is ready to be served.
    self.progress = 101
def post(self, request, format=None, *args, **kwargs):
    """Copy the listed Google Drive files to a MediaWiki instance.

    Validates the payload, downloads each listed Drive file fully into
    memory, uploads it to the wiki using the caller's MediaWiki OAuth
    credentials, and returns the wiki's image info for each successful
    upload.
    """
    serializer = GooglePhotosUploadInputSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    validated_data = serializer.validated_data
    file_list = validated_data.get("fileList", None)
    # Drive access uses the token supplied in the request body.
    drive_service = self.get_google_drive_service(
        access_token=validated_data.get("token", None)
    )
    # MediaWiki OAuth credentials come from the user's social-auth record.
    social_auth = self.request.user.social_auth.get(
        provider="mediawiki"
    ).extra_data["access_token"]
    wiki_uploader = WikiUploader(
        host=settings.WIKI_URL,
        consumer_secret=settings.SOCIAL_AUTH_MEDIAWIKI_SECRET,
        consumer_token=settings.SOCIAL_AUTH_MEDIAWIKI_KEY,
        access_token=social_auth.get("oauth_token", None),
        access_secret=social_auth.get("oauth_token_secret", None),
    )
    uploaded_results = []
    for file in file_list:
        # Download the Drive file completely into memory, then hand the
        # stream to the wiki uploader.
        request = drive_service.files().get_media(fileId=file["id"])
        fh = io.BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while done is False:
            download_status, done = downloader.next_chunk()
        uploaded, image_info = wiki_uploader.upload_file(
            file_name=file["name"], file_stream=fh, description=file["description"]
        )
        if uploaded:
            uploaded_results.append(image_info)
    return Response(data=uploaded_results, status=status.HTTP_200_OK)
def import_drive_folder(self, folder_id, dest_path):
    """Recursively export every Google Sheet in a Drive folder as .xlsx
    files under ``dest_path``, mirroring the Drive folder structure.

    :param folder_id: Drive id of the folder to import.
    :param dest_path: local directory the exports are written into.
    """
    # Dedicated query variable: the original clobbered folder_id.
    query = "'{0}' in parents".format(folder_id)
    req = self.service.files().list(pageSize=50, q=query).execute()
    for f in req.get('files', []):
        file_path = os.path.join(os.path.abspath(dest_path), f['name'])
        if f['mimeType'] == 'application/vnd.google-apps.folder':
            if not os.path.exists(file_path):
                os.mkdir(file_path)
            # Plain method call; the original's explicit
            # ExcelGenerator.import_drive_folder(self, ...) bypassed any
            # subclass override.
            self.import_drive_folder(f['id'],
                                     os.path.join(dest_path, f['name']))
        else:
            request = self.service.files().export_media(
                fileId=f['id'],
                mimeType='application/vnd.openxmlformats-officedocument'
                         '.spreadsheetml.sheet')
            fh = io.BytesIO()
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while not done:
                status, done = downloader.next_chunk()
                print("Download {0:50} {1:3d}%.".format(
                    f['name'], int(status.progress() * 100)))
            # Single-argument os.path.join was a no-op; plain concat suffices.
            with open(file_path + '.xlsx', "wb") as excel_file:
                excel_file.write(fh.getvalue())
            fh.close()
def getDocument(ID):
    """Flask endpoint: download a Drive file and return it as a .docx
    attachment.

    :param ID: Drive file id.
    :return: a file-attachment response, or a JSON error message.
    """
    item = service.files().get(fileId=ID).execute()
    # Check before touching item['name'] — the original indexed the dict
    # first, so its "No file found" branch could never trigger.
    if not item:
        return jsonify({"message": "No file found."})
    fileName = item['name'] + ".docx"
    request = service.files().get_media(fileId=ID)
    myPath = p.join(app_files, fileName)
    myFile = io.FileIO(myPath, 'wb')
    try:
        fileRequest = MediaIoBaseDownload(myFile, request)
        done = False
        while not done:
            try:
                download_progress, done = fileRequest.next_chunk()
            except errors.HttpError as error:
                return jsonify({'message': 'An error occurred: %s' % error})
            # NOTE(review): the original slept 120 s between chunks, which
            # only throttled the download, and its trailing rm(myPath) was
            # unreachable; both removed.
    finally:
        # Flush and close before sending so the full file is on disk.
        myFile.close()
    return send_file(myPath, as_attachment=True, mimetype=item['mimeType'])
def get_contents_to_fileobj(self, key, fileobj_to_store_to, *, progress_callback=None):
    """Fetch the object stored under *key* into *fileobj_to_store_to*.

    *progress_callback(progress_pct, 100)* is invoked at most once per
    0.1 percentage points of progress.  Returns the metadata for *key*.
    """
    key = self.format_key_for_backend(key)
    self.log.debug("Starting to fetch the contents of: %r to %r", key,
                   fileobj_to_store_to)
    next_prog_report = 0.0
    with self._object_client(not_found=key) as clob:
        req = clob.get_media(bucket=self.bucket_name, object=key)
        download = MediaIoBaseDownload(fileobj_to_store_to, req,
                                       chunksize=CHUNK_SIZE)
        done = False
        while not done:
            status, done = download.next_chunk()
            if not status:
                # No status object for this chunk: nothing to report.
                continue
            progress_pct = status.progress() * 100
            self.log.debug("Download of %r: %d%%", key, progress_pct)
            if progress_callback and progress_pct > next_prog_report:
                progress_callback(progress_pct, 100)
                next_prog_report = progress_pct + 0.1
        return self._metadata_for_key(clob, key)
def _download(self, folder, path):
    """Download the newest revision of every uniquely-named file in a
    Drive folder into ``path``/``folder``.

    :param folder: name of the remote Drive folder.
    :param path: local parent directory; created if missing.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    path = os.path.join(path, folder)
    if not os.path.exists(path):
        os.makedirs(path)
    rlist = self._get_list()
    folder_id = self._get_folder_id(folder, rlist)
    # Keep only entries that live inside the requested folder.
    rlist = [dic for dic in rlist if folder_id in dic['parents']]
    fname = {dic['name'] for dic in rlist}
    for name in fname:
        # NOTE(review): substring match (`name in dic['name']`) is kept
        # from the original; it can group distinct files sharing a prefix.
        name_list = [dic for dic in rlist if name in dic['name']]
        # The most recently created entry wins.
        file_id = max(name_list, key=lambda x: x['createdTime'])['id']
        request = self.service.files().get_media(fileId=file_id)
        # `with` fixes the per-file handle leak in the original.
        with io.FileIO(os.path.join(path, name), 'wb') as fh:
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while not done:
                status, done = downloader.next_chunk()
        self.logger.info('Download file : {}({})'.format(
            os.path.join(path, name), file_id))
def download_file_to_directory(self, file_metadata, export_dir='', recursive=True):
    """Download a Drive file (or folder tree) to a given directory.

    :param file_metadata: Google file object (dict with 'id' and 'name').
    :param export_dir: location to drop off the file; it is concatenated
        directly with the file name, so include a trailing '/' if needed.
    :param recursive: when the item is a folder, descend into children.
    :rtype: None (or an explanatory string when a folder is passed
        non-recursively).
    """
    file_id = file_metadata['id']
    file_name = file_metadata['name']
    new_dir = export_dir + file_name
    if self.is_folder(file_metadata):
        if not recursive:
            return "Download request was run with a folder, but was not recursive"
        children = self.children_search(file_id)
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        for child in children:
            self.download_file_to_directory(child, new_dir + '/', True)
    else:
        request = self.service.files().get_media(fileId=file_id)
        # `with` closes the handle the original leaked.
        with io.FileIO(new_dir, 'wb') as fh:
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while not done:
                status, done = downloader.next_chunk()
def open_file(self):
    """Yield a BytesIO holding the content of the file behind
    ``self.handle``.

    Google-native documents are exported to PDF; every other mime type
    is downloaded verbatim.
    """
    service = self._get_cookie()
    metadata = service.files().get(
        fileId=self.handle.relative_path).execute()
    if 'vnd.google-apps' in metadata.get('mimeType'):
        # Google-type files cannot be fetched directly; export to PDF.
        request = service.files().export_media(
            fileId=self.handle.relative_path,
            fields='files(id, name)',
            mimeType='application/pdf')
    else:
        # Regular binary file: no export needed.
        request = service.files().get_media(
            fileId=self.handle.relative_path,
            fields='files(id, name)')
    buffer = BytesIO()
    downloader = MediaIoBaseDownload(buffer, request)
    finished = False
    while not finished:
        status, finished = downloader.next_chunk()
    # Rewind so consumers read from the start of the payload.
    buffer.seek(0)
    yield buffer
def google_drive_file_download_optima_project(
        file_id='1nEgWvQoeXaKhDqxsMF5_KJX3DXKzeLS4',
        filename='C:\\Users\\Anastasia Siedykh\\Documents\\Backup\\KPI report\\MODULE SET V6\\sales_report_riga\\tender\\optima_raw\\3.optima_project.xls',
        service_account_file='C:/Users/Anastasia Siedykh/Documents/Backup/KPI report/MODULE SET V6/spherical-voice-308407-91bd91dbc945.json'):
    """Download the Optima project spreadsheet from Google Drive.

    The previously hard-coded id and paths are now keyword parameters
    with the old values as defaults, so existing no-argument callers are
    unaffected.

    :param file_id: Drive id of the spreadsheet to download.
    :param filename: local path the file is written to.
    :param service_account_file: path to the service-account JSON key.
    """
    SCOPES = ['https://www.googleapis.com/auth/drive']
    credentials = service_account.Credentials.from_service_account_file(
        service_account_file, scopes=SCOPES)
    service = build('drive', 'v3', credentials=credentials)
    # The listing is purely diagnostic output; the download below uses
    # the explicit file_id.  (Dead assignments and commented-out query
    # code from the original were removed.)
    results = service.files().list(
        pageSize=10,
        fields="nextPageToken, files(id, name, mimeType)").execute()
    pp.pprint(results)
    request = service.files().get_media(fileId=file_id)
    # `with` closes the handle the original left open.
    with io.FileIO(filename, 'wb') as fh:
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
            print("Download %d%%." % int(status.progress() * 100))
async def download_gdrive(gdrive, service, uri):
    """Download a Drive file into TEMP_DOWNLOAD_DIRECTORY with live
    progress edits on the Telegram message, then optionally mirror it
    back via upload() after asking for confirmation in the BOTLOG group.

    On a Drive API 404, falls back to scraping the public
    ``uc?export=download`` page with requests/BeautifulSoup.

    :param gdrive: Telegram event/message used for progress edits.
    :param service: authenticated Google Drive service.
    :param uri: Drive share link or bare file id.
    :return: a markdown status string, or False for unsupported folders.
    """
    reply = ""
    global is_cancelled
    """ - remove drivesdk and export=download from link - """
    if not isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.mkdir(TEMP_DOWNLOAD_DIRECTORY)
    if "&export=download" in uri:
        uri = uri.split("&export=download")[0]
    elif "file/d/" in uri and "/view" in uri:
        uri = uri.split("?usp=drivesdk")[0]
    # Try the known URL shapes in turn to extract the file id.
    try:
        file_Id = uri.split("uc?id=")[1]
    except IndexError:
        try:
            file_Id = uri.split("open?id=")[1]
        except IndexError:
            if "/view" in uri:
                file_Id = uri.split("/")[-2]
            else:
                try:
                    file_Id = uri.split(
                        "uc?export=download&confirm=")[1].split("id=")[1]
                except IndexError:
                    """ - if error parse in url, assume given value is Id - """
                    file_Id = uri
    try:
        file = await get_information(service, file_Id)
    except HttpError as e:
        # NOTE(review): a non-404 HttpError falls through with file_name
        # unbound, which would raise NameError below — confirm intent.
        if "404" in str(e):
            # API cannot see the file: scrape the public download page.
            drive = "https://drive.google.com"
            url = f"{drive}/uc?export=download&id={file_Id}"
            session = requests.session()
            download = session.get(url, stream=True)
            try:
                download.headers["Content-Disposition"]
            except KeyError:
                # No direct attachment: parse the interstitial page for the
                # confirm link (large files) or an error caption.
                page = BeautifulSoup(download.content, "lxml")
                try:
                    export = drive + page.find(
                        "a", {"id": "uc-download-link"}).get("href")
                except AttributeError:
                    try:
                        error = (
                            page.find("p", {"class": "uc-error-caption"}).text
                            + "\n"
                            + page.find("p", {"class": "uc-error-subcaption"}).text
                        )
                    except Exception:
                        reply += (
                            "`[ARQUIVO - ERRO]`\n\n"
                            "`Status` : **FALHA** - falha no download.\n"
                            "`Motivo` : uncaught err."
                        )
                    else:
                        reply += (
                            "`[ARQUIVO - ERRO]`\n\n"
                            "`Status` : **FALHA** - falha no download.\n"
                            f"`Motivo` : {error}"
                        )
                    return reply
                download = session.get(export, stream=True)
                # Size comes from the page text, e.g. "name (12M)".
                file_size = human_to_bytes(
                    page.find("span", {"class": "uc-name-size"})
                    .text.split()[-1]
                    .strip("()")
                )
            else:
                file_size = int(download.headers["Content-Length"])
            file_name = re.search(
                'filename="(.*)"', download.headers["Content-Disposition"]
            ).group(1)
            file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
            with io.FileIO(file_path, "wb") as files:
                CHUNK_SIZE = None
                current_time = time.time()
                display_message = None
                first = True
                is_cancelled = False
                for chunk in download.iter_content(CHUNK_SIZE):
                    if is_cancelled is True:
                        raise CancelProcess
                    if not chunk:
                        break
                    diff = time.time() - current_time
                    if first is True:
                        downloaded = len(chunk)
                        first = False
                    else:
                        downloaded += len(chunk)
                    percentage = downloaded / file_size * 100
                    speed = round(downloaded / diff, 2)
                    eta = round((file_size - downloaded) / speed)
                    # 10-segment progress bar rendered with ■/▨.
                    prog_str = "`Baixando` | [{0}{1}] `{2}%`".format(
                        "".join(["■" for i in range(
                            math.floor(percentage / 10))]),
                        "".join(["▨" for i in range(
                            10 - math.floor(percentage / 10))]),
                        round(percentage, 2),
                    )
                    current_message = (
                        "`[ARQUIVO - DOWNLOAD]`\n\n"
                        f"`{file_name}`\n"
                        f"`Status`\n{prog_str}\n"
                        f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
                        f" @ {humanbytes(speed)}`\n"
                        f"`Tempo Estimado` -> {time_formatter(eta)}"
                    )
                    # Throttle Telegram edits to ~15 s intervals (or the
                    # final chunk).
                    if (
                        round(diff % 15.00) == 0
                        and (display_message != current_message)
                        or (downloaded == file_size)
                    ):
                        await gdrive.edit(current_message)
                        display_message = current_message
                    files.write(chunk)
    else:
        # The Drive API knows the file: use MediaIoBaseDownload.
        file_name = file.get("name")
        mimeType = file.get("mimeType")
        if mimeType == "application/vnd.google-apps.folder":
            await gdrive.edit("`Abortando, o download da pasta não é suportado...`")
            return False
        file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
        request = service.files().get_media(fileId=file_Id, supportsAllDrives=True)
        with io.FileIO(file_path, "wb") as df:
            downloader = MediaIoBaseDownload(df, request)
            complete = False
            is_cancelled = False
            current_time = time.time()
            display_message = None
            while complete is False:
                if is_cancelled is True:
                    raise CancelProcess
                status, complete = downloader.next_chunk()
                if status:
                    file_size = status.total_size
                    diff = time.time() - current_time
                    downloaded = status.resumable_progress
                    percentage = downloaded / file_size * 100
                    speed = round(downloaded / diff, 2)
                    eta = round((file_size - downloaded) / speed)
                    prog_str = "`Baixando` | [{0}{1}] `{2}%`".format(
                        "".join(["■" for i in range(
                            math.floor(percentage / 10))]),
                        "".join(["▨" for i in range(
                            10 - math.floor(percentage / 10))]),
                        round(percentage, 2),
                    )
                    current_message = (
                        "`[ARQUIVO - DOWNLOAD]`\n\n"
                        f"`{file_name}`\n"
                        f"`Status`\n{prog_str}\n"
                        f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
                        f" @ {humanbytes(speed)}`\n"
                        f"`Tempo Estimado` -> {time_formatter(eta)}"
                    )
                    if (
                        round(diff % 15.00) == 0
                        and (display_message != current_message)
                        or (downloaded == file_size)
                    ):
                        await gdrive.edit(current_message)
                        display_message = current_message
    # Final success notice, then ask in BOTLOG whether to mirror back.
    await gdrive.edit(
        "`[ARQUIVO - DOWNLOAD]`\n\n"
        f"`Nome :` `{file_name}`\n"
        f"`Tamanho :` `{humanbytes(file_size)}`\n"
        f"`Caminho :` `{file_path}`\n"
        "`Status :` **OK** - Download concluído."
    )
    msg = await gdrive.respond("`Responda à pergunta em seu grupo do BOTLOG`")
    async with gdrive.client.conversation(BOTLOG_CHATID) as conv:
        ask = await conv.send_message("`Prosseguir com o mirror? [y/N]`")
        try:
            r = conv.wait_event(
                events.NewMessage(outgoing=True, chats=BOTLOG_CHATID))
            r = await r
        except Exception:
            # No/failed answer: default to "no mirror".
            ans = "N"
        else:
            ans = r.message.message.strip()
            await gdrive.client.delete_messages(BOTLOG_CHATID, r.id)
        await gdrive.client.delete_messages(gdrive.chat_id, msg.id)
        await gdrive.client.delete_messages(BOTLOG_CHATID, ask.id)
        if ans.capitalize() == "N":
            return reply
        elif ans.capitalize() == "Y":
            try:
                result = await upload(gdrive, service, file_path, file_name,
                                      mimeType)
            except CancelProcess:
                reply += (
                    "`[ARQUIVO - CANCELADO]`\n\n"
                    "`Status` : **OK** - sinal recebido: cancelado."
                )
            else:
                reply += (
                    "`[ARQUIVO - UPLOAD]`\n\n"
                    f"`Nome :` `{file_name}`\n"
                    f"`Tamanho :` `{humanbytes(result[0])}`\n"
                    f"`Link :` [{file_name}]({result[1]})\n"
                    "`Status :` **OK**\n\n"
                )
            return reply
        else:
            await gdrive.client.send_message(
                BOTLOG_CHATID, "`Tipo de resposta inválido [Y/N] apenas...`"
            )
            return reply
def pull(self, images, file_name=None, save=True, **kwargs):
    '''pull an image from google drive, based on a query (uri or id)

    Parameters
    ==========
    images: uri(s) given by the user to pull, in the format
            <collection>/<namespace>.
    file_name: the user's requested name for the file (None for a
            default derived from the storage uri).
    save: if True, save the container to the database using self.add().

    Returns
    =======
    finished: a single container path, or list of paths
    '''
    if not isinstance(images, list):
        images = [images]

    bot.debug('Execution of PULL for %s images' % len(images))

    # If used internally we want to return a list to the user.
    finished = []
    for image in images:
        q = parse_image_name(remove_uri(image))

        # Use container search to find the container based on uri
        bot.info('Searching for %s in drive://%s' % (q['uri'], self._base))
        matches = self._container_query(q['uri'], quiet=True)
        if len(matches) == 0:
            bot.info('No matching containers found.')
            sys.exit(0)

        # Derive a per-image target path.  The original assigned back into
        # the `file_name` parameter, so with several images every download
        # after the first reused the first image's name.
        target = file_name
        if target is None:
            target = q['storage'].replace('/', '-')

        # We give the first match, the uri should be unique and known
        image = matches[0]
        request = self._service.files().get_media(fileId=image['id'])
        with open(target, 'wb') as fh:
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            bar = None
            # Download and update the user with progress bar
            while done is False:
                status, done = downloader.next_chunk()
                # Create bar on first call
                if bar is None:
                    total = status.total_size / (1024 * 1024.0)
                    bar = ProgressBar(expected_size=total, filled_char='=')
                bar.show(status.resumable_progress / (1024 * 1024.0))

        image_file = target
        if save is True:
            # If the user is saving to local storage, assemble the uri
            # here in the expected format <collection>/<namespace>:<tag>@<version>
            image_uri = q['uri']
            if "uri" in image:
                image_uri = image['uri']
            # Update metadata with selfLink
            image['selfLink'] = downloader._uri
            # image_path must be the freshly downloaded file; the original
            # passed `image_file` here before it was ever assigned
            # (a NameError at runtime).
            container = self.add(image_path=target,
                                 image_uri=image_uri,
                                 metadata=image,
                                 url=downloader._uri)
            # When the container is created, this is the path to the image
            image_file = container.image
        if os.path.exists(image_file):
            bot.debug('Retrieved image file %s' % image_file)
            bot.custom(prefix="Success!", message=image_file)
            finished.append(image_file)

    if len(finished) == 1:
        finished = finished[0]
    return finished
async def download_gdrive(gdrive, service, uri):
    """Download a Drive file into TEMP_DOWNLOAD_DIRECTORY with live
    progress edits on the Telegram message, then optionally mirror it
    back via upload() after asking for confirmation in the BOTLOG group.

    On a Drive API 404, falls back to scraping the public
    ``uc?export=download`` page with requests/BeautifulSoup.

    :param gdrive: Telegram event/message used for progress edits.
    :param service: authenticated Google Drive service.
    :param uri: Drive share link or bare file id.
    :return: a markdown status string (or the folder-abort edit result).
    """
    reply = ''
    global is_cancelled
    """ - remove drivesdk and export=download from link - """
    if not isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.mkdir(TEMP_DOWNLOAD_DIRECTORY)
    if "&export=download" in uri:
        uri = uri.split("&export=download")[0]
    elif "file/d/" in uri and "/view" in uri:
        uri = uri.split("?usp=drivesdk")[0]
    # Try the known URL shapes in turn to extract the file id.
    try:
        file_Id = uri.split("uc?id=")[1]
    except IndexError:
        try:
            file_Id = uri.split("open?id=")[1]
        except IndexError:
            if "/view" in uri:
                file_Id = uri.split("/")[-2]
            else:
                try:
                    file_Id = uri.split(
                        "uc?export=download&confirm=")[1].split("id=")[1]
                except IndexError:
                    """ - if error parse in url, assume given value is Id - """
                    file_Id = uri
    try:
        file = await get_information(service, file_Id)
    except HttpError as e:
        # NOTE(review): a non-404 HttpError falls through with file_name
        # unbound, which would raise NameError below — confirm intent.
        if '404' in str(e):
            # API cannot see the file: scrape the public download page.
            drive = 'https://drive.google.com'
            url = f'{drive}/uc?export=download&id={file_Id}'
            session = requests.session()
            download = session.get(url, stream=True)
            try:
                download.headers['Content-Disposition']
            except KeyError:
                # No direct attachment: parse the interstitial page for the
                # confirm link (large files) or an error caption.
                page = BeautifulSoup(download.content, 'lxml')
                try:
                    export = drive + page.find('a', {
                        'id': 'uc-download-link'
                    }).get('href')
                except AttributeError:
                    try:
                        error = (page.find('p', {
                            'class': 'uc-error-caption'
                        }).text + '\n' + page.find('p', {
                            'class': 'uc-error-subcaption'
                        }).text)
                    except Exception:
                        reply += ("`[FILE - ERROR]`\n\n"
                                  "`Status` : **BAD** - failed to download.\n"
                                  "`Reason` : uncaught err.")
                    else:
                        reply += ("`[FILE - ERROR]`\n\n"
                                  "`Status` : **BAD** - failed to download.\n"
                                  f"`Reason` : {error}")
                    return reply
                download = session.get(export, stream=True)
                # Size comes from the page text, e.g. "name (12M)".
                file_size = human_to_bytes(
                    page.find('span', {
                        'class': 'uc-name-size'
                    }).text.split()[-1].strip('()'))
            else:
                file_size = int(download.headers['Content-Length'])
            file_name = re.search(
                'filename="(.*)"',
                download.headers["Content-Disposition"]).group(1)
            file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
            with io.FileIO(file_path, 'wb') as files:
                CHUNK_SIZE = None
                current_time = time.time()
                display_message = None
                first = True
                is_cancelled = False
                for chunk in download.iter_content(CHUNK_SIZE):
                    if is_cancelled is True:
                        raise CancelProcess
                    if not chunk:
                        break
                    diff = time.time() - current_time
                    if first is True:
                        downloaded = len(chunk)
                        first = False
                    else:
                        downloaded += len(chunk)
                    percentage = downloaded / file_size * 100
                    speed = round(downloaded / diff, 2)
                    eta = round((file_size - downloaded) / speed)
                    # 10-segment progress bar rendered with ▰/▱.
                    prog_str = "`Downloading` | [{0}{1}] `{2}%`".format(
                        "".join(["▰" for i in range(
                            math.floor(percentage / 10))]),
                        "".join(["▱" for i in range(
                            10 - math.floor(percentage / 10))]),
                        round(percentage, 2))
                    current_message = (
                        "`[FILE - DOWNLOAD]`\n\n"
                        f"`{file_name}`\n"
                        f"`Status`\n{prog_str}\n"
                        f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
                        f" @ {humanbytes(speed)}`\n"
                        f"`ETA` -> {time_formatter(eta)}")
                    # Throttle Telegram edits to ~15 s intervals (or the
                    # final chunk).
                    if round(diff % 15.00) == 0 and (
                            display_message != current_message) or (
                                downloaded == file_size):
                        await gdrive.edit(current_message)
                        display_message = current_message
                    files.write(chunk)
    else:
        # The Drive API knows the file: use MediaIoBaseDownload.
        file_name = file.get('name')
        mimeType = file.get('mimeType')
        if mimeType == 'application/vnd.google-apps.folder':
            return await gdrive.edit("`Aborting, folder download not support`")
        file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
        request = service.files().get_media(fileId=file_Id)
        with io.FileIO(file_path, 'wb') as df:
            downloader = MediaIoBaseDownload(df, request)
            complete = False
            is_cancelled = False
            current_time = time.time()
            display_message = None
            while complete is False:
                if is_cancelled is True:
                    raise CancelProcess
                status, complete = downloader.next_chunk()
                if status:
                    file_size = status.total_size
                    diff = time.time() - current_time
                    downloaded = status.resumable_progress
                    percentage = downloaded / file_size * 100
                    speed = round(downloaded / diff, 2)
                    eta = round((file_size - downloaded) / speed)
                    prog_str = "`Downloading` | [{0}{1}] `{2}%`".format(
                        "".join(["▰" for i in range(
                            math.floor(percentage / 10))]),
                        "".join(["▱" for i in range(
                            10 - math.floor(percentage / 10))]),
                        round(percentage, 2))
                    current_message = (
                        "`[FILE - DOWNLOAD]`\n\n"
                        f"`{file_name}`\n"
                        f"`Status`\n{prog_str}\n"
                        f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
                        f" @ {humanbytes(speed)}`\n"
                        f"`ETA` -> {time_formatter(eta)}")
                    if round(diff % 15.00) == 0 and (
                            display_message != current_message) or (
                                downloaded == file_size):
                        await gdrive.edit(current_message)
                        display_message = current_message
    # Final success notice, then ask in BOTLOG whether to mirror back.
    await gdrive.edit("`[FILE - DOWNLOAD]`\n\n"
                      f"`Name :` `{file_name}`\n"
                      f"`Size :` `{humanbytes(file_size)}`\n"
                      f"`Path :` `{file_path}`\n"
                      "`Status :` **OK** - Successfully downloaded.")
    msg = await gdrive.respond("`Answer the question in your BOTLOG group`")
    async with gdrive.client.conversation(BOTLOG_CHATID) as conv:
        ask = await conv.send_message("`Proceed with mirroring? [y/N]`")
        try:
            r = conv.wait_event(
                events.NewMessage(outgoing=True, chats=BOTLOG_CHATID))
            r = await r
        except Exception:
            # No/failed answer: default to "no mirror".
            ans = 'N'
        else:
            ans = r.message.message.strip()
            await gdrive.client.delete_messages(BOTLOG_CHATID, r.id)
        await gdrive.client.delete_messages(gdrive.chat_id, msg.id)
        await gdrive.client.delete_messages(BOTLOG_CHATID, ask.id)
        if ans.capitalize() == 'N':
            return reply
        elif ans.capitalize() == "Y":
            try:
                result = await upload(gdrive, service, file_path, file_name,
                                      mimeType)
            except CancelProcess:
                reply += ("`[FILE - CANCELLED]`\n\n"
                          "`Status` : **OK** - received signal cancelled.")
            else:
                reply += ("`[FILE - UPLOAD]`\n\n"
                          f"`Name :` `{file_name}`\n"
                          f"`Size :` `{humanbytes(result[0])}`\n"
                          f"`Link :` [{file_name}]({result[1]})\n"
                          "`Status :` **OK**\n\n")
            return reply
        else:
            await gdrive.client.send_message(
                BOTLOG_CHATID, "`Invalid answer type [Y/N] only...`")
            return reply
def _parse_drive_id(uri):
    """Extract a Google Drive file id from a share link.

    Recognizes ``uc?id=``, ``open?id=``, ``.../<id>/view`` and
    ``uc?export=download&confirm=...id=`` style links.  When no known
    pattern matches, the whole string is assumed to already be an id.
    (Fixes the original bug where a URI matching none of the patterns and
    lacking "/view" left ``file_Id`` unbound and crashed with NameError.)
    """
    for marker in ("uc?id=", "open?id="):
        if marker in uri:
            return uri.split(marker)[1]
    if "/view" in uri:
        return uri.split("/")[-2]
    if "uc?export=download&confirm=" in uri:
        tail = uri.split("uc?export=download&confirm=")[1].split("id=")
        if len(tail) > 1:
            return tail[1]
    # - if error parse in url, assume given value is Id -
    return uri


async def download_gdrive(gdrive, service, uri):
    """Download a Drive file given a share link (or bare id), then ask in
    the BOTLOG group whether to mirror it back to Drive.

    Args:
        gdrive: Telegram event; used for status edits and replies.
        service: authorized Drive v3 service instance.
        uri: Drive share link in any common form, or a raw file id.

    Returns:
        A markdown status string describing the download (and upload, if
        the user answered Y), or the result of ``gdrive.edit`` when the
        target is a folder (folders are not supported).
    """
    reply = ''
    if not isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.mkdir(TEMP_DOWNLOAD_DIRECTORY)
    # - remove drivesdk and export=download from link -
    if "&export=download" in uri:
        uri = uri.split("&export=download")[0]
    elif "file/d/" in uri and "/view" in uri:
        uri = uri.split("?usp=drivesdk")[0]
    file_Id = _parse_drive_id(uri)
    file = await get_information(service, file_Id)
    file_name = file.get('name')
    mimeType = file.get('mimeType')
    if mimeType == 'application/vnd.google-apps.folder':
        return await gdrive.edit("`Aborting, folder download not support`")
    file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
    request = service.files().get_media(fileId=file_Id)
    with io.FileIO(file_path, 'wb') as df:
        downloader = MediaIoBaseDownload(df, request)
        complete = False
        current_time = time.time()
        display_message = None
        while complete is False:
            status, complete = downloader.next_chunk()
            if status:
                file_size = status.total_size
                diff = time.time() - current_time
                downloaded = status.resumable_progress
                percentage = downloaded / file_size * 100
                speed = round(downloaded / diff, 2)
                # Guard: a 0 B/s first sample would divide by zero.
                eta = round((file_size - downloaded) / speed) if speed else 0
                prog_str = "`Downloading` | [{0}{1}] `{2}%`".format(
                    "".join(["**#**" for i in
                             range(math.floor(percentage / 5))]),
                    "".join(["**--**" for i in
                             range(20 - math.floor(percentage / 5))]),
                    round(percentage, 2))
                current_message = (
                    "`[FILE - DOWNLOAD]`\n\n"
                    f"`Name :`\n`{file_name}`\n\n"
                    "`Status :`\n"
                    f"{prog_str}\n"
                    f"`{humanbytes(downloaded)} of {humanbytes(file_size)} "
                    f"@ {humanbytes(speed)}`\n"
                    f"`ETA` -> {time_formatter(eta)}"
                )
                # Only edit when the text changed; swallow flood-wait /
                # edit errors so the download itself is never interrupted.
                if display_message != current_message:
                    try:
                        await gdrive.edit(current_message)
                        display_message = current_message
                    except Exception:
                        pass
    reply += (
        "`[FILE - DOWNLOAD]`\n\n"
        f"`Name :`\n`{file_name}`\n"
        f"`Size :` `{humanbytes(file_size)}`\n"
        f"`Path :` `{file_path}`\n"
        "`Status :` **OK**\n"
        "`Reason :` Successfully downloaded...\n\n"
    )
    # Ask (in the BOTLOG group) whether to mirror the file back to Drive.
    msg = await gdrive.respond("`Answer the question in your BOTLOG group`")
    async with gdrive.client.conversation(BOTLOG_CHATID) as conv:
        ask = await conv.send_message("`Proceed with mirroring? [y/N]`")
        try:
            r = conv.wait_event(
                events.NewMessage(outgoing=True, chats=BOTLOG_CHATID))
            r = await r
        except Exception:
            # No answer (timeout etc.): default to "no".
            ans = 'N'
        else:
            ans = r.message.message.strip()
            await gdrive.client.delete_messages(BOTLOG_CHATID, r.id)
        await gdrive.client.delete_messages(gdrive.chat_id, msg.id)
        await gdrive.client.delete_messages(BOTLOG_CHATID, ask.id)
    if ans.capitalize() == 'N':
        return reply
    elif ans.capitalize() == "Y":
        result = await upload(gdrive, service, file_path, file_name, mimeType)
        reply += (
            "`[FILE - UPLOAD]`\n\n"
            f"`Name :`\n`{file_name}`\n"
            f"`Size :` `{humanbytes(result[0])}`\n"
            f"`Download :` [{file_name}]({result[1]})\n"
            "`Status :` **OK**\n\n"
        )
        return reply
    else:
        await gdrive.client.send_message(
            BOTLOG_CHATID, "`Invalid answer type [Y/N] only...`"
        )
        return reply
def test_media_io_base_download_retries_5xx(self): self.request.http = HttpMockSequence([ ({ 'status': '500' }, ''), ({ 'status': '500' }, ''), ({ 'status': '500' }, ''), ({ 'status': '200', 'content-range': '0-2/5' }, b'123'), ({ 'status': '503' }, ''), ({ 'status': '503' }, ''), ({ 'status': '503' }, ''), ({ 'status': '200', 'content-range': '3-4/5' }, b'45'), ]) download = MediaIoBaseDownload(fd=self.fd, request=self.request, chunksize=3) self.assertEqual(self.fd, download._fd) self.assertEqual(3, download._chunksize) self.assertEqual(0, download._progress) self.assertEqual(None, download._total_size) self.assertEqual(False, download._done) self.assertEqual(self.request.uri, download._uri) # Set time.sleep and random.random stubs. sleeptimes = [] download._sleep = lambda x: sleeptimes.append(x) download._rand = lambda: 10 status, done = download.next_chunk(num_retries=3) # Check for exponential backoff using the rand function above. self.assertEqual([20, 40, 80], sleeptimes) self.assertEqual(self.fd.getvalue(), b'123') self.assertEqual(False, done) self.assertEqual(3, download._progress) self.assertEqual(5, download._total_size) self.assertEqual(3, status.resumable_progress) # Reset time.sleep stub. del sleeptimes[0:len(sleeptimes)] status, done = download.next_chunk(num_retries=3) # Check for exponential backoff using the rand function above. self.assertEqual([20, 40, 80], sleeptimes) self.assertEqual(self.fd.getvalue(), b'12345') self.assertEqual(True, done) self.assertEqual(5, download._progress) self.assertEqual(5, download._total_size)
def fetch_homework(request_from_server=False): auth() global creds, memory_cache logger.info('fetch_homework: called') if memory_cache: logger.info('fetch_homework: checking memory cache') else: logger.info('fetch_homework: checking disk cache') memory_cache = cache.fetch(config.GOOGLE_CACHE_PATH) content = cache.content(memory_cache, config.GOOGLE_CACHE_LIFETIME, request_from_server) if content: return content try: service = build('drive', 'v3', credentials=creds) request = service.files().export_media( fileId=config.GOOGLE_HOMEWORK_DOC_ID, mimeType='text/html') fh = io.BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() logger.info("fetch_homework: Download %d%%." % int(status.progress() * 100)) html = fh.getvalue().decode('UTF-8') raw_text = html2text(html) classes = raw_text.split('Flow:')[0] for key in config.GOOGLE_HOMEWORK_DOC_REPLACEMENT.keys(): classes = classes.replace( key, config.GOOGLE_HOMEWORK_DOC_REPLACEMENT[key]) formatted_assignments = [] classes = classes.split('<class>')[1:] for class_str in classes: classes_split = class_str.split('\n') class_name = classes_split[0] for raw_assignment in classes_split[1:]: raw_assignment = raw_assignment.replace(' * ', '') detail_split = raw_assignment.split('] ') if len(detail_split) == 2: date = detail_split[0][1:] date_split = date.split('/') date_dt = datetime(2020, int(date_split[0]), int(date_split[1])) name = detail_split[1] formatted_assignments.append({ 'name': class_name + ': ' + name, 'start': date_dt.timestamp(), 'end': date_dt.timestamp() }) logger.info('fetch_homework: fetched {} classes'.format( len(formatted_assignments))) memory_cache = cache.save(formatted_assignments, config.GOOGLE_CACHE_PATH) return formatted_assignments except: return []
async def on_message(message):
    """Discord command dispatcher for the music/profile bot.

    Handles /play (YouTube URL or Google Drive search), /stop, /pause,
    /resume, /list, /profile, /search, /yuzu, /name, /bye and /help.

    Fixes vs. the original: /search crashed with TypeError when sending
    ``num_img + "件探してくるね!"`` (int + str); the Drive download now closes
    its file handle before FFmpeg opens the file.
    """
    global voice
    # Ignore messages sent by bots (including ourselves).
    if message.author.bot:
        return

    if message.content.startswith('/play'):
        voice_channel = client.get_channel(message.guild.voice_channels[0].id)
        search_word = message.content.split(" ", 1)
        if search_word[1].startswith('https://www.youtube.com'):
            # YouTube URL: extract the audio track as mp3 via youtube_dl.
            url = search_word[1]
            ydl_opts = {
                'format': 'bestaudio/best',
                'outtmpl': '%(title)s.%(ext)s',
                'postprocessors': [
                    {
                        'key': 'FFmpegExtractAudio',
                        'preferredcodec': 'mp3',
                        'preferredquality': '192'
                    },
                    {
                        'key': 'FFmpegMetadata'
                    },
                ],
            }
            ydl = youtube_dl.YoutubeDL(ydl_opts)
            data = ydl.extract_info(url, download=False)
            filename = data['title'] + ".mp3"
            if not os.path.exists(filename):
                await message.channel.send("ダウンロードしてくるからちょっと待ってて!")
                # Run the blocking download off the event loop.
                loop = asyncio.get_event_loop()
                data = await loop.run_in_executor(
                    None, lambda: ydl.extract_info(url, download=True))
            audio_source = discord.FFmpegPCMAudio(filename)
            audiofile_list.append(filename)
            if not voice:
                # Join the voice channel on first use.
                voice = await voice_channel.connect()
            # Queue the track when something is already playing or paused.
            if (audio_queue.empty() and not voice.is_playing()
                    and not voice.is_paused()):
                await message.channel.send("**" + data['title'] + "**を再生するよー♪")
                voice.play(audio_source, after=check_queue)
            else:
                await message.channel.send("**" + filename + "**を再生リストに入れておくね!")
                audio_queue.put(audio_source)
        else:
            # Not a YouTube link: search Google Drive by file name.
            # NOTE(review): search_word[1] is interpolated unescaped into
            # the Drive query — a name containing a quote breaks it.
            results = service.files().list(
                q="mimeType != 'application/vnd.google-apps.folder' and name contains '"
                + search_word[1] + "'",
                pageSize=10,
                fields="nextPageToken, files(id, name)").execute()
            items = results.get('files', [])
            if len(items) == 0:
                await message.channel.send("その曲はないみたい")
            elif len(items) == 1:
                # Exactly one match: download it (if needed) and play it.
                filename = items[0]['name']
                if not os.path.exists(filename):
                    request = service.files().get_media(
                        fileId=items[0]['id'])
                    await message.channel.send("ダウンロードしてくるからちょっと待ってて!")
                    # Fix: close the handle before FFmpeg reads the file.
                    with io.FileIO(filename, "wb") as fh:
                        downloader = MediaIoBaseDownload(fh, request)
                        done = False
                        while done is False:
                            status, done = downloader.next_chunk()
                            print("Download %d%%." %
                                  int(status.progress() * 100))
                audio_source = discord.FFmpegPCMAudio(filename)
                audiofile_list.append(filename)
                if not voice:
                    # Join the voice channel on first use.
                    voice = await voice_channel.connect()
                # Queue the track when something is playing or paused.
                if (audio_queue.empty() and not voice.is_playing()
                        and not voice.is_paused()):
                    await message.channel.send("**" + filename + "**を再生するよー♪")
                    voice.play(audio_source, after=check_queue)
                else:
                    await message.channel.send("**" + filename + "**を再生リストに入れておくね!")
                    audio_queue.put(audio_source)
            elif len(items) >= 2:
                # Several matches: list up to 10 candidates, play nothing.
                msg = "**どれにするー?**\n----------------------------\n"
                for item in items:
                    msg += item['name'] + "\n"
                msg += "----------------------------"
                await message.channel.send(msg)

    if message.content.startswith('/stop'):
        if voice.is_playing():
            await message.channel.send("曲、止めちゃうの?")
            voice.stop()
        else:
            await message.channel.send("もう止まってるよ?")

    if message.content.startswith('/pause'):
        if voice.is_paused():
            await message.channel.send("再開は/resumeだよー")
        else:
            await message.channel.send("一時停止グサァーッ!")
            voice.pause()

    if message.content.startswith('/resume'):
        if voice.is_paused():
            await message.channel.send("再開するよ!")
            voice.resume()
        else:
            await message.channel.send("再生中だよー")

    if message.content.startswith('/list'):
        if audiofile_list != []:
            msg = "今の再生リストはこんな感じだよー\n----------------------------\n"
            for i in range(0, len(audiofile_list)):
                msg += "**" + str(i + 1) + ".** " + audiofile_list[i] + "\n"
            msg += "----------------------------"
            await message.channel.send(msg)
        else:
            await message.channel.send("静かだねぇ〜")

    if message.content.startswith('/profile'):
        key = message.content.split(" ", 1)
        # Parameterized query — safe against SQL injection.
        data = ('%' + key[1] + '%', '%' + key[1] + '%')
        sql = "SELECT * FROM idol where name like %s or kana like %s"
        # Connect to the database and create a dict cursor.
        DB = settings.DB
        connection = MySQLdb.connect(
            host=DB["host"],
            user=DB["user"],
            passwd=DB["pass"],
            db=DB["db"],
            # Needed to handle Japanese text inside the tables.
            charset='utf8')
        cursor = connection.cursor(MySQLdb.cursors.DictCursor)
        # Show every matching profile.
        cursor.execute(sql, data)
        rows = cursor.fetchall()
        if len(rows) == 0:
            await message.channel.send("その名前の子はいないよ〜")
        for row in rows:
            await message.channel.send(row['name'] + "さんのプロフィールはこちら!")
            await message.channel.send(
                "名前:" + row['name'] + "(" + row['kana'] + ")\n" + "年齢:" +
                str(row['age']) + "\t誕生日:" + row['birthday'] + "\t星座:" +
                row['constellation'] + "\n" + "身長:" + str(row['height']) +
                "cm\t体重:" + str(row['weight']) + "kg\n" + "スリーサイズ:" +
                str(row['B']) + "/" + str(row['W']) + "/" + str(row['H']) +
                "\n" + "血液型:" + row['blood'] + "\t利き手:" + row['handed'] +
                "\n" + "出身:" + row['hometown'] + "\n" + "趣味:" +
                row['hobby'] + "\n" + "特技:" + row['talent'] + "\n" + "CV:" +
                row['cv'])

    # ex) /search 3 <keyword>, /search <keyword>
    if message.content.startswith('/search'):
        msg = message.content.split(" ")
        if msg[1].isnumeric():
            search_word = message.content.split(" ", 2)[2]
            # Clamp the requested image count to 1..5.
            num_img = int(msg[1])
            if num_img > 5:
                num_img = 5
            elif num_img < 1:
                num_img = 1
        else:
            search_word = message.content.split(" ", 1)[1]
            num_img = 1
        print("search word : " + search_word)
        param = {'q': search_word}
        response = requests.get("http://images.google.com/images",
                                params=param)
        print(str(response.status_code) + response.reason)
        # Raise when the status code is not 2xx.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        elements = soup.select("img[src*='http']")
        # Fix: num_img is an int — the original `num_img + "件..."`
        # raised TypeError here.
        await message.channel.send(str(num_img) + "件探してくるね!")
        for i in range(num_img):
            img = elements[i]
            await message.channel.send(img.attrs['src'])

    if message.content.startswith('/yuzu'):
        await message.channel.send("なになに?柚とお話したいの?")

    if message.content.startswith('/name'):
        await message.channel.send(client.user.display_name)
        await client.user.edit(username="******")
        await message.channel.send(client.user.display_name)

    if message.content == '/bye':
        await message.channel.send("じゃあねー♪")
        voice_client = message.guild.voice_client
        if voice_client:
            await voice_client.disconnect()
            print("ボイスチャンネルから切断しました")
        print("ログアウトします")
        await client.logout()

    if message.content == '/help':
        msg = "柚の使い方はこちら♪\n"
        for i in commands.commands.items():
            msg += "`" + i[0] + "` : " + i[1] + "\n"
        await message.channel.send(msg)
documents = yaml.dump(erex_data, file) with open(r'cat_erek.yaml', 'w') as file: documents = yaml.dump(cat_data, file) with open(r'data_love.yaml', 'r') as file: bot.send_document('-447680338', file) with open(r'data_erek.yaml', 'r') as file: bot.send_document('-447680338', file) with open(r'cat_erek.yaml', 'r') as file: bot.send_document('-447680338', file) time.sleep(60) for _ in range(10): item = random.choice(items) request = service.files().get_media(fileId=item['id']) fh = io.FileIO('file.' + item['name'].split('.')[-1], 'wb') downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() print("Download %d%%." % int(status.progress() * 100)) fh.close() fh = io.FileIO('file.' + item['name'].split('.')[-1], 'rb') # bot.send_message("-1001473134565", "123", disable_notification=True) markup = types.InlineKeyboardMarkup() markup.add( types.InlineKeyboardButton('❤️', callback_data=f"{item['id']} {item['name']} 0 0"), types.InlineKeyboardButton('💔', callback_data=f"{item['id']} {item['name']} 1 0"), types.InlineKeyboardButton('😄', callback_data=f"{item['id']} {item['name']} 0 1"), types.InlineKeyboardButton('😳', callback_data=f"{item['id']} {item['name']} 1 1"), types.InlineKeyboardButton('No 😸', callback_data=f"{item['id']} {item['name']} 0 2"), types.InlineKeyboardButton('😸', callback_data=f"{item['id']} {item['name']} 1 2"),
def download_file(self, file_id, download_directory_path, username):
    """
    Downloads the file for file_id to the given download_path.

    Google-native files (mimeType containing 'vnd.google-apps') cannot be
    fetched with get_media, so they produce an error Notification and
    return None instead of a path.  Drive "Rate Limit Exceeded" errors are
    retried with a linearly increasing sleep, up to 10 attempts.

    :param file_id: Drive file id to download
    :param download_directory_path: directory the file is written into
    :param username: user recorded on any error Notification
    :return: the full path to the downloaded file, or None on failure
    """
    # Fetch only the metadata needed to name the file and pick a path.
    googledrive_file = self.googledrive_api.files().get(
        fileId=file_id, fields="name, mimeType").execute()
    # convert utf-8 chars
    # NOTE(review): encode() returns bytes; os.path.join(str, bytes)
    # raises TypeError on Python 3 — this looks like Python 2 era code;
    # confirm the runtime (or decode back to str after filtering).
    safe_filename = googledrive_file['name'].encode(
        sys.getfilesystemencoding(), 'ignore')
    file_download_path = os.path.join(download_directory_path,
                                      safe_filename)
    logger.debug('Download file %s <= googledrive://file/%s',
                 file_download_path, file_id)

    if 'vnd.google-apps' in googledrive_file['mimeType']:
        # Google-native formats are unsupported; notify and bail out.
        # if googledrive_file['mimeType'] == 'application/vnd.google-apps.spreadsheet':
        #     mimeType = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        # elif googledrive_file['mimeType'] == 'application/vnd.google-apps.document':
        #     mimeType = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        # elif googledrive_file['mimeType'] == 'application/vnd.google-apps.presentation':
        #     mimeType = 'application/vnd.openxmlformats-officedocument.presentationml.presentation'
        # else:
        n = Notification(
            event_type='data',
            status=Notification.ERROR,
            operation='googledrive_download_error',
            message=
            'Copying Google-type files is currently unsupported. Export the file to'
            ' a standard format and try again.',
            user=username,
            extra={
                'path':
                "'{}' of type {}".format(googledrive_file['name'],
                                         googledrive_file['mimeType'])
            })
        n.save()
        return None

    request = self.googledrive_api.files().get_media(fileId=file_id)
    # Incremental Partial Download
    fh = io.FileIO(file_download_path, 'wb')
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    backoff_attempts = 0
    while done is False:
        try:
            status, done = downloader.next_chunk()
            # logger.debug('status: {} percent'.format(status.progress()))
        except HttpError as e:
            # Incremental backoff for exceeding google api rate limit
            if "Rate Limit Exceeded" in str(e):
                logger.debug('RATE LIMIT EXCEEDED')
                # Linear backoff: sleep 1s, 2s, ... then give up after 10.
                backoff_attempts += 1
                time.sleep(backoff_attempts)
                if backoff_attempts > 10:
                    # NOTE(review): fh is left open on this early return.
                    n = Notification(
                        event_type='data',
                        status=Notification.ERROR,
                        operation='googledrive_download_error',
                        message=
                        'Rate Limit Exceeded. Try again after a few minutes for this file.',
                        user=username,
                        extra={
                            'path': "{}".format(googledrive_file['name'])
                        })
                    n.save()
                    return None
            elif "Only files with binary content can be downloaded" in str(
                    e):
                # Non-binary content can never succeed; notify and stop.
                # NOTE(review): fh is left open on this early return too.
                n = Notification(
                    event_type='data',
                    status=Notification.ERROR,
                    operation='googledrive_download_error',
                    message=
                    'Only files with binary content can be downloaded. Convert the file to'
                    ' a standard format and try again.',
                    user=username,
                    extra={
                        'path':
                        "'{}' of type {}".format(
                            googledrive_file['name'],
                            googledrive_file['mimeType'])
                    })
                n.save()
                return None
            else:
                # Unknown HttpError: propagate to the caller.
                raise
    fh.close()
    return file_download_path
def post(self, request):
    """Reassemble and return a user's file from its three stored splits.

    Split 2 lives on Dropbox, splits 1 and 3 on Google Drive.  The splits
    are downloaded into `splits_dir`, joined and decrypted with the
    caller-supplied private key, and the plaintext file is streamed back
    as an attachment.  All temporary splits (and the media dir) are
    cleaned up afterwards.

    Responses: 404 if no FileData record, 400 on decryption failure
    (treated as a wrong key), 409 when either cloud token is unavailable.
    """
    file_name = request.data['file_name']
    private_key = request.data['private_key']
    # getting username and User model object from JWT token
    username, user = get_user_from_jwt(request)
    try:
        file_record = FileData.objects.get(username=user,
                                           file_name=file_name)
        split_1 = file_record.split_1  # gdrive file id
        split_2 = file_record.split_2  # dropbox file
        split_3 = file_record.split_3  # gdrive file id
    except ObjectDoesNotExist:
        return Response({"message": "File not found!"},
                        status=s.HTTP_404_NOT_FOUND)
    # downloading the splits
    if check_google_auth_token(user=username) and check_dropbox_auth_token(
            user=username):
        # generating drive services
        g_service, d_service = get_drive_services(username)
        # downloading from dropbox
        d_service.files_download_to_file(
            os.path.join(splits_dir, f"{file_name}02"), split_2)
        # downloading from gdrive
        # (fix: the media request no longer shadows the view's `request`
        # parameter, which the original loop rebound)
        for i, split in zip((1, 3), (split_1, split_3)):
            media_request = g_service.files().get_media(fileId=split)
            fh = io.BytesIO()
            downloader = MediaIoBaseDownload(fh, media_request)
            done = False
            while done is False:
                status, done = downloader.next_chunk()
            fh.seek(0)
            with open(os.path.join(splits_dir, f"{file_name}0{i}"),
                      "wb") as f:
                f.write(fh.read())
        # join and decrypt the file
        if file_name:
            filepath = os.path.join(os.getcwd(), "media", "files",
                                    file_name)
            try:
                decrypt(filepath, splits_dir, private_key, username)
            except Exception:
                # Narrowed from a bare `except:`.  Decryption failure is
                # treated as a wrong key; remove the downloaded splits.
                for file in os.listdir(splits_dir):
                    os.remove(os.path.join(splits_dir, file))
                return Response(
                    {"message": "You entered an invalid private key!"},
                    status=s.HTTP_400_BAD_REQUEST)
            with open(filepath, "rb") as f:
                response = HttpResponse(f)
                response[
                    'Content-Disposition'] = 'attachment; filename="{}"'.format(
                        file_name)
            # deleting the splits from storage
            for file in os.listdir(splits_dir):
                os.remove(os.path.join(splits_dir, file))
            # deleting file storage
            for file in os.listdir(media_dir):
                os.remove(os.path.join(media_dir, file))
            return response
    return Response({"message": "Can't access storage. Download failed!"},
                    status=s.HTTP_409_CONFLICT)