def main():
    """Adds pdf files to Google Drive folder and downloads their text representations.

    Pdf files will be uploaded and automatically converted to Google Docs.
    They will then be downloaded as txt files.

    Returns:
        Nothing, local folder will be populated with text files
    """
    # authorize access and create Google Drive API service object
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    drive_service = discovery.build('drive', 'v3', http=http)

    # retrieve the file ID of a folder called Operating_Reports (manually created)
    # below code based off of https://developers.google.com/drive/v3/web/search-parameters
    response = drive_service.files().list(q="name='Operating_Reports'",
                                          spaces='drive',
                                          fields='files(id, name)').execute()
    folders = response.get('files', [])
    if not folders:
        # BUG FIX: the original indexed [0] unconditionally and crashed with
        # IndexError when the folder did not exist; fail with a clear message.
        raise RuntimeError("No folder named 'Operating_Reports' found in Drive")
    file = folders[0]
    folder_id = file.get('id')
    print('Found file: {0} ({1})'.format(file.get('name'), folder_id))

    reports_dir = "../data/operating_reports/pdf_files/"
    dest_dir = "../data/operating_reports/text_files/"

    # convert each pdf file in reports_dir to a txt file
    for item in os.listdir(reports_dir):
        file_name, ext = os.path.splitext(item)
        if ext.lower() != '.pdf':
            # BUG FIX: the original item[:-4] slice assumed every directory
            # entry ended in '.pdf'; skip anything else instead.
            continue
        file_path = os.path.join(reports_dir, item)

        # upload pdf file to folder and convert to google doc
        # below code based off of https://developers.google.com/drive/v3/web/manage-uploads
        file_metadata = {
            'name': file_name,
            'mimeType': 'application/vnd.google-apps.document',
            'parents': [folder_id]
        }
        media = MediaFileUpload(file_path, mimetype='application/pdf')
        file = drive_service.files().create(body=file_metadata,
                                            media_body=media,
                                            fields='id').execute()
        file_id = file.get('id')
        print('File ID: {0}'.format(file_id))

        # download converted google doc as text file
        # below code based off of https://developers.google.com/drive/v3/web/manage-downloads
        # https://stackoverflow.com/questions/36173356/google-drive-api-download-files-python-no-files-downloaded
        file_dest = dest_dir + file_name + ".txt"
        request = drive_service.files().export_media(fileId=file_id,
                                                     mimeType='text/plain')
        fh = FileIO(file_dest, 'wb')
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while done is False:
            status, done = downloader.next_chunk()
            print("Download %d%%." % int(status.progress() * 100))

    print('Files Successfully Converted')
def update_file(self, local_path, remote_id):
    """Replace the contents of the Drive file *remote_id* with the
    plain-text file at *local_path*."""
    payload = MediaFileUpload(local_path, mimetype='text/plain')
    request = self.service.files().update(fileId=remote_id, media_body=payload)
    request.execute()
async def upload(gdrive, service, file_path, file_name, mimeType):
    """Resumable upload of a local file to Google Drive with Telegram
    progress edits.

    Args:
        gdrive: Telegram message/event object used for progress edits.
        service: Authorised Drive v3 service object.
        file_path: Local path of the file to upload.
        file_name: Name to give the file on Drive.
        mimeType: MIME type of the file.

    Returns:
        (int, str): uploaded size in bytes and the file's webContentLink.

    Raises:
        CancelProcess: when the global is_cancelled flag is set mid-upload.
    """
    try:
        await gdrive.edit("`Processing upload...`")
    except Exception:
        pass
    body = {
        "name": file_name,
        "description": "Uploaded from Telegram using Catuserbot.",
        "mimeType": mimeType,
    }
    # Destination folder: prefer an explicitly-set parent_Id; if that global
    # was never defined, fall back to G_DRIVE_FOLDER_ID (root when None).
    # BUG FIX: the original only assigned body["parents"] inside the
    # NameError handler, so a defined parent_Id was silently ignored and the
    # fallback `else` branch dereferenced the undefined name again.
    try:
        if parent_Id is not None:
            body["parents"] = [parent_Id]
    except NameError:
        if G_DRIVE_FOLDER_ID is not None:
            body["parents"] = [G_DRIVE_FOLDER_ID]
    media_body = MediaFileUpload(file_path, mimetype=mimeType, resumable=True)
    # Start the resumable upload; next_chunk() below drives it to completion.
    file = service.files().create(
        body=body,
        media_body=media_body,
        fields="id, size, webContentLink",
        supportsAllDrives=True,
    )
    global is_cancelled
    current_time = time.time()
    response = None
    display_message = None
    is_cancelled = False
    while response is None:
        if is_cancelled is True:
            raise CancelProcess

        status, response = file.next_chunk()
        if status:
            file_size = status.total_size
            diff = time.time() - current_time
            uploaded = status.resumable_progress
            percentage = uploaded / file_size * 100
            speed = round(uploaded / diff, 2)
            # Guard against a zero speed on the very first chunk.
            eta = round((file_size - uploaded) / speed) if speed > 0 else 0
            prog_str = "`Uploading :`\n`[{0}{1}] {2}`".format(
                "".join(["▰" for i in range(math.floor(percentage / 10))]),
                "".join(["▱" for i in range(10 - math.floor(percentage / 10))]),
                round(percentage, 2),
            )
            current_message = (
                "**Uploading **\n\n"
                f"**Name : **`{file_name}`\n"
                f"**Status : **\n{prog_str}\n"
                f"`{humanbytes(uploaded)} of {humanbytes(file_size)} "
                f"@ {humanbytes(speed)}`\n"
                f"**ETA** -> `{time_formatter(eta)}`")
            if display_message != current_message:
                await gdrive.edit(current_message)
                display_message = current_message
    file_id = response.get("id")
    file_size = response.get("size")
    downloadURL = response.get("webContentLink")
    # Make the uploaded file shareable before returning the link.
    await change_permission(service, file_id)
    return int(file_size), downloadURL
'mimeType': 'application/vnd.google-apps.folder' } folder = drive_service.files().create(body=folder_metadata, fields='id').execute() folder_id = folder.get('id') print('folder_id:') print(folder_id) # I s.create a file so I can upload it: with open('/tmp/hello.txt', 'w') as fh: fh.write("hello world\n") file_metadata = {'name': 'hello.txt', 'parents': [folder_id]} # From my laptop, I s.upload a file named hello.txt: media = MediaFileUpload('/tmp/hello.txt', mimetype='text/plain') file = drive_service.files().create(body=file_metadata, media_body=media, fields='id').execute() file_id = file.get('id') print('file_id:') print(file_id) list_results = drive_service.files().list( pageSize=10, fields="nextPageToken, files(id, name)").execute() print('list_results:') items = list_results.get('files', []) if not items: print('No files found.')
def update_thumbnail(youtube_service, video_id, thumbnail_file):
    """Set *thumbnail_file* as the thumbnail image of video *video_id*
    and return the API response."""
    media = MediaFileUpload(thumbnail_file)
    set_request = youtube_service.thumbnails().set(videoId=video_id,
                                                   media_body=media)
    return set_request.execute()
def upload(self,
           filepaths,
           folder_id,
           new_folder=False,
           show_info=True,
           **kwargs):
    """
    Upload local csv/xlsx files to a Google Drive folder.

    Parameters:
        filepaths: list_like
            Absolute paths of all local files that need to be uploaded.
        folder_id: str_like
            The GDrive folder ID to upload files into (or create a new
            sub folder in).
        new_folder: boolean (default False)
            Whether to create a new subfolder.  NOTE: only a two-level
            structure is considered (parent_folder --> new_sub_folder).
        show_info: boolean (default True)
            Whether to print progress output.
        kwargs: dict_like (optional)
            When new_folder is True, pass the new subfolder's name
            (preferably under the key "folder_name").

    Returns:
        response_info: dict containing the destination folder URL, or
        None when a file with an unsupported extension is encountered.
    """
    service = build("drive", "v3", credentials=self.creds, cache_discovery=False)
    response_info = {
        "upload_to": "",
    }
    response_info["upload_to"] = \
        "https://drive.google.com/drive/u/0/folders/{0}".format(folder_id)

    if new_folder == True:
        if "folder_name" in kwargs.keys():
            folder_name = kwargs["folder_name"]
        else:
            folder_name = kwargs[list(kwargs.keys())[0]]
        # NOTE(review): actual folder creation is still disabled; uncomment
        # to really create the subfolder on Drive.
        # folder_meta_data = {"name": str(folder_name),
        #                     "mimeType": "application/vnd.google-apps.folder",
        #                     "parents": [folder_id]}
        # new_folder_response = service.files().create(body = folder_meta_data, fields = "id").execute()
        # new_folder_id = new_folder_response["id"]
    else:
        # NOTE(review): this overrides the caller-supplied folder_id with a
        # module-level gdrive_folder_id -- confirm this is intended.
        folder_id = gdrive_folder_id

    # BUG FIX: the original iterated the undefined name `files`; the
    # parameter holding the local paths is `filepaths`.
    for file_path in filepaths:
        filename = file_path.split("/")[-1]
        file_meta_data = {"name": filename, "parents": [folder_id]}
        # Here only considering csv and xlsx 2 formats
        # More MIME Types Refer: https://developers.google.com/drive/api/v3/ref-export-formats
        file_format = filename.split(".")[-1]
        if file_format == "csv":
            upload_type = "text/csv"
        elif file_format == "xlsx":
            upload_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        else:
            print("x Upload FAIL, please check file type is 'csv' or 'xlsx'.")
            print("\t- Failed file: {0}".format(filename))
            return None
        media = MediaFileUpload(file_path, mimetype=upload_type)
        file = service.files().create(body=file_meta_data,
                                      media_body=media,
                                      fields='id').execute()
        self.uploaded_files.append(filename)

    if show_info == True:
        print("-- AUTO UPLOAD")
        print("+ Upload Files To Gdrive: {0}".format(self.upload_to))
        print("+ Create New Folder Option: {0}".format(
            "True\n* New Folder Name: {0}".format(folder_name)
            if new_folder == True else "False"))
        print("+ Uploaded {0} Files:\n\t{1}".format(
            len(self.uploaded_files), "\n\t".join(self.uploaded_files)))
    else:
        pass

    # BUG FIX: the docstring promises response_info but the original
    # returned nothing.
    return response_info
async def upload(gdrive, service, file_path, file_name, mimeType):
    """Resumable upload of a local file to Google Drive with Telegram
    progress edits.

    Args:
        gdrive: Telegram message/event object used for progress edits.
        service: Authorised Drive v3 service object.
        file_path: Local path of the file to upload.
        file_name: Name to give the file on Drive.
        mimeType: MIME type of the file.

    Returns:
        (int, str): uploaded size in bytes and the file's webContentLink.
    """
    try:
        await gdrive.edit("`Processing upload...`")
    except Exception:
        pass
    body = {
        "name": file_name,
        "description": "Uploaded from Telegram using ProjectBish userbot.",
        "mimeType": mimeType,
    }
    # Destination folder: prefer a defined parent_Id, else fall back to
    # G_DRIVE_FOLDER_ID (root when that is None too).
    # BUG FIX: the original never set body['parents'] when parent_Id was
    # defined, and the fallback `else` dereferenced the undefined name.
    try:
        if parent_Id is not None:
            body['parents'] = [parent_Id]
    except NameError:
        if G_DRIVE_FOLDER_ID is not None:
            body['parents'] = [G_DRIVE_FOLDER_ID]
    media_body = MediaFileUpload(
        file_path,
        mimetype=mimeType,
        resumable=True
    )
    # Start the resumable upload; next_chunk() below drives it.
    file = service.files().create(body=body,
                                  media_body=media_body,
                                  fields="id, size, webContentLink")
    current_time = time.time()
    response = None
    display_message = None
    while response is None:
        status, response = file.next_chunk()
        await asyncio.sleep(0.3)
        if status:
            file_size = status.total_size
            diff = time.time() - current_time
            uploaded = status.resumable_progress
            percentage = uploaded / file_size * 100
            speed = round(uploaded / diff, 2)
            # Guard against a zero speed on the very first chunk.
            eta = round((file_size - uploaded) / speed) if speed > 0 else 0
            prog_str = "`Uploading` | [{0}{1}] `{2}%`".format(
                "".join(["**#**" for i in range(math.floor(percentage / 5))]),
                "".join(["**--**" for i in range(20 - math.floor(percentage / 5))]),
                round(percentage, 2))
            current_message = (
                "`[FILE - UPLOAD]`\n\n"
                f"`Name :` `{file_name}`\n"
                "`Status :`\n"
                f"{prog_str}\n"
                f"`{humanbytes(uploaded)} of {humanbytes(file_size)} "
                f"@ {humanbytes(speed)}`\n"
                f"`ETA` -> {time_formatter(eta)}"
            )
            if display_message != current_message:
                try:
                    await gdrive.edit(current_message)
                    display_message = current_message
                except Exception:
                    pass
    file_id = response.get("id")
    file_size = response.get("size")
    downloadURL = response.get("webContentLink")
    # Make the uploaded file shareable (best effort).
    try:
        await change_permission(service, file_id)
    except Exception:
        pass
    return int(file_size), downloadURL
async def upload(gdrive, service, file_path, file_name, mimeType):
    """Resumable, cancellable upload of a local file to Google Drive with
    throttled Telegram progress edits.

    Args:
        gdrive: Telegram message/event object used for progress edits.
        service: Authorised Drive v3 service object.
        file_path: Local path of the file to upload.
        file_name: Name to give the file on Drive.
        mimeType: MIME type of the file.

    Returns:
        (int, str): uploaded size in bytes and the file's webContentLink.

    Raises:
        CancelProcess: when the global is_cancelled flag is set mid-upload.
    """
    try:
        await gdrive.edit("`Processing upload...`")
    except Exception:
        pass
    body = {
        "name": file_name,
        "mimeType": mimeType,
    }
    # Destination folder selection.
    # BUG FIX: the original try body was just `pass`, so the NameError
    # handler (and therefore body['parents']) could never execute; both a
    # defined parent_Id and a configured G_DRIVE_FOLDER_ID were ignored.
    try:
        if parent_Id is not None:
            body['parents'] = [parent_Id]
    except NameError:
        if G_DRIVE_FOLDER_ID is not None:
            body['parents'] = [G_DRIVE_FOLDER_ID]
    media_body = MediaFileUpload(file_path, mimetype=mimeType, resumable=True)
    # Start the resumable upload; next_chunk() below drives it.
    file = service.files().create(body=body,
                                  media_body=media_body,
                                  fields="id, size, webContentLink")
    global is_cancelled
    current_time = time.time()
    response = None
    display_message = None
    is_cancelled = False
    while response is None:
        if is_cancelled:
            raise CancelProcess

        status, response = file.next_chunk()
        if status:
            file_size = status.total_size
            diff = time.time() - current_time
            uploaded = status.resumable_progress
            percentage = uploaded / file_size * 100
            speed = round(uploaded / diff, 2)
            # Guard against a zero speed on the very first chunk.
            eta = round((file_size - uploaded) / speed) if speed > 0 else 0
            prog_str = "`Uploading` | [{0}{1}] `{2}%`".format(
                "".join(["●" for i in range(math.floor(percentage / 10))]),
                "".join(["○" for i in range(10 - math.floor(percentage / 10))]),
                round(percentage, 2))
            current_message = (
                "`[FILE - UPLOAD]`\n\n"
                f"`{file_name}`\n"
                f"`Status`\n{prog_str}\n"
                f"`{humanbytes(uploaded)} of {humanbytes(file_size)} "
                f"@ {humanbytes(speed)}`\n"
                f"`ETA` -> {time_formatter(eta)}")
            # Throttle edits to roughly 15-second marks, but always show the
            # final (complete) state.  Parentheses make the original
            # and/or precedence explicit.
            if (round(diff % 15.00) == 0 and display_message != current_message) \
                    or (uploaded == file_size):
                await gdrive.edit(current_message)
                display_message = current_message
    file_id = response.get("id")
    file_size = response.get("size")
    downloadURL = response.get("webContentLink")
    # Make the uploaded file shareable (best effort).
    try:
        await change_permission(service, file_id)
    except Exception:
        pass
    return int(file_size), downloadURL
def upload(filepath, mimeType, parent=None):
    """Upload (or update, when a same-named file exists) *filepath* on
    Google Drive, retrying up to 5 times.

    Args:
        filepath: Local path of the file to upload.
        mimeType: MIME type passed to MediaFileUpload.
        parent: Destination folder ID; defaults to the project root_id.

    Returns:
        The Drive file ID on success, False on failure, or True when the
        NO_UPLOAD dev flag short-circuits the upload.
    """
    # If no parent is specified, use the project root_id
    if not parent:
        parent = Drive.root_id

    if Dev.get("NO_UPLOAD"):
        return True

    filepath = Path(filepath)
    file = None

    # We will attempt to upload 5x before we give up
    attempt = 0
    while attempt < 5:
        try:
            # Check to see if the file is already uploaded
            results = Drive.ls(parent)
            for r in results:
                if r["name"] == filepath.name:
                    # Same name found: update the existing file in place.
                    file = Drive.service.files().update(
                        fileId=r["id"],
                        body={
                            "name": r["name"],
                        },
                        media_body=MediaFileUpload(
                            filepath.absolute(),
                            chunksize=51200*1024,
                            mimetype=mimeType,
                            resumable=True
                        )
                    )
                    break

            # If file is not already uploaded
            if not file:
                file = Drive.service.files().create(
                    body={
                        "name": filepath.name,
                        "parents": [
                            parent
                        ],
                    },
                    media_body=MediaFileUpload(
                        filepath.absolute(),
                        chunksize=51200*1024,
                        mimetype=mimeType,
                        resumable=True
                    )
                )

            # Drive the resumable upload to completion.
            print(f'----> "{filepath.name}" Uploaded 0%', end="\r", flush=True)
            response = None
            while response is None:
                status, response = file.next_chunk()
                if status:
                    print(f'----> "{filepath.name}" Uploaded {int(status.progress() * 100)}%', end="\r", flush=True)

            # If we made it this far then we should be all set
            attempt = 99
        except Exception as exc:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit and discarded the error entirely;
            # catch Exception and surface the cause in the log.
            attempt += 1
            Log(f'Upload attempt #{attempt}.. ({exc})', "notice")

    # If we got a return from the Drive AND we completed the while loop
    if file and attempt == 99:
        print(f' "{filepath.name}" Uploaded successfully!')
        # Try 5x to get the new ID from the file
        attempt = 0
        while attempt < 5:
            # Renamed from `id` to avoid shadowing the builtin.
            file_id = Drive.get_id(filepath.name, root=parent)
            if file_id:
                return file_id
            attempt += 1
        return False
    else:
        Log(f'Failed to upload "{filepath.name}"!', "warning")
        return False
async def gDrive_upload_file(creds, file_path, message):
    """Upload *file_path* to Google Drive, editing *message* with a
    progress bar as chunks complete.

    Args:
        creds: Google OAuth credentials.
        file_path: Local path of the file to upload.
        message: Pyrogram message object used for progress edits.

    Returns:
        The new Drive file ID.
    """
    # Create Google Drive service instance
    service = build(
        "drive",
        "v3",
        credentials=creds,
        cache_discovery=False
    )
    # getting the mime type of the file, defaulting to text/plain
    mime_type = guess_type(file_path)[0]
    mime_type = mime_type if mime_type else "text/plain"
    # File body description
    media_body = MediaFileUpload(
        file_path,
        mimetype=mime_type,
        chunksize=150*1024*1024,
        resumable=True
    )
    file_name = os.path.basename(file_path)
    body = {
        "name": file_name,
        "description": "Uploaded using PyrogramUserBot gDrive v7",
        "mimeType": mime_type,
    }
    # Insert a file
    u_file_obj = service.files().create(body=body, media_body=media_body)
    response = None
    display_message = ""
    while response is None:
        status, response = u_file_obj.next_chunk()
        if status:
            percentage = int(status.progress() * 100)
            # BUG FIX: the bar characters were mojibake ("â–ˆ"/"â–‘", i.e.
            # the UTF-8 bytes of █/░ mis-decoded as Latin-1); restore the
            # intended block glyphs.
            progress_str = "[{0}{1}]\nProgress: {2}%\n".format(
                "".join(["█" for i in range(math.floor(percentage / 5))]),
                "".join(["░" for i in range(20 - math.floor(percentage / 5))]),
                round(percentage, 2)
            )
            current_message = f"uploading to gDrive\nFile Name: {file_name}\n{progress_str}"
            if display_message != current_message:
                try:
                    await message.edit_text(current_message)
                    display_message = current_message
                except Exception as e:
                    # Telegram may reject identical/too-frequent edits;
                    # log and keep uploading.
                    logger.info(str(e))
    # Permission insertion (anyone-with-link) is intentionally disabled;
    # see https://developers.google.com/drive/v3/reference/permissions
    file_id = response.get("id")
    return file_id
def upload_object(self,
                  bucket_name,
                  object_name,
                  read_path,
                  predefined_acl=None,
                  projection=None,
                  **object_resource):
    """
    Uploads object in chunks.

    Optional parameters and valid object resources are listed here
    [https://cloud.google.com/storage/docs/json_api/v1/objects/insert]

    :param bucket_name: Bucket identifier.
    :type bucket_name: string
    :param object_name: Can take string representation of object resource
        or list denoting path to object on GCS.
    :type object_name: list or string
    :param read_path: Local path of object to upload.
    :type read_path: string
    :param predefined_acl: Apply a predefined set of access controls to
        this object.
    :param projection: Set of properties to return.
    :param object_resource: Supply optional properties
        [https://cloud.google.com/storage/docs/json_api/v1/objects/insert#request-body]
    :returns: GcsResponse object.
    :raises: HttpError if non-retryable errors are encountered.
    """
    resp_obj = GcsResponse('uploaded')

    # Resumable, chunked upload; let MediaFileUpload sniff the mimetype
    # first and fall back to a generic binary type when it cannot.
    media = MediaFileUpload(read_path, chunksize=self._chunksize, resumable=True)
    if not media.mimetype():
        media = MediaFileUpload(read_path, 'application/octet-stream', resumable=True)

    req = self._service.objects().insert(
        bucket=bucket_name,
        name=self._parse_object_name(object_name),
        media_body=media,
        predefinedAcl=predefined_acl,
        projection=projection,
        body=object_resource)

    # Number of consecutive chunks that errored without making progress.
    progressless_iters = 0
    resp = None
    while resp is None:
        error = None
        try:
            progress, resp = req.next_chunk()
        except HttpError as e:
            error = e
            # Client errors (status < 500) are not retryable: re-raise.
            if e.resp.status < 500:
                raise
        except self._RETRYABLE_ERRORS as e:
            error = e
        if error:
            progressless_iters += 1
            # Backs off (and eventually raises) when too many retryable
            # errors occur without progress.
            self._handle_progressless_iter(error, progressless_iters)
        else:
            progressless_iters = 0

    resp_obj.load_resp(resp, is_download=False)
    return resp_obj
def push(self, path, name, tag=None): '''push an image to Google Cloud Drive, meaning uploading it path: should correspond to an absolte image path (or derive it) name: should be the complete uri that the user has requested to push. tag: should correspond with an image tag. This is provided to mirror Docker ''' # The root of the drive for containers (the parent folder) parent = self._get_or_create_folder(self._base) image = None path = os.path.abspath(path) bot.debug("PUSH %s" % path) if not os.path.exists(path): bot.error('%s does not exist.' % path) sys.exit(1) names = parse_image_name(remove_uri(name), tag=tag) if names['version'] is None: version = get_image_hash(path) names = parse_image_name(remove_uri(name), tag=tag, version=version) # Update metadata with names, flatten to only include labels metadata = self.get_metadata(path, names=names) metadata = metadata['data'] metadata.update(names) metadata.update(metadata['attributes']['labels']) del metadata['attributes'] file_metadata = { 'name': names['storage'], 'mimeType': 'application/octet-stream', 'parents': [parent['id']], 'properties': metadata } media = MediaFileUpload(path, resumable=True) try: bot.spinner.start() image = self._service.files().create(body=file_metadata, media_body=media, fields='id').execute() # Add a thumbnail! thumbnail = get_thumbnail() with open(thumbnail, "rb") as f: body = { "contentHints": { "thumbnail": { "image": base64.urlsafe_b64encode(f.read()).decode('utf8'), "mimeType": "image/png" } } } image = self._service.files().update(fileId=image['id'], body=body).execute() bot.spinner.stop() print(image['name']) except HttpError: bot.error('Error uploading %s' % path) pass return image
def test_media_file_upload_mimetype_detection(self):
    """MediaFileUpload should sniff the mimetype from the filename and
    fall back to application/octet-stream when detection fails."""
    png_upload = MediaFileUpload(datafile('small.png'))
    self.assertEqual('image/png', png_upload.mimetype())

    unknown_upload = MediaFileUpload(datafile('empty'))
    self.assertEqual('application/octet-stream', unknown_upload.mimetype())
def main():
    """Read orders, menu prices and stock from Google Sheets plus recipes
    from Google Docs, generate per-client bills and a shopping list, then
    upload both files back to Drive.

    Sheet names, column headers and runtime strings are in Portuguese and
    must stay as-is (they are part of the spreadsheet/doc contract).
    """
    creds = None
    # Cached OAuth token; re-run the installed-app flow when absent/expired.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    drive_service = build('drive', 'v3', credentials=creds)
    sheets_service = build('sheets', 'v4', credentials=creds)
    docs_service = build('docs', 'v1', credentials=creds)

    # Call the Sheets API to fetch order information (one column per product).
    sheet = sheets_service.spreadsheets()
    orders_result = sheet.values().get(
        spreadsheetId=ORDERS_SPREADSHEET_ID,
        range='Pedidos',
        majorDimension='COLUMNS').execute()
    orders = orders_result.get('values', [])
    # First cell of each column is the header; the rest are the rows.
    d = {col[0]: col[1:] for col in orders}
    orders_df = pd.DataFrame(data=d)

    # Fetch price information ('Menu' sheet: product -> price pairs).
    sheet = sheets_service.spreadsheets()
    prices_result = sheet.values().get(
        spreadsheetId=ORDERS_SPREADSHEET_ID,
        range='Menu').execute()
    prices = dict(prices_result.get('values', [])[1:])

    # Fetch stock information.
    stock_result = sheet.values().get(
        spreadsheetId=STOCK_SPREADSHEET_ID,
        range='Estoque',
        majorDimension='COLUMNS').execute()
    stock = stock_result.get('values', [])
    d = {col[0]: col[1:] for col in stock}
    stock_df = pd.DataFrame(data=d)
    # Accumulator column for the total quantity needed of each ingredient.
    stock_df["Quantidade necessaria"] = [float(0)] * len(stock_df.index)
    stock_df = stock_df.set_index("Ingrediente")

    # Fetch recipe documents from the Drive directory.
    recipes_response = drive_service.files().list(
        q="'{}' in parents and trashed = False".format(RECIPES_DIR_ID),
        spaces='drive',
        fields='files(id, name)').execute()
    recipes = {}
    for recipe_file in recipes_response['files']:
        document = docs_service.documents().get(documentId=recipe_file['id']).execute()
        title = document.get('title')
        content = document.get('body').get('content')
        recipe_lines = read_structural_elements(content).splitlines()
        recipe_dict = {}
        # Each non-empty line is expected to be "ingredient: quantity".
        for line in recipe_lines:
            if line != "":
                try:
                    recipe_dict[line[:line.index(':')]] = float(line[line.index(':')+1:])
                except Exception as e:
                    print("{}: line: <{}>".format(e, line))
        recipes[title] = recipe_dict

    # Fetch the message template, containing product naming and the
    # discount policy encoded in its first line.
    document = docs_service.documents().get(documentId=TEMPLATE_DOC_ID).execute()
    content = document.get('body').get('content')
    template = read_structural_elements(content)
    template_lines = template.splitlines(True)
    product_info = template_lines[0].split(',')
    # product_info[2] looks like "<multiplier>><threshold>" -- split on '>'.
    discount_treshold = int(product_info[2][product_info[2].index('>')+1:-1])
    discount_mult = float(product_info[2][:product_info[2].index('>')])

    # Process orders: one bill per client row.
    bills = open("bills.txt", "w+", encoding="utf-8")
    for index, row in orders_df.iterrows():
        # Fresh copy of the template for each client.
        template = read_structural_elements(content)
        template_lines = template.splitlines(True)
        current_orders = []
        total_price = 0
        orders_qty = 0
        for col in orders_df.columns:
            if row[col] != "0" and col != "Cliente":
                # Accumulate the required quantity of each ingredient.
                for ingredient, qty in recipes[col].items():
                    stock_df.at[ingredient, "Quantidade necessaria"] += float(qty) * float(row[col])
                # Build the order-confirmation message line.
                orders_qty += int(row[col])
                total_price += float(row[col])*float(prices[col].replace(',','.'))
                # Singular vs plural product name from the template header.
                name = product_info[0]
                if int(row[col]) > 1:
                    name = product_info[1]
                current_orders.append("{} {} de {}".format(int(row[col]), name, col))
        # Insert order lines into the message template.
        insertion_index = template_lines.index('Vamos conferir seu pedido:\n') + 1
        template_lines[insertion_index:insertion_index] = current_orders
        # Price calculation with bulk discount.
        if orders_qty > discount_treshold:
            total_price *= discount_mult
        total_price = format(total_price, '.2f')
        # Substitute client name and total price placeholders.
        for index, line in enumerate(template_lines):
            if "{total}" in line:
                template_lines[index] = line.format(total=total_price)
            elif "{client}" in line:
                template_lines[index] = line.format(client=row["Cliente"])
        # Skip the header (policy) line when writing the bill.
        bills.writelines(template_lines[1:])
    bills.close()

    with open("shopping_list.txt", "w+") as shopping_list:
        for index, row in stock_df.iterrows():
            # Package size 0 means the ingredient is counted in units,
            # not grams (bare string below kept verbatim from the original).
            """Quantidade por embalagem 0 = ingrediente medido em unidades"""
            buy_qty = 0
            if int(row["Quantidade por Embalagem"]) == 0:
                if float(row["Quantidade necessaria"]) >= float(row["Gramas"]):
                    buy_qty = ceil((float(row["Quantidade necessaria"]) - float(row["Gramas"])))
            elif (float(row["Quantidade necessaria"]) >= float(row["Gramas"])
                    or float(row["Gramas"]) <= 11):
                buy_qty = ceil((float(row["Quantidade necessaria"]) - float(row["Gramas"]))
                               / float(row["Quantidade por Embalagem"]))
            if buy_qty != 0:
                shopping_list.write("{} x{}\n".format(index, buy_qty))

    # Call the Drive API to create/update the generated files.
    needed_files = {
        'shopping_list.txt': '',
        'bills.txt': ''
    }
    for path in needed_files.keys():
        response = drive_service.files().list(
            q="name='{}' and '{}' in parents and trashed = False".format(path, MAIN_FOLDER_ID),
            spaces='drive',
            fields='files(id)').execute()
        if len(response['files']) == 0:
            print("Criando o arquivo {} no drive.".format(path))
            response = create_drive_file(path, "./" + path, MAIN_FOLDER_ID, drive_service)
            needed_files[path] = response['id']
        else:
            needed_files[path] = response['files'][0]['id']
        media = MediaFileUpload("./" + path)
        file = drive_service.files().update(
            media_body=media,
            fileId=needed_files[path],
            fields='id').execute()
    print("Informações salvas no drive com sucesso.")
def ocr_files(service, processed, processed_folder_id):
    """Download, OCR and re-attach text for the first eligible PDF found.

    Processes at most one file per call, returning True after a successful
    run (so other pipeline stages can progress) and False when there is
    nothing left to do.
    """
    for item in processed:
        appProperties = item.get('appProperties', {})
        copiedFrom = appProperties.get('copiedFrom')
        # Only OCR PDFs that were copied into the processed folder.
        if copiedFrom is None or item['mimeType'] != 'application/pdf':
            continue
        # Skip files that already have an OCR text file linked.
        if file_has_properties(item, ['ocrTextFileId']):
            continue

        # Filename without its final extension.
        basename = '.'.join(item['name'].split('.')[:-1])
        LOG.info('Downloading and OCR-ing %s', item["name"])
        with tempfile.TemporaryDirectory() as tmp_dir:
            request = service.files().get_media(fileId=item['id'],
                                                supportsTeamDrives=True)
            download_path = os.path.join(tmp_dir, 'file.pdf')
            # Chunked download of the PDF into the temp directory.
            with open(download_path, 'wb') as fobj:
                downloader = MediaIoBaseDownload(fobj, request)
                while True:
                    _, done = downloader.next_chunk()
                    if done:
                        break

            text_path = os.path.join(tmp_dir, 'file.txt')
            # OCR via tesseract; replace undecodable bytes rather than fail.
            text = textract.process(download_path, method='tesseract')
            with open(text_path, 'w') as fobj:
                fobj.write(text.decode('utf8', errors='replace'))

            LOG.info("Uploading text")
            media = MediaFileUpload(text_path, mimetype='text/plain')
            text_file = service.files().create(body={
                'name': basename + '.txt',
                'copyRequiresWriterPermission': True,
                'appProperties': {
                    'pdfSourceFileId': item['id'],
                },
                'parents': [processed_folder_id],
            }, supportsTeamDrives=True, media_body=media, fields='id').execute()

            # Link the PDF to its OCR text file so it is skipped next run.
            service.files().update(fileId=item['id'], supportsTeamDrives=True, body={
                'appProperties': {
                    'ocrTextFileId': text_file.get('id'),
                },
            }).execute()

        # After a successful processing of a file, return to let other pipeline
        # stages progress
        return True
    return False
def contents(request, parent_id):
    """Django view listing (and optionally uploading to) the Drive folder
    *parent_id*, aggregated across all of the user's stored credentials.

    parent_id == 'all' lists every file from every linked account.
    """
    drive_services = DriveCredential.objects.filter(user_id=request.user.id)
    file_list = []
    root_ids = []
    drives = []
    # One (token, credentials) pair per linked Drive account.
    cred_list = [(x.token, x.credentials) for x in drive_services]
    page = request.GET.get('page', 1)
    for i in cred_list:
        file_list += (get_file_list(auth(*i)))
        root_ids.append(get_root_id(auth(*i)))
    file = get_file(parent_id, file_list)
    if parent_id != 'all':
        if not file:
            # parent_id is a root folder: look up its stored credential.
            cred = DriveCredential.objects.get(user_id=request.user.id,
                                               root_id=parent_id)
            drive = auth(cred.token, cred.credentials)
            parent = parent_id
        else:
            # Pick the account that owns this file.
            # NOTE(review): if no credential matches the owner's email,
            # `drive`/`parent` stay unbound and the upload branch below
            # would raise NameError -- confirm this cannot happen.
            for i in cred_list:
                if (get_user(auth(*i)) == file['owners'][0]['emailAddress']):
                    drive = auth(*i)
                    parent = file['id']
    if request.method == 'POST':
        if 'upload' in request.POST:
            # Stage the uploaded file locally, push it to Drive, then
            # delete the local copy and refresh the listing.
            uploaded_file = request.FILES['document']
            fs = FileSystemStorage()
            fs.save(uploaded_file.name, uploaded_file)
            media = MediaFileUpload(BASE_DIR + '/tmp/' + uploaded_file.name)
            file_metadata = {'name': uploaded_file.name, 'parents': [parent]}
            # print(BASE_DIR)
            drive.files().create(body=file_metadata,
                                 media_body=media,
                                 fields='id').execute()
            fs.delete(uploaded_file.name)
            file_list = []
            for i in cred_list:
                file_list += (get_file_list(auth(*i)))
                root_ids.append(get_root_id(auth(*i)))
        elif 'search' in request.POST:
            string = request.POST.get('string')
            print(request.POST)
            return redirect('/search/' + string)
    # print(file_list)
    # Restrict the listing to direct children of parent_id (unless 'all').
    if parent_id != 'all':
        cleared = [
            x for x in file_list if 'parents' in x
            if parent_id in x['parents']
        ]
        print('not all')
    else:
        cleared = [x for x in file_list]
        print('all')
    # print(cleared)
    # Paginate 10 files per page, clamping bad page numbers.
    paginator = Paginator(cleared, 10)
    try:
        files = paginator.page(page)
    except PageNotAnInteger:
        files = paginator.page(1)
    except EmptyPage:
        files = paginator.page(paginator.num_pages)
    # print(files)
    for i in files:
        print(i['name'])
    context = {
        'parent_id': parent_id,
        'file_list': cleared,
        'root_ids': root_ids,
        'file': file,
        'files': files,
    }
    # print(file_list)
    return render(request, 'contents.html', context)
def generate_summary(service, processed, processed_folder_id):
    """Build a summary.csv of all fully-processed files and create or
    update it on the team drive."""
    processed_by_id = {item['id']: item for item in processed}
    # A file is "fully processed" once both properties are present.
    required_properties = ['ucasPersonalId', 'extractedName']
    fully_processed_files = [
        item for item in processed
        if file_has_properties(item, required_properties)
    ]
    # Sort files by the last word of the extracted name (surname).
    fully_processed_files = sorted(
        fully_processed_files,
        key=lambda f: (f.get('appProperties', {}).get('extractedName',
                                                      '').split(' ')[-1]))
    # Don't try to generate summary if no files
    if len(fully_processed_files) == 0:
        return
    # Do we have an summary already?
    summary_files = [
        item for item in processed
        if item.get('appProperties', {}).get('isSummary', False)
    ]
    with tempfile.TemporaryDirectory() as tmpdir:
        # Make CSV
        outpath = os.path.join(tmpdir, 'summary.csv')
        with open(outpath, 'w') as fobj:
            w = csv.writer(fobj)
            w.writerow([
                'UCAS Personal ID', 'Extracted Name', 'PDF', 'Extracted text'
            ])
            for item in fully_processed_files:
                appProperties = item.get('appProperties', {})
                # Link back to the OCR text file, when one exists.
                text_item = processed_by_id.get(
                    appProperties.get('ocrTextFileId'))
                w.writerow([
                    appProperties['ucasPersonalId'],
                    appProperties['extractedName'],
                    item['webViewLink'],
                    text_item['webViewLink'] if text_item is not None else ''
                ])
        # Upload - allow downloads otherwise this is a little pointless
        media = MediaFileUpload(outpath, mimetype='text/csv')
        api_params = {
            'body': {
                'name': 'summary.csv',
                'appProperties': {
                    'isSummary': True,
                },
            },
            'supportsTeamDrives': True,
            'media_body': media,
            'fields': 'id'
        }
        if len(summary_files) == 0:
            # First run: create the summary inside the processed folder.
            api_params['body']['parents'] = [processed_folder_id]
            service.files().create(**api_params).execute()
        else:
            # Subsequent runs: update every existing summary in place.
            for item in summary_files:
                service.files().update(fileId=item['id'], **api_params).execute()
def _upload_file(self, gdrive_service: Resource, file: str,
                 folder_ids: Dict[str, str]) -> Tuple[bool, int]:
    """Upload a file if it has changed

    Args:
        gdrive_service: Authenticated GDrive client
        file (str): Path to the file to be uploaded
        folder_ids (dict): Map of the workspace name to folder ids
    Returns:
        - (bool) Whether the file was updated
        - (int) Amount of data uploaded
    """

    # Get the appropriate folder (keyed by the file's parent directory name)
    file_path = Path(file)
    folder_name = file_path.parent.name
    folder_id = folder_ids[folder_name]

    # See if the file already exists
    # Lookup the folder
    result = gdrive_service.files().list(
        q=
        f"name = '{file_path.name}' and '{folder_id}' in parents and trashed = false",
        pageSize=2,
        fields='files/id,files/md5Checksum,files/size').execute()
    hits = result.get('files', [])

    # Determine whether to upload the file
    if len(hits) > 1:
        raise ValueError('>1 file with this name in the backup directory')
    elif len(hits) == 1:
        # Otherwise, update the existing copy
        file_id = hits[0].get('id')
        logger.info(f'Matched existing file {file_id} to {file}')

        # Check if the file's md5 hash has changed; read in 4 KiB chunks to
        # avoid loading the whole file into memory.
        my_hash = md5()
        with open(file_path, 'rb') as fp:
            buff = fp.read(4096)
            while len(buff) > 0:
                my_hash.update(buff)
                buff = fp.read(4096)
        # Unchanged content: skip the network round-trip entirely.
        if my_hash.hexdigest() == hits[0].get('md5Checksum'):
            logger.info('MD5 checksum is unchanged. Skipping upload')
            return False, 0

        # Update the file
        file_metadata = {'name': file_path.name}
        media = MediaFileUpload(file, mimetype='application/ld+json')
        result = gdrive_service.files().update(fileId=file_id,
                                               body=file_metadata,
                                               media_body=media,
                                               fields='id,size').execute()
        logger.info(f'Uploaded {file} to {result.get("id")}')
        return True, int(result.get('size'))
    else:
        # Upload the file
        file_metadata = {'name': file_path.name, 'parents': [folder_id]}
        media = MediaFileUpload(file, mimetype='application/ld+json')
        result = gdrive_service.files().create(body=file_metadata,
                                               media_body=media,
                                               fields='id,size').execute()
        logger.info(f'Uploaded {file} to {result.get("id")}')
        return True, int(result.get('size'))
def main():
    """Back up the database to Google Drive.

    Ensures a <year>/<month> folder hierarchy plus an 'ultimo_backup'
    folder exist, uploads the database under a dated name into
    <year>/<month>, then replaces 'caderneta.db' inside 'ultimo_backup'
    with the newest copy and records its file id on disk.

    The period label for the dated file name is read from ``sys.argv[1]``.
    """
    # --- OAuth: reuse cached credentials, refreshing or re-running the
    # installed-app flow when they are missing/expired
    creds = None
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    drive_service = build('drive', 'v3', credentials=creds)

    now = datetime.now()
    year_folder_id = None
    month_folder_id = None
    latest_folder_id = None

    # Locate the <year> and 'ultimo_backup' folders with a single listing
    response = drive_service.files().list(
        q="mimeType = 'application/vnd.google-apps.folder'",
        spaces='drive',
        fields='nextPageToken, files(id, name)').execute()
    found_year = False       # was: flag (== False comparisons removed)
    found_latest = False     # was: flagLatest
    for file in response.get('files', []):
        print(file)
        if file.get('name') == str(now.year):
            year_folder_id = file.get('id')
            found_year = True
        if file.get('name') == 'ultimo_backup':
            latest_folder_id = file.get('id')
            found_latest = True

    if not found_latest:
        file_metadata = {
            'name': 'ultimo_backup',
            'mimeType': 'application/vnd.google-apps.folder'
        }
        file = drive_service.files().create(body=file_metadata,
                                            fields='id').execute()
        latest_folder_id = file.get('id')
    if not found_year:
        file_metadata = {
            'name': str(now.year),
            'mimeType': 'application/vnd.google-apps.folder'
        }
        file = drive_service.files().create(body=file_metadata,
                                            fields='id').execute()
        year_folder_id = file.get('id')

    # Locate (or create) the <month> folder inside the year folder
    response = drive_service.files().list(
        q="mimeType = 'application/vnd.google-apps.folder' and '{}' in parents"
        .format(year_folder_id),
        spaces='drive',
        fields='nextPageToken, files(id, name)').execute()
    found_month = False
    for file in response.get('files', []):
        if file.get('name') == str(now.month):
            month_folder_id = file.get('id')
            found_month = True
    if not found_month:
        file_metadata = {
            'name': str(now.month),
            'mimeType': 'application/vnd.google-apps.folder',
            'parents': [str(year_folder_id)]
        }
        file = drive_service.files().create(body=file_metadata,
                                            fields='id').execute()
        month_folder_id = file.get('id')

    # Upload the dated backup into <year>/<month>
    period = sys.argv[1]
    file_name = str(now.year) + '-' + str(now.month) + '-' + str(
        now.day) + '-' + period + '.db'
    file_metadata = {'name': file_name, 'parents': [month_folder_id]}
    media = MediaFileUpload(MEDIA_PATH, resumable=True)
    file = drive_service.files().create(body=file_metadata,
                                        media_body=media,
                                        fields='id').execute()
    print('File ID: %s' % file.get('id'))

    # Remove any previous 'caderneta.db' inside 'ultimo_backup'.
    # (The original wrapped this loop in "if len(...) > 0" — redundant,
    # since iterating an empty list is a no-op.)
    response = drive_service.files().list(
        q="mimeType != 'application/vnd.google-apps.folder' and '{}' in parents"
        .format(latest_folder_id),
        spaces='drive',
        fields='nextPageToken, files(id, name)').execute()
    for file in response.get('files', []):
        if file.get('name') == 'caderneta.db':
            drive_service.files().delete(fileId=file.get('id')).execute()

    # Upload the fresh latest copy
    file_metadata = {'name': 'caderneta.db', 'parents': [latest_folder_id]}
    media = MediaFileUpload(MEDIA_PATH, resumable=True)
    file = drive_service.files().create(body=file_metadata,
                                        media_body=media,
                                        fields='id').execute()
    print('Latest file ID: %s' % file.get('id'))

    # Persist the id of the latest backup; a context manager guarantees
    # the handle is closed even if the write fails (original used
    # open/write/close without one)
    with open('latest_backup_id.txt', 'w') as arq:
        arq.write(file.get('id'))
# NOTE(review): double-cut fragment — the enclosing function/loop header
# (which defines `x`, `na`, `y`, `year`, `service`, `file_metadata`) and the
# tail of the final files().update(...) call are outside this chunk.
# Indentation below is a best-effort reconstruction; confirm against the
# full file.
    'mimeType': 'application/vnd.google-apps.folder'
}
# Create the destination folder on Drive and remember its id
file = service.files().create(body=file_metadata, fields='id').execute()
folder_id = file.get('id')
print("your folder id is %s" % (folder_id))
print("your %s filename:%s start uploading to google drive" % (x, na))
# Local path of the PDF to upload — presumably <year>/<y>/<na>; verify
filen = "%s/%s/%s" % (year, y, na)
print(filen)
file_metadata = {'name': na, 'parents': [folder_id]}
try:
    # Resumable upload of the PDF into the new folder
    media = MediaFileUpload(filen,
                            mimetype='application/pdf',
                            resumable=True)
    file = service.files().create(body=file_metadata,
                                  media_body=media,
                                  fields='id').execute()
    file_id = file.get('id')
    print('File ID Upload on google drive: %s' % (file.get('id')))
    print("")
    # Re-parent the uploaded file: fetch its current parents, then move it
    # under folder_id (call is cut off at the end of this chunk)
    file = service.files().get(fileId=file_id, fields='parents').execute()
    previous_parents = ",".join(file.get('parents'))
    file = service.files().update(
        fileId=file_id,
        addParents=folder_id,
        removeParents=previous_parents,
def run(self):
    """
    This function will update the thumbnail and title of the video
    linked to the ID given above. It measures the time difference since
    the upload, as well as counting the views and comments. After that,
    a thumbnail with these information is drawn, and the video is
    updated. This will run every 20 Minutes, so that those information
    is kept up to date.
    """
    api_service_name = "youtube"
    api_version = "v3"
    # client_secrets_file = "client_secret.json"
    # Get credentials and create an API client
    # flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
    #     client_secrets_file, scopes)
    # credentials = credentials_to_dict(flow.run_console())

    # Load cached OAuth credentials from disk
    with open(os.path.join(os.getcwd(), "credentials.json")) as infile:
        credentials = json.load(infile)

    while True:
        # Persist (possibly refreshed) credentials, then reload so the
        # on-disk and in-memory copies stay in sync across iterations
        with open(os.path.join(os.getcwd(), "credentials.json"), 'w') as outfile:
            json.dump(credentials, outfile, indent=2)
        with open(os.path.join(os.getcwd(), "credentials.json")) as infile:
            credentials = json.load(infile)
        credentials = google.oauth2.credentials.Credentials(**credentials)
        youtube = googleapiclient.discovery.build(api_service_name,
                                                  api_version,
                                                  credentials=credentials)
        credentials = credentials_to_dict(credentials)

        # Bucket the time elapsed since upload into a human-readable unit
        current_time = time.time()
        time_delta = current_time - UPLOAD_TIME
        time_delta_str = ""
        if 0 <= time_delta < HOUR:
            time_delta = int(time_delta / MINUTE)
            time_delta_str = str(time_delta) + " minute"
        elif HOUR <= time_delta < DAY:
            time_delta = int(time_delta / HOUR)
            time_delta_str = str(time_delta) + " hour"
        elif DAY <= time_delta < WEEK:
            time_delta = int(time_delta / DAY)
            time_delta_str = str(time_delta) + " day"
        elif WEEK <= time_delta < MONTH:
            time_delta = int(time_delta / WEEK)
            time_delta_str = str(time_delta) + " week"
        elif MONTH <= time_delta < YEAR:
            time_delta = int(time_delta / MONTH)
            time_delta_str = str(time_delta) + " month"
        elif YEAR <= time_delta:
            time_delta = int(time_delta / YEAR)
            time_delta_str = str(time_delta) + " year"
        if time_delta > 1:
            time_delta_str += "s"

        # Pull the current statistics for the video
        request = youtube.videos().list(part="statistics", id=VIDEO_ID)
        response = request.execute()
        stats = response['items'][0]['statistics']
        view_count = stats['viewCount']
        like_count = stats['likeCount']
        comment_count = stats['commentCount']

        def format_counter(count: str) -> str:
            """Abbreviate a numeric string, e.g. 1234 -> '1.2K', 12345678 -> '12M'."""
            count = int(count)
            if 1000 <= count < 10000:
                if int(count / 100) % 10 == 0:  # if count is between 1.000 and 1.099
                    return "1K"
                return str(int(count / 100) / 10) + "K"  # if count is between 1.100 and 9.999 return "{n}.{d}K"
            elif 10000 <= count < 1000000:  # if count is between 10.000 and 999.999
                return str(int(count / 1000)) + "K"  # return "{n}K"
            elif 1000000 <= count < 10000000:
                if int(count / 100000) % 10 == 0:  # if count is between 1.000.000 and 1.099.999
                    return "1M"
                return str(int(count / 100000) / 10) + "M"  # if count is between 1.100.000 and 9.999.999 return "{n}.{d}M"
            # FIX: was "10000000 < count", which left exactly 10,000,000
            # falling through to the unformatted return below
            elif count >= 10000000:
                return str(int(count / 1000000)) + "M"  # if count is between 10.000.000 and 999.999.999 return "{n}M"
            return str(count)  # if count is under 1000 return count without rounding

        title = f"{time_delta_str} ago."
        subtitle = f"Has {format_counter(view_count)} views"
        other_subtitle = f"with {format_counter(like_count)} likes and {comment_count} comments"
        draw_thumbnail(title, subtitle, other_subtitle)

        # Push the freshly drawn thumbnail
        request = youtube.thumbnails().set(videoId=VIDEO_ID,
                                           media_body=MediaFileUpload(
                                               os.path.join(
                                                   os.getcwd(),
                                                   "thumbnail.jpg")))
        request.execute()

        # Update title/description with the new counters
        request = youtube.videos().update(
            part="snippet",
            body={
                "id": VIDEO_ID,
                "snippet": {
                    "categoryId": 22,
                    "defaultLanguage": "en",
                    "title":
                    f"This video was uploaded {time_delta_str} ago, has {format_counter(view_count)} views with {format_counter(like_count)} likes and {comment_count} comments.",
                    "description":
                    """ DISCLAIMER!!: It can take up to 10 minutes for the thumbnail to update reload the page and watch it update. 
The github project for this video can be found on: https://github.com/SeJV/UpdatableYoutube i will be updating this every hour or minute (sometimes) comment and mark what time you commented at so you can be really confused 2 years from now This Video is not in Reverse. This Video Has 26,564,302 Views this video was uploaded 1 day ago. this video was uploaded 6 days ago this video has around 210k comments why did you get this video to 100,000 likes this video could get recommended it's mathematically unlikely for mrbeast to comment on this video MrBeast commented on my video this video was uploaded 1 week ago this video was uploaded 1 month ago All comments in this video will be hearted MrBeast will not comment in this video Thank you MrBeast for commenting on my video Don't click this video using your tongue Comment is disabled until this video hit 10k likes Thanos will not comment in this video This video will not have verified comments I magnet your finger to make you click this video Comments will say their place This video will not be age restricted This video is not free pay to watch This video will never get any verified comments Don't turn on subtitle. Comment is disabled until this video hit 10k likes if MrBeast comments on this video, i will delete this video. This video has only one comment This video will be deleted in 5 days. This video will not get recommended again. This video will not reach 100K comments. This video will not reach 100K views. This video will not get recommended. This video will be deleted in 7 days. This video is not a speedrun. This Video Has 26,564,302 Views Press This Button To Win $100,000 Get This Random Person 1,000,000 Subscribers This video has views Dynamic updatable Title Dynamic updatable Thumbnail"""
                }
            })
        response = request.execute()
        # FIX: logging.info("UPDATED TO: ", title) had no %s placeholder,
        # so the title argument was never rendered (and logging reports a
        # formatting error); use lazy %-style formatting instead
        logging.info("UPDATED TO: %s", response["snippet"]["title"])
        time.sleep(MINUTE)
# --- Google Drive upload configuration ---
API_NAME = 'drive'
API_VERSION = 'v3'
SCOPES = ['https://www.googleapis.com/auth/drive']

# Create_Service is a project helper (not visible here) that runs the
# OAuth flow and returns an authenticated Drive client
service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)

# Hard-coded destination folder id on Drive
folder_id = '1fgOX8hScgzxXEADZX9_yO462MMYtrKlJ'
file_names = ['Covid19.XLSX']
mime_types = [
    'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
]

# Upload each (file name, MIME type) pair into the destination folder
for file_name, mime_type in zip(file_names, mime_types):
    file_metadata = {'name': file_name, 'parents': [folder_id]}
    media = MediaFileUpload(filename=file_name, mimetype=mime_type)
    service.files().create(body=file_metadata,
                           media_body=media,
                           fields='id').execute()

# gauth = GoogleAuth()
# gauth.LocalWebserverAuth
# drive = GoogleDrive(gauth)
# # file1 = drive.CreateFile({'title':'Covid19.xlsx'})
# file1.SetContentFile(writer)
# # file1.Upload()

#plotting using plotly and dassh
# NOTE(review): this go.Bar(...) call is cut off at the end of the chunk;
# `go` and `df` are defined elsewhere in the file
trace4 = go.Bar(x=df['Country/Region'],
                y=df['Confirmed'],
# NOTE(review): Python 2 fragment (print statement syntax) — the enclosing
# method header, the condition guarding sys.exit(1), and the body of the
# trailing try: are all outside this chunk. Indentation is a best-effort
# reconstruction; confirm against the full file.
sys.exit(1)
self.drive_id = m_ids['ids'][0]
self.add_to_db()
print 'Created DMSF file on Drive: project:%s id:%s name:%s -> %s' % (
    self.project.id, self.id, self.name, self.drive_id)
# Determine whether any revision of this file already exists on Drive
file_is_uploaded = False
for revision in self.revisions:
    if revision.drive_id:
        file_is_uploaded = True
    if not revision.drive_id:
        if file_is_uploaded:
            # update existing id
            media_body = MediaFileUpload(
                self.revisions[0].disk_filename,
                mimetype=self.revisions[0].mime_type,
                resumable=True)
            body = {
                'id': self.drive_id,
                'title': revision.name,
                'mimeType': revision.mime_type
            }
            # Encode revision provenance into the Drive description, and
            # pack major.minor into a single integer version number
            body['description'] = revision.description + "\nCreated from revision %d.%d from DMSF id %s" % (
                revision.major_version, revision.minor_version, self.id)
            body['version'] = (revision.major_version * 10000) + revision.minor_version
            body['parents'] = [{'id': self.parent.drive_id}]
            try:
def _upload_file(self, file_path: str, parent_id: str) -> str:
    """Upload one local file to Google Drive and return a formatted link.

    Zero-byte files are sent in a single non-resumable request; everything
    else is streamed as a resumable upload in 50 MiB chunks while a
    Markdown progress string is published via ``self._progress``.

    Args:
        file_path: Local path of the file to upload.
        parent_id: Drive folder id to upload into (falsy = no parent set).

    Returns:
        ``G_DRIVE_FILE_LINK`` formatted with the file's id, name and
        human-readable size.

    Raises:
        ProcessCanceled: If the transfer is canceled before or mid-upload.
    """
    if self._is_canceled:
        raise ProcessCanceled
    # Fall back to text/plain when the MIME type cannot be guessed
    mime_type = guess_type(file_path)[0] or "text/plain"
    file_name = os.path.basename(file_path)
    body = {
        "name": file_name,
        "mimeType": mime_type,
        "description": "Uploaded using Userge"
    }
    if parent_id:
        body["parents"] = [parent_id]
    if os.path.getsize(file_path) == 0:
        # Zero-byte file: must use a non-resumable, single-shot upload
        media_body = MediaFileUpload(file_path,
                                     mimetype=mime_type,
                                     resumable=False)
        u_file_obj = self._service.files().create(
            body=body, media_body=media_body,
            supportsTeamDrives=True).execute()
        file_id = u_file_obj.get("id")
    else:
        # Resumable upload in 50 MiB chunks
        media_body = MediaFileUpload(file_path,
                                     mimetype=mime_type,
                                     chunksize=50 * 1024 * 1024,
                                     resumable=True)
        u_file_obj = self._service.files().create(body=body,
                                                  media_body=media_body,
                                                  supportsTeamDrives=True)
        c_time = time.time()
        response = None
        # next_chunk() returns (status, None) until the last chunk, then
        # (status, file-resource); cancel checks run between chunks
        while response is None:
            status, response = u_file_obj.next_chunk()
            if self._is_canceled:
                raise ProcessCanceled
            if status:
                # Derive progress numbers from the upload status and render
                # the Markdown progress bar published to the UI
                f_size = status.total_size
                diff = time.time() - c_time
                uploaded = status.resumable_progress
                percentage = uploaded / f_size * 100
                speed = round(uploaded / diff, 2)
                eta = round((f_size - uploaded) / speed)
                tmp = \
                    "__Uploading to GDrive...__\n" + \
                    "```[{}{}]({}%)```\n" + \
                    "**File Name** : `{}`\n" + \
                    "**File Size** : `{}`\n" + \
                    "**Uploaded** : `{}`\n" + \
                    "**Completed** : `{}/{}`\n" + \
                    "**Speed** : `{}/s`\n" + \
                    "**ETA** : `{}`"
                # 20-slot bar: one filled block per 5% completed
                self._progress = tmp.format(
                    "".join(
                        ["█" for i in range(math.floor(percentage / 5))]),
                    "".join([
                        "░" for i in range(20 - math.floor(percentage / 5))
                    ]), round(percentage, 2), file_name,
                    humanbytes(f_size), humanbytes(uploaded),
                    self._completed, self._list, humanbytes(speed),
                    time_formatter(eta))
        file_id = response.get("id")
    # Outside a Team Drive, permissions must be set explicitly
    if not Config.G_DRIVE_IS_TD:
        self._set_permission(file_id)
    self._completed += 1
    # Re-fetch the file to get its authoritative name and size for the link
    drive_file = self._service.files().get(
        fileId=file_id, fields='id, name, size',
        supportsTeamDrives=True).execute()
    file_id = drive_file.get('id')
    file_name = drive_file.get("name")
    file_size = humanbytes(int(drive_file.get('size', 0)))
    _LOG.info("Created Google-Drive File => Name: %s ID: %s Size: %s",
              file_name, file_id, file_size)
    return G_DRIVE_FILE_LINK.format(file_id, file_name, file_size)
def upload(self, bucket, object, filename, mime_type='application/octet-stream', gzip=False, multipart=False, num_retries=0):
    """
    Uploads a local file to Google Cloud Storage.

    :param bucket: The bucket to upload to.
    :type bucket: str
    :param object: The object name to set when uploading the local file.
    :type object: str
    :param filename: The local file path to the file to be uploaded.
    :type filename: str
    :param mime_type: The MIME type to set when uploading the file.
    :type mime_type: str
    :param gzip: Option to compress file for upload
    :type gzip: bool
    :param multipart: If True, the upload will be split into multiple HTTP
        requests. The default size is 256MiB per request. Pass a number
        instead of True to specify the request size, which must be a
        multiple of 262144 (256KiB).
    :type multipart: bool or int
    :param num_retries: The number of times to attempt to re-upload the file
        (or individual chunks, in the case of multipart uploads). Retries
        are attempted with exponential backoff.
    :type num_retries: int
    """
    service = self.get_conn()

    # Optionally gzip-compress the file first; the compressed copy becomes
    # the upload source (and is the file removed in the cleanup below)
    if gzip:
        compressed_path = filename + '.gz'
        with open(filename, 'rb') as source, gz.open(compressed_path, 'wb') as target:
            shutil.copyfileobj(source, target)
        filename = compressed_path

    try:
        if not multipart:
            # Simple single-request upload
            media = MediaFileUpload(filename, mime_type)
            service \
                .objects() \
                .insert(bucket=bucket, name=object, media_body=media) \
                .execute(num_retries=num_retries)
        else:
            # Resumable chunked upload; chunk size must be a positive
            # multiple of 256 KiB
            chunk_bytes = 256 * 1024 * 1024 if multipart is True else multipart
            if chunk_bytes % (256 * 1024) > 0 or chunk_bytes < 0:
                raise ValueError(
                    "Multipart size is not a multiple of 262144 (256KiB)")
            media = MediaFileUpload(filename,
                                    mimetype=mime_type,
                                    chunksize=chunk_bytes,
                                    resumable=True)
            request = service.objects().insert(bucket=bucket,
                                               name=object,
                                               media_body=media)
            response = None
            while response is None:
                status, response = request.next_chunk(num_retries=num_retries)
                if status:
                    self.log.info("Upload progress %.1f%%",
                                  status.progress() * 100)
    except HttpError as ex:
        # A missing bucket/object is reported as False; anything else
        # propagates to the caller
        if ex.resp['status'] == '404':
            return False
        raise
    finally:
        # Remove the temporary .gz copy regardless of outcome
        if gzip:
            os.remove(filename)
    return True
# Schedule the video to go public at this timestamp (RFC 3339, with ms)
upload_date_time = datetime.datetime(2021, 9, 8, 12, 30,
                                     0).isoformat() + '.000Z'

request_body = {
    'snippet': {
        # FIX: the field was misspelled 'categoryI', so the category was
        # silently ignored by the API; the documented field is 'categoryId'
        'categoryId': 19,
        'title': 'Upload Testing This is Private Video ',
        'description': 'Upload TEsting This is Private Video',
        'tags': ['Python', 'Youtube API', 'Google']
    },
    'status': {
        'privacyStatus': 'private',
        'publishAt': upload_date_time,
        'selfDeclaredMadeForKids': False,
    },
    # NOTE(review): notifySubscribers is a request *parameter* of
    # videos().insert, not a video-resource field — the API ignores it
    # inside the body; confirm and move it into the insert() call if the
    # suppression is actually wanted
    'notifySubscribers': False
}

media = MediaFileUpload('video_tina.mp4')

# Blocking upload of the video with the metadata above
response_upload = youtube.videos().insert(part='snippet,status',
                                          body=request_body,
                                          media_body=media).execute()
"""
youtube.thumbnails().set(
    videoId=response_upload.get('id'),
    media_body=MediaFileUpload('thumbnail.png')
).execute()
"""
def method(self, **kwargs):
    # Don't bother with doc string, it will be over-written by createMethod.
    #
    # NOTE(review): this is the dynamically-generated API call wrapper.
    # `parameters`, `methodName`, `methodDesc`, `pathUrl`, `mediaPathUrl`,
    # `accept`, `maxSize`, `httpMethod` and `methodId` are closure variables
    # bound by the enclosing factory (outside this chunk).

    # Reject keyword arguments the discovery document doesn't declare
    for name in six.iterkeys(kwargs):
        if name not in parameters.argmap:
            raise TypeError('Got an unexpected keyword argument "%s"' % name)

    # Remove args that have a value of None.
    keys = list(kwargs.keys())
    for name in keys:
        if kwargs[name] is None:
            del kwargs[name]

    for name in parameters.required_params:
        if name not in kwargs:
            raise TypeError('Missing required parameter "%s"' % name)

    # Validate pattern-constrained parameters against their regexes
    for name, regex in six.iteritems(parameters.pattern_params):
        if name in kwargs:
            if isinstance(kwargs[name], six.string_types):
                pvalues = [kwargs[name]]
            else:
                pvalues = kwargs[name]
            for pvalue in pvalues:
                if re.match(regex, pvalue) is None:
                    raise TypeError(
                        'Parameter "%s" value "%s" does not match the pattern "%s"'
                        % (name, pvalue, regex))

    for name, enums in six.iteritems(parameters.enum_params):
        if name in kwargs:
            # We need to handle the case of a repeated enum
            # name differently, since we want to handle both
            # arg='value' and arg=['value1', 'value2']
            if (name in parameters.repeated_params
                    and not isinstance(kwargs[name], six.string_types)):
                values = kwargs[name]
            else:
                values = [kwargs[name]]
            for value in values:
                if value not in enums:
                    raise TypeError(
                        'Parameter "%s" value "%s" is not an allowed value in "%s"'
                        % (name, value, str(enums)))

    # Split the validated kwargs into query-string vs URL-path parameters,
    # casting each value to its declared wire type
    actual_query_params = {}
    actual_path_params = {}
    for key, value in six.iteritems(kwargs):
        to_type = parameters.param_types.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in parameters.repeated_params and type(value) == type([]):
            cast_value = [_cast(x, to_type) for x in value]
        else:
            cast_value = _cast(value, to_type)
        if key in parameters.query_params:
            actual_query_params[parameters.argmap[key]] = cast_value
        if key in parameters.path_params:
            actual_path_params[parameters.argmap[key]] = cast_value

    body_value = kwargs.get('body', None)
    media_filename = kwargs.get('media_body', None)
    media_mime_type = kwargs.get('media_mime_type', None)

    if self._developerKey:
        actual_query_params['key'] = self._developerKey

    # Pick the serialization model: raw media for *_media methods, raw for
    # methods with no declared response, otherwise the service default
    model = self._model
    if methodName.endswith('_media'):
        model = MediaModel()
    elif 'response' not in methodDesc:
        model = RawModel()

    headers = {}
    headers, params, query, body = model.request(headers,
                                                 actual_path_params,
                                                 actual_query_params,
                                                 body_value)

    expanded_url = uritemplate.expand(pathUrl, params)
    url = _urljoin(self._baseUrl, expanded_url + query)

    resumable = None
    multipart_boundary = ''

    if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, six.string_types):
            if media_mime_type is None:
                logger.warning(
                    'media_mime_type argument not specified: trying to auto-detect for %s',
                    media_filename)
                media_mime_type, _ = mimetypes.guess_type(media_filename)
            if media_mime_type is None:
                raise UnknownFileType(media_filename)
            if not mimeparse.best_match([media_mime_type],
                                        ','.join(accept)):
                raise UnacceptableMimeTypeError(media_mime_type)
            media_upload = MediaFileUpload(media_filename,
                                           mimetype=media_mime_type)
        elif isinstance(media_filename, MediaUpload):
            media_upload = media_filename
        else:
            raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if media_upload.size(
        ) is not None and media_upload.size() > maxSize > 0:
            raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = _urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
            url = _add_query_parameter(url, 'uploadType', 'resumable')

        if media_upload.resumable():
            # This is all we need to do for resumable, if the body exists it gets
            # sent in the first request, otherwise an empty body is sent.
            resumable = media_upload
        else:
            # A non-resumable upload
            if body is None:
                # This is a simple media upload
                headers['content-type'] = media_upload.mimetype()
                body = media_upload.getbytes(0, media_upload.size())
                url = _add_query_parameter(url, 'uploadType', 'media')
            else:
                # This is a multipart/related upload.
                msgRoot = MIMEMultipart('related')
                # msgRoot should not write out it's own headers
                setattr(msgRoot, '_write_headers', lambda self: None)

                # attach the body as one part
                msg = MIMENonMultipart(*headers['content-type'].split('/'))
                msg.set_payload(body)
                msgRoot.attach(msg)

                # attach the media as the second part
                msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
                msg['Content-Transfer-Encoding'] = 'binary'
                payload = media_upload.getbytes(0, media_upload.size())
                msg.set_payload(payload)
                msgRoot.attach(msg)
                # encode the body: note that we can't use `as_string`, because
                # it plays games with `From ` lines.
                fp = BytesIO()
                g = _BytesGenerator(fp, mangle_from_=False)
                g.flatten(msgRoot, unixfrom=False)
                body = fp.getvalue()

                multipart_boundary = msgRoot.get_boundary()
                headers['content-type'] = (
                    'multipart/related; '
                    'boundary="%s"') % multipart_boundary
                url = _add_query_parameter(url, 'uploadType', 'multipart')

    logger.info('URL being requested: %s %s' % (httpMethod, url))
    return self._requestBuilder(self._http,
                                model.response,
                                url,
                                method=httpMethod,
                                body=body,
                                headers=headers,
                                methodId=methodId,
                                resumable=resumable)
def upload_video(video_name, creds):
    """
    Uploads the specified video to Youtube channel.
    """
    # Pick one artist at random; with 50% probability add a second,
    # distinct artist (re-draw until it differs from the first)
    artists = [random.choice(ALL_ARTISTS)]
    if random.choice([0, 1]) == 0:
        artist = random.choice(ALL_ARTISTS)
        while artist in artists:
            artist = random.choice(ALL_ARTISTS)
        artists.append(artist)
    artists = ' x '.join(artists)

    # Track title comes from the file name: drop the extension,
    # dashes become spaces
    track_title = video_name.split('.')[0].replace('-', ' ')
    title = '[FREE] {} type beat - {} | Produced by RavD'.format(
        artists, track_title)

    # Static SEO tag list plus the track title itself
    tags = ['divine type beat','indian rap beats','ikka type beat','ikka type beat free','krsna type beat',
    'krishna type beat','krishna type beat free','divine type beat free','naezy type beat','naezy type beat free',
    'gully gang beat','emiway type beat','raftaar type beat','raftaar type beat free','type beat',
    'divine type beat free','emiway type beat free','ravdmusic','indian rat beat','free type beat',
    'free type beats',track_title]

    description = f''' {title} {title} {title} 🙂 About Me? I'm a India based producer, producing beats for hiphop artists around the country.. I'm a part and fan of Indian hiphop scene. So my beats are dedicated to the followers of Indian hiphop. My sole purpose is to work with talented artists irrespective of how famous they are. 🙏 🎹 Free beat? The beats on this channel are free to use in your project but please give me a deserving credit. 📷 Wanna collaborate? If you want to collaborate with me on a project, you are most welcome. You can DM me on Instagram ☛ https://instagram.com/ravd_ravgeet 🥂 Be a part of family? Subscribe to this channel and be a part of a happy family ☛ http://bit.ly/2jeCIGS 📌 Tags: {','.join(tags)} 📝 Note: The beats on this channel are uploaded using an automated script. 
You can find it out at https://github.com/ravgeetdhillon/music #IndianHipHopTypeBeat #FreeTypeBeat #Divine #Emiway #Beats #TrapBeat #FreeBeats #BohtHard #IndianProducer #Producer '''

    # Build the authenticated client and issue a blocking insert of the
    # video file with the metadata assembled above
    youtube = build('youtube', 'v3', credentials=creds)
    request = youtube.videos().insert(
        part="snippet,status",
        notifySubscribers=True,
        body={
            "snippet": {
                "categoryId": "10",  # 10 = Music category
                "title": title,
                "description": description,
                "tags": tags,
            },
            "status": {
                "privacyStatus": "public",
                "license": "creativeCommon",
            }
        },
        media_body=MediaFileUpload(video_name)
    )
    response = request.execute()
    return response
def upload_file(self, file_path, file_name, mime_type, parent_id):
    """Upload one file to Google Drive and return its download URL.

    Zero-byte files are sent in a single non-resumable request; other
    files are streamed resumably in 50 MiB chunks. On Drive quota errors
    the service account is rotated (when enabled) and the whole upload is
    retried recursively.

    Args:
        file_path: Local path of the file to upload.
        file_name: Name to give the file on Drive.
        mime_type: MIME type for the upload.
        parent_id: Destination folder id, or None for no parent.

    Returns:
        The formatted download URL, or None if the transfer was cancelled.

    Raises:
        HttpError: For non-retryable API failures.
    """
    # File body description
    file_metadata = {
        'name': file_name,
        'description': 'Uploaded by Slam Mirror Bot',
        'mimeType': mime_type,
    }
    # FIX: the original wrapped this in a bare try/except falling back to
    # 'File' — dead code, since 'mimeType' is set unconditionally above and
    # the lookup can never raise
    self.typee = file_metadata['mimeType']
    if parent_id is not None:
        file_metadata['parents'] = [parent_id]

    if os.path.getsize(file_path) == 0:
        # Zero-byte files cannot use the resumable upload path
        media_body = MediaFileUpload(file_path,
                                     mimetype=mime_type,
                                     resumable=False)
        response = self.__service.files().create(
            supportsTeamDrives=True,
            body=file_metadata,
            media_body=media_body).execute()
        if not IS_TEAM_DRIVE:
            self.__set_permission(response['id'])
        drive_file = self.__service.files().get(
            supportsTeamDrives=True, fileId=response['id']).execute()
        return self.__G_DRIVE_BASE_DOWNLOAD_URL.format(
            drive_file.get('id'))

    # Resumable upload in 50 MiB chunks
    media_body = MediaFileUpload(file_path,
                                 mimetype=mime_type,
                                 resumable=True,
                                 chunksize=50 * 1024 * 1024)
    drive_file = self.__service.files().create(supportsTeamDrives=True,
                                               body=file_metadata,
                                               media_body=media_body)
    response = None
    while response is None:
        if self.is_cancelled:
            return None
        try:
            self.status, response = drive_file.next_chunk()
        except HttpError as err:
            if err.resp.get('content-type',
                            '').startswith('application/json'):
                reason = json.loads(err.content).get('error').get(
                    'errors')[0].get('reason')
                # Quota errors: rotate service account and retry the whole
                # upload from scratch (membership test replaces the
                # original 'a or b' equality chain)
                if reason in ('userRateLimitExceeded',
                              'dailyLimitExceeded'):
                    if USE_SERVICE_ACCOUNTS:
                        if not self.switchServiceAccount():
                            raise err
                        LOGGER.info(f"Got: {reason}, Trying Again.")
                        return self.upload_file(file_path, file_name,
                                                mime_type, parent_id)
                    else:
                        raise err
                else:
                    raise err
            else:
                raise err

    self._file_uploaded_bytes = 0
    # Insert new permissions
    if not IS_TEAM_DRIVE:
        self.__set_permission(response['id'])
    # Define file instance and get url for download
    drive_file = self.__service.files().get(
        supportsTeamDrives=True, fileId=response['id']).execute()
    download_url = self.__G_DRIVE_BASE_DOWNLOAD_URL.format(
        drive_file.get('id'))
    return download_url
# Schedule the video to go public at this timestamp (RFC 3339, with ms)
upload_date_time = datetime.datetime(2020, 8, 25, 12, 30,
                                     0).isoformat() + '.000Z'

request_body = {
    'snippet': {
        # FIX: the field was misspelled 'categoryI', so the category was
        # silently ignored by the API; the documented field is 'categoryId'
        'categoryId': 19,
        'title': 'Upload Testing This is Private Video ',
        'description': 'Upload TEsting This is Private Video',
        'tags': ['Python', 'Youtube API', 'Google']
    },
    'status': {
        'privacyStatus': 'private',
        'publishAt': upload_date_time,
        'selfDeclaredMadeForKids': False,
    },
    # NOTE(review): notifySubscribers is a request *parameter* of
    # videos().insert, not a video-resource field — the API ignores it
    # inside the body; confirm and move it into the insert() call if the
    # suppression is actually wanted
    'notifySubscribers': False
}

mediaFile = MediaFileUpload('1.avi')

# Blocking upload of the video with the metadata above
response_upload = youtube.videos().insert(
    part='snippet,status',
    body=request_body,
    media_body=mediaFile
).execute()
"""
youtube.thumbnails().set(
    videoId=response_upload.get('id'),
    media_body=MediaFileUpload('thumbnail.png')
).execute()
"""