def create_bytes(buf, filepath, parent=None, text=True):
    '''Create filepath on Drive and fill its content with buf.

    Assumption: writes to the root directory if parent is not given.
    '''
    if not parent:
        parent = 'root'
    fname = os.path.basename(filepath)
    # The MIME type is chosen from the text flag rather than looked up
    # from the file extension via the mimetypes module.
    mime_type = 'text/plain' if text else 'application/octet-stream'
    metadata = {'name': fname, 'parents': [parent]}
    media = MediaInMemoryUpload(buf, mimetype=mime_type, resumable=True)
    file = get_service().files().create(body=metadata,
                                        media_body=media,
                                        fields='id').execute()
    return file['id']

def upload(self, screenshot, name):
    self.loadSettings()
    # Save the screenshot to an in-memory buffer
    ba = QByteArray()
    buf = QBuffer(ba)
    buf.open(QIODevice.ReadWrite)
    screenshot.save(buf, ScreenCloud.getScreenshotFormat())
    # Create the destination folder if it does not exist
    folders = self.driveService.files().list(
        q="name='%s' and mimeType='application/vnd.google-apps.folder' and trashed=false"
        % (self.folderName)).execute()["files"]
    exists = len(folders) > 0
    if not exists:
        folderMetadata = {
            'name': self.folderName,
            'mimeType': 'application/vnd.google-apps.folder'
        }
        folder = self.driveService.files().create(body=folderMetadata,
                                                  fields='id').execute()
    else:
        folder = folders[0]
    # Upload
    fileMetadata = {'name': name, 'parents': [folder["id"]]}
    media = MediaInMemoryUpload(
        ba.data(), mimetype='image/' + ScreenCloud.getScreenshotFormat())
    file = self.driveService.files().create(body=fileMetadata,
                                            media_body=media,
                                            fields='webViewLink, id').execute()
    if self.copyLink:
        webViewLink = file.get('webViewLink')
        ScreenCloud.setUrl(webViewLink)
    return True

def update_bytes(buf, filepath, file_id, text=True):
    '''Update the content of filepath with buf.

    Assumption: a valid file_id must be specified.
    '''
    assert file_id, 'must have valid file_id'
    fname = os.path.basename(filepath)
    # Same convention as create_bytes: pick the MIME type from the text flag.
    mime_type = 'text/plain' if text else 'application/octet-stream'
    metadata = {'name': fname}
    media = MediaInMemoryUpload(buf, mimetype=mime_type, resumable=True)
    get_service().files().update(body=metadata,
                                 media_body=media,
                                 fileId=file_id).execute()
    return file_id

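# create_bytes/update_bytes above assume a get_service() factory that is not
# shown. A minimal sketch, assuming service-account credentials loaded with
# google-auth; the key path and scope are hypothetical:
from google.oauth2 import service_account
from googleapiclient.discovery import build

def get_service():
    # Hypothetical key file; swap in whatever auth your environment uses.
    creds = service_account.Credentials.from_service_account_file(
        'service-account.json',
        scopes=['https://www.googleapis.com/auth/drive.file'])
    return build('drive', 'v3', credentials=creds)
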
def update_draft_email_message(self, message_string, draft_id):
    media = MediaInMemoryUpload(message_string,
                                mimetype='message/rfc822',
                                chunksize=settings.GMAIL_CHUNK_SIZE,
                                resumable=True)
    return self.execute_service_call(self.service.users().drafts().update(
        userId='me', media_body=media, id=draft_id))

def upload_file(folder_name, file_name, file_blob):
    # folder_name is the name of the folder to upload the file to;
    # file_blob is the file content to upload.
    folders = DRIVE_SERVICE.files().list(
        q="mimeType='application/vnd.google-apps.folder'",
        spaces='drive',
        fields='nextPageToken, files(id, name)',
        pageToken=None).execute().get('files', [])
    # Find the folder id of the requested folder ('unassigned' if not found).
    folder_id = 'none'
    # Other variables needed to decide whether folder creation is needed.
    base_folder_id = 'none'
    unassigned_bool = False
    for f in folders:
        if f['name'] == folder_name:
            folder_id = f['id']
        elif folder_id == 'none' and f['name'] == 'unassigned':
            folder_id = f['id']
            unassigned_bool = True
        if f['name'] == 'InternalSubmissionForm':
            base_folder_id = f['id']
    # If the given folder name is non-empty AND the file would land in
    # 'unassigned', create the folder; nothing is created for an empty name.
    if folder_name and unassigned_bool:
        file_metadata = {
            'name': folder_name,
            'mimeType': 'application/vnd.google-apps.folder',
            'parents': [base_folder_id]
        }
        folder = DRIVE_SERVICE.files().create(body=file_metadata,
                                              fields='id').execute()
        folder_id = folder.get('id')
    file_front, file_ext = os.path.splitext(file_name)
    upload_time = datetime.now(tz=pytz.utc).astimezone(
        pytz.timezone('US/Pacific')).strftime('%Y.%m.%d %H.%M.%S')
    file = DRIVE_SERVICE.files().create(
        body={
            'name': file_front + '_' + upload_time + file_ext,
            'parents': [folder_id]
        },
        media_body=MediaInMemoryUpload(file_blob),
        fields='id').execute()
    DRIVE_SERVICE.permissions().create(
        fileId=file.get('id'),
        body={
            'type': 'anyone',
            'role': 'reader',
        },
        fields='id',
    ).execute()
    return DRIVE_SERVICE.files().get(
        fileId=file['id'], fields='webViewLink').execute()['webViewLink']

def create(self, content):
    self._drive_file = self.service.files().create(
        body={
            'name': self.FILENAME,
            'mimeType': 'application/json',
        },
        media_body=MediaInMemoryUpload(content)
    ).execute()

def update(self, content):
    self.service.files().update(
        fileId=self.drive_file['id'],
        body={
            'name': self.FILENAME,
            'mimeType': 'application/json',
        },
        media_body=MediaInMemoryUpload(content)
    ).execute()

def test_media_inmemory_upload(self):
    # MediaInMemoryUpload wraps the body in a BytesIO, so it must be bytes.
    media = MediaInMemoryUpload(b'abcdef',
                                mimetype='text/plain',
                                chunksize=10,
                                resumable=True)
    self.assertEqual('text/plain', media.mimetype())
    self.assertEqual(10, media.chunksize())
    self.assertTrue(media.resumable())
    self.assertEqual(b'bc', media.getbytes(1, 2))
    self.assertEqual(6, media.size())

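# The chunksize/resumable flags exercised in the test above matter when a
# request is driven manually. A sketch, assuming `service` is a Drive v3
# client and `media` is a resumable MediaInMemoryUpload:
request = service.files().create(body={'name': 'big.bin'}, media_body=media)
response = None
while response is None:
    status, response = request.next_chunk()
    if status:
        print('Uploaded %d%%' % int(status.progress() * 100))
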
def create_draft_email_message(self, message_string):
    media = MediaInMemoryUpload(message_string,
                                mimetype='message/rfc822',
                                chunksize=settings.GMAIL_CHUNK_SIZE,
                                resumable=True)
    return self.execute_service_call(self.service.users().drafts().create(
        userId='me',
        media_body=media,
        quotaUser=self.email_account.id,
    ))

def send_email_message(self, message_string, thread_id=None):
    message_dict = {}
    media = MediaInMemoryUpload(message_string,
                                mimetype='message/rfc822',
                                chunksize=settings.GMAIL_CHUNK_SIZE,
                                resumable=True)
    if thread_id:
        message_dict.update({'threadId': thread_id})
    return self.execute_service_call(self.service.users().messages().send(
        userId='me', body=message_dict, media_body=media))

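# The three Gmail helpers above upload a raw RFC 822 message. A sketch of
# producing message_string with the stdlib email package (the addresses are
# hypothetical); MediaInMemoryUpload expects bytes:
from email.mime.text import MIMEText

msg = MIMEText('Hello from the API')
msg['To'] = 'recipient@example.com'
msg['From'] = 'me@example.com'
msg['Subject'] = 'Test'
message_string = msg.as_bytes()
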
def upload_file_to_drive(filename, mimetype, file_buffer, credentials):
    service = build("drive", "v3", credentials=credentials)
    body = {"name": filename, "parents": [config.FOLDER_ID]}
    media = MediaInMemoryUpload(file_buffer, mimetype=mimetype)
    file = service.files().create(body=body, media_body=media,
                                  fields="id").execute()
    file_id = file.get("id")
    return f"https://drive.google.com/file/d/{file_id}"

def upload_project(self, folder_id, file_name, buffer,
                   mimetype='application/tar+gzip'):
    metadata = {'name': file_name, 'parents': [folder_id]}
    media = MediaInMemoryUpload(buffer.getvalue(), mimetype=mimetype)
    file = self.service.files().create(body=metadata,
                                       media_body=media,
                                       fields='id').execute()
    return file.get('id'), file_name

def upload_string(self, account_id, profile_id, string, data_source_id):
    """Upload to custom data sources - example function.

    https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtReference/management/uploads/uploadData
    """
    analytics = self.get_service_object(name='management')
    media = MediaInMemoryUpload(string,
                                mimetype='application/octet-stream',
                                resumable=False)
    analytics.management().uploads().uploadData(
        accountId=account_id,
        webPropertyId=profile_id,
        customDataSourceId=data_source_id,
        media_body=media).execute()

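# A completed upload can be verified through the same Management API. A
# sketch, assuming the identifiers used in upload_string above:
uploads = analytics.management().uploads().list(
    accountId=account_id,
    webPropertyId=profile_id,
    customDataSourceId=data_source_id).execute()
for item in uploads.get('items', []):
    print(item['id'], item['status'])
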
def drive_upload(file, name):
    credentials = store.get()  # get access token
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(clientsecret_file_path, SCOPE)
        credentials = tools.run_flow(flow, store)
    # Define the API service
    http = credentials.authorize(Http())
    drive = discovery.build('drive', 'v3', http=http)
    file_metadata = {'name': name}
    media = MediaInMemoryUpload(file, mimetype='image/jpeg', resumable=True)
    uploaded_file = drive.files().create(body=file_metadata,
                                         media_body=media,
                                         fields='id').execute()
    return uploaded_file

def _do_upload_data(self, web_property_id, view_id, data_import_name,
                    user_id_list_name, user_id_custom_dim, buyer_custom_dim,
                    custom_dim_field, ga_account_id, ads_customer_id, mcc,
                    rows):
    if user_id_list_name:
        self._create_list(web_property_id, view_id, user_id_list_name,
                          buyer_custom_dim, ga_account_id, ads_customer_id,
                          mcc)
    analytics = self._get_analytics_service()
    data_sources = analytics.management().customDataSources().list(
        accountId=ga_account_id,
        webPropertyId=web_property_id).execute()['items']
    results = list(
        filter(lambda x: x['name'] == data_import_name, data_sources))
    if len(results) == 1:
        id = results[0]['id']
        logging.getLogger().info("Adding data to %s - %s" %
                                 (data_import_name, id))
        body = '\n'.join([
            '%s,%s' % (user_id_custom_dim, buyer_custom_dim),
            *[
                '%s,%s' % (row['user_id'], row[custom_dim_field]
                           if custom_dim_field else 'buyer')
                for row in rows
            ]
        ])
        try:
            media = MediaInMemoryUpload(bytes(body, 'UTF-8'),
                                        mimetype='application/octet-stream',
                                        resumable=True)
            analytics.management().uploads().uploadData(
                accountId=ga_account_id,
                webPropertyId=web_property_id,
                customDataSourceId=id,
                media_body=media).execute()
        except Exception as e:
            logging.getLogger().error('Error while uploading GA Data: %s' % e)
    else:
        logging.getLogger().error(
            "%s - data import not found, please configure it in Google Analytics"
            % data_import_name)

def put(self, name):
    try:
        # Create a dictionary with the fields from the request body.
        requestBody = {'name': name}
        objectBody = MediaInMemoryUpload(b"hello world", mimetype="text/plain")
        request = storage.objects().insert(bucket=self.bucketName,
                                           media_body=objectBody,
                                           body=requestBody)
        response = request.execute()
        return response
    except errors.HttpError as err:
        # Something went wrong; return some information.
        return ('There was an error creating the object. Check the details: '
                + err._get_reason())

def _call_upload_api(self, analytics, data_import_name, ga_account_id,
                     data_source_id, rows, web_property_id):
    logging.getLogger('megalista.GoogleAnalyticsDataImportUploader').info(
        'Adding data to %s - %s' % (data_import_name, data_source_id))
    csv = self.prepare_csv(rows)
    media = MediaInMemoryUpload(bytes(csv, 'UTF-8'),
                                mimetype='application/octet-stream',
                                resumable=True)
    analytics.management().uploads().uploadData(
        accountId=ga_account_id,
        webPropertyId=web_property_id,
        customDataSourceId=data_source_id,
        media_body=media).execute()

def create_document(self, name: str, content: bytes,
                    source_mimetype: str) -> str:
    """Create a Google Doc from a file and return its ID."""
    metadata = {"name": name,
                "mimeType": "application/vnd.google-apps.document"}
    document = MediaInMemoryUpload(content, mimetype=source_mimetype,
                                   resumable=True)
    resp = (
        self.drive_service.files()
        .create(body=metadata, media_body=document, fields="id")
        .execute()
    )
    return resp["id"]

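# Example usage sketch for create_document above: converting an in-memory
# .docx into a Google Doc. The `client` instance and file name are
# hypothetical:
with open('report.docx', 'rb') as fh:
    doc_id = client.create_document(
        'report', fh.read(),
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document')
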
def write_file_bytes(self, filename: str, data: bytes) -> dict:
    """Write the file to Google Drive.

    These files are images, so they are big, about 4.5 MB.
    """
    metadata = {
        'title': filename,  # Drive API v2 key, kept alongside the v3 'name'
        'name': filename,
        'parents': [self.sub_folder_id]
    }
    media = MediaInMemoryUpload(body=data,
                                mimetype='application/octet-stream')
    results = self.drive_client.create(body=metadata,
                                       media_body=media).execute()
    return results

def upload_file(drive_service, filename, data, resumable=True,
                chunksize=262144):
    if check_if_file_exist(drive_service, filename):
        return
    media = MediaInMemoryUpload(data, resumable=resumable,
                                chunksize=chunksize)
    body = {
        'name': filename,
        'kind': 'drive#fileLink',
        'teamDriveId': os.getenv('DRIVE_ID'),
        'parents': [os.getenv('GAPI_FOLDER_ID')]
    }
    return drive_service.files().create(supportsAllDrives=True,
                                        body=body,
                                        media_body=media).execute()

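# upload_file above relies on a check_if_file_exist() helper that is not
# shown. A minimal sketch of one possible implementation, assuming the same
# shared-drive environment variables; this is a name-based lookup only:
def check_if_file_exist(drive_service, filename):
    results = drive_service.files().list(
        q="name='%s' and trashed=false" % filename,
        corpora='drive',
        driveId=os.getenv('DRIVE_ID'),
        includeItemsFromAllDrives=True,
        supportsAllDrives=True,
        fields='files(id)').execute()
    return bool(results.get('files'))
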
def insert(self, bucket, object_name, media_body,
           mimetype='application/octet-stream'):
    """Call the objects insert API.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects/insert

    :param bucket:
    :param object_name:
    :param media_body:
    :param mimetype:
    :return:
    """
    try:
        body_type = media_body.get('type', None)
        if body_type == 'memory':
            media = MediaInMemoryUpload(packer.dumps(media_body.get('data')),
                                        mimetype=mimetype)
        elif body_type == 'file':
            media = MediaFileUpload(media_body.get('path'),
                                    mimetype=mimetype)
        else:
            raise CloudStorageError("input parameter is not valid. %s" %
                                    media_body)
        result = self._client.objects().insert(
            bucket=bucket,
            name=object_name,
            body={'metadata': {'type': body_type}},
            media_body=media).execute()
        self._logger.debug("%s", result)
        return result
    except HttpError as e:
        # API call error
        self._logger.warning("%s", e, exc_info=True)
        raise CloudStorageError("Cloud Storage Error: %s" % e)
    except IOError as e:
        # Target file does not exist
        self._logger.warning("%s", e)
        return None

def uploadDataToBucket(data, bucket_name, dest_path):
    '''Upload data to the bucket.'''
    from googleapiclient.http import MediaInMemoryUpload

    storage = _getStorage()
    body = {
        'name': dest_path,
    }
    print('INFO: Uploading data to "gs://%s/%s"...' % (bucket_name, body['name']))
    # TODO: make sure the file uploaded or report the issue
    resp = storage.objects().insert(
        bucket=bucket_name,
        body=body,
        media_body=MediaInMemoryUpload(data, mimetype='application/octet-stream'),
    ).execute()
    return True

def _upload_inner(self, is_new_entity=True, key=None, name=None, path=None,
                  data=None):
    if (1 if path else 0) + (1 if data else 0) != 1:
        raise AssertionError("Provide one of 'path' and 'data'")
    if not is_new_entity and not key:
        raise AssertionError(
            'fileId (aka. key) must be provided when updating an existing entity'
        )
    if is_new_entity and not name:
        raise AssertionError(
            'name must be provided when creating a new entity in Google Drive'
        )
    if path:
        media = MediaFileUpload(path)
    else:
        media = MediaInMemoryUpload(data)
    try:
        self.lock.acquire()
        self.initialize_service()
        if is_new_entity:
            metadata = {'name': name}
            if self.folder_id:
                metadata['parents'] = [self.folder_id]
            file = self.service.files().create(body=metadata,
                                               media_body=media,
                                               fields='id').execute()
        else:
            file = self.service.files().update(media_body=media,
                                               fileId=key,
                                               fields='id').execute()
        return file.get('id')
    finally:
        self.lock.release()

def uploadDataToBucket(data, bucket_name, dest_path):
    '''Upload data to the bucket.'''
    from googleapiclient.http import MediaInMemoryUpload

    storage = _getStorage()
    body = {
        'name': dest_path,
    }
    # If the plugin was called from Windows, convert the path separators
    # to the POSIX form expected for GCS object names.
    if platform.system() == 'Windows':
        body['name'] = pathlib.PurePath(body['name']).as_posix()
    print('INFO: Uploading data to "gs://%s/%s"...' % (bucket_name, body['name']))
    # TODO: make sure the file uploaded or report the issue
    storage.objects().insert(
        bucket=bucket_name,
        body=body,
        media_body=MediaInMemoryUpload(data, mimetype='application/octet-stream'),
    ).execute()
    return True

def document_to_text(url):
    file_content = download_file(url)
    if file_content is None:
        return None
    MIME_TYPE = 'application/vnd.google-apps.document'
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    media_body = MediaInMemoryUpload(file_content,
                                     mimetype=MIME_TYPE,
                                     resumable=True)
    body = {
        'name': url,
        'mimeType': MIME_TYPE,
    }
    created = service.files().create(
        body=body,
        media_body=media_body,
        ocrLanguage='ja',
    ).execute()
    request = service.files().export_media(fileId=created['id'],
                                           mimeType='text/plain')
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        status, done = downloader.next_chunk()
    content = fh.getvalue().decode("UTF-8")
    service.files().delete(fileId=created['id']).execute()
    return content

def load_from_string(self, dest_project_id, dest_dataset_id, dest_table_id,
                     schema, load_string, wait_finish=True, sleep_time=1,
                     **kwargs):
    """
    | For loading data from a string representation of a file/object.
    | Can be used in conjunction with gwrappy.bigquery.utils.file_to_string()

    :param dest_project_id: Unique project identifier of destination table.
    :type dest_project_id: string
    :param dest_dataset_id: Unique dataset identifier of destination table.
    :type dest_dataset_id: string
    :param dest_table_id: Unique table identifier of destination table.
    :type dest_table_id: string
    :param schema: Schema of input data (schema.fields[]) [https://cloud.google.com/bigquery/docs/reference/v2/tables]
    :type schema: list of dictionaries
    :param load_string: String representation of an object.
    :type load_string: string
    :param wait_finish: Flag whether to poll job till completion. If set to False, multiple jobs can be submitted, responses stored, iterated over and polled till completion afterwards.
    :type wait_finish: boolean
    :param sleep_time: Time to pause (seconds) between polls.
    :type sleep_time: integer
    :keyword writeDisposition: (Optional) Config kwarg that determines table writing behaviour.
    :keyword sourceFormat: (Optional) Config kwarg that indicates the format of input data.
    :keyword skipLeadingRows: (Optional) Config kwarg for leading rows to skip. Defaults to 1 to account for headers if sourceFormat is CSV or default, 0 otherwise.
    :keyword fieldDelimiter: (Optional) Config kwarg that indicates the field delimiter.
    :keyword allowQuotedNewlines: (Optional) Config kwarg indicating the presence of quoted newlines in fields.
    :return: JobResponse object
    """
    from googleapiclient.http import MediaInMemoryUpload

    request_body = {
        'jobReference': {
            'projectId': dest_project_id
        },
        'configuration': {
            'load': {
                'destinationTable': {
                    'projectId': dest_project_id,
                    'datasetId': dest_dataset_id,
                    'tableId': dest_table_id
                },
                'writeDisposition': kwargs.get('writeDisposition', 'WRITE_TRUNCATE'),
                'sourceFormat': kwargs.get('sourceFormat', None),
                'skipLeadingRows': kwargs.get('skipLeadingRows', 1)
                if kwargs.get('sourceFormat', None) in (None, 'CSV') else None,
                'fieldDelimiter': kwargs.get('fieldDelimiter', None),
                'schema': {
                    'fields': schema
                },
                'allowQuotedNewlines': kwargs.get('allowQuotedNewlines', None)
            }
        }
    }
    media_body = MediaInMemoryUpload(load_string,
                                     mimetype='application/octet-stream')
    job_resp = self._service.jobs().insert(
        projectId=dest_project_id,
        body=request_body,
        media_body=media_body).execute(num_retries=self._max_retries)
    if wait_finish:
        job_resp = self.poll_job_status(job_resp, sleep_time)
    return JobResponse(job_resp, 'string')

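# Example usage sketch for load_from_string above, assuming `bq` is an
# instance of the wrapping utility class; the project/dataset/table names and
# the CSV payload are hypothetical:
schema = [
    {'name': 'user_id', 'type': 'STRING'},
    {'name': 'score', 'type': 'INTEGER'},
]
csv_data = 'user_id,score\nabc,1\ndef,2\n'
resp = bq.load_from_string(
    'my-project', 'my_dataset', 'my_table',
    schema=schema,
    load_string=csv_data,
    sourceFormat='CSV')
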
folder_id = items[0].get("id") print("folder_id is = %s" % folder_id) # upload file from local path file_upload_metadata = { "name": "TCPIP%20Illustrated,%20Volume%201,%202nd%20Edition.pdf", "description": "", "parents": [folder_id] } # media_upload = MediaFileUpload(filename=r"C:\Users\wb-zj268791\Desktop\1_3_banner_dark.png", # mimetype=transform_mime("png"), # resumable=True) res = requests.get( url= "http://file.allitebooks.com/20150523/TCPIP%20Illustrated,%20Volume%201,%202nd%20Edition.pdf" ) media_upload = MediaInMemoryUpload( body=res.content, # mimetype=transform_mime("png"), chunksize=100 * 1024 * 1024, resumable=True) file_upload = drive_service.files() \ .create(body=file_upload_metadata, media_body=media_upload, fields="id, name, mimeType,description,md5Checksum,size", ).execute() print("==> file upload is %s" % file_upload)