def list():
    """Render an HTML summary of Cloud ML models and Cloud Storage objects.

    Returns an HTML fragment listing the project's models followed by the
    objects in the configured bucket, or an error string on HttpError.
    NOTE(review): shadows the builtin ``list``; name kept for callers.
    Fixed: Python-2-only ``except errors.HttpError, err`` syntax, and
    quadratic string concatenation replaced with ``join``.
    """
    try:
        request = ml.projects().models().list(parent=projectID)
        response = request.execute()
        if response:
            # Build the models section; join avoids O(n^2) concatenation.
            parts = ["<h4>Models</h4><ul>"]
            for v in response['models']:
                parts.append(v['name'] + " " + v['description'] + '<br> ')
            parts.append("</ul>")
            models = "".join(parts)
        else:
            models = "<h4>No models</h4><br/>"
        request = storage.objects().list(
            bucket=storageBinding.get("bucket_name"))
        response = request.execute()
        if response:
            parts = ["<h4>Items</h4><ul>"]
            for v in response['items']:
                parts.append(v['name'] + " " + v['contentType'] + '<br> ')
            parts.append("</ul>")
            items = "".join(parts)
        else:
            items = "<h4>No files</h4><br/>"
        return models + items
    except errors.HttpError as err:
        # Something went wrong, print out some information.
        return ('There was an error creating the model. Check the details:'
                + err._get_reason())
def selecttime():
    """Find the first free slot on the user's primary calendar that fits the
    current event's interval, then insert the event there.

    Fixed: Python-2-only ``except Exception, e`` / ``print x`` syntax, and a
    bug where ``latestendtime`` was overwritten with the raw RFC3339 string
    (not a datetime), breaking the subsequent comparisons.
    """
    if current_user.connected:
        storage = Storage('credentials/' + current_user.username)
        credentials = storage.get()
        http = httplib2.Http()
        http = credentials.authorize(http)
        service = build(serviceName='calendar', version='v3', http=http,
                        developerKey=DEVELOPER_KEY)
        try:
            request = service.events().list(
                calendarId='primary', singleEvents=True, orderBy='startTime',
                timeMin="2013-01-19T23:20:50.52Z")
            response = request.execute()
            current_event = session['current_event']
            interval = timedelta(minutes=current_event.intervalMinutes)
            latestendtime = datetime.now()
            for event in response.get('items', []):
                # A gap of at least `interval` before this event's start
                # means [latestendtime, latestendtime + interval] is free.
                if rfc(event['start']['dateTime']) - interval > latestendtime:
                    current_event.startTime = latestendtime
                    current_event.endTime = latestendtime + interval
                    current_event.save()
                    break
                if rfc(event['end']['dateTime']) > latestendtime:
                    # Keep a datetime here (the original stored the raw
                    # string, which broke later datetime comparisons).
                    latestendtime = rfc(event['end']['dateTime'])
            body = {'summary': current_event.name,
                    'start': {'timeZone': 'America/Los_Angeles',
                              'dateTime': anti(current_event.startTime)},
                    'end': {'timeZone': 'America/Los_Angeles',
                            'dateTime': anti(current_event.endTime)}}
            print('yes way')
            test = service.events().insert(calendarId='primary', body=body)
            print('inter')
            print(test)
            test.execute()
            print('success')
        except Exception as e:
            print(e)
            print('The credentials have been revoked or expired, please re-runthe application to re-authorize')
def searchForVideo(searchstr):
    """Search YouTube for ``searchstr`` and return link/title/artist info.

    Returns ``{'link': {'youtube': url}, 'title': ..., 'artist': ...}`` for
    the most-viewed match, or ``None`` when the search or parsing fails.
    Fixed: bare ``except:`` (which also swallowed KeyboardInterrupt and
    SystemExit) narrowed to ``except Exception``.
    """
    try:
        youtube = googleapiclient.discovery.build(
            youtube_api_service_name, youtube_api_version,
            developerKey=youtube_DEVELOPER_KEY)
        request = youtube.search().list(
            order="viewCount",
            q=searchstr,
            part="id,snippet",
        )
        response = request.execute()
        youtube_link = "https://youtu.be/" + response['items'][0]['id']['videoId']
        youtube_title = html.unescape(response['items'][0]['snippet']['title'])
        # Drop featured-artist suffixes so artist/title parsing is cleaner.
        youtube_title = youtube_title.split("feat.")[0].split("ft.")[0]
        artist, title = get_artist_title(youtube_title)
        return {
            'link': {"youtube": youtube_link},
            'title': title,
            'artist': artist
        }
    except Exception:
        # Best-effort lookup: any failure yields None.
        return None
def run_dataflow_job(name):
    """Launch the templated Dataflow job ``name`` and print the API response."""
    from googleapiclient.discovery import build
    from oauth2client.client import GoogleCredentials

    credentials = GoogleCredentials.get_application_default()
    service = build('dataflow', 'v1b3', credentials=credentials)

    # Project and bucket come from settings; the template name is fixed.
    project = settings.GCP_PROJECT_ID
    bucket = settings.CLOUD_STORAGE_BUCKET
    template = 'dataflowtemplates'
    gcs_path = "gs://{bucket}/templates/{template}".format(
        bucket=bucket, template=template)
    body = {
        "jobName": "{jobname}".format(jobname=name),
        "parameters": {},
        "environment": {
            "zone": "us-central1-f"
        },
    }

    launch = service.projects().templates().launch(
        projectId=project, gcsPath=gcs_path, body=body)
    print(launch.execute())
def update_database():
    """Copy the balance-sheet range into the MySQL ``balance`` table.

    Truncates ``balance`` and bulk-inserts every row from the sheet range,
    returning a 200 JSON response with the inserted row count.
    Fixed: the ``finally`` block referenced ``cur``/``connection`` even when
    an exception occurred before they were assigned, raising
    UnboundLocalError and masking the original error.
    """
    connection = None
    cur = None
    try:
        scope = [
            'https://spreadsheets.google.com/feeds',
            'https://www.googleapis.com/auth/drive'
        ]
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            'credentials.json', scope)
        service = discovery.build('sheets', 'v4', credentials=credentials)
        sheet_id = '1R6_1r9WYXs3hrgwabZDXLlk_f9Z7KAgxjjXFAzAoFt4'
        range_ = '13thNov.Balance!C3:F'
        request = service.spreadsheets().values().get(spreadsheetId=sheet_id,
                                                      range=range_)
        response = request.execute()
        connection = mysql.connect()
        cur = connection.cursor()
        records_to_insert = [tuple(row) for row in response['values']]
        # Full refresh: wipe the table, then bulk-insert the sheet rows.
        cur.execute("TRUNCATE TABLE balance")
        mysql_insert = """INSERT INTO balance (name, pref_realm, paid, balance) VALUES (%s,%s,%s,%s)"""
        cur.executemany(mysql_insert, records_to_insert)
        connection.commit()
        resp = jsonify("Inserted " + str(cur.rowcount) + " rows in the database!")
        resp.status_code = 200
        return resp
    except Exception as e:
        print(e)
    finally:
        # Close only what was actually opened.
        if cur is not None:
            cur.close()
        if connection is not None:
            connection.close()
def list():
    """Flask view: list Compute Engine instances across all zones.

    Annotates each instance with its zone, external IP and ``function``
    label, then renders index.html.
    NOTE(review): shadows the builtin ``list``; name kept for routing.
    Fixed: zones with no instances return a ``warning`` entry instead of an
    ``instances`` key, so the inner loop raised KeyError; ``.get`` avoids it
    (the final extend already used ``.get``).
    """
    # [START requests_start]
    try:
        compute = build('compute', 'v1', cache_discovery=False)
        params = {
            'project': os.getenv('GOOGLE_CLOUD_PROJECT'),
            'fields': 'items/*/instances(id,name,status,labels,networkInterfaces/accessConfigs/natIP)'
        }
        instances = []
        request = compute.instances().aggregatedList(**params)  # pylint: disable=E1101
        while request is not None:
            response = request.execute()
            for name, instances_scoped_list in response['items'].items():
                logging.debug("{}\n{}".format(name, instances_scoped_list))
                scoped = instances_scoped_list.get('instances', [])
                for i in scoped:
                    i['zone'] = name.split('/')[-1]
                    try:
                        i['IP'] = i['networkInterfaces'][0]['accessConfigs'][0]['natIP']
                    except Exception:
                        i['IP'] = '-'
                    try:
                        i['function'] = i['labels']['function']
                    except Exception:
                        i['function'] = '-'
                instances = instances + scoped
            request = compute.instances().aggregatedList_next(
                previous_request=request, previous_response=response)  # pylint: disable=E1101
        return render_template("index.html", instances=instances)
    except ValueError:
        return "Cannot get servers info"
    except HTTPError as ex:
        logging.exception(ex)
        return "Request error", 500
def googleLogin():
    """Complete Google OAuth login.

    Redirects to the OAuth flow when no valid credentials are in the
    session; otherwise fetches the user's profile, stores it in the
    session (creating the user record on first login) and redirects to '/'.
    """
    # No stored credentials yet -> start the OAuth dance.
    if 'credentials' not in session:
        return redirect(url_for('goauth2redirect'))
    credentials = client.OAuth2Credentials.from_json(session['credentials'])
    if credentials.access_token_expired:
        # Token expired -> re-authorize.
        return redirect(url_for('goauth2redirect'))
    http_auth = credentials.authorize(httplib2.Http())
    service = discovery.build('oauth2', 'v2', http=http_auth)
    profile = service.userinfo().v2().me().get().execute()
    session['provider'] = 'google'
    session['username'] = profile['name']
    session['google_id'] = profile['id']
    session['picture'] = profile['picture']
    session['email'] = profile['email']
    # First-time visitors get a database record.
    user_id = getUserID(session['email'])
    if not user_id:
        user_id = createUser(session)
    session['user_id'] = user_id
    return redirect(url_for('index'))
def detect_automl_labels(bucket_id, object_id):
    """Detects labels from image using AutoML Vision.

    Reads ``/bucket_id/object_id`` from GCS, sends it to the configured
    AutoML model, and returns the prediction payload (or None when the
    App Engine request deadline is exceeded).
    """
    try:
        # Pull the raw image bytes out of GCS and base64-encode them.
        filename = '/{}/{}'.format(bucket_id, object_id)
        gcs_file = cloudstorage.open(filename)
        encoded_contents = base64.b64encode(gcs_file.read())
        gcs_file.close()
        # Prediction payload: inline image plus a minimum confidence score.
        request_dict = {
            'payload': {'image': {'imageBytes': encoded_contents}},
            'params': {'score_threshold': "0.5"},
        }
        automl_svc = get_automl_svc()
        model_path = 'projects/{}/locations/us-central1/models/{}'.format(
            app_identity.get_application_id(),
            current_app.config['AUTOML_MODEL_ID'])
        prediction = automl_svc.projects().locations().models().predict(
            name=model_path, body=request_dict).execute()
        return prediction['payload']
    except DeadlineExceededError:
        # Request deadline hit; log and fall through (returns None).
        logging.exception('Exceeded deadline in detect_automl_labels()')
def d():
    """Return a fixed user's public activities as a JSON Flask response.

    Fixed: the last page of results carries no 'nextPageToken' key, so the
    unconditional subscript could raise KeyError; ``.get`` returns None
    (serialized as JSON null) instead.
    """
    service = createDriveService()
    activities_resource = service.activities()
    request = activities_resource.list(userId='108250612542617275436',
                                       collection='public', maxResults=20)
    L = []
    returnD = {}
    while request is not None:
        activities_document = request.execute()
        if 'items' in activities_document:
            returnD['nextPageToken'] = activities_document.get('nextPageToken')
            for activity in activities_document['items']:
                L.append(activity)
        # Pagination intentionally disabled: only the first page is fetched
        # (a list_next call was left commented out in the original).
        request = None
    returnD['items'] = L
    response = make_response(json.dumps(returnD), 200)
    response.headers['Content-Type'] = 'application/json'
    return response
def read_events(calendarId):
    """Print every event on ``calendarId``, paging through the full list."""
    service = init_google_service_call()
    print("Reading events")
    try:
        # events().list returns paginated results: build the first page
        # request, then keep calling list_next until it returns None.
        page_request = service.events().list(calendarId=calendarId)
        while page_request is not None:
            page = page_request.execute()
            # 'items' holds the event dicts for this page.
            for event in page.get('items', []):
                item = event_item(event)
                item.show_event()
                print('---------------------------------------')
            page_request = service.events().list_next(page_request, page)
    except AccessTokenRefreshError:
        # Raised when the credentials were revoked or have expired.
        print('The credentials have been revoked or expired, please re-run'
              'the application to re-authorize')
def glogin():
    """Handle Google login.

    Redirects to '/goauth2redirect' when the user is not logged in or the
    token has expired; otherwise fetches the user's profile, stores it in
    the session (creating the user on first visit) and redirects to '/'.
    """
    # If not logged in, redirect to Google login
    if 'credentials' not in session:
        return redirect(url_for('goauth2redirect'))
    credentials = client.OAuth2Credentials.from_json(session['credentials'])
    if credentials.access_token_expired:
        return redirect(url_for('goauth2redirect'))
    else:
        # Authorize and build a service object to retrieve user data.
        http_auth = credentials.authorize(httplib2.Http())
        service = discovery.build('oauth2', 'v2', http=http_auth)
        userinfo = service.userinfo().v2().me().get().execute()
        session['provider'] = 'google'
        for session_key, response_key in (('username', 'name'),
                                          ('google_id', 'id'),
                                          ('picture', 'picture'),
                                          ('email', 'email')):
            session[session_key] = userinfo[response_key]
        # Create a local account the first time this email is seen.
        user_id = getUserID(session['email'])
        if not user_id:
            user_id = createUser(session)
        session['user_id'] = user_id
        return redirect(url_for('index'))
def get_all_data(query):
    """Run ``query`` via the service's SQL endpoint and return the response.

    Fixed: the original first executed a ``column().list`` request whose
    response was immediately overwritten by the SQL query's response; that
    dead API call is removed.
    """
    response = service.query().sql(sql=query).execute()
    logging.info(response['columns'])
    logging.info(response['rows'])
    return response
def submit_job(project_id, job_id):
    """Submit a Cloud ML Engine training job and return the API response."""
    # Full project ID in the format the API needs.
    parent_project = 'projects/{}'.format(project_id)
    # Representation of the Cloud ML API.
    ml = discovery.build('ml', 'v1')
    # Request-body fields describing the training run.
    training_input = {
        'scaleTier': 'BASIC',
        'packageUris': ['gs://models.stufff.review/packages/default-1/default-1.tar.gz'],
        'pythonModule': 'model.task',
        'args': ['--model-id', '26144595808e', '--model-rev', '1'],
        'region': 'europe-west1',
        "jobDir": 'gs://models.stufff.review/26144595808e/26144595808e/default-1',
        'runtimeVersion': '1.12',
        'pythonVersion': '2.7',
    }
    body = {'jobId': job_id, 'trainingInput': training_input}
    # Fire the jobs.create call and hand back its response.
    return ml.projects().jobs().create(parent=parent_project,
                                       body=body).execute()
def runNewModel(session):
    """Create a Cloud ML model named after the current project.

    Returns ('success'|'failed', 200, content-type header) as a Flask tuple.
    Fixed: the ``except`` clause bound the exception to the builtin name
    ``EOFError`` (shadowing it), and the trailing ``return "Success", 200``
    after try/except was unreachable.
    """
    if LoggedIn() != True:
        return defaultRedirect()
    project = get_current_project(session)
    JOB_NAME = "a_" + str(project.id)
    # Creating model
    requestDict = {'name': JOB_NAME, 'description': 'Built by runNewModel()'}
    request = ml.projects().models().create(parent=projectID, body=requestDict)
    try:
        response = request.execute()
        print(response, file=sys.stderr)
        operationID = response['name']  # also validates the response shape
        return 'success', 200, {'ContentType': 'application/json'}
    except errors.HttpError as err:
        print('There was an error. Check the details:', file=sys.stderr)
        print(err._get_reason(), file=sys.stderr)
        return 'failed', 200, {'ContentType': 'application/json'}
def post(self, name):
    """Submit a Cloud ML training job for ``name`` and return the response.

    Fixed: Python-2-only ``except errors.HttpError, err`` syntax replaced
    with the portable ``as`` form.
    """
    try:
        # Job id: name + date + second/minute/hour timestamp for uniqueness.
        jobId = "{0}_{1}_{2}".format(name, time.strftime('%Y%m%d'),
                                     time.strftime('%S%M%H'))
        requestBody = {
            "jobId": jobId,
            "trainingInput": {
                "args": [
                    "--train_dir=gs://{0}/{1}/train".format(
                        storageBinding['bucket_name'], jobId)
                ],
                "packageUris": [
                    "gs://fe-cdantonio-ml/mnist_crdant_20161215_0117292/454e901f09767005b057fd41cb223de84329b09d/trainer-0.0.0.tar.gz"
                ],
                "pythonModule": "trainer.task",
                "region": "us-central1"
            }
        }
        # Create a request to call projects.jobs.create.
        request = ml.projects().jobs().create(parent=projectID,
                                              body=requestBody)
        response = request.execute()
        return response
    except errors.HttpError as err:
        # Something went wrong, report the API-provided reason.
        return ('There was an error creating the job. Check the details:'
                + err._get_reason())
def runTraining(session):
    """Submit the object-detection training job for the current project/version.

    Returns ('success'|'failed', status, content-type header) as a Flask
    tuple, or the serialized pre-condition errors.
    Fixed: the ``except`` clause bound the exception to the builtin name
    ``EOFError`` (shadowing it), and the trailing ``return`` after the
    try/except was unreachable.
    """
    if LoggedIn() != True:
        return defaultRedirect()
    have_error, params = training_pre_conditions(session)
    if have_error:
        print("have error", params, file=sys.stderr)
        return json.dumps(params), 200, {'ContentType': 'application/json'}
    # TODO Thinking on reasonable way to "copy" a version and track changes
    project = get_current_project(session)
    version = get_current_version(session)
    machine_learning_settings = get_ml_settings(session=session, version=version)
    JOB_NAME = "train_" + machine_learning_settings.JOB_NAME
    print(JOB_NAME, file=sys.stderr)
    REGION = "us-central1"
    RUNTIME_VERSION = "1.2"
    root_dir = ("gs://" + settings.CLOUD_STORAGE_BUCKET + "/" + str(project.id)
                + "/" + str(version.id) + "/ml/"
                + str(machine_learning_settings.ml_compute_engine_id) + "/")
    JOB_DIR = root_dir + "train"
    pipeline_config_path = root_dir + "faster_rcnn_resnet.config"
    MAIN_TRAINER_MODULE = 'object_detection.train'
    training_inputs = {
        'scaleTier': 'CUSTOM',
        'masterType': 'standard_gpu',
        'workerType': 'standard_gpu',
        'parameterServerType': 'standard_gpu',
        'workerCount': 2,
        'parameterServerCount': 1,
        'packageUris': ['gs://' + settings.CLOUD_STORAGE_BUCKET + '/' + settings.LIB_OBJECT_DETECTION_PYTHON,
                        'gs://' + settings.CLOUD_STORAGE_BUCKET + '/' + settings.LIB_SLIM_PYTHON],
        'pythonModule': MAIN_TRAINER_MODULE,
        'args': ['--train_dir', JOB_DIR,
                 '--pipeline_config_path', pipeline_config_path],
        'region': REGION,
        'jobDir': JOB_DIR,
        'runtimeVersion': RUNTIME_VERSION,
    }
    job_spec = {'jobId': JOB_NAME, 'trainingInput': training_inputs}
    request = ml.projects().jobs().create(body=job_spec, parent=projectID)
    try:
        response = request.execute()
        print(response, file=sys.stderr)
        return 'success', 200, {'ContentType': 'application/json'}
    except errors.HttpError as err:
        print('There was an error. Check the details:', file=sys.stderr)
        print(err._get_reason(), file=sys.stderr)
        return 'failed', 500, {'ContentType': 'application/json'}
def search_youtube_playlist(youtube, playlist_name):
    """Return the ids of the caller's playlists titled ``playlist_name``."""
    playlists = youtube.playlists().list(part="snippet", mine=True).execute()
    matching_ids = []
    for playlist in playlists["items"]:
        # Compare against the playlist's display title.
        if playlist["snippet"]["title"] == playlist_name:
            matching_ids.append(playlist["id"])
    return matching_ids
def processRequest(req):
    """Webhook handler: answer with a YouTube channel's subscriber count."""
    query_result = req.get("queryResult")
    parameters = query_result.get("parameters")
    username = parameters["username"]
    # Look up the channel's statistics by its legacy username.
    response = youtube.channels().list(part="statistics",
                                       forUsername=username).execute()
    subscriber_count = response['items'][0]['statistics']['subscriberCount']
    return speech_output(subscriber_count)
def activities_user(idA):
    """Return user ``idA``'s public activities as a JSON Flask response."""
    service = createDriveService()
    # Single page: up to 23 public activities for this user.
    document = service.activities().list(userId=idA, maxResults=23,
                                         collection='public').execute()
    response = make_response(json.dumps(document), 200)
    response.headers['Content-Type'] = 'application/json'
    return response
def get_playlists(self):
    """Append each playlist's title and description to ``self.text``."""
    response = self.youtube.playlists().list(part="snippet,contentDetails",
                                             channelId=self.id,
                                             maxResults=50).execute()
    for item in response['items']:
        snippet = item['snippet']
        # Accumulate "<title><description> " onto the running text blob.
        self.text += snippet['title'] + snippet['description'] + ' '
def get_prediction(ml_service, project, model_name, input_image):
    """Request a prediction for ``input_image`` from ``model_name``.

    No version is given in the path, so the model's default version serves
    the request.
    """
    payload = {'instances': [make_request_json(input_image)]}
    model_path = 'projects/{}/models/{}'.format(project, model_name)
    return ml_service.projects().predict(name=model_path,
                                         body=payload).execute()
def delete(self, name):
    """Delete object ``name`` from this instance's bucket.

    Returns the API response, or an error string on HttpError.
    Fixed: Python-2-only ``except errors.HttpError, err`` syntax.
    """
    try:
        request = storage.objects().delete(bucket=self.bucketName, object=name)
        response = request.execute()
        return response
    except errors.HttpError as err:
        # Something went wrong, report the API-provided reason.
        return ('There was an error deleting the object. Check the details:'
                + err._get_reason())
def videoSearch(q, salt="Explain"):
    """Return a YouTube watch URL for the top result of ``q + salt``.

    Falls back to a fixed video when the search returns no usable item.
    Fixed: the bare ``except:`` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to the lookup errors that occur here.
    """
    request = youtube.search().list(part="snippet", maxResults=1, q=q + salt)
    response = request.execute()
    try:
        videoId = response['items'][0]['id']['videoId']
    except (KeyError, IndexError):
        # No items, or a non-video result without a videoId.
        return "https://www.youtube.com/watch?v=sDP3SDaSf4c"
    return "https://www.youtube.com/watch?v=" + videoId
def test_api_request():
    """Search for a song and append its first hit to a fixed playlist."""
    if 'credentials' not in flask.session:
        return flask.redirect('authorize')
    # Load credentials from the session.
    credentials = google.oauth2.credentials.Credentials(
        **flask.session['credentials'])
    youtube = googleapiclient.discovery.build(
        API_SERVICE_NAME, API_VERSION, credentials=credentials)
    # Find the first video matching the query.
    search_response = youtube.search().list(
        part="snippet", q="Waltz in A minor", type="video").execute()
    first_video_id = search_response["items"][0]["id"]["videoId"]
    # Insert that video into the target playlist.
    insert_body = {
        "snippet": {
            "playlistId": "PL3AgsfiW8ntWXfIKnARk2jyhP1xDM4-oL",
            "resourceId": {
                "kind": "youtube#video",
                "videoId": first_video_id
            }
        }
    }
    add_song_response = youtube.playlistItems().insert(
        part="snippet", body=insert_body).execute()
    # Save credentials back to session in case access token was refreshed.
    # ACTION ITEM: In a production app, you likely want to save these
    # credentials in a persistent database instead.
    flask.session['credentials'] = credentials_to_dict(credentials)
    return flask.jsonify(add_song_response)
def trainingFrozenRun(session):
    """Submit a Cloud ML job that exports a frozen inference graph.

    Returns ('success'|'failed', 200, content-type header) as a Flask tuple.
    Fixed: the ``except`` clause bound the exception to the builtin name
    ``EOFError``; an error-message string literal was broken across a
    physical line (a syntax error); the trailing ``return`` was unreachable.
    """
    if LoggedIn() != True:
        return defaultRedirect()
    project = get_current_project(session)
    version = get_current_version(session)
    machine_learning_settings = get_ml_settings(session=session, version=version)
    JOB_NAME = "frozen_user_" + machine_learning_settings.JOB_NAME
    print(JOB_NAME, file=sys.stderr)
    root_dir = ("gs://" + settings.CLOUD_STORAGE_BUCKET + "/" + str(project.id)
                + "/" + str(version.id) + "/ml/"
                + str(machine_learning_settings.ml_compute_engine_id) + "/")
    JOB_DIR = root_dir + str(machine_learning_settings.re_train_id) + "/frozen"
    REGION = "us-central1"
    RUNTIME_VERSION = "1.2"
    # Should be updated during training and stored in db?
    trained_checkpoint_prefix = configNew.check_actual_model_path_name(session=session)
    pipeline_config_path = root_dir + "faster_rcnn_resnet.config"
    MAIN_TRAINER_MODULE = "object_detection.export_inference_graph"
    training_inputs = {
        'scaleTier': 'CUSTOM',
        'masterType': 'large_model',
        'workerCount': 0,
        'packageUris': ['gs://' + settings.CLOUD_STORAGE_BUCKET + '/' + settings.LIB_OBJECT_DETECTION_PYTHON,
                        'gs://' + settings.CLOUD_STORAGE_BUCKET + '/' + settings.LIB_SLIM_PYTHON],
        'pythonModule': MAIN_TRAINER_MODULE,
        'args': ['--trained_checkpoint_prefix', trained_checkpoint_prefix,
                 '--pipeline_config_path', pipeline_config_path,
                 '--input_type', 'encoded_image_string_tensor',
                 '--output_directory', JOB_DIR],
        'region': REGION,
        'jobDir': JOB_DIR,
        'runtimeVersion': RUNTIME_VERSION,
    }
    job_spec = {'jobId': JOB_NAME, 'trainingInput': training_inputs}
    request = ml.projects().jobs().create(body=job_spec, parent=projectID)
    try:
        response = request.execute()
        print(response, file=sys.stderr)
        return 'success', 200, {'ContentType': 'application/json'}
    except errors.HttpError as err:
        print('There was an error. Check the details:', file=sys.stderr)
        print(err._get_reason(), file=sys.stderr)
        return 'failed', 200, {'ContentType': 'application/json'}
def get_storage_bucket(bucket_name, create_new=True):
    """Fetch bucket ``bucket_name``, optionally creating it when missing.

    Returns the bucket resource, or None when the bucket does not exist and
    ``create_new`` is False.
    Fixed: Python-2-only ``except errors.HttpError, e`` syntax.
    """
    _request = get_google_client("storage").buckets().get(bucket=bucket_name)
    try:
        resp = _request.execute()
        return resp
    except errors.HttpError:
        # get() failed (typically a 404); optionally create the bucket.
        if create_new:
            request = get_google_client("storage").buckets().insert(
                bucket=bucket_name)
            resp = request.execute()
            return resp
def get(self, name):
    """Return the Cloud ML job ``name`` for this project.

    Returns the API response, or an error string on HttpError.
    Fixed: Python-2-only ``except errors.HttpError, err`` syntax.
    """
    try:
        jobName = '{0}/jobs/{1}'.format(projectID, name)
        # Call projects.jobs.get for the fully-qualified job name.
        request = ml.projects().jobs().get(name=jobName)
        response = request.execute()
        return response
    except errors.HttpError as err:
        # Something went wrong, report the API-provided reason.
        return ('There was an error getting the job. Check the details:'
                + err._get_reason())
def delete(self, name):
    """Delete the Cloud ML model ``name`` from this project.

    Returns the API response, or an error string on HttpError.
    Fixed: Python-2-only ``except errors.HttpError, err`` syntax.
    """
    try:
        modelName = '{0}/models/{1}'.format(projectID, name)
        # Call projects.models.delete on the fully-qualified model name.
        request = ml.projects().models().delete(name=modelName)
        response = request.execute()
        return response
    except errors.HttpError as err:
        # Something went wrong, report the API-provided reason.
        return ('There was an error deleting the model. Check the details:'
                + err._get_reason())
def getResponseYTAPI(vid):
    """Fetch snippet/contentDetails/statistics for YouTube video id ``vid``."""
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    # API-key-authenticated YouTube Data API v3 client.
    api_service_name = "youtube"
    api_version = "v3"
    client = googleapiclient.discovery.build(
        api_service_name, api_version, developerKey=secret.DEVELOPER_KEY)
    return client.videos().list(
        part="snippet,contentDetails,statistics", id=vid).execute()
def updateYoutube():
    """Refresh the tracked video's title with its current view count.

    Reads the video id and title template from local files, then issues a
    videos.update keeping description/category/tags intact (appending the
    channel plug to the description once).
    Fixed: files were opened without ``with`` and never closed; the
    ``not ... in`` test is rewritten as ``not in``.
    """
    global ID
    global youtube
    if not youtube:
        print(ID)
        print("can not update, are you login yet?")
        return
    if os.path.exists("youtube_video_id.txt"):
        with open("youtube_video_id.txt", encoding="utf-8") as f:
            ID = f.readline()
    else:
        print("not found youtube_video_id")
        return
    if os.path.exists("title.txt"):
        with open("title.txt", encoding="utf-8") as f:
            CUSTOM_TITLE = f.readline()
    else:
        print("not found title")
        return
    video = youtube.videos().list(id=ID, part='snippet, id, statistics').execute()
    views = video["items"][0]["statistics"]["viewCount"]
    categoryId = video["items"][0]["snippet"]["categoryId"]
    description = video["items"][0]["snippet"]["description"]
    tags = video["items"][0]["snippet"]["tags"]
    if "Techcast" not in description:
        # Append the channel plug only once.
        description = description + "\n\nScript นี้สร้างโดยช่อง Techcast (กดติดตามที่ลิงค์นี้ได้เลย)\nhttps://bit.ly/3hvHVXH"
    request = youtube.videos().update(part="snippet", body={
        "id": ID,
        "snippet": {
            "title": CUSTOM_TITLE.format(views),
            "description": description,
            "categoryId": categoryId,
            "tags": tags
        }
    })
    response = request.execute()
    refresh_api()
def identify_image(uri):
    """OCR an ID-card image in GCS and extract the NOMBRE field as JSON.

    On success returns ``jsonify({'name': ...}, {'error': False})``; on any
    extraction failure returns an error payload with the raw annotation.
    """
    image_uri = 'gs://' + bucketName + '/' + imgFolder + uri
    vision = build('vision', 'v1', developerKey=APIKEY)
    annotate_request = vision.images().annotate(body={
        'requests': [{
            'image': {'source': {'gcs_image_uri': image_uri}},
            'features': [{'type': 'TEXT_DETECTION', 'maxResults': 3}],
        }],
    })
    responses = annotate_request.execute(num_retries=3)
    try:
        detected = (
            responses['responses'][0]['textAnnotations'][0]['description'])
        # The name sits between the NOMBRE and DOMICILIO labels on the card.
        start = detected.find("NOMBRE") + len("NOMBRE")
        end = detected.find("DOMICILIO")
        name = detected[start:end].replace("\n", " ")
        return jsonify({'name': name}, {'error': False})
    except Exception as e:
        # TODO Control Messaging for errors
        return jsonify({
            'error': True,
            'uri': image_uri,
            'message': responses['responses'][0]['textAnnotations'][0]
        })
def requestComment():
    """Fetch top-level comments for a fixed video and analyze each one."""
    response = youtube.commentThreads().list(
        part="snippet,replies",
        maxResults=20,
        videoId="m-IUcQrJYCo").execute()
    for thread in response["items"]:
        top_comment = thread["snippet"]["topLevelComment"]
        sendId = top_comment["id"]
        author = top_comment["snippet"]["authorDisplayName"]
        # Run analysis on the comment body text.
        analyze(top_comment["snippet"]["textDisplay"])
    print('requestComment')
def updateConfig(project_id, region, registry_id, device_id, data):
    """Push the data to the given device as configuration."""
    # Config payload: base64 of the JSON-serialized data.
    # NOTE(review): version_to_update=0 presumably means "any version" —
    # confirm against the Cloud IoT Core API docs.
    payload = {
        'version_to_update': 0,
        'binary_data': base64.b64encode(
            json.dumps(data).encode('utf-8')).decode('ascii')
    }
    device_name = ('projects/{}/locations/{}/registries/{}/'
                   'devices/{}'.format(project_id, region, registry_id,
                                       device_id))
    config_request = service.projects().locations().registries().devices(
    ).modifyCloudToDeviceConfig(name=device_name, body=payload)
    # Serialize config pushes across threads.
    update_config_mutex.acquire()
    try:
        config_request.execute()
    except HttpError as e:
        print('Error executing ModifyCloudToDeviceConfig: {}'.format(e))
    finally:
        update_config_mutex.release()
def try_to_store_file():
    """Upload the file at ``g.file_path`` into the configured Drive folder.

    Returns the Drive API response, or None (after logging) on failure.
    """
    mime_type = 'text/plain'
    upload = MediaFileUpload(g.file_path, mimetype=mime_type, resumable=True)
    # File metadata: original basename, placed under the configured folder.
    metadata = {
        'title': os.path.basename(g.file_path),
        'mimeType': mime_type,
        'parents': [{'id': app.config['GOOGLE_DRIVE_FOLDER_ID']}, ],
    }
    try:
        drive = get_google_drive_service()
        return drive.files().insert(body=metadata,
                                    media_body=upload).execute()
    except Exception as e:
        # Best-effort upload: log the failure and return None.
        app.logger.exception(e)