Example #1
def set_endpoint():
    """Change your endpoint"""
    # [START vision_set_endpoint]
    from google.cloud import vision

    client_options = {'api_endpoint': 'eu-vision.googleapis.com'}

    client = vision.ImageAnnotatorClient(client_options=client_options)
    # [END vision_set_endpoint]
    image_source = vision.ImageSource(
        image_uri='gs://cloud-samples-data/vision/text/screen.jpg')
    image = vision.Image(source=image_source)

    response = client.text_detection(image=image)

    print('Texts:')
    for text in response.text_annotations:
        print('{}'.format(text.description))

        vertices = [
            '({},{})'.format(vertex.x, vertex.y)
            for vertex in text.bounding_poly.vertices
        ]

        print('bounds: {}\n'.format(','.join(vertices)))

    if response.error.message:
        raise Exception('{}\nFor more info on error messages, check: '
                        'https://cloud.google.com/apis/design/errors'.format(
                            response.error.message))
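
For reference, the same regional endpoint can be passed as an explicit
ClientOptions object instead of a plain dict; a minimal sketch, assuming
google-cloud-vision v2+:

from google.api_core.client_options import ClientOptions
from google.cloud import vision

options = ClientOptions(api_endpoint='eu-vision.googleapis.com')
client = vision.ImageAnnotatorClient(client_options=options)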
Example #2
def blur_offensive_images(data):
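    # Assumes module-level clients and a blur helper defined elsewhere, e.g.:
    #   storage_client = storage.Client()
    #   vision_client = vision.ImageAnnotatorClient()
    # plus a __blur_image(blob) helper.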
    file_data = data

    file_name = file_data["name"]
    bucket_name = file_data["bucket"]

    # Ignore already-blurred files before touching any APIs
    if file_name.startswith("blurred-"):
        print(f"The image {file_name} is already blurred.")
        return

    blob = storage_client.bucket(bucket_name).get_blob(file_name)
    blob_uri = f"gs://{bucket_name}/{file_name}"
    blob_source = vision.Image(source=vision.ImageSource(image_uri=blob_uri))

    print(f"Analyzing {file_name}.")

    result = vision_client.safe_search_detection(image=blob_source)
    detected = result.safe_search_annotation

    # Process image
    if detected.adult == 5 or detected.violence == 5:
        print(f"The image {file_name} was detected as inappropriate.")
        return __blur_image(blob)
    else:
        print(f"The image {file_name} was detected as OK.")
Example #3
def extract_text(event, context):
    """Background Cloud Function to be triggered by Cloud Storage 
       logs relevant data when a file is changed/uploaded
    """
    # Validate and get property value
    bucket = validate_property(event, "bucket")
    filename = validate_property(event, "name")

    # Load the image from GCS
    image_uri = f"gs://{bucket}/{filename}"
    image = vision.Image(source=vision.ImageSource(gcs_image_uri=image_uri))

    # Perform text detection
    print(f"Extracting text from {image_uri}")
    response = vision_client.text_detection(image=image)
    result = response.text_annotations
    if len(result) > 0:
        text = result[0].description.lstrip()
        print("Detected text: {}".format(repr(text)))
    else:
        print("No text detected from {}".format(filename))
        return None  # end

    # Perform language detection
    detect_lang = translate_client.detect_language(text)
    src_lang = detect_lang["language"]
    print("Detected source language: '{}'".format(src_lang))

    # Send to Pub/Sub
    print("Sending message to Pub/Sub")
    send_extracted(text, filename, src_lang)
    print("Finished extracting file {}".format(filename))
Example #4
def blur_offensive_images(data, context):
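    # Assumes module-level setup, e.g. storage_client = storage.Client(),
    # a tweepy OAuth handler bound to `auth`, and tempfile/random imports.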
    file_data = data

    file_name = file_data["name"]
    bucket_name = file_data["bucket"]

    blob = storage_client.bucket(bucket_name).get_blob(file_name)
    blob_uri = f"gs://{bucket_name}/{file_name}"
    # NOTE: blob_source is built but never used below; this snippet simply
    # downloads the image and tweets it.
    blob_source = vision.Image(source=vision.ImageSource(image_uri=blob_uri))

    _, temp_local_filename = tempfile.mkstemp()

    blob.download_to_filename(temp_local_filename)

    api = tweepy.API(auth)

    tweets = [
        "Stop Hostile Architecture! We want our streets back.",
        "This should not exist in our public spaces",
        "#hostilearchitecture"
    ]

    tweet_content = random.choice(tweets)

    api.update_with_media(temp_local_filename, status=tweet_content)

    return
Example #5
def detect_document_uri(userName, fileName):
    """Detects document features in the file located in Google Cloud
    Storage."""
    from google.cloud import vision

    # For testing: optionally redirect stdout to a results file
    import sys
    # sys.stdout = open('ocr_image_result.txt', 'w')

    bucketName = "graduation_bucket"
    client = vision.ImageAnnotatorClient()

    image = vision.Image(source=vision.ImageSource(
        gcs_image_uri=f"gs://{bucketName}/{fileName}"))

    response = client.document_text_detection(image=image)

    if response.error.message:
        raise Exception('{}\nFor more info on error messages, check: '
                        'https://cloud.google.com/apis/design/errors'.format(
                            response.error.message))

    # Append the detected text to the assumed module-level globalVariable
    # object (defined elsewhere in the original project).
    if response.text_annotations:
        globalVariable.fullText += response.text_annotations[0].description

    # print("Final result")
    # print(globalVariable.fullText)

    return
Example #6
def process_images():
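    # Assumes module-level setup defined elsewhere, e.g.:
    #   storageClient = storage.Client()
    #   visionClient = vision.ImageAnnotatorClient()
    #   translateClient = translate.Client()
    # with `bucket` and `resultBucket` handles and a json import.
    # Note: the URI below hardcodes the cs410_images bucket, which is
    # assumed to be the bucket being listed.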

    for file in storageClient.list_blobs(bucket):
        image = vision.Image(source=vision.ImageSource(
            gcs_image_uri=f"gs://cs410_images/{file.name}"))
        text_detection_response = visionClient.text_detection(image=image)
        annotations = text_detection_response.text_annotations
        if len(annotations) > 0:
            text = annotations[0].description
        else:
            # No text found; skip the language-detection call entirely.
            continue
        detect_language_response = translateClient.detect_language(text)
        src_lang = detect_language_response["language"]

        if src_lang == "en":
            data = {
                "src_lang": src_lang,
                "text": text,
                "file_name": file.name,
                "id": file.name.split(".")[0]
            }

            filename = file.name.split(".")[0] + '.json'

            with open("data/" + filename, 'w', encoding="utf-8") as outfile:
                json.dump(data, outfile)

            blob = resultBucket.blob(filename)
            blob.upload_from_filename("data/" + filename)
Example #7
def get_similar_products_uri(project_id, location, product_set_id,
                             product_category, image_uri, filter):
    """Search similar products to image.
    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_set_id: Id of the product set.
        product_category: Category of the product.
        image_uri: Cloud Storage location of image to be searched.
        filter: Condition to be applied on the labels.
        Example for filter: (color = red OR color = blue) AND style = kids
        It will search on all products with the following labels:
        color:red AND style:kids
        color:blue AND style:kids
    """
    # product_search_client is needed only for its helper methods.
    product_search_client = vision.ProductSearchClient()
    image_annotator_client = vision.ImageAnnotatorClient()

    # Create annotate image request along with product search feature.
    image_source = vision.ImageSource(image_uri=image_uri)
    image = vision.Image(source=image_source)

    # product search specific parameters
    product_set_path = product_search_client.product_set_path(
        project=project_id, location=location, product_set=product_set_id)
    product_search_params = vision.ProductSearchParams(
        product_set=product_set_path,
        product_categories=[product_category],
        filter=filter)
    image_context = vision.ImageContext(
        product_search_params=product_search_params)

    # Search products similar to the image.
    response = image_annotator_client.product_search(
        image, image_context=image_context)

    index_time = response.product_search_results.index_time
    print('Product set index time: ')
    print(index_time)

    results = response.product_search_results.results

    print('Search results:')
    output = []
    for result in results:
        product = result.product

        print('Score(Confidence): {}'.format(result.score))
        product_id = product.name.split('/')[-1]
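        # PROJECT_ID, LOCATION_ID and IMAGE_BUCKET are assumed module-level
        # constants; get_reference_image_uri() and get_image_metadata() are
        # helpers defined elsewhere in the original project.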
        image_uri = get_reference_image_uri(PROJECT_ID, LOCATION_ID,
                                            product_id)
        blob_name = image_uri.split('/')[-1]
        meta = get_image_metadata(IMAGE_BUCKET, blob_name)
        print("Product Info: ", meta)
        output.append(meta)

    return output
Example #8
def get_similar_products_uri(
        project_id, location, product_set_id, product_category,
        image_uri, filter):
    """Search similar products to image.
    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_set_id: Id of the product set.
        product_category: Category of the product.
        image_uri: Cloud Storage location of image to be searched.
        filter: Condition to be applied on the labels.
        Example for filter: (color = red OR color = blue) AND style = kids
        It will search on all products with the following labels:
        color:red AND style:kids
        color:blue AND style:kids
    """
    # product_search_client is needed only for its helper methods.
    product_search_client = vision.ProductSearchClient()
    image_annotator_client = vision.ImageAnnotatorClient()

    # Create annotate image request along with product search feature.
    image_source = vision.ImageSource(image_uri=image_uri)
    image = vision.Image(source=image_source)

    # product search specific parameters
    product_set_path = product_search_client.product_set_path(
        project=project_id, location=location,
        product_set=product_set_id)
    product_search_params = vision.ProductSearchParams(
        product_set=product_set_path,
        product_categories=[product_category],
        filter=filter)
    image_context = vision.ImageContext(
        product_search_params=product_search_params)

    # Search products similar to the image.
    response = image_annotator_client.product_search(
        image, image_context=image_context)

    index_time = response.product_search_results.index_time
    print('Product set index time: ')
    print(index_time)

    results = response.product_search_results.results

    print('Search results:')
    for result in results:
        product = result.product

        print('Score(Confidence): {}'.format(result.score))
        print('Image name: {}'.format(result.image))

        print('Product name: {}'.format(product.name))
        print('Product display name: {}'.format(
            product.display_name))
        print('Product description: {}\n'.format(product.description))
        print('Product labels: {}\n'.format(product.product_labels))
Example #9
def detect_text(bucket, filename):
    print("Looking for text in image {}".format(filename))

    image = vision.Image(source=vision.ImageSource(
        gcs_image_uri=f"gs://{bucket}/{filename}"))
    text_detection_response = vision_client.text_detection(image=image)
    annotations = text_detection_response.text_annotations
    if len(annotations) > 0:
        text = annotations[0].description
    else:
        text = ""
    print("Extracted text {} from image ({} chars).".format(text, len(text)))
Example #10
def call_vision_api(blob):
    # Create a Cloud Vision client.
    vision_client = vision.ImageAnnotatorClient()

    # Use the Cloud Vision client to label the uploaded image.
    source_uri = "gs://{}/{}".format(CLOUD_STORAGE_BUCKET, blob.name)
    image = vision.Image(source=vision.ImageSource(gcs_image_uri=source_uri))

    # Performs label detection on the image file
    response = vision_client.label_detection(image=image)
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))

    return response.label_annotations
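
A minimal usage sketch (the blob handle is assumed to come from
google-cloud-storage); each returned label is an EntityAnnotation exposing
description and score fields:

labels = call_vision_api(blob)
for label in labels:
    print(label.description, label.score)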
Example #11
def detect_text(bucket, filename):
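    # Assumes module-level setup defined elsewhere, e.g.:
    #   vision_client = vision.ImageAnnotatorClient()
    #   translate_client = translate.Client()
    #   publisher = pubsub_v1.PublisherClient()
    # plus os/json imports and a project_id constant.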
    print("Looking for text in image {}".format(filename))

    futures = []

    image = vision.Image(source=vision.ImageSource(
        gcs_image_uri=f"gs://{bucket}/{filename}"))
    text_detection_response = vision_client.text_detection(image=image)
    annotations = text_detection_response.text_annotations
    if len(annotations) > 0:
        text = annotations[0].description
    else:
        text = ""
    print("Extracted text {} from image ({} chars).".format(text, len(text)))

    detect_language_response = translate_client.detect_language(text)
    src_lang = detect_language_response["language"]
    print("Detected language {} for text {}.".format(src_lang, text))

    # Submit a message to the bus for each target language
    to_langs = os.environ["TO_LANG"].split(",")
    for target_lang in to_langs:
        topic_name = os.environ["TRANSLATE_TOPIC"]
        if src_lang == target_lang or src_lang == "und":
            topic_name = os.environ["RESULT_TOPIC"]
        message = {
            "text": text,
            "filename": filename,
            "lang": target_lang,
            "src_lang": src_lang,
        }
        message_data = json.dumps(message).encode("utf-8")
        topic_path = publisher.topic_path(project_id, topic_name)
        future = publisher.publish(topic_path, data=message_data)
        futures.append(future)
    for future in futures:
        future.result()
Example #12
def main(request):
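	# Assumes Flask helpers imported at module level, e.g.:
	#   from flask import redirect, render_template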
	import google.auth
	from google.cloud import datastore, storage, vision
	import os
	from datetime import datetime

	# Set the project ID
	PROJECT_ID = os.environ['GCP_PROJECT']
	FUNCTION_REGION = os.environ['FUNCTION_REGION']
	NONCE = os.environ.get('NONCE', 'Specified environment variable is not set.')
	RESOURCE_PREFIX = os.environ.get('RESOURCE_PREFIX', 'Specified environment variable is not set.')
	LEVEL_NAME = os.environ.get('LEVEL_NAME', 'Specified environment variable is not set.')

	CLOUD_STORAGE_BUCKET = f'{RESOURCE_PREFIX}-bucket-{NONCE}'
	KIND =  f'{RESOURCE_PREFIX}-{NONCE}-{PROJECT_ID}'

	# Get the credentials of the Cloud Function's service account
	credentials, project_id = google.auth.default()

	# Score function URL
	surl = f'https://{FUNCTION_REGION}-{PROJECT_ID}.cloudfunctions.net/scores-f-{NONCE}'
	# Check function URL
	url = f'https://{FUNCTION_REGION}-{PROJECT_ID}.cloudfunctions.net/{RESOURCE_PREFIX}-f-check-{NONCE}'
	# Upload URL
	up_url = f'/{RESOURCE_PREFIX}-f-access-{NONCE}'
	# err_build = request.args['err_build'] if request.args and 'err_build' in request.args else ''
	err_build = ''
	err_query = ''
	image_entities = []
	
	if request.files and 'file' in request.files:
		try:
			photo = request.files['file']

			# Create a Cloud Storage client.
			storage_client = storage.Client(credentials=credentials)

			# Get the bucket that the file will be uploaded to.
			bucket = storage_client.get_bucket(CLOUD_STORAGE_BUCKET)

			# Create a new blob and upload the file's content.
			blob = bucket.blob(photo.filename)
			blob.upload_from_string(photo.read(), content_type=photo.content_type)

			# Make the blob publicly viewable.
			blob.make_public()

			# Create a Cloud Vision client.
			vision_client = vision.ImageAnnotatorClient()

			# Use the Cloud Vision client to detect a face for our image.
			source_uri = 'gs://{}/{}'.format(CLOUD_STORAGE_BUCKET, blob.name)
			image = vision.Image(source=vision.ImageSource(gcs_image_uri=source_uri))
			faces = vision_client.face_detection(image).face_annotations

			# If a face is detected, save to Datastore the likelihood that the face
			# displays 'joy,' as determined by Google's Machine Learning algorithm.
			if len(faces) > 0:
				face = faces[0]

				# Convert the likelihood string.
				likelihoods = [
					'Unknown', 'Very Unlikely', 'Unlikely', 'Possible', 'Likely',
					'Very Likely']
				face_joy = likelihoods[face.joy_likelihood]
			else:
				face_joy = 'Unknown'

			# Create a Cloud Datastore client.
			datastore_client = datastore.Client(credentials=credentials)

			# Fetch the current date / time.
			current_datetime = datetime.now()

			# The kind for the new entity.
			kind = KIND

			# The name/ID for the new entity.
			name = blob.name

			# Create the Cloud Datastore key for the new entity.
			key = datastore_client.key(kind, name)

			# Construct the new entity using the key. Set dictionary values for entity
			# keys blob_name, storage_public_url, timestamp, and joy.
			entity = datastore.Entity(key)
			entity['blob_name'] = blob.name
			entity['image_public_url'] = blob.public_url
			entity['timestamp'] = current_datetime
			entity['joy'] = face_joy

			# Save the new entity to Datastore.
			datastore_client.put(entity)
		except Exception as e:
			err_build = str(e)

		if err_build == '':
			return redirect(up_url)

	try:
		# Build a Datastore client and use it to fetch information from
		# Datastore about each photo.
		client = datastore.Client(credentials=credentials)
		query = client.query(kind=KIND)
		image_entities = list(query.fetch())
	except Exception as e:
		err_query = str(e)

	return render_template(
		f'{RESOURCE_PREFIX}-access.html', url=url, err_build=err_build,
		err_query=err_query, prefix=RESOURCE_PREFIX, level_name=LEVEL_NAME,
		nonce=NONCE, surl=surl, image_entities=image_entities, up_url=up_url)
Example #13
def detect_multiple_objects(data, context):
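    # Assumes module-level setup defined elsewhere, e.g.:
    #   storage_client = storage.Client()
    #   client = vision.ImageAnnotatorClient()
    # plus tempfile and wand (Image, Drawing, Color) imports.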

    file_name = data['name']
    bucket_name = data['bucket']
    if file_name.startswith("output"):
        print(f"The image {file_name} is already detected.")
        return
    else:
        blob = storage_client.bucket(bucket_name).get_blob(file_name)
        blob_uri = f"gs://{bucket_name}/{file_name}"
        blob_source = vision.Image(source=vision.ImageSource(
            gcs_image_uri=blob_uri))

    # Run object localization on the GCS image.
    objects = client.object_localization(
        image=blob_source).localized_object_annotations

    file_name = blob.name
    _, temp_local_filename = tempfile.mkstemp()

    # Download file from bucket.
    blob.download_to_filename(temp_local_filename)
    print(f"Image {file_name} was downloaded to {temp_local_filename}.")

    # Open the image once, draw a labeled bounding box for every detected
    # object, then save a single annotated copy.
    with Image(filename=temp_local_filename) as image:
        with Drawing() as draw:
            draw.fill_color = Color('transparent')
            draw.stroke_width = 4
            draw.stroke_color = Color('red')
            # Hard-coded font path from the original project
            draw.font = 'wandtests/assets/League_Gothic.otf'
            draw.font_size = 40

            for obj in objects:
                # Normalized vertices 0 and 2 are opposite corners of the
                # bounding box; scale them to pixel coordinates.
                x1 = int(image.width *
                         obj.bounding_poly.normalized_vertices[0].x)
                y1 = int(image.height *
                         obj.bounding_poly.normalized_vertices[0].y)
                x2 = int(image.width *
                         obj.bounding_poly.normalized_vertices[2].x)
                y2 = int(image.height *
                         obj.bounding_poly.normalized_vertices[2].y)

                print(f"Detected box ({x1},{y1})-({x2},{y2}) in a "
                      f"{image.width}x{image.height} image.")

                draw.rectangle(left=x1, top=y1, right=x2, bottom=y2)
                percent_score = int(obj.score * 100)
                draw.text(x1, y1, f'{obj.name} %{percent_score}')

            draw(image)
        image.save(filename=temp_local_filename)

    blur_bucket = storage_client.bucket("rnimagepicker-1ca25.appspot.com")
    new_blob = blur_bucket.blob("outputs/detected-" + file_name)
    new_blob.upload_from_filename(temp_local_filename)
    print(f" image uploaded to-: {file_name}")
Example #14
def upload_photo():
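    # Assumes Flask's request/redirect, the storage, vision, texttospeech and
    # datastore modules, and a CLOUD_STORAGE_BUCKET constant are available at
    # module level.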
    # getting the image file as photo
    photo = request.files['file']

    # Creating a Cloud Storage client.
    storage_client = storage.Client()

    # Getting the bucket where the file is uploaded to.
    bucket = storage_client.get_bucket(CLOUD_STORAGE_BUCKET)

    # Create a new blob and upload the file's content.
    blob1 = bucket.blob(photo.filename)
    blob1.upload_from_string(photo.read(), content_type=photo.content_type)

    # Make the blob publicly viewable.
    blob1.make_public()

    # Create a Cloud Vision client.
    vision_client = vision.ImageAnnotatorClient()

    # Use the Cloud Vision client to run document text detection on the image.
    # Representing the image URL as a string
    image_url = 'gs://{}/{}'.format(CLOUD_STORAGE_BUCKET, blob1.name)
    # Passing the image source to the Vision API
    image = vision.Image(source=vision.ImageSource(gcs_image_uri=image_url))
    # Getting the response and assigning it to a variable
    response = vision_client.document_text_detection(image=image)
    # Extracting the full text of the document from the response
    docu = response.full_text_annotation.text
    
    # Creating the Text-to-Speech client
    text_client = texttospeech.TextToSpeechClient()
    # Building the synthesis input from the extracted text
    synthesis_text = texttospeech.types.SynthesisInput(text=docu)
    # Selecting an en-US voice with a neutral SSML gender
    voice = texttospeech.types.VoiceSelectionParams(
        language_code='en-US',
        ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)
    # Configuring the audio output as MP3
    audio_config = texttospeech.types.AudioConfig(
        audio_encoding=texttospeech.enums.AudioEncoding.MP3)
    # Finally, synthesizing the speech and getting the audio response
    audio_response = text_client.synthesize_speech(
        synthesis_text, voice, audio_config)
    
    # Creating a blob for the output audio file
    audio_file = '{}.mp3'.format(blob1.name)
    blob2 = bucket.blob(audio_file)
    # Writing the audio response to a local MP3 file
    with open(audio_file, 'wb') as output:
        output.write(audio_response.audio_content)
        print('content written')
    # Now uploading the local file into blob2
    with open(audio_file, 'rb') as output:
        blob2.upload_from_file(output)
    # Making blob2 public so the audio can be accessed directly
    blob2.make_public()

    # Getting the voice URL
    voice_url = 'https://storage.googleapis.com/{}/{}'.format(
        CLOUD_STORAGE_BUCKET, blob2.name)
    
    
    # Create a Cloud Datastore client.
    datastore_client = datastore.Client()

    kind = 'Faces'
    key = datastore_client.key(kind, blob1.name)

    entity = datastore.Entity(key)
    entity['blob_name'] = blob1.name
    entity['image_public_url'] = blob1.public_url
    # Note: this app reuses the face-demo schema, storing the audio URL in
    # 'timestamp' and the extracted text in 'joy'.
    entity['timestamp'] = voice_url
    entity['joy'] = docu

    # Putting the entity with put()
    datastore_client.put(entity)
    # Once the work is done, redirecting home
    return redirect('/')
Example #15
def upload_photo():
    photo = request.files["file"]

    # Create a Cloud Storage client.
    storage_client = storage.Client()

    # Get the bucket that the file will be uploaded to.
    bucket = storage_client.get_bucket(CLOUD_STORAGE_BUCKET)

    # Create a new blob and upload the file's content.
    blob = bucket.blob(photo.filename)
    blob.upload_from_string(photo.read(), content_type=photo.content_type)

    # Make the blob publicly viewable.
    blob.make_public()

    # Create a Cloud Vision client.
    vision_client = vision.ImageAnnotatorClient()

    # Use the Cloud Vision client to detect a face for our image.
    source_uri = "gs://{}/{}".format(CLOUD_STORAGE_BUCKET, blob.name)
    image = vision.Image(source=vision.ImageSource(gcs_image_uri=source_uri))
    faces = vision_client.face_detection(image=image).face_annotations

    # If a face is detected, save to Datastore the likelihood that the face
    # displays 'joy,' as determined by Google's Machine Learning algorithm.
    if len(faces) > 0:
        face = faces[0]

        # Convert the likelihood string.
        likelihoods = [
            "Unknown",
            "Very Unlikely",
            "Unlikely",
            "Possible",
            "Likely",
            "Very Likely",
        ]
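        # List indexes correspond to the Vision Likelihood enum
        # (0 = UNKNOWN ... 5 = VERY_LIKELY).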
        face_joy = likelihoods[face.joy_likelihood]
    else:
        face_joy = "Unknown"

    # Create a Cloud Datastore client.
    datastore_client = datastore.Client()

    # Fetch the current date / time.
    current_datetime = datetime.now()

    # The kind for the new entity.
    kind = "Faces"

    # The name/ID for the new entity.
    name = blob.name

    # Create the Cloud Datastore key for the new entity.
    key = datastore_client.key(kind, name)

    # Construct the new entity using the key. Set dictionary values for entity
    # keys blob_name, storage_public_url, timestamp, and joy.
    entity = datastore.Entity(key)
    entity["blob_name"] = blob.name
    entity["image_public_url"] = blob.public_url
    entity["timestamp"] = current_datetime
    entity["joy"] = face_joy

    # Save the new entity to Datastore.
    datastore_client.put(entity)

    # Redirect to the home page.
    return redirect("/")
Example #16
def upload():
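    # Assumes Flask's request object, a module-level datastore_client and
    # CLOUD_STORAGE_BUCKET constant, and storage/vision/datastore imports.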
    uploaded_file = request.files.get('file')
    name = request.form.get('name')
    loc = request.form.get('loc')
    date = request.form.get('date')
    id = request.form.get('id')
    file_changed = request.form.get('fileChanged')
    old_url = request.form.get('file')
    img_name = request.form.get('img_name')
    edit = request.form.get('edit')

    # Instantiates a client
    vision_client = vision.ImageAnnotatorClient()

    if file_changed == 'true':
        # Create a Cloud Storage client.
        gcs = storage.Client()

        if edit == 'true':
            delete_bucket = gcs.bucket(CLOUD_STORAGE_BUCKET)
            del_blob = delete_bucket.blob(img_name)
            del_blob.delete()

        # Get the bucket that the file will be uploaded to.
        bucket = gcs.get_bucket(CLOUD_STORAGE_BUCKET)

        # Create a new blob and upload the file's content.
        blob = bucket.blob(uploaded_file.filename)

        blob.upload_from_string(
            uploaded_file.read(),
            content_type=uploaded_file.content_type
        )

        # Make the blob publicly viewable.
        blob.make_public()
        # print(blob.name)
        url = blob.public_url

        img_name = blob.name

        source_uri = "gs://{}/{}".format(CLOUD_STORAGE_BUCKET, blob.name)
        image = vision.Image(source=vision.ImageSource(gcs_image_uri=source_uri))

        # Performs label detection on the image file
        labels = vision_client.label_detection(image=image).label_annotations

        # Map the first matching label to a category; default to 'Others'.
        category = 'Others'
        for label in labels:
            desc = label.description.lower()
            if desc == 'human':
                category = 'People'
                break
            elif desc in ('dog', 'cat', 'mammal'):
                category = 'Animal'
                break
            elif desc == 'flower':
                category = 'Flower'
                break
    else:
        url = old_url
        category = request.form.get('label')

    key = datastore_client.key('Photo Book', id)
    entity = datastore.Entity(key=key)
    entity.update({
        'name': name,
        'location': loc,
        'date': date,
        'url': url,
        'category': category,
        'id': id,
        'img_name': img_name
    })

    datastore_client.put(entity)

    return {'response': 'res'}