Example #1
def locate_post(post, query_data):
    path = "gs://" + app.config["CLOUD_STORAGE_BUCKET"] + "/" + post.link
    try:
        client = ImageAnnotatorClient()
        image = gcv_types.Image()
        image.source.image_uri = path

        vision_response = client.landmark_detection(image=image)
        landmarks = list(get_image_landmarks(vision_response).values())
        if len(landmarks) == 0:
            return None, get_response(status=HTTPStatus.NO_CONTENT)

        coordinates = landmarks[0]
        if len(coordinates) == 0:
            return None, get_response(status=HTTPStatus.NO_CONTENT)

        return {
                   "location": "https://maps.googleapis.com/maps/api/staticmap?center={},{}&zoom={}&size={}x{}&key={}".format(
                       coordinates[0]["lat"],
                       coordinates[0]["lon"],
                       str(query_data["zoom"]),
                       str(query_data["width"]),
                       str(query_data["height"]),
                       app.config["GOOGLE_MAPS_API_KEY"]
                   )
               }, None
    except Exception:
        return None, get_response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
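
A minimal usage sketch for locate_post, assuming the Flask app and the helpers above (get_response, get_image_landmarks); the view function, post object, and query values here are hypothetical, not part of the original example.

def post_location_view(post):
    # Hypothetical Flask view body; `post` must expose a `link` attribute
    # pointing at an object in the configured Cloud Storage bucket.
    query_data = {"zoom": 12, "width": 400, "height": 400}
    result, error = locate_post(post, query_data)
    if error is not None:
        return error  # the prepared error response from get_response()
    return result     # {"location": "https://maps.googleapis.com/maps/api/staticmap?..."}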
Example #2
    def __init__(self):
        import google.auth
        from google.cloud.vision import ImageAnnotatorClient

        credentials, project_id = google.auth.default()
        self.client = ImageAnnotatorClient(credentials=credentials)
        log.info("Using Google Vision API. Charges apply.")
Example #3
    def __init__(self, session: AuthorizedSession, userId):
        self.IFR = './static'
        self.session = session
        self.queue = queue.Queue()
        self.session.mount('https://', HTTPAdapter(pool_maxsize=8, max_retries=10, pool_block=True))
        self.userId = userId
        self.client = ImageAnnotatorClient(credentials=service_account.Credentials.from_service_account_file('anster-1593361678608.json'))
        self.pageNum = int(os.getenv('PHOTO_THREAD_NUM'))
Example #4
    def _extract_text(self, data, languages=None):
        from google.cloud.vision import types
        if not hasattr(self, 'client'):
            import google.auth
            from google.cloud.vision import ImageAnnotatorClient
            credentials, project_id = google.auth.default()
            self.client = ImageAnnotatorClient(credentials=credentials)
            log.info("Using Google Vision API. Charges apply.")
        image = types.Image(content=data)
        res = self.client.document_text_detection(image)
        return res.full_text_annotation.text
Example #5
def vision(img_name):
    '''Labels for media file using Vision API'''
    vision_client = ImageAnnotatorClient()
    file_name = str(img_name)

    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
        image = types.Image(content=content)

    response = vision_client.label_detection(image=image)
    labels = response.label_annotations
    # Guard against images with no labels to avoid an IndexError.
    return labels[0].description if labels else None
Example #6
class GoogleVisionService(OCRService, OCRUtils):
    def __init__(self):
        credentials, project_id = google.auth.default()
        self.client = ImageAnnotatorClient(credentials=credentials)
        log.info("Using Google Vision API. Charges apply.")

    @trace_function(span_name="GOOGLE_VISION_OCR")
    def extract_text(self, data, languages=None):
        if not MIN_SIZE < len(data) < MAX_SIZE:
            log.info('OCR: file size out of range (%d)', len(data))
            return None

        key = make_key('ocr', sha1(data).hexdigest())
        if kv.exists(key):
            text = kv.get(key)
            if text is not None:
                text = text.decode('utf-8')
                log.info('Vision API: %s chars cached', len(text))
            return text

        # data = self.ensure_size(data)
        # if data is None:
        #     return

        image = types.Image(content=data)
        res = self.client.document_text_detection(image)
        ann = res.full_text_annotation
        log.info('Vision API: %s chars recognized', len(ann.text))
        kv.set(key, ann.text)
        return ann.text
Example #7
class GoogleLocationService:
    image_annotator = ImageAnnotatorClient()

    def find(self, image_file):
        image = types.Image(content=image_file.read())
        response = self.image_annotator.landmark_detection(image)
        self._raise_for_error(response)
        return self._make_response(response.landmark_annotations)

    def _make_response(self, landmark_annotations):
        try:
            landmark = landmark_annotations[0]
        except IndexError:
            raise GoogleCloudVisionException(detail='Landmark not found.')
        return self._landmark_annotation_to_dict(landmark)

    @staticmethod
    def _raise_for_error(response):
        if response.HasField('error'):
            raise GoogleCloudVisionException(detail=response.error.message)

    @staticmethod
    def _landmark_annotation_to_dict(landmark_annotation):
        try:
            coordinates = landmark_annotation.locations[0].lat_lng
        except IndexError:
            raise GoogleCloudVisionException(detail='Location not found.')
        return {
            'coordinates': {
                'latitude': coordinates.latitude,
                'longitude': coordinates.longitude,
            },
            'name': landmark_annotation.description,
        }
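
A short usage sketch, assuming the GoogleLocationService class above; the file name is hypothetical.

# find() expects a readable binary file object.
service = GoogleLocationService()
with open('eiffel_tower.jpg', 'rb') as image_file:
    info = service.find(image_file)
print(info['name'], info['coordinates'])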
Example #8
class GoogleOCRService(OCRService):
    """Use Google's Vision API to perform OCR. This has very good quality
    but is quite expensive. For this reason, its use is controlled via a
    separate configuration variable, OCR_VISION_API, which must be set to
    'true'. To use the API, you must also have a service account JSON file
    under GOOGLE_APPLICATION_CREDENTIALS."""

    @classmethod
    def is_available(cls):
        try:
            from google.cloud.vision import ImageAnnotatorClient  # noqa
        except ImportError:
            return False
        return env.to_bool('OCR_VISION_API', False)

    def _extract_text(self, data, languages=None):
        from google.cloud.vision import types
        if not hasattr(self, 'client'):
            import google.auth
            from google.cloud.vision import ImageAnnotatorClient
            credentials, project_id = google.auth.default()
            self.client = ImageAnnotatorClient(credentials=credentials)
            log.info("Using Google Vision API. Charges apply.")
        image = types.Image(content=data)
        res = self.client.document_text_detection(image)
        return res.full_text_annotation.text
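
A sketch of enabling and exercising this service, assuming the GoogleOCRService class above; the configuration names come from its docstring, while the file paths are hypothetical.

import os

# Both settings are described in the class docstring.
os.environ['OCR_VISION_API'] = 'true'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/service_account.json'

if GoogleOCRService.is_available():
    ocr = GoogleOCRService()
    with open('scan.png', 'rb') as fh:
        print(ocr._extract_text(fh.read()))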
Example #9
def analyze_image(path):
    path = "gs://" + app.config["CLOUD_STORAGE_BUCKET"] + "/" + path
    try:
        analyze = dict()
        client = ImageAnnotatorClient()
        image = gcv_types.Image()
        image.source.image_uri = path

        vision_response = client.label_detection(image=image)
        analyze["label"] = get_image_labels(vision_response)

        vision_response = client.landmark_detection(image=image)
        analyze["landmarks"] = list(get_image_landmarks(vision_response).keys())

        return analyze, None
    except Exception:
        return None, get_response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
Example #10
def analyze(file_path):
    spider_json_path = 'spidey/spiders.JSON'
    with open(spider_json_path) as spider_file:
        spider_dict = json.load(spider_file)

    # Pass image into Google API
    # Instantiate client
    cl = ImageAnnotatorClient()

    # Load image into memory from cloud storage
    client = storage.Client()
    bucket = client.get_bucket(MY_BUCKET)
    blob = bucket.get_blob(file_path)
    img_content = blob.download_as_string()

    # Import image
    img = types.Image(content=img_content)

    # Get response from client, assign labels
    resp = cl.web_detection(image=img)
    annotations = resp.web_detection

    if annotations.web_entities:
        is_spider = False

        for entity in annotations.web_entities:
            if entity.description in ("Spider", "Tarantula"):
                is_spider = True
        if not is_spider:
            return Spider("", "", "Uhhh...", "Spidey could not identify a spider in this picture", "icons/notaspider.png")

        # Find out if the spider is in our dictionary
        for entity in annotations.web_entities:
            key = str(entity.description).lower().replace('spider', '').strip()

            if key in spider_dict:
                # Accepted spider
                return Spider(str(entity.description),
                              spider_dict[key]["Scientific Name"],
                              spider_dict[key]["Type"],
                              spider_dict[key]["Help"],
                              "icons/" + spider_dict[key]["Type"].lower() + '.png')

        # Recognized as a spider, but not one in our dictionary.
        return Spider()
    else:
        return Spider("", "", "Uhhh...", "Spidey could not identify a spider in this picture", "icons/notaspider.png")
Example #11
def detect_text(image_file, from_path=True):
    '''Detects and returns text in an image.

    :param image_file: path to the image file (if from_path) or a file-like object
    :param from_path: whether image_file is a filesystem path
    :return: string, the search query built from the detected text
    '''
    client = ImageAnnotatorClient()

    if from_path:
        with io.open(image_file, 'rb') as fh:
            content = fh.read()
    else:
        content = image_file.read()

    image = types.Image(content=content)

    response = client.text_detection(image=image)
    texts = response.text_annotations

    # print_bounding_poly_vertices(texts)

    search_query = return_search_query(texts)

    return search_query
Example #12
def Vision_API_OCR(stacked_image):
    """ Send image to Google Cloud Vision API for text detection and parse result"""
    with io.BytesIO() as output:
        stacked_image.save(output, format="JPEG")
        content = output.getvalue()
    OCR_client = ImageAnnotatorClient()  # Instantiates a client
    image = types.Image(content=content)
    # Detect text using the API. Note: language_hints is set to English; this
    # fixed a problem where some characters were interpreted as Cyrillic.
    response = OCR_client.text_detection(
        image=image, image_context={"language_hints": ["en"]})
    texts = response.text_annotations
    OCR_words_all = []
    OCR_vertices = []
    # texts[0] is the full detected block; the rest are individual words.
    for text in texts[1:]:
        OCR_words_all.append(text.description.lower())
        x = np.empty([0], dtype=int)
        y = np.empty([0], dtype=int)
        for vertex in text.bounding_poly.vertices:
            x = np.append(x, vertex.x)
            y = np.append(y, vertex.y)
        vertices = {"x": x, "y": y}
        OCR_vertices.append(vertices)
    return OCR_words_all, OCR_vertices
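
A minimal sketch of calling Vision_API_OCR, assuming the function above; the input file is hypothetical. The function saves the image as JPEG internally, hence the RGB conversion.

from PIL import Image as PILImage

stacked = PILImage.open('receipt.jpg').convert('RGB')  # hypothetical input
words, vertices = Vision_API_OCR(stacked)
print(words[:10])        # first ten lower-cased words
print(vertices[0]['x'])  # x coordinates of the first word's bounding box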
Example #13
class GoogleOCRService(object):
    """Use Google's Vision API to perform OCR. This has very good quality
    but is quite expensive. For this reason, its use is controlled via a
    separate configuration variable, OCR_VISION_API, which must be set to
    'true'. To use the API, you must also have a service account JSON file
    under GOOGLE_APPLICATION_CREDENTIALS."""
    def __init__(self):
        import google.auth
        from google.cloud.vision import ImageAnnotatorClient
        credentials, project_id = google.auth.default()
        self.client = ImageAnnotatorClient(credentials=credentials)
        log.info("Using Google Vision API. Charges apply.")

    def extract_text(self, data, languages=None):
        from google.cloud.vision import types
        image = types.Image(content=data)
        res = self.client.document_text_detection(image)
        return res.full_text_annotation.text or ''
Example #14
class GoogleVisionService(OCRService, OCRUtils):
    def __init__(self):
        credentials, project_id = google.auth.default()
        self.client = ImageAnnotatorClient(credentials=credentials)
        log.info("Using Google Vision API. Charges apply.")

    def extract_text(self, data, languages=None):
        key = sha1(data).hexdigest()
        text = Cache.get_cache(key)
        if text is not None:
            log.info('Vision API: %s chars cached', len(text))
            return text

        data = self.ensure_size(data)
        if data is not None:
            image = types.Image(content=data)
            res = self.client.document_text_detection(image)
            ann = res.full_text_annotation
            log.info('Vision API: %s chars recognized', len(ann.text))
            Cache.set_cache(key, ann.text)
            return ann.text
Example #15
class Google_API:
    def __init__(self, access_token):
        self.client = ImageAnnotatorClient(
            credentials=Credentials(access_token))

    def annotate(self, imgbytes=None, url=None):
        """Annotate an image from 2 sources, choose one.
        imgbytes: image bytes
        url: public url of the image
        """
        # `source` must be an ImageSource message, not a bare URL string.
        if url is not None:
            image = types.Image(source=types.ImageSource(image_uri=url))
        else:
            image = types.Image(content=imgbytes)
        # Functions available: https://google-cloud-python.readthedocs.io/en/latest/vision/gapic/v1/api.html

        ### TODO: put your logic here. Example:
        try:
            response = self.client.label_detection(
                image=image).label_annotations  # Label detection
        except Exception as e:
            print(e)  # Exception has no .message attribute in Python 3
            response = []

        return response
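
A usage sketch for Google_API, assuming the class above; the Credentials in its constructor is presumably google.oauth2.credentials.Credentials, and the token and file name are placeholders.

api = Google_API(access_token='<oauth2-access-token>')  # hypothetical token
with open('dog.jpg', 'rb') as fh:
    labels = api.annotate(imgbytes=fh.read())
for label in labels:
    print(label.description, label.score)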
Example #16
class MainProcess:
    def __init__(self, session: AuthorizedSession, userId):
        self.IFR = './static'
        self.session = session
        self.queue = queue.Queue()
        self.session.mount(
            'https://',
            HTTPAdapter(pool_maxsize=8, max_retries=10, pool_block=True))
        self.userId = userId
        self.client = ImageAnnotatorClient(
            credentials=service_account.Credentials.from_service_account_file(
                'anster-1593361678608.json'))
        self.pageNum = int(os.getenv('PHOTO_THREAD_NUM'))

    def pipeline(self, mediaItem):
        # only download images
        try:
            # get the image data
            filename = mediaItem['filename']
            imagebinary = self.session.get(mediaItem['baseUrl'] + '=d').content
            with open(f'{self.IFR}/{self.userId}/{filename}',
                      mode='wb') as handler:
                handler.write(imagebinary)
            image = Image(content=imagebinary)
            response = self.client.label_detection(image=image)
            if response.error.message:
                raise Exception(response.error.message)
            labels = response.label_annotations
            ltemp = list(map(getLabelDescription, labels))
            mLabels = toMandarin(ltemp)
            t = Tag()
            bs = BasicStructure()
            for el, l in zip(ltemp, labels):
                bs.main_tag.append(ATag(tag=el, precision=l.score))
            t.en = bs
            if mLabels and len(mLabels) > 0:
                bs = BasicStructure()
                for ml, l in zip(mLabels, labels):
                    bs.main_tag.append(ATag(tag=ml, precision=l.score))
                t.zh_tw = bs
            tempcreationTime = mediaItem['mediaMetadata']['creationTime']
            # Drop the trailing 'Z' and any fractional seconds; split('.') is a
            # no-op when there is no '.', so no conditional is needed.
            sliceTime = tempcreationTime.split('Z')[0].split('.')[0]
            realTime = datetime.datetime.strptime(sliceTime,
                                                  "%Y-%m-%dT%H:%M:%S")
            pho = Photo(
                photoId=mediaItem['id'],
                filename=filename,
                userId=self.userId,
                tag=t,
                createTime=make_aware(realTime,
                                      timezone=pytz.timezone(
                                          settings.TIME_ZONE)),
            )
            pho.save()
        except Exception as e:
            logging.error(e)
            print(f'Error from initial vision api pipeline: {e}')
            print(traceback.format_exc())

    def afterall(self, tic, i):
        self.queue.join()
        toc = time.perf_counter()
        print(f"\rTotal process {i} images in {toc - tic:0.4f} seconds")
        user = User.objects(userId=self.userId)
        user.update(set__isSync=True,
                    set__isFreshing=False,
                    set__lastSync=make_aware(datetime.datetime.utcnow(),
                                             timezone=pytz.timezone(
                                                 settings.TIME_ZONE)))
        # People
        people_ontology = PeopleOntology(session=self.session,
                                         userId=self.userId)
        Thread(target=people_ontology.initial, daemon=True).start()
        # color_process = ColorProcess(session=self.session, userId=self.userId)
        # Thread(target=color_process.initial,daemon=True).start()
    def initial(self):
        tic = time.perf_counter()
        User.objects(userId=self.userId).update(set__isFreshing=True,
                                                set__isSync=False)
        nPT = ''
        pool = ThreadPool(self.queue)
        # subscribed = {'color':True}
        params = {'pageSize': 1}
        i = 0
        try:
            if not os.path.isdir(f'{self.IFR}/{self.userId}'):
                os.mkdir(f'{self.IFR}/{self.userId}')
            while True:
                if nPT:
                    params['pageToken'] = nPT
                photoRes = self.session.get(
                    'https://photoslibrary.googleapis.com/v1/mediaItems',
                    params=params).json()
                mediaItems = photoRes.get('mediaItems', None)
                if not mediaItems:
                    break
                print(f'Handling {len(mediaItems)} items')
                for mediaItem in mediaItems:
                    mimeType, _ = mediaItem['mimeType'].split('/')
                    if mimeType == 'image':
                        pool.add_task(self.pipeline, mediaItem=mediaItem)
                        i = i + 1
                if os.getenv('CV_RELEASE') != "True" or not photoRes.get('nextPageToken'):
                    break
                else:
                    nPT = photoRes['nextPageToken']
        except Exception as e:
            logging.error(e)
            print(f'Error from initial vision api {e}')
        Thread(target=self.afterall, args=(tic, i), daemon=True).start()

    def refresh(self):
        tic = time.perf_counter()
        User.objects(userId=self.userId).update(set__isFreshing=True,
                                                set__isSync=False)
        nPT = ''
        pool = ThreadPool(self.queue)
        params = {'pageSize': self.pageNum}
        i = 0
        try:
            if not os.path.isdir(f'{self.IFR}/{self.userId}'):
                os.mkdir(f'{self.IFR}/{self.userId}')
            while True:
                if nPT:
                    params['pageToken'] = nPT
                photoRes = self.session.get(
                    'https://photoslibrary.googleapis.com/v1/mediaItems',
                    params=params).json()
                mediaItems = photoRes.get('mediaItems', None)
                if not mediaItems:
                    break
                print(f'Handling {len(mediaItems)} items')
                for mediaItem in mediaItems:
                    dbres = Photo.objects(photoId=mediaItem['id'])
                    mimeType, _ = mediaItem['mimeType'].split('/')
                    if not dbres and mimeType == 'image':
                        pool.add_task(self.pipeline, mediaItem=mediaItem)
                        i = i + 1
                if os.getenv('CV_RELEASE') != "True" or not photoRes.get('nextPageToken'):
                    break
                else:
                    nPT = photoRes['nextPageToken']
        except Exception as e:
            logging.error(e)
            print(e)
        Thread(target=self.afterall, args=(tic, i), daemon=True).start()
Example #17
import io
import os
import platform
import re
import subprocess

import wx.adv
import yaml
from PIL.Image import Image
from google.cloud import texttospeech
from google.cloud.translate import Client as TranslateClient
from google.cloud.vision import ImageAnnotatorClient
from google.oauth2 import service_account

credentials = service_account.Credentials.from_service_account_file('service_account_creds.json')
translate_client = TranslateClient(credentials=credentials)
vision_client = ImageAnnotatorClient(credentials=credentials)
text_to_speech_client = texttospeech.TextToSpeechClient(credentials=credentials)


def text_detection(image: Image) -> str:
    """Detect text from PIL.Image data using Google Cloud Translate."""

    # Create bytestream of the given image
    bytes_io = io.BytesIO()
    image.save(bytes_io, 'png')
    bytes_io.seek(0)
    content = bytes_io.read()
    bytes_io.close()

    res = vision_client.text_detection({
        'content': content,
    })
    # text_annotations[0] holds the full detected text block.
    texts = res.text_annotations
    return texts[0].description if texts else ''
Example #18
def get_annotation(photo: IIIF_Photo, client: vision.ImageAnnotatorClient):
    """
    This function accepts the photo, the google vision client and
    returns the annotation provided by Google Vision API

    :param photo (IIIF_Photo): The photo for which the annotation is needed.
    :param client (vision.ImageAnnotatorClient): The client to call the Google Vision Annotation API
    :return result (Dict): The result of annotation as a dictionary
    """

    result = {}
    # reading the image
    image_data = ur.urlopen(photo.get_photo_link()).read()
    image = types.Image(content=image_data)
    replace_chars = [".", "$"]

    # call the Google Vision API to get the annotations of various types
    response = client.annotate_image({
        'image':
        image,
        'features': [{
            'type': vision.enums.Feature.Type.LANDMARK_DETECTION
        }, {
            'type': vision.enums.Feature.Type.LOGO_DETECTION
        }, {
            'type': vision.enums.Feature.Type.LABEL_DETECTION
        }, {
            'type': vision.enums.Feature.Type.TEXT_DETECTION
        }, {
            'type': vision.enums.Feature.Type.OBJECT_LOCALIZATION
        }, {
            'type': vision.enums.Feature.Type.WEB_DETECTION
        }],
    })

    success = False
    # check if there is any error returned by the api
    if response.error.code != 0:
        result["success"] = success
        result["error_code"] = response.error.code
        result["error_message"] = response.error.message
        return result

    else:
        success = True
        result["success"] = success

        # Initialising empty lists to store string tags
        result["lndmks"] = []
        result["logos"] = []
        result["objects"] = []
        result["text"] = []

        # get the list of labels
        labels = list()
        for lbl in response.label_annotations:
            labels.extend(clean_text(lbl.description))
        labels = list(set(labels))
        result["labels"] = labels

        # get the list of web entities
        webent = list()
        for weben in response.web_detection.web_entities:
            webent.extend(clean_text(weben.description))
        webent = list(set(webent))
        result["webent"] = webent

        # this dictionary will store the information of annotations along with bounding boxes.
        obj_boxes = {}
        # The key will be the name identifying the annotation, and the value a list of lists,
        # each containing the top-left x coordinate, top-left y coordinate, width and height
        # of one bounding box. A list of lists allows several boxes for the same tag.

        # storing the landmarks
        # this dictionary will store the information of landmarks which are name, latitude, longitude.
        landmark_info = dict()
        for lndmk in response.landmark_annotations:
            # if any landmarks are identified, we store them in a separate field for easy access.
            landmark_name = lndmk.description.replace(".", " ")
            landmark_info[landmark_name] = {
                "latitude": lndmk.locations[0].lat_lng.latitude,
                "longitude": lndmk.locations[0].lat_lng.longitude
            }

            lndmks = clean_text(lndmk.description)
            result["lndmks"].extend(lndmks)

            # storing the landmarks with bounding boxes
            lndmk_desc = '_'.join(lndmks)
            obj_boxes = add_bbox(lndmk, lndmk_desc, obj_boxes)

        result["landmark_info"] = landmark_info

        # storing the logos
        for lgo in response.logo_annotations:

            logos = clean_text(lgo.description)
            result["logos"].extend(logos)

            lgo_desc = '_'.join(logos)
            obj_boxes = add_bbox(lgo, lgo_desc, obj_boxes)

        # storing the localised objects
        if len(response.localized_object_annotations) > 0:
            img_width, img_height = photo.iiif["width"], photo.iiif["height"]

            for lobj in response.localized_object_annotations:

                objects = clean_text(lobj.name)
                result["objects"].extend(objects)

                lobj_name = '_'.join(objects)
                obj_boxes = add_bbox(lobj,
                                     lobj_name,
                                     obj_boxes,
                                     is_lobject=True,
                                     img_width=img_width,
                                     img_height=img_height)

        # storing the text
        for txt in response.text_annotations:
            modified_text = txt.description.replace(".",
                                                    "_").replace("$",
                                                                 "_").lower()
            if modified_text == "_":
                continue
            result["text"].extend([modified_text])

            # the text identified in the images is not cleaned, to preserve the original information.
            obj_boxes = add_bbox(txt, modified_text, obj_boxes)

        result["obj_boxes"] = obj_boxes

        return result
Example #19
from google.cloud import firestore
from google.cloud.vision import ImageAnnotatorClient
from google.cloud.speech import SpeechClient, RecognitionAudio, RecognitionConfig
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types

service_account_name = './service_account.json'
db = firestore.Client.from_service_account_json(service_account_name)
vision_client = ImageAnnotatorClient.from_service_account_json(service_account_name)
speech_client = SpeechClient.from_service_account_json(service_account_name)
language_client = language.LanguageServiceClient.from_service_account_json(service_account_name)

def max_window():
    return 60
    
def database():
    return db

def vision():
    return vision_client

def speech():
    return speech_client, RecognitionAudio, RecognitionConfig

def language(text):
    # Note: this function shadows the `language` module imported above; the
    # module is only referenced before this definition, so the rebinding is harmless.
    document = types.Document(content=text,
                              type=enums.Document.Type.PLAIN_TEXT)

    return language_client, document
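
A brief sketch of using these factory helpers together, assuming the module above; the sample text is illustrative.

client, document = language("Google Cloud Vision makes OCR straightforward.")
sentiment = client.analyze_sentiment(document=document).document_sentiment
print(sentiment.score, sentiment.magnitude)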
Example #20
import io
import os

from google.cloud import vision
from google.cloud.vision import ImageAnnotatorClient

os.environ[
    'GOOGLE_APPLICATION_CREDENTIALS'] = r'atomic-key-273611-8a5180854a67.json'
client = ImageAnnotatorClient()

FOLDER_PATH = r'C:\Users\Pushpraj\Desktop\Finish'
IMAGE_FILE = 'pic.jpg'
FILE_PATH = os.path.join(FOLDER_PATH, IMAGE_FILE)

with io.open(FILE_PATH, 'rb') as image_file:
    content = image_file.read()

image = vision.types.Image(content=content)
response = client.document_text_detection(image=image)

docText = response.full_text_annotation.text

with open('textfile.txt', mode='w') as file:
    file.write(docText)

print("see the text file")
Example #21
    def __init__(self, client=None):
        if client is None:
            from google.cloud.vision import ImageAnnotatorClient
            client = ImageAnnotatorClient()
        self._client = client
Example #22
class ColorProcess:
    def __init__(self, session: AuthorizedSession, userId):
        self.IFR = './static'
        self.session = session
        self.queue = queue.Queue()
        self.session.mount('https://', HTTPAdapter(pool_maxsize=8, max_retries=10, pool_block=True))
        self.userId = userId
        self.client = ImageAnnotatorClient(credentials=service_account.Credentials.from_service_account_file('anster-1593361678608.json'))
        self.pageNum = int(os.getenv('PHOTO_THREAD_NUM'))
    def afterall(self, tic, i):
        self.queue.join()
        toc = time.perf_counter()
        print(f"\rTotal color process {i} images in {toc - tic:0.4f} seconds")
        user = User.objects(userId=self.userId)
        user.update(
            set__color_onto__lastSync=make_aware(datetime.datetime.utcnow(),
                                    timezone=pytz.timezone(settings.TIME_ZONE))
        )
    def color_pipeline(self, mediaItem):
        try:
            # get the image data
            filename = mediaItem['filename']
            with open(f'{self.IFR}/{self.userId}/{filename}', mode='rb') as handler:
                image = Image(content=handler.read())
            objects = self.client.object_localization(image=image).localized_object_annotations
            result_array = color_detection(objects, f'{self.IFR}/{self.userId}/{filename}')
            for o, r in zip(objects, result_array):
                tempName = toSingleMan(o.name)
                name = tempName if tempName else o.name
                cm = ColorModel(obj=name)
                for i in r:
                    cm.color.append(i)
                Photo.objects(photoId=mediaItem['id']).update(push__tag__zh_tw__color=cm)
                cm = ColorModel(obj=o.name)
                for i in r:
                    cm.color.append(i)
                Photo.objects(photoId=mediaItem['id']).update(push__tag__en__color=cm)
        except Exception as e:
            print(f'Error from initial color api pipeline: {e}')
            print(traceback.format_exc())
    def initial(self):
        tic = time.perf_counter()
        nPT = ''
        pool = ThreadPool(self.queue)
        params = {'pageSize': self.pageNum}
        i = 0
        try:
            if not os.path.isdir(f'{self.IFR}/{self.userId}'):
                os.mkdir(f'{self.IFR}/{self.userId}')
            while True:
                if nPT:
                    params['pageToken'] = nPT
                photoRes = self.session.get(
                    'https://photoslibrary.googleapis.com/v1/mediaItems', params=params).json()
                mediaItems = photoRes.get('mediaItems', None)
                if not mediaItems:
                    break
                print(f'Handling {len(mediaItems)} color items')
                for mediaItem in mediaItems:
                    mimeType, _ = mediaItem['mimeType'].split('/')
                    if mimeType == 'image':
                        pool.add_task(self.color_pipeline, mediaItem=mediaItem)
                        i += 1
                if not os.getenv('CV_RELEASE', None) == "True" or not photoRes.get('nextPageToken', None):
                    break
                else:
                    nPT = photoRes['nextPageToken']
        except Exception as e:
            print(f'Error from initial color api {e}')
        Thread(target=self.afterall, args=(tic,i), daemon=True).start()
    def refresh(self):
        tic = time.perf_counter()
        User.objects(userId=self.userId).update(set__isFreshing=True, set__isSync=False)
        nPT = ''
        pool = ThreadPool(self.queue)
        params = {'pageSize': self.pageNum}
        i = 0
        try:
            if not os.path.isdir(f'{self.IFR}/{self.userId}'):
                os.mkdir(f'{self.IFR}/{self.userId}')
            while True:
                if nPT:
                    params['pageToken'] = nPT
                photoRes = self.session.get(
                    'https://photoslibrary.googleapis.com/v1/mediaItems', params=params).json()
                mediaItems = photoRes.get('mediaItems', None)
                if not mediaItems:
                    break
                print(f'Handling {len(mediaItems)} items')
                for mediaItem in mediaItems:
                    dbres = Photo.objects(photoId=mediaItem['id'])
                    mimeType, _ = mediaItem['mimeType'].split('/')
                    if not dbres and mimeType == 'image':
                        pool.add_task(self.color_pipeline, mediaItem=mediaItem)
                        i += 1
                if not os.getenv('CV_RELEASE', None) == "True" or not photoRes.get('nextPageToken', None):
                    break
                else:
                    nPT = photoRes['nextPageToken']
        except Exception as e:
            print(e)
        Thread(target=self.afterall, args=(tic,i), daemon=True).start()
Example #23
    def __init__(self, access_token):
        self.client = ImageAnnotatorClient(
            credentials=Credentials(access_token))