Example No. 1
def local_image(img_filepath):
    with io.open(img_filepath, "rb") as img_file:
        img_contents = img_file.read()
    img = types.Image(content=img_contents)
    return img
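
A minimal usage sketch for local_image (assumptions: credentials configured via GOOGLE_APPLICATION_CREDENTIALS, the legacy google-cloud-vision (<2.0) import path used throughout these examples, and a hypothetical file name):

import io
from google.cloud import vision
from google.cloud.vision import types  # legacy (<2.0) import path used in these examples

client = vision.ImageAnnotatorClient()
img = local_image("sample.jpg")  # hypothetical input file
response = client.label_detection(image=img)
for label in response.label_annotations:
    print(label.description, label.score)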
Example No. 2
def index05(request):

    candidate = Candidate.objects.all()

    no1 = Candidate.objects.filter(
        party_number=4)  # party_number can be changed to a different number when a new post is uploaded
    no1[0].party_number  # no-op attribute access; the value is not used
    image_db01 = no1[0].image_file

    image_db = str(image_db01)
    client = vision.ImageAnnotatorClient()

    # os.path.dirname('C:') evaluates to '', so this is just the hard-coded media path
    file_name = os.path.join('/Users/student/mysite02/media', image_db)

    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()

    image = types.Image(content=content)

    response = client.text_detection(image=image)
    texts = response.text_annotations

    my_list = list()

    for text in texts:
        result = text.description
        my_list.append(result)

    data = my_list[0]

    data1 = data.replace('\n', ' ')
    data2 = data1.replace('(', ' ')
    data3 = data2.replace(')', ' ')
    data4 = data3.replace('/', ' ')
    data5 = data4.split(' ')

    df = pd.DataFrame(data5, columns=["총리스트"])

    df1 = pd.DataFrame(columns=["0", "1", "2", "3"])
    df2 = pd.DataFrame(columns=["0", "1", "2", "3"])

    df1.loc[0, '3'] = "아메리카노"
    df1.loc[1, '2'] = "아이스"
    df1.loc[1, '3'] = "아메리카노"
    df1.loc[2, '2'] = "아이스"
    df1.loc[2, '3'] = "카페라떼"
    df1.loc[3, '3'] = "카페라떼"
    df1.loc[4, '1'] = "아이스"
    df1.loc[4, '2'] = "바닐라라떼"
    df1.loc[4, '3'] = "마끼아또"
    df1.loc[5, '1'] = "아이스"
    df1.loc[5, '2'] = "카라멜라떼"
    df1.loc[5, '3'] = "마끼아또"
    df1.loc[6, '2'] = "카라멜라떼"
    df1.loc[6, '3'] = "마끼아또"
    df1.loc[7, '3'] = "마끼아또"
    df1.loc[7, '2'] = "라떼"
    df1.loc[7, '1'] = "카라멜"
    df1.loc[8, '2'] = "바닐라라떼"
    df1.loc[8, '3'] = "마끼아또"
    df1.loc[9, '1'] = "바닐라"
    df1.loc[9, '2'] = "라떼"
    df1.loc[9, '3'] = "마끼아또"
    df1.loc[10, '1'] = "화이트초콜릿"
    df1.loc[10, '2'] = "라떼"
    df1.loc[10, '3'] = "마끼아또"
    df1.loc[11, '3'] = "카푸치노"
    df1.loc[12, '2'] = "헤이즐넛"
    df1.loc[12, '3'] = "카푸치노"
    df1.loc[13, '2'] = "오리지널"
    df1.loc[13, '3'] = "드립커피"
    df1.loc[14, '1'] = "아이스"
    df1.loc[14, '2'] = "오리지널"
    df1.loc[14, '3'] = "드립커피"
    df1.loc[15, '2'] = "카페"
    df1.loc[15, '3'] = "모카"
    df1.loc[16, '3'] = "카페모카"
    df1.loc[17, '2'] = "아이스"
    df1.loc[17, '3'] = "카페모카"
    df1.loc[18, '2'] = "화이트초콜릿라떼"
    df1.loc[18, '3'] = "마끼아또"
    df1.loc[19, '1'] = "아이스"
    df1.loc[19, '2'] = "화이트초콜릿라떼"
    df1.loc[19, '3'] = "마끼아또"
    df1.loc[20, '2'] = "콜드브루"
    df1.loc[20, '3'] = "아메리카노"
    df1.loc[21, '2'] = "콜드브루"
    df1.loc[21, '3'] = "원액"
    df1.loc[22, '2'] = "니트로"
    df1.loc[22, '3'] = "콜드브루"
    df1.loc[23, '2'] = "콜드브루"
    df1.loc[23, '3'] = "라떼"
    df1.loc[24, '3'] = "그라니때"
    df1.loc[24, '2'] = "콘파나"
    df1.loc[24, '1'] = "모카"
    df1.loc[25, '1'] = "카라멜"
    df1.loc[25, '2'] = "콘파나"
    df1.loc[25, '3'] = "그라니때"
    df1.loc[26, '3'] = "그라니때"
    df1.loc[26, '2'] = "망고요거트"
    df1.loc[27, '3'] = "그라니때"
    df1.loc[27, '2'] = "요거트"
    df1.loc[27, '1'] = "망고"
    df1.loc[28, '3'] = "그라니때"
    df1.loc[28, '2'] = "플레인요거트"
    df1.loc[29, '3'] = "그라니때"
    df1.loc[29, '1'] = "플레인"
    df1.loc[29, '2'] = "요거트"
    df1.loc[30, '3'] = "그라니때"
    df1.loc[30, '2'] = "자바칩민트"
    df1.loc[31, '3'] = "그라니때"
    df1.loc[31, '2'] = "에스프레소콘파나"
    df1.loc[32, '3'] = "그라니때"
    df1.loc[32, '2'] = "콘파나"
    df1.loc[32, '1'] = "에스프레소"
    df1.loc[33, '3'] = "그라니때"
    df1.loc[33, '2'] = "스트로베리요거트"
    df1.loc[34, '3'] = "그라니때"
    df1.loc[34, '2'] = "요거트"
    df1.loc[34, '1'] = "스트로베리"
    df1.loc[35, '3'] = "그라니때"
    df1.loc[35, '2'] = "스트로베리"
    df1.loc[36, '3'] = "그라니때"
    df1.loc[36, '2'] = "블루베리요거트"
    df1.loc[37, '3'] = "그라니때"
    df1.loc[37, '2'] = "요거트"
    df1.loc[37, '1'] = "블루베리"
    df1.loc[38, '3'] = "그라니때"
    df1.loc[38, '2'] = "복숭아"
    df1.loc[39, '3'] = "그라니때"
    df1.loc[39, '2'] = "그린티"
    df1.loc[40, '3'] = "그라니때"
    df1.loc[40, '2'] = "찰인절미"
    df1.loc[40, '1'] = "레드빈"
    df1.loc[41, '3'] = "그라니때"
    df1.loc[41, '2'] = "흑임자"
    df1.loc[41, '1'] = "레드빈"
    df1.loc[42, '3'] = "레드빈흑임자그라니때"
    df1.loc[43, '3'] = "그라니때"
    df1.loc[43, '2'] = "쑥"
    df1.loc[43, '1'] = "레드빈"
    df1.loc[44, '3'] = "레드빈쑥그라니때"
    df1.loc[45, '3'] = "그라니때"
    df1.loc[45, '2'] = "민트"
    df1.loc[45, '1'] = "레몬"
    df1.loc[46, '3'] = "그라니때"
    df1.loc[46, '2'] = "민트"
    df1.loc[46, '1'] = "자바칩"
    df1.loc[47, '3'] = "아이스티"
    df1.loc[48, '3'] = "아이스티"
    df1.loc[48, '2'] = "라즈베리"
    df1.loc[49, '3'] = "아이스티"
    df1.loc[49, '2'] = "복숭아"
    df1.loc[50, '3'] = "그린티라떼"
    df1.loc[50, '2'] = "아이스"
    df1.loc[51, '1'] = "아이스"
    df1.loc[51, '2'] = "그린티"
    df1.loc[51, '3'] = "라떼"
    df1.loc[52, '3'] = "그린티라떼"
    df1.loc[53, '2'] = "그린티"
    df1.loc[53, '3'] = "라떼"
    df1.loc[54, '2'] = "아이스x"
    df1.loc[54, '3'] = "초콜릿x"
    df1.loc[55, '2'] = "콜드브루"
    df1.loc[55, '3'] = "밀크티"
    df1.loc[56, '2'] = "핫"
    df1.loc[56, '3'] = "초콜릿"
    df1.loc[57, '2'] = "아이스"
    df1.loc[57, '3'] = "초콜릿"
    df1.loc[58, '2'] = "레몬"
    df1.loc[58, '3'] = "스파클링"
    df1.loc[59, '2'] = "자몽"
    df1.loc[59, '3'] = "스파클링"
    df1.loc[60, '2'] = "베리"
    df1.loc[60, '3'] = "스파클링"
    df1.loc[61, '2'] = "청포도"
    df1.loc[61, '3'] = "스파클링"
    df1.loc[62, '3'] = "딸기플라워밀크쉐이크"
    df1.loc[63, '3'] = "딸기프룻티펀치"
    df1.loc[64, '3'] = "딸기치즈큐브쉐이크"
    df1.loc[65, '3'] = "딸기요거트그래놀라"
    df1.loc[66, '3'] = "딸기라떼"
    df1.loc[67, '3'] = "딸기주스"
    df1.loc[68, '2'] = "딸기"
    df1.loc[68, '3'] = "주스"
    df1.loc[69, '2'] = "키위"
    df1.loc[69, '3'] = "주스"
    df1.loc[70, '2'] = "토마토"
    df1.loc[70, '3'] = "주스"
    df1.loc[71, '2'] = "루비자몽"
    df1.loc[71, '3'] = "주스"
    df1.loc[72, '2'] = "루비자몽"
    df1.loc[72, '3'] = "핫주스"
    df1.loc[73, '2'] = "오렌지"
    df1.loc[73, '3'] = "주스"
    df1.loc[74, '2'] = "프루티"
    df1.loc[74, '3'] = "하동"
    df1.loc[75, '2'] = "머스캣"
    df1.loc[75, '3'] = "그린티"
    df1.loc[76, '3'] = "민트크루"
    df1.loc[77, '2'] = "오렌지"
    df1.loc[77, '3'] = "보스"
    df1.loc[78, '2'] = "루이보스"
    df1.loc[78, '3'] = "오렌지"
    df1.loc[79, '3'] = "커즈마인"
    df1.loc[80, '2'] = "시트러스"
    df1.loc[80, '3'] = "캐모마일"
    df1.loc[81, '2'] = "퍼스트"
    df1.loc[81, '3'] = "브레이크"
    df1.loc[82, '3'] = "영그레이"
    df1.loc[83, '1'] = "아이스"
    df1.loc[83, '2'] = "루이보스"
    df1.loc[83, '3'] = "크림티"
    df1.loc[84, '2'] = "루이보스"
    df1.loc[84, '3'] = "크림티"
    df1.loc[85, '1'] = "아이스"
    df1.loc[85, '2'] = "캐모마일"
    df1.loc[85, '3'] = "프루티"
    df1.loc[86, '2'] = "캐모마일"
    df1.loc[86, '3'] = "프루티"
    df1.loc[87, '2'] = "파니니"
    df1.loc[87, '3'] = "클래식"
    df1.loc[88, '2'] = "파니니"
    df1.loc[88, '3'] = "불고기"
    df1.loc[89, '3'] = "허니브레드"
    df1.loc[90, '2'] = "수플레"
    df1.loc[90, '3'] = "치즈케익"
    df1.loc[91, '3'] = "흑당이달고나빙산"
    df1.loc[92, '3'] = "피치얼그레이빙산"
    df1.loc[93, '3'] = "요거딸기빙산"
    df1.loc[94, '3'] = "망고딸기동산"
    df1.loc[95, '3'] = "인절미팥동산"
    df1.loc[96, '3'] = "찹찹딸기라떼보틀"
    df1.loc[97, '1'] = "홀)"
    df1.loc[97, '2'] = "수플레"
    df1.loc[97, '3'] = "치즈케익"
    df1.loc[98, '2'] = "애플시나몬"
    df1.loc[98, '3'] = "허니브레드"
    df1.loc[99, '1'] = "까사링고"
    df1.loc[99, '2'] = "베리"
    df1.loc[99, '3'] = "케익"
    df1.loc[100, '1'] = "그린티"
    df1.loc[100, '2'] = "티"
    df1.loc[100, '3'] = "라떼"
    df1.loc[101, '2'] = "그린티"
    df1.loc[101, '3'] = "티라떼"
    df1.loc[102, '0'] = "잉글리시"
    df1.loc[102, '1'] = "블랙퍼스트"
    df1.loc[102, '2'] = "티"
    df1.loc[102, '3'] = "라떼"
    df1.loc[103, '1'] = "잉글리시"
    df1.loc[103, '2'] = "블랙퍼스트"
    df1.loc[103, '3'] = "티라떼"
    df1.loc[104, '3'] = "BLT샌드위치"
    df1.loc[105, '3'] = "샌드위치"
    df1.loc[105, '2'] = "BLT"
    df1.loc[106, '3'] = "그라니때"
    df1.loc[106, '2'] = "단팥통통"
    df1.loc[107, '3'] = "그라니때"
    df1.loc[107, '2'] = "통통"
    df1.loc[107, '1'] = "단팥"
    df1.loc[108, '3'] = "그라니때"
    df1.loc[108, '2'] = "쑥떡쑥떡"
    df1.loc[109, '3'] = "그라니때"
    df1.loc[109, '2'] = "쑥떡"
    df1.loc[109, '1'] = "쑥떡"
    df1.loc[110, '3'] = "그라니때"
    df1.loc[110, '2'] = "플라이하이"
    df1.loc[111, '3'] = "그라니때"
    df1.loc[111, '2'] = "하이"
    df1.loc[111, '1'] = "플라이"
    df1.loc[112, '3'] = "뱅쇼"
    df1.loc[112, '2'] = "히비스커스"
    df1.loc[113, '2'] = "레몬"
    df1.loc[113, '3'] = "Sparkling"
    df1.loc[114, '2'] = "자몽"
    df1.loc[114, '3'] = "Sparkling"
    df1.loc[115, '2'] = "베리"
    df1.loc[115, '3'] = "Sparkling"
    df1.loc[116, '2'] = "청포도"
    df1.loc[116, '3'] = "Sparkling"

    menu = df1[['0', '1', '2', '3']].astype(str).sum(axis=1)
    menu = menu.str.replace('nan', '')
    m2 = menu.unique()
    Allmenu = pd.DataFrame(m2, columns=["AllMenu"])

    for i in range(0, len(df.index)):
        for i2 in range(0, len(df1.index)):
            if df1.loc[i2, '0'] == df.loc[i, "총리스트"]:
                df2.loc[i2, '0'] = df.loc[i, "총리스트"]
            elif df1.loc[i2, '1'] == df.loc[i, "총리스트"]:
                df2.loc[i2, '1'] = df.loc[i, "총리스트"]
            elif df1.loc[i2, '2'] == df.loc[i, "총리스트"]:
                df2.loc[i2, '2'] = df.loc[i, "총리스트"]
            elif df1.loc[i2, '3'] == df.loc[i, "총리스트"]:
                df2.loc[i2, '3'] = df.loc[i, "총리스트"]

    df3 = df2.sort_index()
    match = df3[['0', '1', '2', '3']].astype(str).sum(
        axis=1)  # concatenate the four columns as strings into one menu name
    match = match.str.replace('nan', '')  # replace 'nan' placeholders with empty strings
    match = match.unique()  # drop duplicate menu entries (e.g. the repeated "아이스" rows)
    imgtxt = pd.DataFrame(match, columns=["imgtxt"])

    con = cx_Oracle.connect("dator/me@localhost:1521/XE")

    cur = con.cursor()

    cur.execute("SELECT menu FROM data_no_rain ORDER BY NoRainResult DESC")
    stt = ()
    sttdf = pd.DataFrame(columns=["순위메뉴"])
    i6 = 0

    # Accumulate the ranked menu names from the query into a DataFrame
    for row in cur:
        stt += row
        sttdf.loc[i6, "순위메뉴"] = stt[i6]
        i6 += 1

    result_finish = pd.DataFrame(columns=["최종결과", "순위"])

    for i7 in range(0, len(imgtxt.index)):
        for i8 in range(0, len(sttdf.index)):
            if sttdf.loc[i8, '순위메뉴'] == imgtxt.loc[i7, "imgtxt"]:
                result_finish.loc[i8, '최종결과'] = imgtxt.loc[i7, "imgtxt"]
                result_finish.loc[i8, '순위'] = i8

    result_finish01 = result_finish.sort_index()

    sttt = ""
    for i9 in range(0, len(result_finish01.index)):
        sttt += result_finish01.iloc[i9, 0]
        sttt += "\n"

    print(sttt)

    return HttpResponse(sttt)
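
The chain of replace() calls in index05 can be collapsed into a single regular-expression split; a minimal sketch of the same tokenization (not the original author's code):

import re

def tokenize_ocr_text(data):
    # One-pass equivalent of replacing '\n', '(', ')', '/' with spaces
    # and then splitting on ' '; empty tokens are dropped.
    return [tok for tok in re.split(r"[\n()/ ]+", data) if tok]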
Example No. 3
(Residue of the source page's field reference: EntityAnnotation fields description, score, topicality, mid; google.cloud.vision_v1.types.BatchAnnotateImagesRequest.requests.)
# Fragment of a per-file loop; assumes ImageFolder, client, and the
# ImageID/MID/Description/Score lists are defined (see the sketch below).
filename = os.path.basename(file).split('.')[0]  # Get image ID
with io.open(ImageFolder + file, 'rb') as image_file:  # Open image
    content = image_file.read()  # Read image into memory
image = types.Image(content=content)  # Build the Vision API image payload
response = client.label_detection(image=image)  # Get response from API for image
labels = response.label_annotations  # Get labels from response
for label in labels:  # For each label, store the MID, label, and score
    ImageID.append(filename)  # Keep track of the image ID
    MID.append(label.mid)  # Store MID
    Description.append(label.description)  # Store label
    Score.append(label.score)  # Store score of label
# Put image ID, MID, label, and score into the data frame
ImageLabels["imageid"] = ImageID
ImageLabels["mid"] = MID
ImageLabels["desc"] = Description
ImageLabels["score"] = Score

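
The fragment above lost its setup in extraction; a self-contained sketch of the full loop under assumed names (the ImageFolder path and client creation are assumptions):

import io
import os
import pandas as pd
from google.cloud import vision
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()
ImageFolder = "images/"  # assumed input folder
ImageID, MID, Description, Score = [], [], [], []

for file in os.listdir(ImageFolder):
    filename = os.path.basename(file).split('.')[0]  # image ID without extension
    with io.open(ImageFolder + file, 'rb') as image_file:
        content = image_file.read()
    image = types.Image(content=content)
    response = client.label_detection(image=image)
    for label in response.label_annotations:
        ImageID.append(filename)
        MID.append(label.mid)
        Description.append(label.description)
        Score.append(label.score)

ImageLabels = pd.DataFrame({"imageid": ImageID, "mid": MID,
                            "desc": Description, "score": Score})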
Example No. 4
def ocrinput(filename):
    PDFTOPPMPATH = basepath + r"\krait\krait\static\poppler-0.67.0_x86\poppler-0.67.0\bin\pdftoppm.exe"
    PDFFILE = filename
    #print PDFTOPPMPATH
    import subprocess
    subprocess.call('"%s" -png %s out1' % (PDFTOPPMPATH, PDFFILE))

    # Imports the Google Cloud client library
    from google.cloud import vision
    from google.cloud.vision import types

    # Instantiates a client
    client = vision.ImageAnnotatorClient()

    # The name of the image file to annotate
    file_name = basepath + '/krait/compare/out1-1.png'

    char = []
    cood = []
    #print file_name
    # loop over the rotation angles again, this time ensuring
    # no part of the image is cut off
    for angle in np.arange(0, 91, 90):
        coodrot = []
        rotate = []
        charall = []
        coodall = []
        image = cv2.imread(file_name)
        rotated = imutils.rotate_bound(image, angle)
        cv2.imwrite(str(angle) + ".jpg", rotated)
        #print "Rotate Done"

        file_name1 = basepath + "/krait/compare/" + str(angle) + ".jpg"
        #print "@@@@@@@"
        #print file_name1
        img = cv2.imread(file_name1, 0)
        ret, thresh = cv2.threshold(img, 10, 255, cv2.THRESH_OTSU)
        #print "Threshold selected : ", ret
        cv2.imwrite(str(angle) + ".jpg", thresh)

        # Loads the image into memory
        with io.open(file_name1, 'rb') as image_file:
            content = image_file.read()

        image = types.Image(content=content)

        # Performs label detection on the image file
        response = client.label_detection(image=image)
        labels = response.label_annotations

        #print('Labels:')
        #for label in labels:
        #    print(label.description)

        response = client.text_detection(image=image)
        texts = response.text_annotations
        for text in texts:
            #print('\n"{}"'.format(text.description.encode('utf-8').strip()))
            charall.append('{}'.format(
                text.description.encode('utf-8').strip()))
            vertices = ([
                '({},{})'.format(vertex.x, vertex.y)
                for vertex in text.bounding_poly.vertices
            ])

            #print('bounds: {}'.format(','.join(vertices)))
            coodall.append('{}'.format(','.join(vertices)))
        char.append(list(charall[1:]))
        cood.append(list(coodall[1:]))
        rotate.append(list(coodall[1:]))
        for a in list(itertools.chain(*rotate)):
            coodrot.append(list(eval(a)))
        #print coodrot
    coodnewf = []
    coodnews = []
    charnews = []
    charnewf = []
    l01 = copy.deepcopy(cood[0])
    l902 = copy.deepcopy(cood[1])
    ch01 = copy.deepcopy(char[0])
    ch902 = copy.deepcopy(char[1])
    #print l01
    #print char[1]

    for a in l01:
        coodnewf.append(list(eval(a)))
    #print coodnew
    #charnewf=list(itertools.chain(*ch01))
    val = zip(ch01, coodnewf)
    #print val
    l1 = []

    def solve(lis):
        # True if the string parses as a number; False otherwise.
        try:
            float(lis)
            return True
        except (TypeError, ValueError):
            return False

    for x in val:
        #print x[0]
        if solve(x[0]):
            l1.append(x)

    for a in l902:
        #    # for a in cood:
        #    # print a
        coodnews.append(list(eval(a)))
    #    # print coodnew
    #charnews = list(itertools.chain(*ch902))
    val2 = zip(ch902, coodnews)
    # print val
    l2 = []

    for x in val2:
        # print x[0]
        if solve(x[0]):
            l2.append(x)
    #print l1
    #print l2
    return l1, l2
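
A hedged aside on the subprocess.call string at the top of ocrinput: passing an argument list avoids quoting problems when PDFTOPPMPATH or the PDF path contains spaces (same variables as the example):

import subprocess

# Argument-list form of the same pdftoppm invocation; no shell string interpolation.
subprocess.call([PDFTOPPMPATH, "-png", PDFFILE, "out1"])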
Example No. 5
def smart_scan(img_path):
    books = []
    # import pdb; pdb.set_trace()
    client = vision.ImageAnnotatorClient()
    img_path = '.' + img_path
    with io.open(img_path, 'rb') as image_file:
        content = image_file.read()

    image = types.Image(content=content)

    results = client.text_detection(image=image)
    serialized = json.loads(MessageToJson(results))
    if 'textAnnotations' in serialized:
        blocks = serialized['textAnnotations']

        text_boxes = []
        for i in range(1, len(blocks)):
            block = blocks[i]
            text_box = {
                'text': block['description'],
                'boundingPoly': [],
                'midpoints': [],
                'slope': None,
                'intercept': None
            }
            for vertex in block['boundingPoly']['vertices']:
                curr_tuple = [vertex['x'], vertex['y']]
                text_box['boundingPoly'].append(curr_tuple)
            x = 0
            y = 1
            text_box['midpoints'].append(
                ((text_box['boundingPoly'][0][x] +
                  text_box['boundingPoly'][3][x]) // 2,
                 (text_box['boundingPoly'][0][y] +
                  text_box['boundingPoly'][3][y]) // 2))
            text_box['midpoints'].append(
                ((text_box['boundingPoly'][1][x] +
                  text_box['boundingPoly'][2][x]) // 2,
                 (text_box['boundingPoly'][1][y] +
                  text_box['boundingPoly'][2][y]) // 2))
            try:
                text_box['slope'] = (
                    text_box['midpoints'][1][y] - text_box['midpoints'][0][y]
                ) / (text_box['midpoints'][1][x] - text_box['midpoints'][0][x])
                if text_box['slope'] > 50:
                    text_box['slope'] = 50
                    text_box['intercept'] = text_box['midpoints'][1][x]
                else:
                    text_box['intercept'] = (-text_box['slope'] *
                                             text_box['midpoints'][1][x]
                                             ) + text_box['midpoints'][1][y]
            except ZeroDivisionError:
                text_box['slope'] = 1000000
                text_box['intercept'] = text_box['midpoints'][1][x]

            text_boxes.append(text_box)

    # pp.pprint(text_boxes)

        added = {}
        for i, first_text_box in enumerate(text_boxes):
            if i not in added:
                first_slope = first_text_box['slope']
                first_intercept = first_text_box['intercept']
                curr_book = first_text_box['text']
                for j, second_text_box in enumerate(text_boxes):
                    if j not in added and i != j:  # compare indices by value, not identity
                        second_slope = second_text_box['slope']
                        second_intercept = second_text_box['intercept']
                        next_book = second_text_box['text']
                        #                 print(curr_word,first_slope, next_word, second_slope)
                        if (abs(
                            (first_slope * 0.90)) <= abs(second_slope) <= abs(
                                (first_slope * 1.10))) and (abs(
                                    (first_intercept *
                                     0.90)) <= abs(second_intercept) <= abs(
                                         (first_intercept * 1.10))):
                            curr_book += ' ' + next_book
                            added[j] = True
                if len(curr_book) > 5:
                    books.append(curr_book)
        # print(books)
        img = cv2.imread(img_path)
        for text in text_boxes:
            bounds = text['boundingPoly']
            pts = np.array(bounds, np.int32)
            pts = pts.reshape((-1, 1, 2))
            img = cv2.polylines(img, [pts], True, (0, 255, 0), 3)
            img = cv2.line(img, text['midpoints'][0], text['midpoints'][1],
                           (255, 0, 0), 5)
        plt.rcParams['figure.figsize'] = (10, 10)

        plt.imshow(img)
        cv2.imwrite(img_path, img)

    return books
Example No. 6
    def find(self, image_file):
        image = types.Image(content=image_file.read())
        response = self.image_annotator.landmark_detection(image)
        self._raise_for_error(response)
        return self._make_response(response.landmark_annotations)
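
find is written as an instance method; a minimal sketch of an enclosing class it could live in (the class name and helper bodies are assumptions, not the original source):

from google.cloud import vision
from google.cloud.vision import types

class LandmarkFinder:
    def __init__(self):
        self.image_annotator = vision.ImageAnnotatorClient()

    def _raise_for_error(self, response):
        # Hypothetical helper: surface API errors as exceptions.
        if response.error.message:
            raise RuntimeError(response.error.message)

    def _make_response(self, annotations):
        # Hypothetical helper: reduce annotations to (description, score) pairs.
        return [(a.description, a.score) for a in annotations]

    def find(self, image_file):
        image = types.Image(content=image_file.read())
        response = self.image_annotator.landmark_detection(image)
        self._raise_for_error(response)
        return self._make_response(response.landmark_annotations)

Usage might look like: with open('landmark.jpg', 'rb') as f: LandmarkFinder().find(f).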
Example No. 7
def test(request, id=None):
    IMG_SIZE = 50
    LR = 1e-3
    MODEL_NAME = 'dwij28leafdiseasedetection-{}-{}.model'.format(
        LR, '2conv-basic')
    tf.logging.set_verbosity(tf.logging.ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    verifying_data = []
    instance = get_object_or_404(Post, id=id)
    filepath = instance.image.url
    filepath = '.' + filepath
    img_name = filepath.split('.')[:2]
    img = cv2.imread(filepath, cv2.IMREAD_COLOR)
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))

    verifying_data = [np.array(img), img_name]

    np.save('verify_data.npy', verifying_data)
    verify_data = verifying_data

    str_label = "Cannot make a prediction."
    status = "Error"

    tf.reset_default_graph()

    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)

    convnet = fully_connected(convnet, 4, activation='softmax')
    convnet = regression(convnet,
                         optimizer='adam',
                         learning_rate=LR,
                         loss='categorical_crossentropy',
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        #print ('Model loaded successfully.')
    else:
        #print ('Error: Create a model using neural_network.py first.')
        pass
    img_data, img_name = verify_data[0], verify_data[1]
    orig = img_data
    data = img_data.reshape(IMG_SIZE, IMG_SIZE, 3)

    model_out = model.predict([data])[0]
    if np.argmax(model_out) == 0: str_label = 'Healthy'
    elif np.argmax(model_out) == 1: str_label = 'Bacterial'
    elif np.argmax(model_out) == 2: str_label = 'Viral'
    elif np.argmax(model_out) == 3: str_label = 'Late blight'

    if str_label == 'Healthy': status = 'Healthy'
    else: status = 'Unhealthy'
    print(status)
    # result = 'Status: ' + status + '.'
    result = ''

    if (str_label != 'Healthy'): result = '\nDisease: ' + str_label + '.'

    credentials = service_account.Credentials.from_service_account_file(
        './posts/apikey.json')
    vision_client = vision.ImageAnnotatorClient(credentials=credentials)
    file_name = filepath
    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
    image = types.Image(content=content)

    response = vision_client.label_detection(image=image)
    labels = response.label_annotations
    print(labels)
    leaf_identify = ''
    leaf_condition = ''
    for label in labels:
        if label.description == 'leaf':
            leaf_identify = 'It is a leaf'
            break

    if leaf_identify != 'It is a leaf':
        leaf_identify = 'This does not seem to be a leaf picture. If you want to challenge me, COME ON ^_^'
        context = {
            "leaf_indentify": leaf_identify,
            "instance": instance,
        }
        return render(request, "re_notleaf.html", context)
    tips = 'It is an unhealthy leaf, but I cannot identify its disease. Please try another image from a different angle of view.'
    if leaf_identify == 'It is a leaf':
        for i in labels:
            if i.description == 'plant pathology':
                leaf_condition = 'Status : Unhealthy'
                break
        if leaf_condition == 'Status : Unhealthy' and status == 'Healthy':
            print('a')
            context = {
                "leaf_indentify": leaf_identify,
                "instance": instance,
                "leaf_condition": leaf_condition,
                "tips": tips,
            }
            return render(request, "re_Unidentify.html", context)
        elif leaf_condition == 'Status : Unhealthy' and status == 'Unhealthy':
            print('b')
            context = {
                "leaf_indentify": leaf_identify,
                "instance": instance,
                "leaf_condition": leaf_condition,
                "result": result,
            }
            if str_label == "Bacterial":
                return render(request, "re_bacteria.html", context)
            if str_label == "Viral":
                return render(request, "re_viral.html", context)
            if str_label == "Late blight":
                return render(request, "re_lateblight.html", context)
        elif leaf_condition != 'Status : Unhealthy' and status == 'Unhealthy':
            print('c')
            text = "It seems a healthy leaf's picture"
            context = {
                "leaf_indentify": leaf_identify,
                "instance": instance,
                "text": text,
                "leaf_condition": leaf_condition
            }
            return render(request, "re_healthleaf.html", context)
        elif leaf_condition != 'Status : Unhealthy' and status == 'Healthy':
            print('d')
            context = {
                "leaf_indentify": leaf_identify,
                "instance": instance,
                "leaf_condition": leaf_condition
            }
            return render(request, "re_healthleaf.html", context)
Example No. 8
    # Fragment: assumes image_folder and folder_content were defined earlier
    print(folder_content)
    if not os.path.exists(image_folder):
        print("Path of the file is invalid")
    else:
        image_selection = input("Enter a Valid Image Name (In Lower Case Only): ")
        print(folder_content)
    
    if image_selection.lower() in folder_content:
        print("Your Landmark Detection is on Its Way!")
    else:
        print("Oops! I Don't See This File In Your Folder. Please Try Again")  # rerun the original input prompt
    
    file_path = os.path.join(str(image_folder), str(image_selection))
    with open(file_path, 'rb') as image_file:
        content = image_file.read()
    image = types.Image(content=content) #> <class 'google.cloud.vision_v1.types.Image'>

    breakpoint()
# print("CREDENTIALS FILEPATH:", os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"))
#    print(type(client))
#    response = client.landmark_detection(image=image)
#    print(type(response))
Example No. 9
def apicall(username):
    #username = '******'
    hi = ' '
    start_time = time.time()
    #getting the tweet from the user, number of tweets are 200
    invalid = 0
    try:
        tweets = api.user_timeline(screen_name=username,
                                   count=200,
                                   include_rts=False,
                                   exclude_replies=True)
        last_id = tweets[-1].id
    except:
        print('\nInvalid Username!\n')
        invalid = 1
        return 'invalid username'

    if (invalid == 0):
        while (True):
            more_tweets = api.user_timeline(screen_name=username,
                                            count=200,
                                            include_rts=False,
                                            exclude_replies=True,
                                            max_id=last_id - 1)
            # There are no more tweets
            if (len(more_tweets) == 0):
                break
            else:
                last_id = more_tweets[-1].id - 1
                tweets = tweets + more_tweets

        #Obtaining the full path of the image
        media_files = set()
        for status in tweets:
            media = status.entities.get('media', [])
            if (len(media) > 0):
                media_files.add(media[0]['media_url'])

        #directory = input('Enter the directory for the saved photos (eg: /Users/Hayato/Desktop/Media/): ')
        directory = '/Users/Hayato/Desktop/Media/'

        #Downloading the images
        counter = 0
        for media_file in media_files:
            if (counter < 10):
                counter = counter + 1
                #change your directory here
                address = directory + str(counter) + '.jpg'
                wget.download(media_file, address)
                filename = str(counter) + '.jpg'
                # image = Image.open(filename)
                # new_image = image.resize((500, 500))						#for resizing the image (optional)
                # hello = counter * 10
                # newname = str(counter) + '.jpg'
                # new_image.save(newname)

    #--------------------------------------------Google Cloud Vision API-------------------------------------------

    # Instantiates a client
        client = vision.ImageAnnotatorClient()

        #This counter is to name the 10 images 1.jpg, 2.jpg, ... , 10.jpg
        newcounter = 0

        for x in range(1, 11):
            newcounter = newcounter + 1
            name = str(newcounter) + '.jpg'  #names the image names differently

            # The name of the image file to annotate
            file_name = os.path.join(os.path.dirname(__file__), name)

            # Loads the image into memory
            with io.open(file_name, 'rb') as image_file:
                content = image_file.read()

            image = types.Image(content=content)

            # Performs label detection on the image file
            response = client.label_detection(image=image)
            labels = response.label_annotations

            x = 0  #This is a counter to ONLY download the first description label
            marker = '*************************************************************'

            for label in labels:
                if (x == 0):
                    x = x + 1
                    y = label.description  #This is the output text when run though google cloud vision
                    print(marker)
                    print(' ')
                    print(y)
                    hi = hi + '<br>' + y
                    print(' ')
            new = 'new' + str(
                newcounter
            ) + '.jpg'  #The new file is called new1.jpg, new2.jpg, ... , new10.jpg
            image = Image.open(name)
            font_type = ImageFont.truetype(
                'arial.ttf', 35)  # if you want to change the font
            draw = ImageDraw.Draw(image)
            draw.text(xy=(0, 0), text=y, font=font_type, fill=(255, 69, 0))
            #Saves the new image, then deletes the old one
            image.save(new)
            newcommand = "rm " + name
            os.system(newcommand)
            #image.show()

    #---------------------Converting the pictures to Video--------------------------
    #     Using ffmpeg

    #os.system("ffmpeg -framerate .5 -pattern_type glob -i '*.jpg' out.mp4")

    #-------------------------------------------------------------------------------

    #Additionally, if you would like to delete the new pictures as well, include the following

        count = 0

        for z in range(1, 11):
            count = count + 1
            file = 'new' + str(count) + '.jpg'
            deletepic = "rm " + file
            os.system(deletepic)

        #automatically open the video:
        #video_out = 'open ' + directory + 'out.mp4'
        #os.system(directory)

    return hi
Example No. 10
def ocr_image(image_uri, ocr_hints):

    logging.debug("OCRing %s via google vision", image_uri)

    # image uri is IIIF endpoint
    # TODO : revisit - update for max vs full?
    full_image = ''.join([image_uri, '/full/full/0/default.jpg'])

    with requests.Session() as s:
        image_response = s.get(full_image)
        if not str(image_response.status_code).startswith("2"):
            logging.debug("Could not get source image")
            return None, None
        local_image = Image.open(io.BytesIO(image_response.content))
        image = types.Image(content=image_response.content)
        response = VISION_CLIENT.document_text_detection(image=image)
        texts = response.full_text_annotation

    if len(texts.pages) == 0:
        logging.info("No pages returned from Vision API")
        return None, None
    # logging.debug(vars(texts))

    source_page = texts.pages[0]
    page = {
        'id': 'page_1',
        'languages':
        get_language_codes(source_page.property.detected_languages),
        # TODO : it's unclear from the documentation how to interpret multiple language codes in vision api
        'main_language':
        source_page.property.detected_languages[0].language_code,
        'width': local_image.width,
        'height': local_image.height,
        'careas': []
    }
    carea_count = 1
    par_count = 1
    line_count = 1
    word_count = 1

    for source_block in source_page.blocks:

        # TODO : check if block is text or image etc.

        carea = {
            'id': 'carea_' + str(carea_count),
            'bbox': get_bbox(source_block.bounding_box.vertices),
            'paragraphs': []
        }

        page['careas'].append(carea)
        carea_count += 1
        for source_paragraph in source_block.paragraphs:
            paragraph = {
                'id': 'par_' + str(par_count),
                'bbox': get_bbox(source_paragraph.bounding_box.vertices),
                'lines': []
            }
            carea['paragraphs'].append(paragraph)
            par_count += 1

            current_line_words = []
            last_word = None
            last_y = 0

            for source_word in source_paragraph.words:
                current_y = min(
                    [v.y for v in source_word.bounding_box.vertices])
                if (current_y > last_y + NEW_LINE_HYSTERESIS) and last_y > 0:
                    add_line_to_paragraph(current_line_words, line_count,
                                          paragraph)
                    current_line_words = []
                    last_word = None

                word_text = get_word_text(source_word)
                # if word text only punctuation and last_word not None, merge this text into that word and extend bbox
                if all(c in string.punctuation
                       for c in word_text) and last_word is not None:

                    last_word['text'] += escape(word_text)
                    last_word['vertices'].extend(
                        source_word.bounding_box.vertices)
                    last_word['bbox'] = get_bbox(last_word['vertices'])

                else:
                    word = {
                        'id': 'word_' + str(word_count),
                        'bbox': get_bbox(source_word.bounding_box.vertices),
                        'text': escape(word_text),
                        'vertices': source_word.bounding_box.
                        vertices  # to generate line bbox
                    }
                    word_count += 1
                    current_line_words.append(word)
                    last_word = word
                last_y = current_y

            add_line_to_paragraph(current_line_words, line_count,
                                  paragraph)  # add last line

    hocr = render_template('vision_template.html', {"page": page})
    return hocr, 'hocr'
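
ocr_image leans on helpers (get_bbox, get_language_codes, get_word_text, add_line_to_paragraph, render_template) that are not shown. A plausible sketch of get_bbox only, assuming a bbox is the axis-aligned envelope of the vertex list (hypothetical, not the original helper):

def get_bbox(vertices):
    # Axis-aligned envelope of a Vision API vertex list, as (x0, y0, x1, y1).
    xs = [v.x for v in vertices]
    ys = [v.y for v in vertices]
    return (min(xs), min(ys), max(xs), max(ys))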
Example No. 11
def get_document_bounds(videoIO, feature, folderPath, youtube_id):

    client = MongoClient(
        'mongodb+srv://qhacks:[email protected]/test?retryWrites=true'
    )
    db = client['qhacks']

    global notes_screenshots
    global glob_data
    global length
    global prev_file
    global prev_time
    global secs
    global temp
    """Returns document bounds given an image."""

    # setting up frame by frame per 5 secs
    myclip = VideoFileClip(videoIO)
    frames = []

    for frame in myclip.iter_frames(fps=0.2):
        frames.append(frame)
        # print("hi")

    for count, single_frame in enumerate(frames, start=1):
        # print("stephen")
        #print(i)
        img = Image.fromarray(single_frame, 'RGB')

        dir_path = os.path.dirname(os.path.realpath(__file__))

        #print(dir_path)

        file = "/file%d.png" % count

        #print(file)

        #print(folderPath)

        filename = dir_path + "/" + folderPath + "/" + file

        #print(filename)

        img.save(filename)
        #img.show()

        # if length is 5:
        #     break
        # img_process = img.tobytes()

        #running the image processor
        first = True
        build_word = ""
        words = []
        # words.append("a")

        client = vision.ImageAnnotatorClient()

        bounds = []

        temp = filename

        #CHANGED HERE
        # content = img_process

        # content = Image.open(filename)
        with io.open(filename, 'rb') as image_file:
            content = image_file.read()

        # print(content)

        image = types.Image(content=content)

        response = client.document_text_detection(image=image)
        document = response.full_text_annotation

        # Collect specified feature bounds by enumerating all document features
        for page in document.pages:
            for block in page.blocks:
                for paragraph in block.paragraphs:

                    for word in paragraph.words:

                        # for symbol in word.symbols:
                        #if (feature == FeatureType.SYMBOL):
                        #bounds.append(symbol.bounding_box)

                        #if (feature == FeatureType.WORD):

                        if first and feature == FeatureType.WORD:
                            bounds.append(word.bounding_box)
                            first = False

                        for i in word.symbols:
                            if hasattr(i, "text"):
                                if i.property.detected_break.type == 0:  # compare by value; `is 0` is unreliable
                                    build_word += i.text  # no detected break: keep accumulating
                                else:
                                    build_word += i.text
                                    # a detected break ends the current word
                                    words.append(build_word)
                                    build_word = ""

                    #if (feature == FeatureType.PARA):
                    #bounds.append(paragraph.bounding_box)

                #if (feature == FeatureType.BLOCK):
                #bounds.append(block.bounding_box)

            #if (feature == FeatureType.PAGE):
            #bounds.append(block.bounding_box)

        # The list `bounds` contains the coordinates of the bounding boxes.

        # temp = {
        #     "bound": bounds
        # }

        # bound_data = {
        #     "v0x": temp['bound'][0].vertices[0].x,
        #     "v0y": temp['bound'][0].vertices[0].y,
        #     "v1x": temp['bound'][0].vertices[1].x,
        #     "v1y": temp['bound'][0].vertices[1].y,
        #     "v2x": temp['bound'][0].vertices[2].x,
        #     "v2y": temp['bound'][0].vertices[2].y,
        #     "v3x": temp['bound'][0].vertices[3].x,
        #     "v3y": temp['bound'][0].vertices[3].y,
        # }

        collection_texts = db['timestamps']

        for i in words:
            db_data = {"secs": secs, "keyword": i, "youtube_id": "tasty"}
            collection_texts.insert_one(db_data)

        # db_data = {
        #     "secs": secs,
        #     "keyword": words,
        #     "youtube_id": "hello"
        # }

        data = {secs: words}

        if (len(data[secs]) == 0):
            data = {secs: "a"}

        #print(data)

        # print(data['bound'])

        # print(data['bound'])
        # print(type(data['bound'][0]))
        # print(type(data['bound'][0].vertices))
        # print(type(data['bound'][0].vertices[0].x))

        glob_data.append(data)
        length += 1

        if length > 1:
            if glob_data[length - 1][secs][0] and glob_data[length -
                                                            2][prev_time][0]:
                if glob_data[length -
                             1][secs][0] == glob_data[length -
                                                      2][prev_time][0]:
                    prev_file = temp
                    prev_time = secs
                else:
                    screenshot_data = {
                        "secs": secs,
                        "file": prev_file,
                        "youtube_id": "tasty"
                    }
                    notes_screenshots.append(screenshot_data)
                    prev_file = temp
                    prev_time = secs  #HERE BABY
                    # imagerino = Image.open(prev_file)
                    # imagerino.show()
        secs += 5

    # print(glob_data)
    # print("STEPHENNNNNN")
    # print(screenshot_data)

    collection_screenshots = db['screenshots']
    collection_screenshots.insert_many(notes_screenshots)
Example No. 12
def load_image(image_path):
    # Read the file bytes and wrap them for the Vision API.
    with io.open(image_path, 'rb') as image_file:
        content = image_file.read()
    return types.Image(content=content)
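
A minimal usage sketch for load_image (client setup and the file name are assumptions):

import io
from google.cloud import vision
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()
image = load_image("receipt.jpg")  # hypothetical file
response = client.text_detection(image=image)
if response.text_annotations:
    print(response.text_annotations[0].description)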
def TwitterDownload(twitterHandler):

    import twitter  #the twitter api
    import urllib.request  #used to actually download the images and save them
    import subprocess  #runs command lines in program, used to run ffmpeg
    import os  #this library if for operating os things such as removing files and adding the google app credentials
    import io  #used for reading the images we saved so we can send them to google vision
    from google.cloud import vision
    from google.cloud.vision import types

    #setting up the Google API/Vision API
    #replace PATH/TO/GOOGLE/JSON/AUTH with the file path to your google json authentication file
    os.environ[
        "GOOGLE_APPLICATION_CREDENTIALS"] = "PATH/TO/GOOGLE/JSON/AUTH"  #sets up GOOGLE_APPLICATION_CREDENTIALS as an environment variable

    vision_client = vision.ImageAnnotatorClient(
    )  #setting up the image annotator client for Google Vision

    #setting up the twitter API
    #add your own keys here
    api = twitter.Api(consumer_key='',
                      consumer_secret='',
                      access_token_key='',
                      access_token_secret='')

    #this deletes all the .jpg in the current folder in case you run the program multiple times in a row
    #for each file in the current directory (where the downloaded images will be) if it is a .jpg delete it
    for file in os.listdir():
        if file.endswith('.jpg'):
            os.remove(file)

    #if you ran the program before delete the old video
    if os.path.isfile('output.mp4'):
        os.remove('output.mp4')

    # -----------------Twitter Section----------------------------------------------
    # This section uses the twitter api to download pictures from the Twitter handle the user inputs
    # It checks the number of tweets the user inputs

    try:
        status = api.GetUserTimeline(
            screen_name=twitterHandler,
            count=100)  #the twitter user and how many tweets to check
    except:
        return 'Error 001: This Twitter handle is not valid'

    picTweets = [
    ]  #this is a list that will hold all the image urls for download later

    length = len(status)  #how many tweets there are in status

    for i in range(0, length):  #for each of the tweets grabbed
        #a try except block because if you try to read the media of a tweet that doesn't have media you get an error
        try:
            if status[i].media[
                    0].type == 'photo':  #is there an image attached to the tweet
                picTweets.append(status[i].media[0].media_url
                                 )  #add the media url to the picsTweets list
        except:  #if we would error, meaning the tweet doesn't have the correct media, do nothing
            pass

    picTweetsLength = len(
        picTweets)  #gets the length of the pic tweets list for a for loop
    imgList = []  #a list of the name of all the images that will be saved

    #this for loop goes through the urls we found for the images and saves them to the local files as JPEGs
    for x in range(0, picTweetsLength):
        string = 'twitterImage'
        stringNum = str(x)
        #the following if statements find the digits in the current photo so that it can add a correct number of
        #leading 0s to the name of the file, for example the stringNum is 1 so we need it to be 001, 10 we need 010 etc.
        if len(stringNum) == 1:
            string += '00'
            string += stringNum
        elif len(stringNum) == 2:
            string += '0'
            string += stringNum
        elif len(stringNum) == 3:  #example 100 so no leading zeroes
            string += stringNum
        string += '.jpg'
        urllib.request.urlretrieve(
            picTweets[x],
            string)  #downloads the image and saves it as twitterImageXXX.jpg
        imgList.append(
            string)  #adding the name of the file to the list of images

        # Checking if any images were found within the 100 tweets scanned, if there are none then there is an error
        # returned as the output
    if len(imgList) == 0:
        return 'Error 002: There are no images found'

    #--------------------------Google Vision-----------------------------------------------------
    #I used this tutorial below to figure out how it works
    #https://www.youtube.com/watch?v=nMY0qDg16y4
    #Thanks to Doug Beney in the comments for showing the updated code for python 3

    imageLabels = [
    ]  #a list that will hold the labels of each image, will be the final output

    #for all the images we downloaded open the image, run it through google vision and
    for i in range(0, len(imgList)):  #for all the images we downloaded
        with io.open(imgList[i], 'rb') as image_file:  #open
            content = image_file.read()

        image = types.Image(content=content)
        response = vision_client.label_detection(image=image)
        labels = response.label_annotations

        tempList = [
        ]  #temporary list to save all the labels in one list then save that list in the imageLabels list
        for label in labels:
            tempList.append(label.description)

        imageLabels.append(tempList)  #add the list to imageLabels

    # ---------------------------FFMPEG--------------------------------------
    #this part creates the movies out of the images downloaded
    #change the number after framerate to change how long each image is on screen
    #1/2 means each image is on screen for 2 seconds, 1/5 5 seconds etc

    #note that there is a bug that the first image stays on screen 3x longer than it should
    #prof said to ignore it
    subprocess.run('ffmpeg -framerate 1/2 -i twitterImage%03d.jpg output.mp4')  # a plain string works on Windows; elsewhere pass an argument list or shell=True

    #getting the file path of the video, adds it to the end of the labels for the images
    if os.path.isfile('output.mp4'):
        videoPath = os.getcwd()
        videoPath += '/output.mp4'
        imageLabels.append(videoPath)
    else:
        return 'Error 003: ffmpeg could not create the video properly'

    return imageLabels  #return the labels of all the functions as the final output
Example No. 14
def gcp(imageName):
  #Instantiates a client
  client = vision.ImageAnnotatorClient()

  #The name of the image file to annotate
  file_name = os.path.join(
      os.path.dirname(__file__),
      imageName)

  # Loads the image into memory
  with io.open(file_name, 'rb') as image_file:
      content = image_file.read()

  image = types.Image(content=content)

  # Performs document text detection on the image file
  response = client.document_text_detection(image=image)

  #Strip response for just the body of the resume
  toAWS = str(response)
  toAWS = toAWS.split("}")[-2].partition(': "')[2][:-2].strip()

  ##############################################################
  #Function that calls aws api and returns comprehension on text
  comprehend = boto3.client(service_name='comprehend', aws_access_key_id=ACCESS_KEY,
  aws_secret_access_key=SECRET_KEY, region_name='us-east-1')
  toAWS = toAWS.replace("\n", ' ')  # str.replace returns a new string; assign the result back
  print('Calling DetectEntities')
  with open('awsResponse.json', 'w') as outfile:
    json.dump(comprehend.detect_entities(Text=toAWS, LanguageCode='en'), outfile, sort_keys=True, indent=4)
  print('End of DetectEntities\n')

  #Open data for comparison to fraternities
  f = open('data/frats.txt')
  frats = f.read()

  #frats.replace('', ' ')
  for value in frats.split(','):
    for val in strip_newline(value.replace("'", "").strip().lower()).split():
      #print(val)
      if val.lower() not in BLACKLIST:
        fratList.append(val)
  f.close()

  #Open data for comparison to sororities
  f = open('data/Sororities.txt')
  soros = f.read()

  #soros.replace('', ' ')
  for value in soros.split(','):
    for val in strip_newline(value.replace("'", "").strip().lower()).split():
      #print(val)
      if val.lower() not in BLACKLIST:
        soroList.append(val)
  f.close()

  #print(soroList)

  response = MessageToJson(response)
  response = json.loads(str(response))

  keywords = findKeywords()

  with open('gcpResponse.json', 'w') as outfile:
    json.dump(response, outfile, indent=4)

  with open('gcpResponse.json') as f:
    gcpResponse = json.load(f)

  for values in gcpResponse['textAnnotations']:
    #strip_newline(values['description'].lower()).split()
    if strip_newline(values['description'].lower()) in fratList or strip_newline(values['description'].lower()) in soroList:
      if strip_newline(values['description'].lower()) not in BLACKLIST:
        print(strip_newline(values['description'].lower()))
        keywords.append(values['description'])

  for i in range(len(keywords)):
    for values in gcpResponse['textAnnotations']:
      if values['description'].lower() == keywords[i].lower():
        vertices = values['boundingPoly']['vertices']
        first = vertices[0]
        third = vertices[2]
        wordDict = {"x1": first['x'], "y1": first['y'], 'x2': third['x'], 'y2': third['y']}
        censorList.append(wordDict)
Example No. 15
def bytes_to_gcv_img(img_bytes):
    return types.Image(content=img_bytes)
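
A usage sketch pairing bytes_to_gcv_img with raw file bytes (the file name is hypothetical; types comes from the legacy import shown in the other examples):

with open("photo.jpg", "rb") as f:
    gcv_image = bytes_to_gcv_img(f.read())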
Example No. 16
def getlabeldata(image_file):
    content = image_file.read()
    image = types.Image(content=content)
    # Performs label detection on the image file
    response = client.label_detection(image=image)
    return response
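
getlabeldata depends on a module-level client that the scrape dropped; a sketch of the setup it assumes, plus a call with a hypothetical file:

import io
from google.cloud import vision
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()  # module-level client assumed by getlabeldata

with io.open("photo.jpg", "rb") as image_file:
    response = getlabeldata(image_file)
for label in response.label_annotations:
    print(label.description, label.score)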
Example No. 17
def googlevision_ocr(segment_type, uri, file_name, text_file, file_item):
    """Method to process with google vision"""
    _, _ = google.auth.default()
    vision_client = vision.ImageAnnotatorClient()
    image = types.Image()
    image.source.image_uri = uri
    if segment_type == 'full_page':
        response = vision_client.document_text_detection(image=image)
        if response.error.message:  # an error message is set only on failure
            with io.open(file_name, 'rb') as image_file:
                content = image_file.read()
                image = types.Image(content=content)
                response = vision_client.document_text_detection(image=image)
        texts = response.text_annotations
        text_list = " "
        text_data = {}
        for index, text in enumerate(texts):
            if index == 0:
                word = text.description + " "
                text_list += word
            else:
                text_coords = []
                for vertice in text.bounding_poly.vertices:
                    dict_text = {}
                    dict_text['x'] = vertice.x
                    dict_text['y'] = vertice.y
                    text_coords.append(dict_text)
                text_data[text.description] = text_coords
        text_file.google_vision_text = text_list
        text_file.google_vision_response = text_data
        text_file.save()
    if segment_type == 'segment_page':
        response = vision_client.document_text_detection(image=image)
        if response.error.message:  # an error message is set only on failure
            with io.open(file_name, 'rb') as image_file:
                content = image_file.read()
                image = types.Image(content=content)
                response = vision_client.document_text_detection(image=image)
        texts = response.text_annotations
        text_list = " "
        text_data = {}
        for index, text in enumerate(texts):
            if index == 0:
                word = text.description + " "
                text_list += word
            else:
                text_coords = []
                for vertice in text.bounding_poly.vertices:
                    dict_text = {}
                    dict_text['x'] = vertice.x
                    dict_text['y'] = vertice.y
                    text_coords.append(dict_text)
                text_data[text.description] = text_coords
        text_file.google_vision_text = text_list
        text_file.google_vision_response = text_data
        text_file.save()
    if file_item.google_vision_processed is False:
        file_item.google_vision_processed = True
        file_item.save()
        os.remove(file_name)
    return serializers.serialize("json", [
        text_file,
    ])
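
The full_page and segment_page branches of googlevision_ocr are identical; a hedged refactor sketch that pulls the shared OCR-and-collect step into one helper (the helper name is an assumption; io and types come from the example's module):

def run_document_text_detection(vision_client, image, file_name):
    # Try the URI-based image first; on error, retry with local file bytes.
    response = vision_client.document_text_detection(image=image)
    if response.error.message:
        with io.open(file_name, 'rb') as image_file:
            image = types.Image(content=image_file.read())
            response = vision_client.document_text_detection(image=image)
    # Collect the full text plus per-word bounding-box coordinates.
    text_list = " "
    text_data = {}
    for index, text in enumerate(response.text_annotations):
        if index == 0:
            text_list += text.description + " "
        else:
            text_data[text.description] = [
                {'x': v.x, 'y': v.y} for v in text.bounding_poly.vertices
            ]
    return text_list, text_data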
Example No. 18
def detect_face(face_file, max_results=5):
  content = face_file.read()
  image = types.Image(content=content)
  return client.face_detection(image=image).face_annotations
Example No. 19
def VisionScript(indir):
    # Instantiates a client
    client = vision.ImageAnnotatorClient()

    folders = []
    files = []

    print("\nReading Directory images\\ ... ", end="")
    for entry in os.scandir(indir):
        if entry.is_dir():
            folders.append(entry.path)
        elif entry.is_file():
            files.append(entry.path)
    print("done\n")

    i = 0
    total = len(files)
    #output_txt = ""
    #f = open('output_old.txt', 'w', encoding='utf-8')

    #Preparing docx file with formatting and margins.
    document = Document()
    section = document.sections[0]
    section.left_margin, section.right_margin = (Cm(1.27), Cm(1.27))
    section.top_margin, section.bottom_margin = (Cm(1.27), Cm(1.27))
    section.gutter = 0

    style = document.styles['Normal']
    font = style.font
    font.name = 'Kokila'
    font.size = Pt(18)

    document2 = Document()

    style2 = document2.styles['Normal']
    font = style2.font
    font.name = 'Kokila'
    font.size = Pt(18)

    for image in files:
        i = i + 1
        file_name = str(image)
        print("Processing {:d} of {:d} images: {:s}...".format(
            i, total, file_name),
              end="")

        with io.open(file_name, 'rb') as image_file:
            content = image_file.read()

        image = types.Image(content=content)

        response = client.document_text_detection(image=image)
        original_output = response.full_text_annotation.text

        #Replace newline characters with space
        new_output = original_output.replace("\r", "")
        new_output = new_output.replace("\n", " ")

        #generate output for both type of files
        #output_txt = original_output + "\n\n"
        #f.write(output_txt)

        paragraph = document.add_paragraph(new_output + "\n\n")
        paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
        paragraph.style = document.styles['Normal']

        paragraph2 = document2.add_paragraph(original_output + "\n\n")
        paragraph2.style = document2.styles['Normal']

        print("... done!")

    document.save('Word.docx')
    document2.save('Word_old.docx')
    #f.close()
    print("\nAll images processed. Output saved to Word.docx, Word_old.docx\n")
Example No. 20
def detect_text(path):
    """Detects text in the file."""
    imgclient = vision.ImageAnnotatorClient()
    nlpclient = language.LanguageServiceClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = type1.Image(content=content)  # type1 is presumably the vision types module (aliased import)
    response = imgclient.text_detection(image=image)
    texts = response.text_annotations
    if len(texts) != 0:
        for text in texts:
            textcontent = text.description
            textlanguage = text.locale
            document = type2.Document(content=text.description,
                                      type=enums.Document.Type.PLAIN_TEXT)
            # Detects the sentiment of the text
            try:
                sentiment = nlpclient.analyze_sentiment(
                    document=document).document_sentiment
                sentimentscore = sentiment.score
                sentimentmagnitude = sentiment.magnitude
            except:
                sentimentscore = 'N/A1'
                sentimentmagnitude = 'N/A1'
            break
    else:
        textcontent = 'N/A'
        textlanguage = 'N/A'
        sentimentscore = 'N/A'
        sentimentmagnitude = 'N/A'

    # Detect labels in the picture (left disabled in the original):
    # response1 = imgclient.label_detection(image=image)
    # labels = response1.label_annotations
    # if len(labels) != 0:
    #     for label in labels:
    #         labeldescription = label.description
    #         labeltopicality = label.topicality
    #         break
    # else:
    #     labeldescription = 'N/A'
    #     labeltopicality = 'N/A'

    # Detect image properties (e.g., rgb, dominant color, etc.), also disabled:
    # response2 = imgclient.image_properties(image=image)
    # props = response2.image_properties_annotation
    # lista = []
    # for c in props.dominant_colors.colors:
    #     lista.append(c.pixel_fraction)
    # dominantcolorperc = max(lista)
    # redtotal = 0
    # greentotal = 0
    # bluetotal = 0
    # for c in props.dominant_colors.colors:
    #     # return dominant color's rgb
    #     if c.pixel_fraction == dominantcolorperc:
    #         reddom = c.color.red
    #         greendom = c.color.green
    #         bluedom = c.color.blue
    #     redtotal += c.color.red * c.pixel_fraction
    #     greentotal += c.color.green * c.pixel_fraction
    #     bluetotal += c.color.blue * c.pixel_fraction
    return textcontent, textlanguage, sentimentscore, sentimentmagnitude
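
detect_text relies on aliased imports that are not shown. A plausible sketch of the assumed import block (the aliases type1, type2, and enums come from the function body; the exact module paths are an assumption based on the pre-2.0 client libraries):

import io
from google.cloud import vision
from google.cloud import language
from google.cloud.vision import types as type1
from google.cloud.language import types as type2
from google.cloud.language import enums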
Example #21
def get_labels(Name):
    # Set up access to the Google Vision API
    # os.system cannot set the credential env var reliably, so for now it must be exported in the shell
    client = vision.ImageAnnotatorClient()
    i = 1

    # Initialize the posts collection in MongoDB
    posts = db.posts

    print('Getting labels from Google and printing them on each picture')
    while True:
        # Check if there are pictures inside the folder
        if os.path.exists('./PICS/' + str(i) + '.jpg'):
            file_name = os.path.join(os.path.dirname(__file__),
                                     './PICS/' + str(i) + '.jpg')

            post = {
                "Vaild": 1,
                "User_name": User_name,
                "Twitter_ID": Name,
                "link": urls[i - 1],
                "tags": []
            }

            # Read the pictures and get ready to push it to Google
            with io.open(file_name, 'rb') as image_file:
                content = image_file.read()

            image = types.Image(content=content)

            # Get the labels from Google Vision
            try:
                response = client.label_detection(image=image)
                labels = response.label_annotations
            except Exception:
                print(
                    'Google API is not accessible at this time; please check your credentials or try again later.'
                )
                return 0
            # Set up Pillow to draw the labels onto the picture
            # Set the path to your font file here
            im = Image.open('./PICS/' + str(i) + '.jpg')
            draw = ImageDraw.Draw(im)
            myfont = ImageFont.truetype(fonts, size=35)
            # The .ttf font file should be copied into the same folder
            fillcolor = 'red'
            # Put label into the picture
            m = 0
            for label in labels:
                m = m + 1
                if m <= 2:
                    draw.text((40, 40 * m),
                              label.description,
                              font=myfont,
                              fill=fillcolor)
                    post["tags"].append(label.description)
                else:
                    post["tags"].append(label.description)

            im.save('./PICS/' + str(i) + '.jpg', 'JPEG')
            print('Printed labels on picture ' + str(i))

            post_id = posts.insert_one(post).inserted_id
            i = i + 1
        # Print the total number of the pictures
        else:
            print(' ')
            print('**********************')
            print(str(i - 1) + ' pictures completed')
            print('**********************')
            return 1
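
get_labels references several module-level names (db, User_name, urls, fonts) that are defined elsewhere. A minimal sketch of that assumed context, with placeholder values:

import io
import os
from google.cloud import vision
from google.cloud.vision import types
from PIL import Image, ImageDraw, ImageFont
from pymongo import MongoClient

# Assumed module-level context -- every value below is a placeholder.
db = MongoClient().twitter_pics   # MongoDB database holding the posts
User_name = 'example user'        # display name of the account owner
urls = []                         # per-picture tweet URLs collected earlier
fonts = 'FONT.ttf'                # path to the .ttf font used for labels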
Example #22
def get_all_tweets(screen_name, videoname, labels_name):
    # Clean up leftovers from a previous run
    try:
        for n in range(20):
            os.remove(str(n) + ".jpg")
    except OSError:
        pass
    try:
        os.remove(videoname)
    except OSError:
        pass
    try:
        os.remove(labels_name)
    except OSError:
        pass
    #Twitter only allows access to a user's most recent 3240 tweets with this method

    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    #initialize a list to hold all the tweepy Tweets
    alltweets = []

    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=20)

    #save most recent tweets
    alltweets.extend(new_tweets)

    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:

        #all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name,
                                       count=20,
                                       max_id=oldest)

        #save most recent tweets
        alltweets.extend(new_tweets)

        #update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        if (len(alltweets) > 25):
            break
        print("...%s tweets downloaded so far" % (len(alltweets)))
    data = {}
    data['Pictures'] = []
    data['Account'] = screen_name
    media_files = set()
    for status in alltweets:
        try:
            media = status.extended_entities.get('media', [])
        except Exception:
            media = status.entities.get('media', [])
        # print (media[0])
        if (len(media) > 0):
            for i in range(len(media)):
                media_files.add(media[i]['media_url'])
    for media_file in media_files:
        print(media_file)
        wget.download(media_file)

    os.system(
        "cat *.jpg | ffmpeg -f image2pipe -framerate .5 -i - -vf 'crop=in_w-1:in_h' -vcodec libx264 "
        + videoname)

    # for google vision
    client = vision.ImageAnnotatorClient()

    picNum = 0
    OBJ = [
        pic for pic in listdir(".")
        if pic.endswith('jpg') or pic.endswith('png')
    ]

    idx = 0
    for i in OBJ:

        file_name = os.path.join(os.path.dirname(__file__), i)
        new_name = str(picNum) + '.jpg'

        os.renames(file_name, new_name)
        nestDIC = {}
        nestDIC['Picture ' + str(idx)] = new_name
        picNum = picNum + 1

        # Loads the image into memory
        with io.open(new_name, 'rb') as image_file:
            content = image_file.read()

        image = types.Image(content=content)

        # Performs label detection on the image file
        response = client.label_detection(image=image)
        labels = response.label_annotations

        label_list = []
        for label in labels:
            label_list.append(label.description)

        nestDIC['Description ' + str(idx)] = label_list
        data['Pictures'].append(nestDIC)

        idx += 1

    print(data)
    with open(labels_name, 'w') as JSONObject:
        json.dump(data, JSONObject, indent=4, sort_keys=True)
    client = MongoClient()  # note: this reuses (and shadows) the name of the Vision client
    db = client.picture.database
    collection = db.picture_collection

    posts = db.posts
    # Delete everything if you need
    # posts.delete_many({})
    posts.insert_one(data)

    print("This is the data stored in MongoDB: \n")
    pprint.pprint(posts.find_one({'Account': screen_name}))
Example #23
    async def onconnect(obniz):
        led = obniz.wired("LED", {"anode": 0, "cathode": 1})

        # Working directory for this run
        base_dir = r'ディレクトリ名'  # placeholder: your working directory
        # Filename of the service-account JSON key created earlier
        credential_path = base_dir + r'API_KEY.json'
        # Point the environment variable at the service-account key
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
        # Initialize the Vision client
        client = vision.ImageAnnotatorClient()

        i = 0
        while True:
            # Capture two photos
            # (proper timing control is left for later)
            if (i == 2):
                print("end")
                time.sleep(3)
                break

            # Give the LED time to catch up
            if (i > 1):
                time.sleep(3)

            fileName = "photo_" + str(i) + ".png"
            # Device ID 0 is the built-in camera; USB cameras are 1 and up.
            capture = cv2.VideoCapture(0)
            # The captured frame is stored in image; ret reports success.
            ret, image = capture.read()
            if ret:
                # Write out the captured image; fileName is the output name.
                cv2.imwrite("./photo/" + fileName, image)
            # Filename of the target image
            file_name = base_dir + "photo/" + fileName
            with io.open(file_name, 'rb') as image_file:
                content = image_file.read()
            image = types.Image(content=content)
            objects = client.object_localization(
                image=image).localized_object_annotations
            #print('Number of objects found: {}'.format(len(objects)))

            p = 0
            for object_ in objects:
                # Uncomment to list every detection:
                # print('\n{} (confidence: {})'.format(object_.name, object_.score))
                # Count people detected with confidence above 60%
                if object_.name == "Person":
                    if object_.score > 0.60:
                        p = p + 1
                # print('Normalized bounding polygon vertices: ')
                # for vertex in object_.bounding_poly.normalized_vertices:
                #     print(' - ({}, {})'.format(vertex.x, vertex.y))
            print(str(p) + " people")
            if (p > 0):
                ## Action to run on obniz: blink the LED once
                led.on()
                obniz.wait(500)
                led.off()
                ## End of obniz action
            # Re-initialize the Vision client
            client = vision.ImageAnnotatorClient()
            i = i + 1
        # Separate helper module (for deleting the captured photos)
        remove_photo.remove_photo()
        # Clean up the obniz connection
        obniz.close()
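
The example above calls a small remove_photo helper module that is not shown. A plausible minimal sketch (the module contents and file layout are assumptions inferred from the capture code):

# remove_photo.py -- assumed helper that clears the captured photos.
import glob
import os

def remove_photo():
    for path in glob.glob('./photo/photo_*.png'):
        os.remove(path)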
Example #24
def detect_labels_uri(uri):
    """Detects labels in the file located in Google Cloud Storage or on the Web."""
    # Note: despite the name, this version reads the image bytes from a local path.
    path = uri
    client = vision.ImageAnnotatorClient()
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    # print(content)
    image = types.Image(content=content)
    # To annotate a remote URI instead:
    # image = types.Image()
    # image.source.image_uri = uri
    macy_keywords = []
    #print(dir(client))
    response_label = client.label_detection(image=image)
    response_web = client.web_detection(image=image)
    labels = response_label.label_annotations
    web_annotations = response_web.web_detection
    response_image_properties = client.image_properties(image=image)
    image_props = response_image_properties.image_properties_annotation
    gender = ""
    color = ""
    for c in image_props.dominant_colors.colors:
        print(int(c.color.red))
        xx, color = get_colour_name(
            (int(c.color.red), int(c.color.green), int(c.color.blue)))
        print(color, xx)
        if xx is not None:
            color = xx
        # only the first (most dominant) color is needed
        break

    #print(dir(response_web))
    #print((dir(labels)))
    print('Labels:')

    for label in labels:
        #print (label.description)
        if any(word in label.description.lower() for word in
               clw) and label.score > 0.90 and label.description not in [
                   "t shirt", "t-shirt", "long-sleeved t-shirt"
               ]:
            if label.description.lower() in ["male", "female"]:
                gender = label.description
            print(label.description, label.score)
            macy_keywords.append(label.description)
    #for annotation in web_annotations():
    #print (annotation)

    for web_entity in web_annotations.web_entities:
        #print (web_entity.description)
        if any(
                word in web_entity.description.lower() for word in clw
        ) and web_entity.score > 0.55 and web_entity.description.lower() not in [
                "dress", "t-shirt", "t shirt", "long-sleeved t-shirt"
        ]:
            #print(web_entity)
            if web_entity.description.lower() in ["male", "female"]:
                gender = web_entity.description
            macy_keywords.append(web_entity.description.lower())
    #print gender
    resp = set(macy_keywords[:3])
    resp.add(gender)
    resp.add(color)
    #print set(macy_keywords)
    return get_macy_links(resp)
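
Despite its name and docstring, detect_labels_uri reads local bytes. A minimal sketch of the URI-based variant that the commented-out lines hint at (detect_labels_from_uri is an illustrative name, not the original):

from google.cloud import vision
from google.cloud.vision import types

def detect_labels_from_uri(uri):
    # Annotate directly from a gs:// or http(s):// URI, no local read needed.
    client = vision.ImageAnnotatorClient()
    image = types.Image()
    image.source.image_uri = uri
    response = client.label_detection(image=image)
    return [label.description for label in response.label_annotations]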
Example #25
def index01(request):
    candidate = Candidate.objects.all()

    no1 = Candidate.objects.filter(party_number=1)
    no1[0].party_number
    image_db01 = no1[0].image_file

    image_db = str(image_db01)
    client = vision.ImageAnnotatorClient()

    file_name = os.path.join(os.path.dirname('C:'),
                             '/Users/student/mysite02/media/' + image_db)

    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()

    image = types.Image(content=content)

    response = client.text_detection(image=image)
    texts = response.text_annotations

    my_list = list()

    for text in texts:
        result = text.description
        my_list.append(result)

    data = my_list[0]

    data1 = data.replace('\n', ' ')
    data2 = data1.replace('(', ' ')
    data3 = data2.replace(')', ' ')
    data4 = data3.replace('/', ' ')
    data5 = data4.split(' ')

    df = pd.DataFrame(data5, columns=["총리스트"])
    df1 = pd.DataFrame(columns=["1", "2", "3", "4"])
    df2 = pd.DataFrame(columns=["1", "2", "3", "4"])

    df1.loc[0, '3'] = "아메리카노"
    df1.loc[1, '2'] = "아이스"
    df1.loc[1, '3'] = "아메리카노"
    df1.loc[2, '2'] = "아이스"
    df1.loc[2, '3'] = "카페라떼"
    df1.loc[3, '3'] = "카페라떼"
    df1.loc[4, '1'] = "아이스"
    df1.loc[4, '2'] = "바닐라라떼"
    df1.loc[4, '3'] = "마끼아또"
    df1.loc[5, '1'] = "아이스"
    df1.loc[5, '2'] = "카라멜라떼"
    df1.loc[5, '3'] = "마끼아또"
    df1.loc[6, '2'] = "카라멜라떼"
    df1.loc[6, '3'] = "마끼아또"
    df1.loc[7, '3'] = "마끼아또"
    df1.loc[7, '2'] = "라떼"
    df1.loc[7, '1'] = "카라멜"
    df1.loc[8, '2'] = "바닐라라떼"
    df1.loc[8, '3'] = "마끼아또"
    df1.loc[9, '1'] = "바닐라"
    df1.loc[9, '2'] = "라떼"
    df1.loc[9, '3'] = "마끼아또"
    df1.loc[10, '1'] = "화이트초콜릿"
    df1.loc[10, '2'] = "라떼"
    df1.loc[10, '3'] = "마끼아또"
    df1.loc[11, '3'] = "카푸치노"
    df1.loc[12, '2'] = "헤이즐넛"
    df1.loc[12, '3'] = "카푸치노"
    df1.loc[13, '2'] = "오리지널"
    df1.loc[13, '3'] = "드립커피"
    df1.loc[14, '1'] = "아이스"
    df1.loc[14, '2'] = "오리지널"
    df1.loc[14, '3'] = "드립커피"
    df1.loc[15, '2'] = "카페"
    df1.loc[15, '3'] = "모카"
    df1.loc[16, '3'] = "카페모카"
    df1.loc[17, '2'] = "아이스"
    df1.loc[17, '3'] = "카페모카"
    df1.loc[18, '2'] = "화이트초콜릿라떼"
    df1.loc[18, '3'] = "마끼아또"
    df1.loc[19, '1'] = "아이스"
    df1.loc[19, '2'] = "화이트초콜릿라떼"
    df1.loc[19, '3'] = "마끼아또"
    df1.loc[20, '2'] = "콜드브루"
    df1.loc[20, '3'] = "아메리카노"
    df1.loc[21, '2'] = "콜드브루"
    df1.loc[21, '3'] = "원액"
    df1.loc[22, '2'] = "니트로"
    df1.loc[22, '3'] = "콜드브루"
    df1.loc[23, '2'] = "콜드브루"
    df1.loc[23, '3'] = "라떼"
    df1.loc[24, '3'] = "그라니따"
    df1.loc[24, '2'] = "콘파나"
    df1.loc[24, '1'] = "모카"
    df1.loc[25, '1'] = "카라멜"
    df1.loc[25, '2'] = "콘파나"
    df1.loc[25, '3'] = "그라니따"
    df1.loc[26, '3'] = "그라니따"
    df1.loc[26, '2'] = "망고요거트"
    df1.loc[27, '3'] = "그라니따"
    df1.loc[27, '2'] = "요거트"
    df1.loc[27, '1'] = "망고"
    df1.loc[28, '3'] = "그라니따"
    df1.loc[28, '2'] = "플레인요거트"
    df1.loc[29, '3'] = "그라니따"
    df1.loc[29, '1'] = "플레인"
    df1.loc[29, '2'] = "요거트"
    df1.loc[30, '3'] = "그라니따"
    df1.loc[30, '2'] = "자바칩민트"
    df1.loc[31, '3'] = "그라니따"
    df1.loc[31, '2'] = "에스프레소콘파나"
    df1.loc[32, '3'] = "그라니따"
    df1.loc[32, '2'] = "콘파나"
    df1.loc[32, '1'] = "에스프레소"
    df1.loc[33, '3'] = "그라니따"
    df1.loc[33, '2'] = "스트로베리요거트"
    df1.loc[34, '3'] = "그라니따"
    df1.loc[34, '2'] = "요거트"
    df1.loc[34, '1'] = "스트로베리"
    df1.loc[35, '3'] = "그라니따"
    df1.loc[35, '2'] = "스트로베리"
    df1.loc[36, '3'] = "그라니따"
    df1.loc[36, '2'] = "블루베리요거트"
    df1.loc[37, '3'] = "그라니따"
    df1.loc[37, '2'] = "요거트"
    df1.loc[37, '1'] = "블루베리"
    df1.loc[38, '3'] = "그라니따"
    df1.loc[38, '2'] = "복숭아"
    df1.loc[39, '3'] = "그라니따"
    df1.loc[39, '2'] = "그린티"
    df1.loc[40, '3'] = "그라니따"
    df1.loc[40, '2'] = "찰인절미"
    df1.loc[40, '1'] = "레드빈"
    df1.loc[41, '3'] = "그라니따"
    df1.loc[41, '2'] = "흑임자"
    df1.loc[41, '1'] = "레드빈"
    df1.loc[42, '3'] = "레드빈흑임자그라니따"
    df1.loc[43, '3'] = "그라니따"
    df1.loc[43, '2'] = "쑥"
    df1.loc[43, '1'] = "레드빈"
    df1.loc[44, '3'] = "레드빈쑥그라니따"
    df1.loc[45, '3'] = "그라니따"
    df1.loc[45, '2'] = "민트"
    df1.loc[45, '1'] = "레몬"
    df1.loc[46, '3'] = "그라니따"
    df1.loc[46, '2'] = "민트"
    df1.loc[46, '1'] = "자바칩"
    df1.loc[47, '3'] = "아이스티"
    df1.loc[48, '3'] = "아이스티"
    df1.loc[48, '2'] = "라즈베리"
    df1.loc[49, '3'] = "아이스티"
    df1.loc[49, '2'] = "복숭아"
    df1.loc[50, '3'] = "그린티라떼"
    df1.loc[50, '2'] = "아이스"
    df1.loc[51, '1'] = "아이스"
    df1.loc[51, '2'] = "그린티"
    df1.loc[51, '3'] = "라떼"
    df1.loc[52, '3'] = "그린티라떼"
    df1.loc[53, '2'] = "그린티"
    df1.loc[53, '3'] = "라떼"
    df1.loc[54, '2'] = "아이스x"
    df1.loc[54, '3'] = "초콜릿x"
    df1.loc[55, '2'] = "콜드브루"
    df1.loc[55, '3'] = "밀크티"
    df1.loc[56, '2'] = "핫"
    df1.loc[56, '3'] = "초콜릿"
    df1.loc[57, '2'] = "아이스"
    df1.loc[57, '3'] = "초콜릿"
    df1.loc[58, '2'] = "레몬"
    df1.loc[58, '3'] = "스파클링"
    df1.loc[59, '2'] = "자몽"
    df1.loc[59, '3'] = "스파클링"
    df1.loc[60, '2'] = "베리"
    df1.loc[60, '3'] = "스파클링"
    df1.loc[61, '2'] = "청포도"
    df1.loc[61, '3'] = "스파클링"
    df1.loc[62, '3'] = "딸기플라워밀크쉐이크"
    df1.loc[63, '3'] = "딸기프룻티펀치"
    df1.loc[64, '3'] = "딸기치즈큐브쉐이크"
    df1.loc[65, '3'] = "딸기요거트그래놀라"
    df1.loc[66, '3'] = "딸기라떼"
    df1.loc[67, '3'] = "딸기주스"
    df1.loc[68, '2'] = "딸기"
    df1.loc[68, '3'] = "주스"
    df1.loc[69, '2'] = "키위"
    df1.loc[69, '3'] = "주스"
    df1.loc[70, '2'] = "토마토"
    df1.loc[70, '3'] = "주스"
    df1.loc[71, '2'] = "루비자몽"
    df1.loc[71, '3'] = "주스"
    df1.loc[72, '2'] = "루비자몽"
    df1.loc[72, '3'] = "핫주스"
    df1.loc[73, '2'] = "오렌지"
    df1.loc[73, '3'] = "주스"
    df1.loc[74, '2'] = "프루티"
    df1.loc[74, '3'] = "하동"
    df1.loc[75, '2'] = "머스캣"
    df1.loc[75, '3'] = "그린티"
    df1.loc[76, '3'] = "민트크루"
    df1.loc[77, '2'] = "오렌지"
    df1.loc[77, '3'] = "보스"
    df1.loc[78, '2'] = "루이보스"
    df1.loc[78, '3'] = "오렌지"
    df1.loc[79, '3'] = "커즈마인"
    df1.loc[80, '2'] = "시트러스"
    df1.loc[80, '3'] = "캐모마일"
    df1.loc[81, '2'] = "퍼스트"
    df1.loc[81, '3'] = "브레이크"
    df1.loc[82, '3'] = "영그레이"
    df1.loc[83, '1'] = "아이스"
    df1.loc[83, '2'] = "루이보스"
    df1.loc[83, '3'] = "크림티"
    df1.loc[84, '2'] = "루이보스"
    df1.loc[84, '3'] = "크림티"
    df1.loc[85, '1'] = "아이스"
    df1.loc[85, '2'] = "캐모마일"
    df1.loc[85, '3'] = "프루티"
    df1.loc[86, '2'] = "캐모마일"
    df1.loc[86, '3'] = "프루티"
    df1.loc[87, '2'] = "파니니"
    df1.loc[87, '3'] = "클래식"
    df1.loc[88, '2'] = "파니니"
    df1.loc[88, '3'] = "불고기"
    df1.loc[89, '3'] = "허니브레드"
    df1.loc[90, '2'] = "수플레"
    df1.loc[90, '3'] = "치즈케익"
    df1.loc[91, '3'] = "흑당이달고나빙산"
    df1.loc[92, '3'] = "피치얼그레이빙산"
    df1.loc[93, '3'] = "요거딸기빙산"
    df1.loc[94, '3'] = "망고딸기동산"
    df1.loc[95, '3'] = "인절미팥동산"
    df1.loc[96, '3'] = "찹찹딸기라떼보틀"
    df1.loc[97, '1'] = "홀)"
    df1.loc[97, '2'] = "수플레"
    df1.loc[97, '3'] = "치즈케익"
    df1.loc[98, '2'] = "애플시나몬"
    df1.loc[98, '3'] = "허니브레드"
    df1.loc[99, '1'] = "까사링고"
    df1.loc[99, '2'] = "베리"
    df1.loc[99, '3'] = "케익"

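    # Match each OCR token from df ("총리스트") against the menu lookup
    # table df1, copying any hits into df2 column by column.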
    for i in range(0, len(df.index)):
        for i2 in range(0, len(df1.index)):
            if df1.loc[i2, '1'] == df.loc[i, "총리스트"]:
                df2.loc[i2, '1'] = df.loc[i, "총리스트"]
            elif df1.loc[i2, '2'] == df.loc[i, "총리스트"]:
                df2.loc[i2, '2'] = df.loc[i, "총리스트"]
            elif df1.loc[i2, '3'] == df.loc[i, "총리스트"]:
                df2.loc[i2, '3'] = df.loc[i, "총리스트"]

    str02 = ""
    str02 += df2.loc[0, "3"]

    #for i3 in range(1,len(df2.index)):
    #	str02 +=","
    #	str02+=df2.loc[i,"3"]
    # df3=df2.sort_index()

    # str02=""
    # for i in range(0,len(df3.index)):
    # 	str02 +=","
    # 	str02 += df3.loc[i,"3"]

    # str02=""
    # for i in range(0,len(df.index)):
    # 	str02 +=","
    # 	str02 += df.loc[i,"총리스트"]

    #print(df.loc[0,"총리스트"])
    return HttpResponse(str02)
Example #26
def detect_face(face_file, max_results=4):
    client = vision.ImageAnnotatorClient()
    content = face_file.read()
    image = types.Image(content=content)
    return client.face_detection(image=image).face_annotations
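
detect_face expects an already-opened binary file object. A short usage sketch ('face.jpg' is an illustrative file name):

with open('face.jpg', 'rb') as face_file:
    faces = detect_face(face_file, max_results=4)
    for face in faces:
        print(face.detection_confidence, face.joy_likelihood)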
Example #27
import openpyxl
import os
import xlwings as xw
import string
import pandas as pd
from google.cloud import vision
from google.cloud.vision import types

filepath = os.path.dirname(os.path.abspath(__file__))
filepath = filepath + "/apikey.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = filepath

wb = xw.Book.caller()
wb.sheets[0].range('B22').value = "I am processing the image analysis request..."

vision_client = vision.ImageAnnotatorClient()
image = types.Image()

def text_process(message):
    # Remove punctuation and rejoin the remaining characters
    no_punctuation = [char for char in message if char not in string.punctuation]
    no_punctuation = ''.join(no_punctuation)
    return no_punctuation

def run_twitter_analysis():
    file_label,file_face,file_logo,file_land = [],[],[],[]
    file_text,file_web,file_safe,file_color = [],[],[],[]
    labelnames,label_scores,logonames = [],[],[]
    landmarknames,text_info,webentities = [],[],[]
    angerArr,joyArr,surpriseArr = [],[],[]
    adult,medical,spoof,violence = [],[],[],[]
    f,r,g,b = [],[],[],[]
    sample_array = []
Example #28
def vision():
    global img_name
    global choices
    global decision
    # initial upload
    if (request.method == 'POST') and (request.form["submit-button"]
                                       == "Upload"):
        if 'file' not in request.files:
            flash('No file part')
            return render_template('vision.html')
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return render_template('vision.html')
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)

            save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(save_path)
            import io
            from google.cloud import vision
            from google.cloud.vision import types

            client = vision.ImageAnnotatorClient()

            with io.open(save_path, 'rb') as image_file:
                content = image_file.read()

            image = types.Image(content=content)

            response = client.text_detection(image=image)
            texts = response.text_annotations
            img_name = file.filename

            # Collect the detected text strings as the user's list of choices.
            choices = [_.description for _ in texts]

            return render_template('vision.html',
                                   img_name=img_name,
                                   choices=choices)

    # when the user hits "decide for me" or re-selects.
    if (request.method == 'POST') and (request.form["submit-button"]
                                       == "decide-for-me"):
        data = request.form
        choices = list(data.values())[:-1]
        decision = random.choice(choices)
        return render_template('vision.html',
                               img_name=img_name,
                               choices=choices,
                               decision=decision)

    # when the user hits "go with it", save the decision to the database.
    if (request.method == 'POST') and (request.form["submit-button"]
                                       == "go-with-it"):
        if g.user:
            options = ', '.join(choices)
            try:
                history = History(options=options, account_id=g.user.id)

                new_decision = Decision(decision=decision,
                                        account_id=g.user.id)
                db.session.add(history)
                db.session.add(new_decision)
                db.session.commit()

            except (DBAPIError, IntegrityError):
                flash('Something went wrong.')
                return render_template('vision.html',
                                       img_name=img_name,
                                       choices=choices)
        # hmm... maybe we should redirect the user somewhere else
        flash("Success! Decision made and saved into your user history!")
        return render_template('vision.html',
                               img_name=img_name,
                               choices=choices,
                               decision=decision)

    return render_template('vision.html')
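
The view depends on an allowed_file helper that is not shown. A common minimal sketch (the extension set is an assumption):

# Assumed helper for the view above; the allowed extensions are a guess.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

def allowed_file(filename):
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS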
Example #29
import io
import os

from google.cloud import vision
from google.cloud.vision import types

# Authentication: point the environment variable at the service-account key
os.environ[
    "GOOGLE_APPLICATION_CREDENTIALS"] = 'resources/cloudapi-2eba7a867f32.json'

client = vision.ImageAnnotatorClient()

file_name = os.path.join(os.path.dirname(__file__), 'resources/faulkner.jpg')

# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
    content = image_file.read()

image = types.Image(content=content)

# Performs label detection on the image file
response = client.label_detection(image=image)
labels = response.label_annotations

print('Labels:')
for label in labels:
    print(label.description)
Example #30
def get_all_tweets(screen_name):

    #Twitter only allows access to a user's most recent 3240 tweets with this method
    
    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    
    #initialize a list to hold all the tweepy Tweets
    alltweets = []    
    
    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=10)
    
    #save most recent tweets
    alltweets.extend(new_tweets)
    
    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    
    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        
        #all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=10, max_id=oldest)
        
        #save most recent tweets
        alltweets.extend(new_tweets)
        
        #update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        if len(alltweets) > 15:
            break
        print("...%s tweets downloaded so far" % (len(alltweets)))

    media_files = set()
    for status in alltweets:
        try:
            media = status.extended_entities.get('media', [])
        except Exception:
            media = status.entities.get('media', [])
        # print (media[0])
        if len(media) > 0:
            for i in range(len(media)):
                media_files.add(media[i]['media_url'])

    for media_file in media_files:
        print(media_file)
        wget.download(media_file)

    os.system("ffmpeg -framerate 1 -pattern_type glob -i '*.jpg' -c:v libx264 -r 30 -pix_fmt yuv420p out1.mp4")
    os.system("ffmpeg -framerate 1 -pattern_type glob -i '*.png' -c:v libx264 -r 30 -pix_fmt yuv420p out2.mp4")

    # for google vision
    client = vision.ImageAnnotatorClient()
    label_file = open("label.txt", "w")

    point = 0
    numlist = '0123456789'  # supports renaming up to 10 pictures
    OBJ = [pic for pic in listdir(".") if pic.endswith('jpg') or pic.endswith('png')]
    # print(OBJ)
    for i in OBJ:
        file_name = os.path.join(os.path.dirname(__file__), i)

        # Rename each downloaded picture to a simple numeric name
        new_name = numlist[point] + '.jpg'
        os.renames(file_name, new_name)
        print(file_name)
        print("renamed")
        point = point + 1

        # Loads the image into memory
        with io.open(new_name, 'rb') as image_file:
            content = image_file.read()

        image = types.Image(content=content)

        # Performs label detection on the image file
        response = client.label_detection(image=image)
        labels = response.label_annotations

        label_file.write('Labels for ' + new_name + ':\n')

        for label in labels:
            label_file.write(label.description + '\n')

    label_file.close()