Example #1
def faceRecognition(imgUser, imgBD):

    try:
        hasFace = faceDetect(imgUser)

        if isinstance(hasFace, dict):
            return hasFace
        elif hasFace != 1:
            raise Exception('Invalid photo! Try again or choose another photo.')
        else:
            # imageUser = face_recognition.load_image_file(imgUser)
            imageUser = imutils.url_to_image(imgUser)
            imageUser_encoding = face_recognition.face_encodings(imageUser)[0]

            # imageBD = face_recognition.load_image_file(imgBD)
            imageBD = imutils.url_to_image(imgBD)
            imageBD_encoding = face_recognition.face_encodings(imageBD)[0]

            # compare the two faces with a tolerance of 0.5 (lower is stricter)
            results = face_recognition.compare_faces([imageUser_encoding], imageBD_encoding, 0.5)

            if not results[0]:
                raise Exception('User not recognized! Try again or sign up.')
            else:
                return True

    except Exception as error:
        return {'message': {'title': 'Error',
                'content': str(error)},
                'status': 'error'}
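A minimal usage sketch of the function above; both URLs are hypothetical placeholders, and faceDetect (Example #26 below) must be in scope:

# hypothetical usage; both URLs are placeholders
result = faceRecognition(
    'https://example.com/selfie.jpg',        # photo submitted by the user
    'https://example.com/registered.jpg')    # photo stored for the account
if result is True:
    print('Face match confirmed.')
else:
    print(result['message']['content'])      # error dict built in the except block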
Example #2
    def test_requested_location_with_points(self):
        points = get_random_shape_points(SImg.SIZE[0], SImg.SIZE[1], 5)
        self.logger.info("Generated Random Points:\n{}".format(points))

        bounds = Navigation.get_static_map_bounds(SImg.LOC[0], SImg.LOC[1],
                                                  SImg.ZOOM, SImg.SIZE[0],
                                                  SImg.SIZE[1])

        self.logger.info("Current Img Loc Bounds:\n{}".format(str(bounds)))

        locations = Navigation.get_image_points_location(
            bounds, SImg.SIZE[0], SImg.SIZE[1], points)

        self.logger.info("Build Points Locations:\n{}".format(str(locations)))

        new_builder = SDirct.get_simple_direction_builder()
        new_builder.set_origin(locations[0])
        new_builder.set_destination(locations[0])  # round trip back to the origin
        new_builder.set_waypoints(locations)
        self.logger.info('Working with Direction Url:\n{}'.format(
            new_builder.build()))

        response = requests.get(new_builder.build())
        enc = get_enc_polylines(response)

        img_dirct = imutils.url_to_image(
            SImg.get_sample_img_path_builder(enc).build())
        self.show_built_images(points, img_dirct)
Example #3
def count_of_objects():
    source_image_url = "https://task3bucket.s3.ap-south-1.amazonaws.com/solar_system.jpg"
    # source_image_url = "https://task3bucket.s3.ap-south-1.amazonaws.com/google_logo.jpg"
    image = imutils.url_to_image(source_image_url)

    # resize the given image
    adjusted_img = cv2.resize(image, (1000, 1000), interpolation=cv2.INTER_LINEAR)
    # cv2.imshow("Resized_Image", adjusted_img)
    # cv2.waitKey(0)

    # convert to grayscale
    gray_img = cv2.cvtColor(adjusted_img, cv2.COLOR_BGR2GRAY)
    # cv2.imshow("Grayscale_image", gray_img)
    # cv2.waitKey(0)
    # applying threshold to grayscale image
    threshold_img = cv2.threshold(gray_img, 50, 255, cv2.THRESH_BINARY)[1]
    # cv2.imshow("Threshold", threshold_img)
    # cv2.waitKey(0)
    # find external contours in the thresholded image
    cnts = cv2.findContours(threshold_img, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_NONE)
    cnts = imutils.grab_contours(cnts)
    # loop over the contours
    i = 0
    # name = 'Google Logo'
    name = 'Solar_System'
    for c in cnts:
        if cv2.contourArea(c) > 640:
            # print(cv2.contourArea(c))
            i = i + 1
            cv2.drawContours(adjusted_img, [c], -1, (0, 0, 255), 5)
    mydict = {"Image_Name": name, "Count": i}
    return mydict
Example #4
def index(request):
    if request.method == 'POST':

        url = models.Url.objects.create(
            image_url=request.POST.get('image_url', ''))

        img = url_to_image(request.POST.get('image_url', ''))
        ih, iw, _ = img.shape
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        url.save()

        for (x, y, w, h) in faces:
            top = round(y * 100 / ih, 2)
            right = round((iw - x - w) * 100 / iw, 2)
            left = round(x * 100 / iw, 2)
            bottom = round((ih - y - h) * 100 / ih, 2)
            bb = models.BoundingBox.objects.create(top=top,
                                                   right=right,
                                                   left=left,
                                                   bottom=bottom,
                                                   image=url)
            bb.save()

        return redirect('/face')

    urls = models.Url.objects.all()

    context = {'image_urls': urls}
    return render(request, 'face/index.html', context)
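A worked check of the percentage arithmetic above, with made-up numbers rather than real detections: a face at x=100, y=50, w=200, h=150 in a 1000x500 image stores left=10.0, top=10.0, right=70.0, bottom=60.0, and the pixel box can be recovered as follows.

# round-trip sketch for the percentage encoding (made-up numbers)
iw, ih = 1000, 500                        # image width and height
left, top, right, bottom = 10.0, 10.0, 70.0, 60.0
x = int(left * iw / 100)                  # 100
y = int(top * ih / 100)                   # 50
w = iw - int(right * iw / 100) - x        # 1000 - 700 - 100 = 200
h = ih - int(bottom * ih / 100) - y       # 500 - 300 - 50 = 150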
Example #5
def handwritten_ocr(url, save_img=False):
    '''
    Takes a URL, runs the image through the Google Cloud Vision
    document-text API, and returns the barcode, the OCR text and the
    cropped image segment.

    Saves the image as a JPG if save_img is True.
    '''

    try:
        img = imutils.url_to_image(url)
        # keep only the bottom 30% of the image
        img = img[int(img.shape[0] * 0.7):, :]

        client = vision.ImageAnnotatorClient()
        image = vision.types.Image()
        image.source.image_uri = url

        response = client.document_text_detection(image=image)
        text = response.full_text_annotation.text
        barcode = find_barcode(text)
        if barcode is None:
            barcode = '9999999'

        # Look for text in the right half of the image,
        # brighten the image and load results to dataframe
        img = img[:, int(img.shape[1] * 0.5):]

        if save_img:
            cv2.imwrite(str(barcode) + '.jpg', img)

        return barcode, text.replace('\n', ' '), img

    except Exception:
        return None, None, None
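find_barcode is referenced but not defined in this snippet. A minimal sketch of a helper with the same contract, assuming barcodes show up as long digit runs in the OCR text:

import re

def find_barcode(text, min_digits=7):
    # hypothetical helper: return the first run of min_digits or more digits, else None
    match = re.search(r'\d{%d,}' % min_digits, text)
    return match.group(0) if match else None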
Example #6
File: routes.py  Project: KaidDuong/kaidd
def search():
    # load the VP-Tree and hashes dictionary
    print("[INFO] loading VP-Tree and hashes...")
    tree_path = os.path.join(basedir, 'static', 'indexing', 'vptree.pickle')
    tree = pickle.loads(open(tree_path, "rb").read())

    hash_path = os.path.join(basedir, 'static', 'indexing', 'hashes.pickle')
    hashes = pickle.loads(open(hash_path, "rb").read())

    # load the input query image
    form = request.form.to_dict()
    file = request.files.to_dict()

    if form['link'] == '':
        filename = secure_filename(file['file'].filename)
        image_query = os.path.join(basedir, 'static', 'queries', filename)
        file['file'].save(image_query)
        query = '/queries/{}'.format(filename)
        image = cv2.imread(image_query)
    else:
        image_query = form['link']
        query = image_query
        image = url_to_image(image_query)

    # compute the hash for the query image, then convert it
    queryHash = dhash(image)
    queryHash = convert_hash(queryHash)

    # perform the search
    print("[INFO] performing search...")
    start = time.time()
    results = tree.get_all_in_range(queryHash, 20)
    results = sorted(results)
    end = time.time()
    print("[INFO] search took {} seconds".format(end - start))
    response = []
    # loop over the results
    for (d, h) in results:
        # grab all image paths in our dataset with the same hash
        paths = [
            '/' + '/'.join(path.split('\\')[5:]) for path in hashes.get(h, [])
        ]

        r = {'score': (20 - d) * 5, 'hash': h, 'paths': paths}
        # print("[INFO] {} total image(s) with d: {}, h: {}".format(
        #     len(resultPaths), d, h))
        # print(resultPaths)
        response.append(r)

    #res = json.dumps({'status': 'OK','message':'The Result of the search is displayed!', 'response' : response}, ensure_ascii=False)

    global result
    result = {
        'response': response,
        'time': round(end - start, 5),
        'query': query
    }
    return json.dumps({
        'status': 'OK',
        'message': 'The Result of the search is displayed!'
    })
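dhash and convert_hash are not shown in this snippet. A sketch following the common difference-hash recipe (an assumption; the project's own helpers may differ):

import cv2
import numpy as np

def dhash(image, hashSize=8):
    # resize to (hashSize + 1) x hashSize, then compare horizontally
    # adjacent pixels to build the hash bits
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(gray, (hashSize + 1, hashSize))
    diff = resized[:, 1:] > resized[:, :-1]
    return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])

def convert_hash(h):
    # coerce through float64 so the hash is comparable inside the VP-Tree
    return int(np.array(h, dtype="float64"))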
Example #7
    def mock(self):
        global image_roi
        if 'image_roi' not in globals():
            image_roi = imutils.url_to_image(
                "https://docs.google.com/uc?export=download&id=1TMmEYfFKbQ7bmneukBlelQCvgTo1DNsP"
            )
        image_roi_rand = image_roi.copy()

        char_set = string.ascii_uppercase + string.digits
        code = ''.join(random.sample(char_set * 6, 6))
        cv2.putText(image_roi_rand, code, (315, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        #cv2.rectangle(image_roi_rand, (310,50), (444,90), (0,0, 255), thickness= 4)

        image_roi_rand = imutils.rotate_bound(image_roi_rand,
                                              random.randrange(-25, 25))

        size = 1000, 1000, 3
        margin = 20

        x = np.clip(random.randrange(margin, 300), 0,
                    size[1] - image_roi_rand.shape[1] - margin)
        y = np.clip(random.randrange(margin, 300), 0,
                    size[0] - image_roi_rand.shape[0] - margin)

        image = np.zeros(size, dtype=np.uint8)
        image[y:y + image_roi_rand.shape[0],
              x:x + image_roi_rand.shape[1]] = image_roi_rand
        #cv2_imshow(image_roi_rand)
        return image, code
Example #8
    def search_shopping(self, filename, inp):
        frame_id = int(self.search_caption(filename, inp) / 2)
        urls = []
        stg_loc = db.child(
            f'{filename}/frames/{frame_id}/storage_loc').get().val()
        print(stg_loc)
        img = imutils.url_to_image(stg_loc)
        cv2.imwrite('./search.jpg', img)
        # retrieve the image
        ebay = db.child(f'{filename}/ebay/{frame_id}').get()
        for key, item in ebay.val().items():
            for key2, bb in item.items():
                new_img = img[bb['y_min']:bb['y_max'], bb['x_min']:bb['x_max']]
                cv2.imwrite('search.jpg', new_img)
                b64_img = base64.standard_b64encode(
                    open('search.jpg', 'rb').read())
                # generate boxes and upload
                data = {"image": b64_img, "title": f'{key2}.jpg'}

                session = requests.Session()
                response = session.post(API_ENDPOINT,
                                        headers=headers,
                                        data=data)

                if response.status_code == 200:
                    urls.append(response.json()['data']['link'])

                # only the first bounding box of each item is processed
                break

        # return the second of the video
        return 2 * frame_id, urls
Example #9
def test_read_file_from_url():
    url = "https://www.phineo.org/typo3temp/GB/9ed448bd3f.jpg"
    image = imutils.url_to_image(url)
    scale = Rating()
    ratings = scale.compute_ratings(image, IMAGE_TYPE.WIRK)
    assert ratings[WIRKUNGS.KONZEPT] == 4
Example #10
    def test_random_points_creation(self):
        img_builder = SImg.get_sample_img_builder()
        self.assertIsNotNone(img_builder)

        points = get_random_shape_points(SImg.SIZE[0], SImg.SIZE[1], edges=2)
        self.assertIsNotNone(points)
        self.assertEqual(len(points), 2)

        img_sample = imutils.url_to_image(img_builder.build())
        self.assertIsNotNone(img_sample)
Example #11
def test_image_case(image_url):

    img = imutils.url_to_image(image_url)
    img = cv2.resize(img, (80, 80))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    plt.imshow(img, cmap='gray')
    prediction = np.argmax(model.predict(img.reshape(1, 80, 80, 1)))
    print("Predicted emotion class:",
          get_emotion_from_class(prediction))
Example #12
    @staticmethod
    def show_built_images(points, img_dirct):
        img_builder = SImg.get_sample_img_builder()
        img_sample = imutils.url_to_image(img_builder.build())

        cv2.polylines(img_sample, [points.reshape((-1, 1, 2))],
                      True, (0, 0, 255),
                      thickness=5)

        cv2.imshow("ImageSample", img_sample)
        cv2.imshow('ImageDirections', img_dirct)
        cv2.waitKey(delay=86000)
Example #13
def get_skin_colors():

    compiled_skins = []
    i = 0
    for idx in range(len(artists_in_playlist)):
        image = imutils.url_to_image(artists_urls[idx][1])
        #image = imutils.url_to_image('https://images-na.ssl-images-amazon.com/images/I/61n2ctN6jBL._AC_SX522_.jpg')

        # Resize image to a width of 250
        image = imutils.resize(image, width=250)

        # Show image
        #plt.subplot(3, 1, 1)
        #plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        #plt.title("Original Image")
        # plt.show()

        # Apply Skin Mask
        skin = extractSkin(image)

        #plt.subplot(3, 1, 2)
        #plt.imshow(cv2.cvtColor(skin, cv2.COLOR_BGR2RGB))
        #plt.title("Thresholded  Image")
        # plt.show()

        # Find the dominant color. Default is 1; pass number_of_colors=N where N is the desired number of colors
        dominantColors = extractDominantColor(skin, hasThresholding=True)

        # Show the dominant color information
        print("Color Information")
        # pretty_print_data(dominantColors)
        rgb_avgs = get_color_averages(dominantColors)
        k = 3
        # with k = 3 these thresholds evaluate to R >= 195.5, G >= 142.1, B >= 114.6
        if ((rgb_avgs[0] >= 224.3 + (9.6 * (-k)))
                and ((rgb_avgs[1] >= 193.1 + (17.0 * (-k))))
                and ((rgb_avgs[2] >= 177.6 + (21.0 * (-k))))):
            compiled_skins.append([artists_urls[idx][0], "white"])
            i += 1
        elif ((rgb_avgs[0] < 224.3 + (9.6 * (-k)))
              and ((rgb_avgs[1] < 193.1 + (17.0 * (-k))))
              and ((rgb_avgs[2] < 177.6 + (21.0 * (-k))))):
            compiled_skins.append([artists_urls[idx][0], "black"])
            i += 1
        # Show in the dominant color as bar
        #print("Color Bar")
        #colour_bar = plotColorBar(dominantColors)
        #plt.subplot(3, 1, 3)
        #plt.axis("off")
        #plt.imshow(colour_bar)
        #plt.title("Color Bar")

        #plt.tight_layout()
        #plt.show()
    return compiled_skins
Example #14
    def test_directions_image_shape(self):
        response = self.test_builder_direction()
        enc = get_enc_polylines(response)
        self.assertIsNotNone(enc)

        image = imutils.url_to_image(
            SImg.get_sample_img_path_builder(enc).build())
        self.assertIsNotNone(image)

        cv2.imshow('Image', image)
        cv2.waitKey(delay=2000)
Example #15
def getImages(url):
    full_img = imutils.url_to_image(url)

    vaccinated = processImg(full_img[253:310, 100:623]) # amount of applied vaccinations
    second_doses = processImg(full_img[319:367, 100:612]) # amount of second doses applied
    # full_img[y1:y2, x1:x2] crops the image to a specific position

    # cv2.imwrite("vaccinated.jpeg", vaccinated)
    # cv2.imwrite("second_doses.jpeg", second_doses)
    ## Only uncomment these to troubleshoot!

    return getImgText(vaccinated), getImgText(second_doses)
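The [y1:y2, x1:x2] slicing convention noted in the comments, illustrated on a dummy array:

import numpy as np

frame = np.zeros((400, 700, 3), dtype=np.uint8)  # height 400, width 700
crop = frame[253:310, 100:623]                   # rows (y) first, then columns (x)
print(crop.shape)                                # (57, 523, 3)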
Example #16
def getFeatures(url):
    currentDT = datetime.datetime.now()
    print(str(currentDT) + " REQUEST ARRIVED\n")
    directory = "https://ruapfruitclassification.azurewebsites.net/images/"
    img = imutils.url_to_image(directory + url)
    dim = (100, 100)
    # resize image
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    img = Image.image(resized, "-")
    result = img.getFeatures()
    obj = {
        "R-avg": result[0],
        "G-avg": result[1],
        "B-avg": result[2],
        "Shape": result[3],
        "Energy": result[4],
        "Correlation": result[5],
        "Contrast": result[6],
        "Homogenity": result[7]
    }
    data = {
        "Inputs": {
            "input1": {
                "ColumnNames": ["Class", "R-avg", "G-avg", "B-avg", "Shape", "Energy", "Correlation", "Contrast", "Homogenity"],
                "Values": [["value", result[0], result[1], result[2], result[3], result[4], result[5], result[6], result[7]]]
            },
        },
        "GlobalParameters": {}
    }
    body = str.encode(json.dumps(data))
    url = 'https://ussouthcentral.services.azureml.net/workspaces/3381f16d57184d1e9b30dea2f1e70257/services/266c87bfb6a14e52aa3a66fbef7f5120/execute?api-version=2.0&details=true'
    api_key = '5+3eMTkiQZSMZbHJ32ggqc09WwZx3iSzDBWt4ec/mbkp5jESGWvkpgQfc79qU9MZQXwJEafpV+rT1oZhU/9Dwg==' # Replace this with the API key for the web service
    headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
    currentDT = datetime.datetime.now()
    print(str(currentDT) + " SENDING REQUEST TO AZURE")
    req = urllib.request.Request(url, body, headers)
    try:
        response = urllib.request.urlopen(req)
        result = response.read()
        currentDT = datetime.datetime.now()
        print(str(currentDT) + " RETURNING JSON TO CLIENT")
        return result
    except urllib.error.HTTPError as error:
        print("The request failed with status code: " + str(error.code))
        # the headers include the request ID and the timestamp, which are useful for debugging the failure
        print(error.info())
        return "error"
Example #17
def capture_img_feed(feed: str) -> tuple:
    """
    Capture an image from an image feed.

    :param feed: video url to capture frames from.
    :return: tuple of the image still and the capture timestamp.
    """
    try:
        start_time = str(time.time())
        frame = imutils.url_to_image(feed)
        return frame, start_time
    except Exception:
        logger.error(traceback.format_exc())
Example #18
def url_to_blur(url):
    # read the image from the URL
    img = imutils.url_to_image(url)

    # apply a Gaussian blur
    blur = cv2.GaussianBlur(img, (5, 5), 0)

    # save the blurred image
    cv2.imwrite('imageBLUR.jpg', blur)

    # display it
    cv2.imshow('display', blur)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #19
def runDressme():
    pic = request.form['pic']
    # Resize image to a width of 250
    image = imutils.url_to_image(pic)
    image = imutils.resize(image, width=250)
    skin = extractSkin(image)
    global dominantColors
    dominantColors = extractDominantColor(skin, hasThresholding=True)
    # print(pprint.pformat(dominantColors[0]))
    hexColor = toHex(int(dominantColors[0][0]), int(dominantColors[0][1]),
                     int(dominantColors[0][2]))

    return hexColor
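toHex is not defined in this snippet. A minimal stand-in with the same call shape (an assumption about the helper):

def toHex(r, g, b):
    # format three 0-255 channel values as an HTML-style hex string
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)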
Example #20
def get_image(uri):
    """
    Get an image from a local path or a remote URL.

    Parameters
    ----------
    uri : str
        local path or remote url

    Returns
    -------
    numpy.ndarray
        image data
    """
    if os.path.exists(uri):
        return cv2.imread(uri, cv2.IMREAD_UNCHANGED)
    else:
        return imutils.url_to_image(uri, cv2.IMREAD_UNCHANGED)
Example #21
def processImg(url):
    img = imutils.url_to_image(url)
    img = img[299:389,
              105:616]  # crops the image to a specific position [y1:y2, x1:x2]
    # in this case, it's cropping the image to the amount of applied vaccines position
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, img_bin = cv2.threshold(img_gray, 128, 255,
                               cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    img_gray = cv2.bitwise_not(img_bin)
    kernel = np.ones((2, 1), np.uint8)
    img = cv2.erode(img_gray, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=1)
    # credit to Hucker Marius for this portion of code.
    # it literally saved my life since imutils' tools weren't working the way I expected
    # https://towardsdatascience.com/optical-character-recognition-ocr-with-less-than-12-lines-of-code-using-python-48404218cccb
    # cv2.imwrite("img.jpeg", img)
    return getImgText(img)
Example #22
File: spider.py  Project: instautils/spider
    def run(self):
        username = self.target['username']
        try:
            image = url_to_image(self.target['profile_pic_url'])
            gender = self.detector.process(image)
            self.graph.add_edge(
                username,
                gender,
                self.follower['username'],
                self.follower['gender'],
            )
            return 1
        except Exception as e:
            log_event(
                'warn',
                'error on fetching user {} {}'.format(username, e),
            )
        return 0
Example #23
def test2():
    """Test license plate analyzer with an image
    """
    analyzer = LicensePlateDetector(at_movement=False)

    filename = "IMG_20170308_093511.jpg"
    dirname = "/home/sampsa/python3/tests/lprtest/RealImages/"
    img = imutils.url_to_image("file:" + dirname + filename)
    print("img=", img.shape)
    # img= (1232, 2048, 3)

    # writePng("kokkelis.png", img)

    result = analyzer(img)
    print("\nresult =", result, "\n")

    # run the analyzer a second time on the same image
    result = analyzer(img)
    print("\nresult =", result, "\n")
Example #24
def run():
    if response.startswith("https"):
        image = imutils.url_to_image(response)
    else:
        imgdata = base64.b64decode(response)
        print(response)
        imgOne = PIL.Image.open(io.BytesIO(imgdata))
        # PIL decodes to RGB; convert to BGR for OpenCV
        image = cv2.cvtColor(np.array(imgOne), cv2.COLOR_RGB2BGR)

    image = imutils.resize(image, width=250)

    skin = extractSkin(image)

    # Find the dominant color. Default is 1; pass number_of_colors=N where N is the desired number of colors
    dominantColors = extractDominantColor(skin, hasThresholding=True)

    # Show in the dominant color information
    print(dominantColors)
    return dominantColors
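extractSkin is used by several snippets on this page but never shown. A common HSV-threshold sketch of such a helper (an assumption, not the project's actual implementation):

import cv2
import numpy as np

def extractSkin(image):
    # keep only pixels whose hue/saturation/value fall in a skin-like range
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower = np.array([0, 48, 80], dtype=np.uint8)
    upper = np.array([20, 255, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.GaussianBlur(mask, (3, 3), 0)
    return cv2.bitwise_and(image, image, mask=mask)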
Example #25
def get_detection_pan(firebase_path):
    '''Takes a Firebase HTTP URL and returns
       the detections as a DataFrame plus the image.'''
    interpreter = tf.lite.Interpreter(model_path=os.path.join(
        WORKING_DIR,
        'common/Modeling_Code/pan_data_extraction_28_10_2019__model_tflite.tflite'
    ))
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    image = imutils.url_to_image(firebase_path)

    #req = urllib.urlopen(firebase_path)
    #arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
    #img = cv2.imdecode(arr, -1) # 'Load it as it is'

    if image is not None:
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
        cv2.imwrite(temp_file.name, image)

        x_img = load_img(temp_file.name, target_size=(300, 300))
        x_img = img_to_array(x_img) / 255
        img = np.expand_dims(x_img, axis=0)
        interpreter.set_tensor(input_details[0]['index'], img)
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])
        (height, width, _) = image.shape

        result_data = pd.DataFrame(output_data[0],
                                   columns=['y1', 'x1', 'y2', 'x2'])
        output_data = interpreter.get_tensor(output_details[1]['index'])
        result_data['class'] = list(output_data[0])
        output_data = interpreter.get_tensor(output_details[2]['index'])
        result_data['score'] = list(output_data[0])
        result_data['x1'] = (result_data['x1'] * width).astype(int)
        result_data['y1'] = (result_data['y1'] * height).astype(int)
        result_data['x2'] = (result_data['x2'] * width).astype(int)
        result_data['y2'] = (result_data['y2'] * height).astype(int)

        return result_data, image

    # the image failed to load; nothing to detect
    return None, None
Example #26
def faceDetect(img):
    try:
        loadAlgoritmo = cv2.CascadeClassifier(os.path.abspath('C:/Users/Ruty Ribeiro/Documents/MeusProjetos/TCC-Faculdade/backend/Haar/haarcascade_frontalface_default.xml'))

        imagem = imutils.url_to_image(img)
        # print(imagem)
        # imagem = cv2.imread(img)
        imagemcinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)

        faces = loadAlgoritmo.detectMultiScale(imagemcinza,
            scaleFactor=1.3,
            minNeighbors=5,
            minSize=(30, 30))

        return len(faces)

    except Exception as error:
        return {'message': {'title': 'Error',
                'content': str(error)},
                'status': 'error'}
Example #27
File: catalog.py  Project: chalendony/css
    def build(self):
        cat = []
        pages = self.list_all_pages()
        self.logger.debug(f"Total pages: {len(pages)}")
        rater = Rating()
        self.logger.info(f"Building Catalog .....")
        for page in pages:
            for project_url in self.get_projects(page):
                id_m = hashlib.md5(project_url.encode("utf-8")).hexdigest()
                url = base_url + project_url
                name_m = self.name(url)
                tagline_m = self.tagline(url)
                mission_m = self.mission(url)
                location_m = self.location(url)
                geo_reach_m = self.geo_reach(url)
                category_m = self.category(url)
                img_rating = self.rating(url)
                input_image = imutils.url_to_image(base_url + img_rating)
                rating_m = rater.compute_ratings(input_image, IMAGE_TYPE.WIRK)
                key_visual_m = self.key_visual(url)
                target_group_m = self.target_group(url)
                home_page_m = self.home_page(url)
                e = {
                    "id": id_m,
                    "name": name_m,
                    "tagline": tagline_m,
                    "mission": mission_m,
                    "location": location_m,
                    "geo_reach": geo_reach_m,
                    "category": category_m,
                    "rating": rating_m,
                    "key_visual": key_visual_m,
                    "target_group": target_group_m,
                    "home_page": home_page_m,
                }
                cat.append(e)
                #self.COUNTER = self.COUNTER + 1
                #if self.COUNTER == self.LIMIT:
                #    self.trydump(cat)
        self.logger.info(f"Catalog built with {len(cat)} records")
        return cat
Example #28
def work(event, context):
    #img = cv.imread('Googleimage.png')
    #img = cv.imread('Solarsystem.png')
    img = 'https://task3image.s3.ap-south-1.amazonaws.com/Solarsystem.png'
    logo = imutils.url_to_image(img)
    #img = cv.imread('salesforcelogo.png')
    #print(img.shape)
    img = cv.cvtColor(logo, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(img, (15, 15), 0)
    #ret, thresh = cv.threshold(blur, 127, 255, 0)
    ret3, th3 = cv.threshold(blur, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    #th2 = cv.adaptiveThreshold(blur, 255, cv.ADAPTIVE_THRESH_MEAN_C,
    #        cv.THRESH_BINARY_INV, 11, 1)
    (contours, hierarchy) = cv.findContours(th3, cv.RETR_TREE,
                                            cv.CHAIN_APPROX_SIMPLE)

    cv.drawContours(blur, contours, -1, (0, 255, 0), 3)
    c = 0
    for cnt in contours:

        #area = cv.contourArea(cnt)  # contour area
        perimeter = cv.arcLength(cnt, True)

        if perimeter > 150:
            cv.drawContours(img, [cnt], 0, (0, 255, 0), 2)
            cv.imshow('RGB', img)
            cv.waitKey(1000)
            print(len(cnt))
            c = c + 1

    # circle the last contour examined
    (x, y), radius = cv.minEnclosingCircle(cnt)
    center = (int(x), int(y))
    radius = int(radius)
    cv.circle(img, center, radius, (0, 255, 0), 2)

    cv.imshow("Image", blur)
    cv.waitKey(0)
    cv.destroyAllWindows()

    return "Number of objects in the Solarsystem: " + str(c)
Example #29
def get_pred(src, model, file_type, idx, img_size=IMG_SIZE):
    if file_type == 'file':
        x = np.asarray(bytearray(src), dtype="uint8")
        x = cv2.imdecode(x, cv2.IMREAD_COLOR)
        x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
    elif file_type == 'url':
        x = imutils.url_to_image(src)
        x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
    elif file_type == 'path':
        x = plt.imread(src)
    else:
        return 'Invalid file type.'

    x = cv2.resize(x, img_size)
    x = image.img_to_array(x)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    y = model.predict(x)
    preds = decode_predictions(y)
    preds = {l: float(p) for _, l, p in preds[0]}
    print('ended task #%d' % idx)
    return preds
Example #30
def detect_face(img, is_url=False):
    cropped_img = None
    if is_url:
        img = imutils.url_to_image(img)
    print("Shape of the image: ", img.shape)
    plt.imshow(img)
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    plt.imshow(gray_img, cmap="gray")
    faces = classifier.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5)
    if len(faces) == 0:
        print("no face found!")
    else:
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cropped_img = gray_img[y:y + h, x:x + w]  # the cropped face
            cropped_img = cv2.resize(
                cropped_img, (80, 80))  # this is the input size (80, 80, 1)
            cropped_img = cropped_img.reshape(80, 80, 1)
        # perform image classification here

    print("Detected faces: ", len(faces))
    plt.imshow(img)
    return cropped_img
cv2.imshow("Skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()

# 5. MATPLOTLIB
# INCORRECT: show the image without converting color spaces
plt.figure("Incorrect")
plt.imshow(cactus)

# CORRECT: convert color spaces before using plt.imshow
plt.figure("Correct")
plt.imshow(imutils.opencv2matplotlib(cactus))
plt.show()

# 6. URL TO IMAGE
# load an image from a URL, convert it to OpenCV format, and
# display it
url = "http://pyimagesearch.com/static/pyimagesearch_logo_github.png"
logo = imutils.url_to_image(url)
cv2.imshow("URL to Image", logo)
cv2.waitKey(0)
cv2.destroyAllWindows()

# 7. AUTO CANNY
# convert the logo to grayscale and automatically detect edges
gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
edgeMap = imutils.auto_canny(gray)
cv2.imshow("Original", logo)
cv2.imshow("Automatic Edge Map", edgeMap)
cv2.waitKey(0)
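For reference, imutils.auto_canny picks its two Canny thresholds from the image median; a sketch of the idea (the library's actual source may differ slightly):

import cv2
import numpy as np

def auto_canny_sketch(image, sigma=0.33):
    v = np.median(image)                      # median pixel intensity
    lower = int(max(0, (1.0 - sigma) * v))    # lower threshold just below the median
    upper = int(min(255, (1.0 + sigma) * v))  # upper threshold just above the median
    return cv2.Canny(image, lower, upper)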