Example #1
    def formZip(self, path):
        count = 0
        try:
            zipf = zipfile.ZipFile(path + '/Images.zip', 'w',
                                   zipfile.ZIP_DEFLATED)
            processed = False
            for files in os.listdir(path):
                if files.endswith('.jpg'):
                    if count <= 17:
                        zipf.write(os.path.join(path, files))
                        count = count + 1
                        os.remove(os.path.join(path, files))
                        processed = True
                    else:
                        zipf.close()
                        visual_recognition = VisualRecognitionV3(
                            '2016-05-20',
                            api_key=self.apikey[self.key_pointer %
                                                len(self.apikey)])
                        taskId = hashlib.sha224(str(
                            time.time()).encode()).hexdigest()
                        self.results[taskId] = visual_recognition.classify(
                            images_file=open(path + '/Images.zip', 'rb'))
                        print("Processing zip file")
                        os.remove(path + '/Images.zip')
                        zipf = zipfile.ZipFile(path + '/Images.zip', 'w',
                                               zipfile.ZIP_DEFLATED)
                        zipf.write(os.path.join(path, files))
                        os.remove(os.path.join(path, files))
                        count = 1
                        processed = True  # the fresh zip already holds this image
                        self.key_pointer += 1

            # If any images are still left to be processed
            if processed:
                zipf.close()
                visual_recognition = VisualRecognitionV3(
                    '2016-05-20', api_key=self.apikey[0])
                taskId = hashlib.sha224(str(time.time()).encode()).hexdigest()
                self.results[taskId] = visual_recognition.classify(
                    images_file=open(path + '/Images.zip', 'rb'))
                print("Processing zip file")
                os.remove(path + '/Images.zip')

            #Remove Images.zip which is empty
            if os.path.isfile(path + '/Images.zip'):
                os.remove(path + '/Images.zip')

        except Exception as e:
            print(str(e))
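The batching above zips up to 18 JPEGs per classify() call and carries the leftover batch via the processed flag. A minimal stdlib-only sketch of the same chunking idea, kept separate from the Watson SDK; the batch size of 18 is an assumption carried over from the count <= 17 check:

# Hypothetical helper: group .jpg files into batches of 18, mirroring the
# counter logic in formZip above. Standard library only.
import os

def jpg_batches(path, batch_size=18):
    batch = []
    for name in sorted(os.listdir(path)):
        if name.endswith('.jpg'):
            batch.append(os.path.join(path, name))
            if len(batch) == batch_size:
                yield batch
                batch = []
    if batch:  # leftover images, like the `processed` branch above
        yield batch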
Example #2
def vision_ibm_default_classifier_example():
    image_urls = [FRUIT_MAN_URL, CHUCK_NORRIS_URL, CAT_URL, UNICORN_MAN_URL]
    visual_classifier = VisualRecognitionV3(api_key=VISUAL_RECOGNITION_API_KEY, version='2016-05-20')
    results = __classify_all_images(image_urls, visual_classifier)
    dataframe = pandas.DataFrame(results)
    dataframe.to_csv(__get_file_path('ibm-classifier-predictions.csv'))
    print('All images have been classified. Verify the results in ibm-classifier-predictions.csv')
Example #3
def watson_test(img_path):

    visual_recognition = VisualRecognitionV3(
        version='2018-03-19', iam_apikey=credentials['watson_API']['API_key'])

    score = None
    threshold = 0.3

    with open(img_path, 'rb') as images_file:
        classes = visual_recognition.classify(
            images_file,
            threshold="{}".format(threshold),
            classifier_ids=credentials['watson_API']
            ['Test_Model']).get_result()
        pprint(classes)
        #get AI facial class model 2 scores
        try:
            score = float((json.dumps(
                classes["images"][0]["classifiers"][0]["classes"][0]["score"])
                           ))
        #score below threshold
        except:
            return False

    return score
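The bare except above treats any lookup failure as a score below the threshold. A small sketch of the same extraction with explicit exception types, assuming the nested images/classifiers/classes shape shown in the pprint output:

# Hypothetical helper: pull the first score out of a classify() result dict.
def top_score(classes):
    try:
        return float(classes["images"][0]["classifiers"][0]["classes"][0]["score"])
    except (KeyError, IndexError, TypeError, ValueError):
        # nothing classified above the threshold
        return None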
Example #4
def home(): 
    from ibm_watson import VisualRecognitionV3
    from ibm_cloud_sdk_core.authenticators import IAMAuthenticator 

    apikey = 'e_84LnMSEb2jcIdhBcOb2EYqQfXfeXBXR39q91HBn8X5'
    URL= 'https://api.us-south.visual-recognition.watson.cloud.ibm.com' 
    version = '2019-02-11'
    authenticator = IAMAuthenticator(apikey)

    visual_recognition = VisualRecognitionV3(
        version=version,
        authenticator=authenticator
    )

    visual_recognition.set_service_url(URL) 

    if request.method == 'POST':
        url = request.form['site']
    else:
        url = 'https://www.driven.co.nz/media/100004742/3er.jpg?width=820'

    classes = visual_recognition.classify(url=url).get_result()
    results = classes['images'][0]['classifiers'][0]['classes']
    
    category, car = stockChecker(results)
    return render_template('home.html', results = results, url = url, type = category, car = car)    
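The same client setup works without a hardcoded key. A minimal sketch that builds the classifier from environment variables, reusing only the IAMAuthenticator and set_service_url calls already used in home(); the variable names WATSON_VR_APIKEY and WATSON_VR_URL are assumptions:

# Hypothetical helper: construct the Visual Recognition client from
# environment variables instead of literals in source.
import os
from ibm_watson import VisualRecognitionV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

def build_visual_recognition(version='2019-02-11'):
    authenticator = IAMAuthenticator(os.environ['WATSON_VR_APIKEY'])
    client = VisualRecognitionV3(version=version, authenticator=authenticator)
    client.set_service_url(os.environ['WATSON_VR_URL'])
    return client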
Example #5
def upload():
    if request.method == 'POST':

        base64data = request.form['data']


        imgdata = base64.b64decode(base64data)
        filename = 'some_image.jpg'  # I assume you have a way of picking unique filenames
        with open(filename, 'wb') as f:
            f.write(imgdata)

        visual_recognition = VisualRecognitionV3(
            '2018-03-19',
            iam_apikey='JnRKAJQI379qv2FyKeREhFi757w3hYHNy-dK5hPkyovc')

        with open(filename, 'rb') as images_file:
            classes = visual_recognition.classify(
                images_file,
                threshold='0.55',
                classifier_ids='Confoodcious_1721487998').get_result()

        return (json.dumps(classes))

    else:
        return("error")
Example #6
def watsons():
    visual_recognition = VisualRecognitionV3('2018-03-19', iam_apikey='')

    files_Path = "media/"
    file_name_and_time = []

    for f_name in os.listdir(f"{files_Path}"):
        written_time = os.path.getctime(f"{files_Path}{f_name}")
        file_name_and_time.append((f"{files_Path}{f_name}", written_time))

    sorted_files = sorted(file_name_and_time, key=lambda x: x[1], reverse=True)

    recent_file = sorted_files[0]
    recent_file_name = recent_file[0]

    # media = os.path.join(os.getcwd(), 'media')
    # jpg_files = [file for file in glob.glob(os.path.join(media, '*.jpg'))]
    # jpg_files.sort(key=os.path.getmtime)

    # with open(jpg_files[-1], 'rb') as images_file:
    with open(recent_file_name, 'rb') as images_file:
        classes = visual_recognition.classify(
            images_file,
            threshold='0.6',
            classifier_ids='Chilipepper_1417755566').get_result()

    return classes, recent_file_name
Example #7
def upload():

    if request.method == 'POST':
        # Get the file from post request
        f = request.files['image']
        x = ""
        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'uploads',
                                 secure_filename(f.filename))
        f.save(file_path)
        visual_recognition = VisualRecognitionV3(
            '2018-03-19',
            iam_apikey='d1ritDZ_IYG2pcxYGByisbcKdrH7MOLSjptMbd5AN9Em')
        with open(file_path, 'rb') as images_file:
            classes = visual_recognition.classify(
                images_file,
                threshold='0.6',
                classifier_ids='DefaultCustomModel_1528303781').get_result()
            a = json.loads(json.dumps(classes, indent=3))
            preds = a['images'][0]['classifiers'][0]['classes']
            print(a)
            for i in preds:
                x = i['class']
                y = i['score']
                global z
                x = x + str(z)
                z = z + 1
                kir.update({x: y})
            print(kir)
        return x + " :" + str(y)
    return None
Example #8
    def __init__(self):

        self.classifier = VisualRecognitionV3(
            url=WATSON_VISUAL_REC['url'],
            version=WATSON_VISUAL_REC['version'],
            api_key=WATSON_VISUAL_REC['api_key'],
        )
Example #9
def classify_image(img):
    visual_recognition = VisualRecognitionV3(
        version='2018-03-19',
        iam_apikey='DgV4PyKhKM8hEkRU0f3qAf2w-J9bTkCL9hjGAt4i7LbF')

    with open(img, 'rb') as images_file:
        #print(img)
        classes = visual_recognition.classify(images_file,
                                              threshold='0.5',
                                              owners=["IBM"]).get_result()
        # j = json.dumps(classes, indent=2)
        items = classes['images'][0]['classifiers'][0]['classes']
        labels_scores_dict = {}

        for i in items:
            labels_scores_dict[i['class']] = i['score']

        #print(labels_scores_dict)
        print()

        results = []
        for k, v in labels_scores_dict.items():
            if k == 'police cruiser':
                results.append(k)
                return json.dumps(results)

        # keep the three highest-scoring remaining labels
        for key, value in sorted(labels_scores_dict.items(),
                                 key=operator.itemgetter(1),
                                 reverse=True)[:3]:
            if key != 'police cruiser':
                results.append(key)

        return json.dumps(results)
Example #10
def clasificarImagen(pUrlImagen):
    # if 'VCAP_SERVICES' in os.environ:
    #     services = json.loads(os.getenv('VCAP_SERVICES'))

    with open(r'AutosClasificados\core\config.json') as json_data_file:
        vConfig = json.load(json_data_file)
        vAPIVersion = vConfig["watsonVisualRecognition"]["vAPIVersion"]
        vAPIKey = vConfig["watsonVisualRecognition"]["vAPIKey"]
        vAPIClasificador = vConfig["watsonVisualRecognition"]["vIdClasificador"]

    vVisualRecognition = VisualRecognitionV3(vAPIVersion, api_key=vAPIKey)
    vResultado = json.loads(json.dumps(vVisualRecognition.classify(images_file=pUrlImagen, classifier_ids=vAPIClasificador), indent=2))
    vClase = 'Imagen No detectada'
    vScore = -1
    try:
        for vImagen in vResultado['images']:
            for vClasificador in vImagen['classifiers']:
                for vClases in vClasificador['classes']:
                    if vClases['score'] > vScore:
                        vClase = vClases['class']
                        vScore = vClases['score']

        return vClase, vScore
    except:
        vClase = 'Imagen No detectada'
        vScore = -1
        return vClase, vScore
Example #11
    def __init__(self):
        self.classifier = VisualRecognitionV3(
            '2018-03-19',
            iam_apikey=os.environ['IBM_VISION_API_KEY'],
            url=os.environ['IBM_VISION_URL'])
        self.search = FuzzySearchService()
        self.db = MariaDBService()
Example #12
def get_photos(public_tweets):
    visual_recognition = VisualRecognitionV3(config.myVersion2, iam_apikey=config.myIam_apikey2)
    unprofessional_photos = []
    alcohol_keywords = set(["beer", "wine", "shot", "beer bottle", "alcoholic beverage", "stout", "ale", "brew", "mixed drink"])
    nudity_keywords = set(["underwear", "undergarment", "lingerie", "bra", "panty", "underpant"])
    other_keywords = set(["drug", "drugs", "party", "illegal", "inappropriate"])
    num_of_images = 0
    for tweet in public_tweets:
        if 'media' in tweet.entities and num_of_images < 10:
            for image in tweet.entities['media']:
                print("Attempting to classify possible image...")
                num_of_images += 1
                try:
                    url = image['media_url']
                    classes_result = visual_recognition.classify(url=url).get_result() # classifies image

                    # Gets json data
                    classify_data = json.dumps(classes_result["images"][0]["classifiers"][0]["classes"], indent=2)
                    # Go through every class dictionary in the returned JSON data
                    for entry in json.loads(classify_data):
                        for key, value in entry.items():
                            if value in alcohol_keywords:
                                if(len(unprofessional_photos) == 0 or unprofessional_photos[-1]['tweet'] != tweet.full_text):
                                    unprofessional_photos.append({'tweet': tweet.full_text, 'id': tweet.id,'reason': 'alcohol'})
                            if value in nudity_keywords:
                                if(len(unprofessional_photos) == 0 or unprofessional_photos[-1]['tweet'] != tweet.full_text):
                                    unprofessional_photos.append({'tweet': tweet.full_text, 'id': tweet.id,'reason': 'nudity'})
                            if value in other_keywords:
                                if(len(unprofessional_photos) == 0 or unprofessional_photos[-1]['tweet'] != tweet.full_text):
                                    unprofessional_photos.append({'tweet': tweet.full_text, 'id': tweet.id,'reason': value})

                except:
                    print("Error classifying image")

    return(unprofessional_photos)
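The keyword checks above compare every value in each class dictionary against three sets. A compact sketch of the same idea using set intersection over the class names only; classes_result is assumed to have the usual images/classifiers/classes shape from classify():

# Hypothetical helper: return {reason: matched keywords} for one classify() result.
def flag_reasons(classes_result, keyword_sets):
    names = {c['class']
             for image in classes_result.get('images', [])
             for clf in image.get('classifiers', [])
             for c in clf.get('classes', [])}
    return {reason: sorted(names & keywords)
            for reason, keywords in keyword_sets.items()
            if names & keywords}

# e.g. flag_reasons(classes_result, {'alcohol': alcohol_keywords, 'nudity': nudity_keywords})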
Example #13
def trainWatson():

    names = ["empty", "full"]

    cam = initCamera()

    for i in range(2):

        if not os.path.exists(names[i]):
            os.makedirs(names[i])

        for j in range(50):
            takePhoto(cam, names[i] + '/' + names[i] + str(j) + ".jpg")
            print("CLICK")
            time.sleep(.3)


        zipf = zipfile.ZipFile(names[i] + ".zip", 'w', zipfile.ZIP_DEFLATED)
        zipdir(names[i], zipf)
        zipf.close()

        if i == 0:
            confirmation = input("Ready to proceed with full?: ")

            while confirmation != "yes":
                confirmation = input("Ready to proceed with full?: ")

    with open(join(dirname(__file__), 'full.zip'), 'rb') as full, \
        open(join(dirname(__file__), 'empty.zip'), 'rb') as empty:
      
        visual_recognition = VisualRecognitionV3('2016-05-20', api_key=ibm_auth)
        with open('class.py', 'w') as outfile:
            data = visual_recognition.create_classifier('TrashIdentifier', full_positive_examples=full, negative_examples=empty)
            json.dump(data, outfile, indent=2)
Example #14
def summarise_watson_img(src, options=False):
    '''
    IBM Bluemix service
    interested in the default classifier: images.classifiers.classes
    '''
    import json
    from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition
    
    classifier = VisualRecognition(
        '2016-05-20',
        api_key=config.IBM['credentials']['api_key'])

    result = classifier.classify(images_url=src)
    # debug
    print(json.dumps(result, indent=2))

    return result
Example #15
def visualrecog(x):
    #Visual recognition
    visual_recognition = VisualRecognitionV3(
        '2018-03-19',
        iam_apikey='ZCh5wehuL24VgR4VBcJCPg1ASZLMKrgjzoSeBW8QSE2Z')
    with open(x, 'rb') as images_file:
        classes1 = visual_recognition.classify(
            images_file,
            threshold='0.55',
            classifier_ids='celebrity_1626478899').get_result()
    #print(json.dumps(classes1))
    a1 = classes1['images'][0]['classifiers'][0]['classes'][0]['class']

    #print(a1)
    """with open('re.jpg', 'rb') as images_file:
        classes1 = visual_recognition.classify(
            images_file,
            threshold='0.6',
                    classifier_ids='cars_269609070').get_result()  
    a2=classes1['images'][0]['classifiers'][0]['classes'][0]['class']"""

    if a1 == ("teja"):
        string = "Open"
    else:
        string = "Don't open"
    return string
Example #16
def fridgescanner():
    x = mraa.Gpio(26)
    x.dir(mraa.DIR_OUT)
    x.write(255)
    #time.sleep(2)
    call(['./pic'])
    x.write(0)
    outFile = open('Output.txt', 'w')
    visual_recognition = VisualRecognitionV3('2016-05-20', api_key='cf40138eb6764ea300b5fafe571e3369bc4f6ce9')
    
    output = {}
    with open(join(dirname(__file__), 'cpp-headless-output-COLOR.png'), 'rb') as image_file:
        output = visual_recognition.classify(images_file=image_file)
        outFile.write(json.dumps(output, indent=2))
        outFile.close()
    
    
    
    inp = output.get('images')
    inp = inp[0].get('classifiers')
    
    out = []
    
    for group in inp:
        classes = []
        classes = group.get('classes')
        for desc in classes:
            if desc.get('score') >= THRESHOLD:
                out.append(desc.get('class'))
    
    print(out)
    with open('thingy.txt', 'w') as outfile:
        outfile.write('\n'.join(out))
Example #17
def main():
    print("Hello")
    count = 0
    sensor = Sensor(22)
    camera = PiCamera()
    camera.start_preview()
    while count < 1:
        sensor.waitFor(GPIO.RISING)
        state = sensor.getState()
        if state == 1:
            print("Sensor is %d" % sensor.getState())
            time.sleep(3)
            timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
            filepath = "/home/pi/Downloads/Project/Images/"
            filename = "image_" + timestamp + ".jpg"
            camera.capture(filepath + filename)
            count +=1
    camera.stop_preview()

    visual_recognition = VisualRecognitionV3('2016-05-20', api_key='81d3e76ab27c6e270571c1ee50cbdcb2448efa2f')
    with open(join(dirname(__file__), filepath + filename), 'rb') as image_file:
        results = visual_recognition.classify(images_file=image_file)
    data = json.dumps(results, indent=2)
    parsejson = ParseJson(data, filename)
    animaldata = parsejson.methodParse()
    print(animaldata.AnimalName)
    objwildlifedbhelper = WildlifeDBHelper(animaldata)
    objwildlifedbhelper.insert_animal()
    client = mqtt.Client()
    client.connect("172.27.246.86")
    f=open("/home/pi/Downloads/Project/Images/" + filename, "rb")
    fileContent = f.read()
    byteArr = bytearray(fileContent)
    client.publish("image",byteArr,0)
    client.loop_forever()
Example #18
def ask_watson_built_in(file_path):
    visual_recognition = VisualRecognitionV3('2018-03-19',
                                             iam_api_key='**********')
    with open(file_path, 'rb') as images:
        response = visual_recognition.detect_faces(images)
        g_a_f = response.items()[0][1][0].items()[1][1][0].items()
        return g_a_f
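The chained .items() indexing above is hard to follow and breaks on Python 3, where dict views are not subscriptable. A sketch that reads the first face by explicit keys instead; the faces/gender/age layout is the one indexed in detectAges() later in this listing, so treat it as an assumption here:

# Hypothetical helper: return the (gender, age) dicts of the first detected
# face, or None when no face was found.
def first_face_gender_age(response):
    images = response.get('images') or [{}]
    faces = images[0].get('faces') or []
    if not faces:
        return None
    face = faces[0]
    return face.get('gender', {}), face.get('age', {})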
Example #19
    def recognize_image(self, url1, file_description):
        visual_recognition = VisualRecognitionV3(
            '2018-11-11',
            iam_apikey='tbWfPukC9BkvAEDCbAewEGFwW6IjR7SJuZj1FzHOj_GG')
        data = {}
        data['image_url'] = url1
        data['wardrobe'] = 'yes'
        classes = visual_recognition.classify(
            url=url1, threshold='0.0',
            classifier_ids=["default"]).get_result()
        # resp_dict = json.loads(classes);
        res_dict = {}
        if len(classes['images']) > 0:
            if len(classes['images'][0]['classifiers']) > 0:
                if len(classes['images'][0]['classifiers'][0]) > 0:
                    preds = classes['images'][0]['classifiers'][0]['classes']
                    for obj in preds:
                        if float(obj['score']) > 0.0:
                            res_dict[obj['class']] = float(obj['score'])

        data['title'] = file_description
        result = ' '.join(res_dict)
        result = file_description + ' ' + result
        data['text'] = result

        return data
Example #20
def predict_malaria_watson():
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['image']

        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'uploads',
                                 secure_filename(f.filename))
        f.save(file_path)

        print(file_path)
        visual_recognition = VisualRecognitionV3(
            '2018-03-19',
            iam_apikey='4y6L0AfQ1fqVzbsVPdeQZfcASrHsxzk7mdOlFZjmJrpX')
        with open(file_path, 'rb') as images_file:
            classes = visual_recognition.classify(
                images_file,
                threshold='0.6',
                classifier_ids='DefaultCustomModel_1403471121').get_result()

        print(classes)
        # res = str(preds[0][0]) + " : " + str(preds[0][1])
        # if preds[0][0] > 0.5:
        #     return 'not ill'
        # elif preds[0][1] > 0.5:
        #     return 'ill'
        # else:
        #     return 'not found'
        return jsonify(classes)
    return "data"
Example #21
def validatePreview(request):
    if request.method == 'POST':
        img_data = request.POST.get('image')
        name = str(uuid.uuid4())

        response = urllib.request.urlopen(img_data)
        with open("images/" + name + ".png", "wb") as fh:
            fh.write(response.file.read())

        visual_recognition = VisualRecognitionV3(
            '2018-03-19',
            iam_apikey='GfBGcjYPrd-38OXXTbV1uPjKZ5Xp4o-20av0onxqqUIF')

        classes_result = visual_recognition.classify(
            images_file=open("images/" + name + ".png", 'rb')).get_result()

        classes = [
            (c['class'], c['score'])
            for c in classes_result["images"][0]["classifiers"][0]["classes"]
        ]
        classes = sorted(classes, key=lambda x: x[1], reverse=True)

        top = [c[0] for c in classes[:5]]

        return JsonResponse({'status': 'ok', 'top': top})
Example #22
def getWeather(imgData):
    visualRecognition = VisualRecognitionV3('2016-05-20',
                                            api_key=API_WATSON_ID)
    try:
        dataIA = visualRecognition.classify(images_file=imgData,
                                            classifier_ids=CLASSIFIER_ID,
                                            threshold=THRESHOLD)
        resultData = getClasses(dataIA)
        print("-----------")
        print(resultData)
        print("-----------")
        if (resultData['status']):
            if len(resultData['classes']) >= 2:
                resultText = englishTxt['RESPONSE_ONE'].format(
                    englishTxt[resultData['classes'][0]],
                    englishTxt[resultData['classes'][1]])
                resultText = resultText + englishTxt[
                    'RESPONSE_SELECTED'].format(
                        englishTxt[resultData['classes'][0]])
            elif len(resultData['classes']) == 1:
                resultText = englishTxt['RESPONSE_SELECTED'].format(
                    englishTxt[resultData['classes'][0]])
            else:
                resultText = englishTxt['I_CANT']
        else:
            if len(resultData['classes']) > 0:
                resultText = englishTxt['INVALID_IMAGE'].format(
                    resultData['classes'][0])
            else:
                resultText = englishTxt['NOT_CLASSES']
    except:
        resultText = englishTxt['I_CANT']
    return resultText
Example #23
def addkey(request):
    if request.method == 'POST':
        img_data = request.POST.get('image')
        name = str(uuid.uuid4())

        response = urllib.request.urlopen(img_data)
        with open("images/" + name + ".png", "wb") as fh:
            fh.write(response.file.read())

        visual_recognition = VisualRecognitionV3(
            '2018-03-19',
            iam_apikey='GfBGcjYPrd-38OXXTbV1uPjKZ5Xp4o-20av0onxqqUIF')

        classes_result = visual_recognition.classify(
            images_file=open("images/" + name + ".png", 'rb')).get_result()

        classes = [
            (c['class'], c['score'])
            for c in classes_result["images"][0]["classifiers"][0]["classes"]
        ]
        classes = sorted(classes, key=lambda x: x[1], reverse=True)

        top = [c for c in classes[:10]]

        signer = Signer.objects.get(email=request.POST.get('email'))
        signer.visual_fingerprint = json.dumps(top)
        signer.save()

        return JsonResponse({'status': 'ok'})
Example #24
def image_process():
    data = json.loads(request.get_data())
    img = data["file"].split(',')[1]
    img_byte = base64.decodebytes(img.encode())
    with open('./static/image/snapshot.jpg', 'wb') as f:
        f.write(img_byte)

    # Using IBM watson Visual Recognition Model
    visual_recognition = VisualRecognitionV3(
        '2018-03-19',  # version
        iam_api_key='vGG2QFloDD6S3Eo_4dGICrCHnNFtcCOFlDJDlhlgf4I4')  # your API key

    with open('./static/image/snapshot.jpg', 'rb') as images_file:
        classes = visual_recognition.classify(
            images_file,
            parameters=json.dumps({
                'classifier_ids': ["emotion_KDEF_919313430"]
            }))
    result = json.dumps(classes, indent=2)
    print(result)
    # Index the response dict directly instead of eval()-ing its JSON dump
    emo_class = classes["images"][0]["classifiers"][0]["classes"][0]["class"]
    emo_score = classes["images"][0]["classifiers"][0]["classes"][0]["score"]
    # emoji = mpimg.imread('static/image/' + emo_class+'.png')

    return jsonify({'result': emo_class, 'score': emo_score})
Example #25
def detectAges(filea):
	visual_recognition = VisualRecognitionV3('2016-05-20', api_key='31b4a7b4a13baf31bb492ac1e1e42b1ab5dcc62e')

	with open(join(dirname(__file__), 'test_image.png'), 'rb') as image_file:
		output_string = json.dumps(visual_recognition.detect_faces(images_file=image_file), indent=2)

	output_json = json.loads(output_string)
	faces = output_json["images"][0]["faces"]
	male_c = 0
	female_c = 0
	for i in range(len(faces)):
		print("person ", i)
		print("    gender:", faces[i]["gender"]["gender"])
		if faces[i]["gender"]["gender"] == "MALE" and faces[i]["gender"]["score"] > 0.5:
			male_c += 1
		if faces[i]["gender"]["gender"] == "FEMALE" and faces[i]["gender"]["score"] > 0.5:
			female_c += 1
		print("    gender score:", faces[i]["gender"]["score"])
		if len(faces[i]["age"]) > 2:
			print("    max age:", faces[i]["age"]["max"])
			print("    min age:", faces[i]["age"]["min"])
			print("    age score:", faces[i]["age"]["score"])
		else:
			print("    age data incomplete")
	if male_c > female_c:
		return 1
	else:
		return 0
Example #26
	def __init__(self, imagePath):
		#initialize parameters
		self.visual_recognition = VisualRecognitionV3(
		    '2018-03-19',
		    iam_apikey='Say7-Zq2Z-cv7FEhl8DI7C-0dhUcJ1d0AV3X-s9HJfbc')
		self.classifier_ids = ["food"]
		self.imagePath = imagePath
Example #27
def get_trainer_status():
    visual_recognition = VisualRecognitionV3(
        VisualRecognitionV3.latest_version, api_key=watson_key)

    print(
        json.dumps(visual_recognition.get_classifier('Tattoos_502109298'),
                   indent=2))
Example #28
def sort():
    try:
        images_file = request.files.get('images_file', '')
        visual_recognition = VisualRecognitionV3('2018-03-19', iam_apikey=apikey)
        global classifier_id
        if classifier_id == '':
            classifier_id = set_classifier()
            if classifier_id == '':
                return json.dumps(
                    {"status code": 500, "result": "Classifier not ready",
                        "confident score": 0})
        parameters = json.dumps({'classifier_ids': [classifier_id]})
        url_result = visual_recognition.classify(
                         images_file=images_file,
                         parameters=parameters).get_result()
        if len(url_result["images"][0]["classifiers"]) < 1:
            return json.dumps(
                    {"status code": 500, "result": "Image is either not "
                        "a waste or it's too blurry, please try it again.",
                        "confident score": 0})
        list_of_result = url_result["images"][0]["classifiers"][0]["classes"]
        result_class = ''
        result_score = 0
        for result in list_of_result:
            if result["score"] >= result_score:
                result_score = result["score"]
                result_class = result["class"]
        return json.dumps(
            {"status code": 200, "result": result_class,
                "confident score": result_score})
    except Exception:
        return json.dumps(
            {"status code": 500, "result": "Not an image",
                "confident score": 0})
Example #29
class IBM:
    api_key = os.environ["IBM_API_KEY"]
    visual_recognition = VisualRecognitionV3("2019-11-02", iam_apikey=api_key)

    @classmethod
    def classify_fashion_sense(cls, images_file):
        result = cls._classify(images_file)
        return cls._get_max_class(result)

    @classmethod
    def _classify(cls, file_name):
        print(f"{IMAGE_DIR}/new_{file_name}")
        url = urlopen(f"{IMAGE_DIR}/new_{file_name}")
        with open(f"images/new_{file_name}.png", "wb") as f:
            f.write(url.read())
        result = cls.visual_recognition.classify(
            open(f"images/new_{file_name}.png", "rb"),
            threshold="0.0",
            classifier_ids="DefaultCustomModel_1445172307",
        ).get_result()
        return result

    @classmethod
    def _get_max_class(cls, result):
        classifiers = result["images"][0]["classifiers"]
        fashion_classes = classifiers[0]["classes"]
        max_score = 0
        fashion_sense = None
        for element in fashion_classes:
            if max_score < element["score"]:
                max_score = element["score"]
                fashion_sense = element["class"]
        if fashion_sense is None:
            raise ValueError("not classified fashion sense.")
        return fashion_sense, max_score
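The score-comparison loop in _get_max_class (and the similar one in sort() above) can be collapsed into a single max() call. A minimal sketch over the same "classes" list shape; the helper name is an assumption:

# Hypothetical helper: pick the highest-scoring class from a classify()
# result's "classes" list. Returns (class, score), or (None, 0) when empty.
def best_class(fashion_classes):
    if not fashion_classes:
        return None, 0
    best = max(fashion_classes, key=lambda c: c["score"])
    return best["class"], best["score"]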
Example #30
def detecta_objetos(request):
    if request.method == 'POST':
        if request.FILES['image']:
            visual_recognition = VisualRecognitionV3('2018-03-19',
                                                     iam_apikey=API_KEY)
            image = request.FILES['image']
            car_results = visual_recognition.classify(images_file=image,
                                                      threshold='0.1',
                                                      classifier_ids=[
                                                          'default'
                                                      ]).get_result()
            json_data = json.dumps(car_results, indent=2)
            news_objects = json.loads(json_data)

            fs = FileSystemStorage()
            filename = fs.save(image.name, image)
            uploaded_file_url = fs.url(filename)
            return render(
                request, 'detecta_objetos.html', {
                    'uploaded_file_url':
                    uploaded_file_url,
                    'objetos':
                    news_objects['images'][0]['classifiers'][0]['classes'],
                })
    else:
        return render(request, 'detecta_objetos.html')
Example #31
def Image_design():

    visual_recognition = VisualRecognitionV3(
        '2016-05-20', api_key='1c5c0cf7321c6c8712c162f7b2153ae69364c974')

    with open(join(dirname(__file__), '11.png'), 'rb') as image_file:
        vs = json.dumps(
            visual_recognition.recognize_text(images_file=image_file),
            indent=2)

    text1 = json.loads(vs)
    text1 = text1["images"][0]["text"]
    text1 = json.dumps(text1, indent=2)
    text1 = text1.replace('\\n', ' ')
    text1 = text1.replace('\"', ' ')

    language_translation = LanguageTranslation(
        username='******',
        password='******')

    translation = language_translation.translate(text=text1,
                                                 source='en',
                                                 target='es')
    eslang = translation

    tone_analyzer = ToneAnalyzerV3(
        username='******',
        password='******',
        version='2016-05-19')

    tone = tone_analyze(text1)
    do_with_cassandra(vs, text1, eslang, tone)
    return '<p>IMAGE IDENTIFY: ' + vs + '</p>' + '<p>Spanish: ' + eslang + '</p>' + '<p>English: ' + text1 + '</p>' + '<p>TONE: ' + tone + '</p>'