def model_prediction(image_path, model, input_file, workflow_id):
    """Run a Clarifai workflow over an image URL and search for similar inputs.

    Parameters:
        image_path: URL of the image to analyse.
        model, input_file: kept for interface compatibility; not used here.
        workflow_id: id of the Clarifai workflow to run.

    Returns:
        (result_cust_model, result_search_model) where the first is a list of
        (concept_name, confidence) pairs with positive confidence, and the
        second is a list of input ids whose similarity score exceeds 0.5.
    """
    # Build the workflow against the globally configured Clarifai app.
    workflow = Workflow(app_clarifai.api, workflow_id=workflow_id)
    response_data = workflow.predict_by_url(url=image_path)

    # Custom model: keep (name, value) pairs for positively scored concepts.
    # (Replaces the append loop with a dead `else: pass` branch.)
    result_cust_model = [
        (concept['name'], concept['value'])
        for concept in response_data['results'][0]['outputs'][0]['data']['concepts']
        if concept['value'] > 0
    ]

    # Similar items: ids of stored inputs scoring above the 0.5 threshold.
    search_image = app_clarifai_search.inputs.search_by_image(url=image_path)
    result_search_model = [item.input_id for item in search_image
                           if item.score > 0.5]

    return result_cust_model, result_search_model
def check_image(
    browser,
    clarifai_api_key,
    img_tags,
    img_tags_skip_if_contain,
    logger,
    clarifai_models,
    workflow,
    probability,
    full_match=False,
    picture_url=None,
):
    """Inspect an image with Clarifai and decide whether commenting is allowed.

    Concept tags are collected either from the given workflow (when one is
    supplied) or from each model in ``clarifai_models``.  Returns a
    ``(should_comment, comments)`` tuple.
    """
    api = ClarifaiApp(api_key=clarifai_api_key)

    # Resolve the image to inspect: an explicit URL wins over the page scrape.
    img_link = get_imagelink(browser) if picture_url is None else picture_url

    # Collect concept tags, via workflow or model-by-model.
    clarifai_tags = []
    if workflow:
        wf = Workflow(api.api, workflow_id=workflow[0])
        wf_response = wf.predict_by_url(img_link)
        for output in wf_response['results'][0]['outputs']:
            clarifai_tags.extend(get_clarifai_tags(output, probability))
    else:
        for model in clarifai_models:
            model_response = get_clarifai_response(api, model, img_link)
            clarifai_tags.extend(
                get_clarifai_tags(model_response['outputs'][0], probability))

    # Hard veto: any tag from the skip list blocks commenting outright.
    if given_tags_in_result(img_tags_skip_if_contain, clarifai_tags):
        logger.info(
            'Not Commenting, image contains concept(s): "{}".'.format(
                ', '.join(
                    list(set(clarifai_tags) & set(img_tags_skip_if_contain)))
            )
        )
        return False, []

    # First matching rule wins: comment-rules return their comments,
    # block-rules abort with no comments.
    for (tags, should_comment, comments) in img_tags:
        matched = given_tags_in_result(tags, clarifai_tags, full_match)
        if should_comment and matched:
            return True, comments
        elif matched:
            logger.info(
                'Not Commenting, image contains concept(s): "{}".'.format(
                    ', '.join(list(set(clarifai_tags) & set(tags)))
                )
            )
            return False, []

    return True, []
from clarifai.rest import ClarifaiApp
from clarifai.rest import Workflow

# SECURITY: these API keys are committed in plain text.  They should be
# rotated and loaded from the environment or a config file instead.
CLARIFAI_DETECT_API_KEY = "f3f988543ee84fd4b9403f028859c9f0"
CLARIFAI_DATABASE_API_KEY = "e0e75fcd842a469abe0a7dadc3180d49"
CLARIFAI_WORKFLOW_ID = "main-workflow"
CLARIFAI_MODEL_ID = "train_model"

# Module-level clients: one app for detection, one for the input database.
app = ClarifaiApp(api_key=CLARIFAI_DETECT_API_KEY)
cl_database = ClarifaiApp(api_key=CLARIFAI_DATABASE_API_KEY)
workflow = Workflow(app.api, workflow_id=CLARIFAI_WORKFLOW_ID)
model = app.models.get(model_id=CLARIFAI_MODEL_ID)

# Scratch file that callers are expected to have written the image to.
TEMP_IMG = "temp.jpg"


def clarifai_workflow_detect():
    """Return the names of at most 8 concepts predicted for TEMP_IMG.

    Runs the configured workflow on the local temp image and extracts the
    concept names from the first output.
    """
    concepts = workflow.predict_by_filename(
        TEMP_IMG)['results'][0]['outputs'][0]['data']['concepts']
    # Slicing to 8 replaces the original duplicated "<= 8" / "> 8" branches.
    return [concept['name'] for concept in concepts[:8]]


def food_model_train(food_concept):
    """Upload TEMP_IMG labelled with *food_concept* and add the concept
    to the custom model so it can be trained on it later."""
    app.inputs.create_image_from_filename(TEMP_IMG, concepts=[food_concept])
    model.add_concepts([food_concept])
def test_workflow_predict_with_url(
        mock_http_client):  # type: (mock.Mock) -> None
    """predict_by_url must POST one URL input to the workflow results
    endpoint and return the parsed JSON response unchanged."""
    # Canned server reply: a single input run through two models
    # (moderation -> "safe"; general-v1.3 -> "train"/"railway").
    mock_execute_request = mock_request(mock_http_client, json_response="""
{
  "status": {
    "code": 10000,
    "description": "Ok"
  },
  "workflow": {
    "id": "@workflowID",
    "app_id": "@appID",
    "created_at": "2017-07-10T01:45:05.672880Z"
  },
  "results": [
    {
      "status": {
        "code": 10000,
        "description": "Ok"
      },
      "input": {
        "id": "@inputID",
        "data": {
          "image": {
            "url": "@inputURL"
          }
        }
      },
      "outputs": [
        {
          "id": "@outputID1",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2017-07-10T12:01:44.929928529Z",
          "model": {
            "id": "d16f390eb32cad478c7ae150069bd2c6",
            "name": "moderation",
            "created_at": "2017-05-12T21:28:00.471607Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "b42ac907ac93483484483a0040a386be",
              "created_at": "2017-05-12T21:28:00.471607Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              }
            }
          },
          "data": {
            "concepts": [
              {
                "id": "@conceptID11",
                "name": "safe",
                "value": 0.99999714,
                "app_id": "main"
              }
            ]
          }
        },
        {
          "id": "@outputID2",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2017-07-10T12:01:44.929941126Z",
          "model": {
            "id": "aaa03c23b3724a16a56b629203edc62c",
            "name": "general-v1.3",
            "created_at": "2016-02-26T23:38:40.086101Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "aa9ca48295b37401f8af92ad1af0d91d",
              "created_at": "2016-07-13T00:58:55.915745Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              }
            }
          },
          "data": {
            "concepts": [
              {
                "id": "@conceptID21",
                "name": "train",
                "value": 0.9989112,
                "app_id": "main"
              },
              {
                "id": "@conceptID22",
                "name": "railway",
                "value": 0.9975532,
                "app_id": "main"
              }
            ]
          }
        }
      ]
    }
  ]
}
""")

    app = ClarifaiApp()

    workflow = Workflow(app.api, workflow_id='@workflowID')
    response = workflow.predict_by_url('@url')

    # The client hands back the response body as a plain dict.
    assert response['workflow']['id'] == '@workflowID'
    assert response['results'][0]['outputs'][0]['data']['concepts'][0][
        'id'] == '@conceptID11'

    # Exactly one URL input must have been POSTed to the workflow endpoint.
    assert_request(
        mock_execute_request, 'POST', '/v2/workflows/@workflowID/results', """
{
  "inputs": [
    {
      "data": {
        "image": {
          "url": "@url"
        }
      }
    }
  ]
}
""")
def test_workflow_predict_with_base64(
        mock_http_client):  # type: (mock.Mock) -> None
    """predict_by_base64 must POST one base64-encoded image input to the
    workflow results endpoint and return the parsed JSON response."""
    # Canned server reply: a single base64 input run through two models
    # (food-items-v1.0 and general), each contributing concepts.
    mock_execute_request = mock_request(mock_http_client, json_response="""
{
  "status": {
    "code": 10000,
    "description": "Ok"
  },
  "workflow": {
    "id": "@workflowID",
    "app_id": "@appID",
    "created_at": "2017-06-15T15:17:30.462323Z"
  },
  "results": [
    {
      "status": {
        "code": 10000,
        "description": "Ok"
      },
      "input": {
        "id": "@inputID",
        "data": {
          "image": {
            "base64": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z/C/HgAGgwJ/lK3Q6wAAAABJRU5ErkJggg=="
          }
        }
      },
      "outputs": [
        {
          "id": "@outputID1",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2019-01-20T18:22:36.057985725Z",
          "model": {
            "id": "bd367be194cf45149e75f01d59f77ba7",
            "name": "food-items-v1.0",
            "created_at": "2016-09-17T22:18:59.955626Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "dfebc169854e429086aceb8368662641",
              "created_at": "2016-09-17T22:18:59.955626Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              },
              "train_stats": {}
            },
            "display_name": "Food"
          },
          "data": {
            "concepts": [
              {
                "id": "@conceptID11",
                "name": "raspberry",
                "value": 0.8684727,
                "app_id": "main"
              },
              {
                "id": "@conceptID12",
                "name": "strawberry",
                "value": 0.7979152,
                "app_id": "main"
              }
            ]
          }
        },
        {
          "id": "@outputID2",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2019-01-20T18:22:36.058002759Z",
          "model": {
            "id": "aaa03c23b3724a16a56b629203edc62c",
            "name": "general",
            "created_at": "2016-03-09T17:11:39.608845Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "aa9ca48295b37401f8af92ad1af0d91d",
              "created_at": "2016-07-13T01:19:12.147644Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              },
              "train_stats": {}
            },
            "display_name": "General"
          },
          "data": {
            "concepts": [
              {
                "id": "@conceptID21",
                "name": "design",
                "value": 0.9859183,
                "app_id": "main"
              },
              {
                "id": "@conceptID22",
                "name": "art",
                "value": 0.98318106,
                "app_id": "main"
              }
            ]
          }
        }
      ]
    }
  ]
}
""")

    app = ClarifaiApp()

    workflow = Workflow(app.api, workflow_id='@workflowID')
    # TINY_IMAGE_BASE64 is the shared 1x1-pixel PNG fixture; it must match
    # the base64 payload asserted in the request body below.
    response = workflow.predict_by_base64(TINY_IMAGE_BASE64)

    assert response['workflow']['id'] == '@workflowID'
    assert response['results'][0]['outputs'][0]['data']['concepts'][0][
        'id'] == '@conceptID11'

    # Exactly one base64 input must have been POSTed to the workflow endpoint.
    assert_request(
        mock_execute_request, 'POST', '/v2/workflows/@workflowID/results', """
{
  "inputs": [
    {
      "data": {
        "image": {
          "base64": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z/C/HgAGgwJ/lK3Q6wAAAABJRU5ErkJggg=="
        }
      }
    }
  ]
}
""")
def test_bulk_workflow_predict_with_url(
        mock_http_client):  # type: (mock.Mock) -> None
    """Bulk predict must POST all inputs plus the output_config in one
    request and return the per-input results in order."""
    # Canned server reply: two URL inputs, each with outputs from two models.
    # The first input's food-model output has empty data (no concepts passed
    # the output_config filter).
    # NOTE(review): the fixture mixes "@modelVersionID1" and "@modelVersion1"
    # placeholders -- kept as-is, they are opaque to the assertions.
    mock_execute_request = mock_request(mock_http_client, json_response="""
{
  "status": {
    "code": 10000,
    "description": "Ok"
  },
  "workflow": {
    "id": "@workflowID",
    "app_id": "@appID",
    "created_at": "2017-06-15T15:17:30.462323Z"
  },
  "results": [
    {
      "status": {
        "code": 10000,
        "description": "Ok"
      },
      "input": {
        "id": "@inputID1",
        "data": {
          "image": {
            "url": "@url1"
          }
        }
      },
      "outputs": [
        {
          "id": "@outputID11",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2019-01-29T17:36:23.736685542Z",
          "model": {
            "id": "@modelID1",
            "name": "food-items-v1.0",
            "created_at": "2016-09-17T22:18:59.955626Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "@modelVersionID1",
              "created_at": "2016-09-17T22:18:59.955626Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              },
              "train_stats": {}
            },
            "display_name": "Food"
          },
          "data": {}
        },
        {
          "id": "@outputID12",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2019-01-29T17:36:23.736712374Z",
          "model": {
            "id": "@modelID2",
            "name": "general",
            "created_at": "2016-03-09T17:11:39.608845Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "@modelVersion2",
              "created_at": "2016-07-13T01:19:12.147644Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              },
              "train_stats": {}
            },
            "display_name": "General"
          },
          "data": {
            "concepts": [
              {
                "id": "@conceptID11",
                "name": "people",
                "value": 0.9963381,
                "app_id": "main"
              },
              {
                "id": "@conceptID12",
                "name": "one",
                "value": 0.9879056,
                "app_id": "main"
              },
              {
                "id": "@conceptID13",
                "name": "portrait",
                "value": 0.9849082,
                "app_id": "main"
              }
            ]
          }
        }
      ]
    },
    {
      "status": {
        "code": 10000,
        "description": "Ok"
      },
      "input": {
        "id": "@inputID2",
        "data": {
          "image": {
            "url": "@url2"
          }
        }
      },
      "outputs": [
        {
          "id": "@outputID21",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2019-01-29T17:36:23.736685542Z",
          "model": {
            "id": "@modelID1",
            "name": "food-items-v1.0",
            "created_at": "2016-09-17T22:18:59.955626Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "@modelVersion1",
              "created_at": "2016-09-17T22:18:59.955626Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              },
              "train_stats": {}
            },
            "display_name": "Food"
          },
          "data": {
            "concepts": [
              {
                "id": "@concept21",
                "name": "spatula",
                "value": 0.9805687,
                "app_id": "main"
              }
            ]
          }
        },
        {
          "id": "@outputID22",
          "status": {
            "code": 10000,
            "description": "Ok"
          },
          "created_at": "2019-01-29T17:36:23.736712374Z",
          "model": {
            "id": "@modelID2",
            "name": "general",
            "created_at": "2016-03-09T17:11:39.608845Z",
            "app_id": "main",
            "output_info": {
              "message": "Show output_info with: GET /models/{model_id}/output_info",
              "type": "concept",
              "type_ext": "concept"
            },
            "model_version": {
              "id": "@modelVersion2",
              "created_at": "2016-07-13T01:19:12.147644Z",
              "status": {
                "code": 21100,
                "description": "Model trained successfully"
              },
              "train_stats": {}
            },
            "display_name": "General"
          },
          "data": {
            "concepts": [
              {
                "id": "@conceptID31",
                "name": "eyewear",
                "value": 0.99984586,
                "app_id": "main"
              },
              {
                "id": "@conceptID32",
                "name": "lens",
                "value": 0.999823,
                "app_id": "main"
              },
              {
                "id": "@conceptID33",
                "name": "eyeglasses",
                "value": 0.99980056,
                "app_id": "main"
              }
            ]
          }
        }
      ]
    }
  ]
}
""")

    app = ClarifaiApp()

    workflow = Workflow(app.api, workflow_id='@workflowID')
    # Two inputs plus an output filter (top 3 concepts, min value 0.5).
    response = workflow.predict(
        [Image(url='@url1'), Image(url='@url2')],
        ModelOutputConfig(min_value=0.5, max_concepts=3))

    assert response['workflow']['id'] == '@workflowID'
    # Concepts of the first input come from its second output (the first
    # output's data is empty in the fixture).
    assert response['results'][0]['outputs'][1]['data']['concepts'][0][
        'id'] == '@conceptID11'

    # Both inputs and the output_config must be serialized into one request.
    assert_request(
        mock_execute_request, 'POST', '/v2/workflows/@workflowID/results', """
{
  "inputs": [
    {
      "data": {
        "image": {
          "url": "@url1"
        }
      }
    },
    {
      "data": {
        "image": {
          "url": "@url2"
        }
      }
    }
  ],
  "output_config": {
    "max_concepts": 3,
    "min_value": 0.5
  }
}
""")
import json

from clarifai.rest import ClarifaiApp
from clarifai.rest import Image as ClImage
from clarifai.rest import Workflow

# SECURITY: the API key is committed in plain text; rotate it and load it
# from the environment (e.g. CLARIFAI_API_KEY) instead.
app = ClarifaiApp(api_key='f721938da77f4cd8b508f8fe61265812')
workflow = Workflow(app.api, workflow_id="workflow-1")

# Predict by URL through the workflow.  (For a local file, use
# workflow.predict_by_filename(path) instead.)
image = ClImage(url='https://samples.clarifai.com/demographics.jpg')
response = workflow.predict([image])

print(json.dumps(response, sort_keys=True, indent=1))
def __init__(self):
    """Wire up the external services: Clarifai (vision) and Datamuse (words)."""
    # Clarifai app authenticated via the project config; its low-level API
    # handle is shared with the workflow object.
    clarifai_app = ClarifaiApp(api_key=config.get_api_key())
    self.app = clarifai_app
    self.workflow = Workflow(clarifai_app.api, workflow_id="rebus-workflow")
    # Datamuse client used for "sounds like" word lookups.
    self.api = datamuse.Datamuse()
class Rebust:
    """Solve rebus puzzles: combine image predictions and word arithmetic
    into candidate words, then rank 'sounds like' matches via Datamuse."""

    MAX_RESULTS = 5        # candidates kept per sounds-like / image query
    MAX_WORD_RESULTS = 10  # candidates kept per rebus word

    def __init__(self):
        self.api = datamuse.Datamuse()
        self.app = ClarifaiApp(api_key=config.get_api_key())
        self.workflow = Workflow(self.app.api, workflow_id="rebus-workflow")

    # given an array of strings and operators, concat individual rebus word
    def concat_word(self, rebus):
        """Evaluate a space-separated rebus like 'c + at' or 'chair - ch'.

        '+' joins the next token; '-' removes the first occurrence of the
        next token from the accumulated string.
        """
        parts = re.split(" ", rebus)
        # FIX: local was named `str`, shadowing the builtin.
        result = ""
        for i in range(0, len(parts)):
            if parts[i] == "-":
                result = result.replace(parts[i + 1], '', 1)
                # FIX: removed dead `i += 2` -- reassigning the loop variable
                # has no effect in a `for ... in range` loop; the
                # `parts[i - 1] != "-"` guard below already skips the operand.
            elif parts[i] != "+" and parts[i - 1] != "-":
                result += parts[i]
        return result

    def get_syllables(self, word):
        """Heuristic syllable count: count vowel groups, discount a
        trailing 'e', and floor the result at 1."""
        word = word.lower()
        # FIX: guard empty input (previously raised IndexError on word[0]);
        # return the floor value of 1, consistent with the count==0 rule.
        if not word:
            return 1
        count = 0
        vowels = "aeiouy"
        if word[0] in vowels:
            count += 1
        for index in range(1, len(word)):
            if word[index] in vowels and word[index - 1] not in vowels:
                count += 1
        if word.endswith("e"):
            count -= 1
        if count == 0:
            count += 1
        return count

    def get_sounds_like(self, word, n_syll):
        """Top MAX_RESULTS dictionary words that sound like *word* and have
        at most *n_syll* syllables, best Datamuse score first."""
        res = self.api.words(sl=word, v='enwiki', max=self.MAX_RESULTS)
        scored = []
        for w in res:
            # Keep only real dictionary words (known to WordNet).
            if wordnet.synsets(w['word']):
                if self.get_syllables(w['word']) <= n_syll:
                    heapq.heappush(scored, (w['score'], w['word']))
        return [entry[1] for entry in heapq.nlargest(self.MAX_RESULTS, scored)]

    def get_image_predictions(self, type, img):
        """Top concept names (<= 2 syllables) predicted for an image given
        by URL (type == 'url') or by a (kind, path) tuple otherwise."""
        if type == "url":
            response = self.workflow.predict_by_url(img)
        else:
            response = self.workflow.predict_by_filename(img[1])
        results = []
        for m in response["results"][0]["outputs"]:
            for x in m["data"]["concepts"]:
                heapq.heappush(results, (x["value"], x["name"]))
        top_results = []
        for i in heapq.nlargest(5, results):
            if self.get_syllables(i[1]) <= 2:
                top_results.append(i[1])
        return top_results

    def generate_combos(self, tokens):
        """Cross-concatenate candidate lists: each token is either a literal
        string or a list of alternatives; returns every combination.

        FIX: a literal string token used to be added with `arr += tk`,
        which extends the list character-by-character (the commented-out
        `arr.append(tk)` in the original shows the intent); it is now
        treated as a single-candidate list.
        """
        combos = []
        for tk in tokens:
            candidates = tk if isinstance(tk, list) else [tk]
            if not combos:
                combos = list(candidates)
            else:
                combos = [prefix + cand
                          for prefix in combos for cand in candidates]
        # (Removed the debug `print(arr)` left in the original.)
        return combos

    def solve_word(self, word):
        """Resolve one rebus word: expand str/img/url parts into candidate
        tokens, combine them, and rank sounds-like matches."""
        tokens = []
        for x in word:
            if x[0] == "str":
                tokens.append(x[1])
            elif x[0] == "img":
                tokens.append(self.get_image_predictions("img", x[1]))
            elif x[0] == "url":
                tokens.append(self.get_image_predictions("url", x[1]))
        # run through list, if there is a list of words, see what makes
        # the most sense
        poss = self.generate_combos(tokens)
        poss = poss[0:self.MAX_WORD_RESULTS]
        final = []
        for x in poss:
            res = self.get_sounds_like(x, 4)
            # FIX: `res != None` -> identity comparison (PEP 8).
            if res is not None:
                final += res[0:3]
        return final[0:self.MAX_WORD_RESULTS]

    def parse_rebus(self, rebus):
        """Solve each word of the rebus.

        Input: list of words; each word is a list of (type, payload) tuples
        where type is 'str', 'img' or 'url'.
        """
        guess = []
        for word in rebus:
            guess.append(self.solve_word(word))
        return guess
# Variant of InstaPy's check_image that additionally screenshots and saves a
# crop of the checked image for offline review.
# NOTE(review): contains a hard-coded user-specific output path and debug
# print() calls -- left untouched here; flagged for cleanup.
def check_image(browser,
                clarifai_api_key,
                img_tags,
                img_tags_skip_if_contain,
                logger,
                clarifai_models,
                workflow,
                probability,
                full_match=False,
                check_video=False,
                proxy=None,
                picture_url=None,
                loca=None):
    # Returns (proceed, comments, clarifai_tags); on any exception only the
    # error is logged and the function implicitly returns None.
    try:
        # NOTE(review): this string sits inside `try`, so it is a no-op
        # statement rather than the function's docstring.
        """Uses the link to the image to check for invalid content in the
        image. If a workflow has been selected, get list of tags from
        Clarifai API by checking link against models included in the
        workflow. If a workflow hasn't been provided, InstaPy will check
        images against given model( s)"""
        clarifai_api = ClarifaiApp(api_key=clarifai_api_key)
        clarifai_tags = []

        if proxy is not None:
            clarifai_api.api.session.proxies = {'https': proxy}

        # Set req image or video source URL to given one or get it from
        # current page
        if picture_url is None:
            source_link = get_source_link(browser)
        else:
            source_link = [picture_url]

        # No image in page
        if not source_link:
            return True, [], []

        # Check image using workflow if provided. If no workflow,
        # check image using model(s)
        if workflow:
            clarifai_workflow = Workflow(clarifai_api.api,
                                         workflow_id=workflow[0])
            # If source is video, checks keyframe against models as video
            # inputs not supported when using workflows
            if source_link[0].endswith('mp4'):
                # presumably source_link[1] is the keyframe URL for videos
                # -- TODO confirm against get_source_link
                clarifai_response = clarifai_workflow.predict_by_url(
                    source_link[1])
            else:
                clarifai_response = clarifai_workflow.predict_by_url(
                    source_link[0])
            for response in clarifai_response['results'][0]['outputs']:
                results = get_clarifai_tags(response, probability)
                clarifai_tags.extend(results)
        else:
            for model in clarifai_models:
                clarifai_response = get_clarifai_response(
                    clarifai_api, model, source_link, check_video)
                results = get_clarifai_tags(clarifai_response['outputs'][0],
                                            probability)
                clarifai_tags.extend(results)

        logger.info('source_link {} got predicted result(s):\n{}'.format(
            source_link, clarifai_tags))

        # Will not comment on an image if any of the tags in
        # img_tags_skip_if_contain are matched
        if given_tags_in_result(img_tags_skip_if_contain, clarifai_tags):
            logger.info(
                'Not Commenting, image contains concept(s): "{}".'.
                format(', '.join(
                    list(set(clarifai_tags) & set(img_tags_skip_if_contain)))))
            return False, [], clarifai_tags

        for (tags, should_comment, comments) in img_tags:
            if should_comment and given_tags_in_result(tags, clarifai_tags,
                                                       full_match):
                # Locate the image element (fallback xpath if primary missing)
                # so the on-screen region can be cropped out of a screenshot.
                try:
                    element = browser.find_element_by_xpath(
                        read_xpath(get_source_link.__name__, "image"))
                except NoSuchElementException:
                    element = browser.find_element_by_xpath(
                        read_xpath(get_source_link.__name__, "image_alt"))
                print(element)
                location = element.location
                size = element.size
                print(location)
                png = browser.get_screenshot_as_png(
                )  # saves screenshot of entire page
                print("got png")
                im = Image.open(
                    BytesIO(png))  # uses PIL library to open image in memory
                print(im)
                left = location['x']
                top = location['y']
                right = location['x'] + size['width']
                bottom = location['y'] + size['height']
                im = im.crop((left, top, right, bottom))  # defines crop points
                print("crop")
                loc = ""
                if loca:
                    loc = "xxx" + loca + "xxx"
                # NOTE(review): hard-coded per-user path; the filename embeds
                # the predicted tags joined by "__".
                im.convert("RGB").save(
                    "/Users/andreasdickow/InstaPy/logs/andreasdickow/imgs/" +
                    loc + ("__".join(clarifai_tags)).replace(" ", "_") +
                    '.jpg')
                print("save")
                return True, comments, clarifai_tags
            elif given_tags_in_result(tags, clarifai_tags, full_match):
                logger.info(
                    'Not Commenting, image contains concept(s): "{}".'.format(
                        ', '.join(list(set(clarifai_tags) & set(tags)))))
                # now that we have the preliminary stuff out of the way time
                # to get that image :D
                try:
                    element = browser.find_element_by_xpath(
                        read_xpath(get_source_link.__name__, "image"))
                except NoSuchElementException:
                    element = browser.find_element_by_xpath(
                        read_xpath(get_source_link.__name__, "image_alt"))
                print(element)
                location = element.location
                size = element.size
                png = browser.get_screenshot_as_png(
                )  # saves screenshot of entire page
                im = Image.open(
                    BytesIO(png))  # uses PIL library to open image in memory
                left = location['x']
                top = location['y']
                right = location['x'] + size['width']
                bottom = location['y'] + size['height']
                im = im.crop((left, top, right, bottom))  # defines crop points
                loc = ""
                if loca:
                    loc = "xxx" + loca + "xxx"
                im.convert("RGB").save(
                    "/Users/andreasdickow/InstaPy/logs/andreasdickow/imgs/" +
                    loc + ("__".join(clarifai_tags)).replace(" ", "_") +
                    '.jpg')
                print("save")
                return False, [], clarifai_tags

        return True, [], clarifai_tags

    except Exception as err:
        logger.error('Image check error: {}'.format(err))
def check_image(
        browser,
        clarifai_api_key,
        img_tags,
        img_tags_skip_if_contain,
        logger,
        clarifai_models,
        workflow,
        probability,
        full_match=False,
        check_video=False,
        proxy=None,
        picture_url=None,
):
    """Uses the link to the image to check for invalid content in the image.

    If a workflow has been selected, get list of tags from Clarifai API by
    checking link against models included in the workflow. If a workflow
    hasn't been provided, InstaPy will check images against given model(s).

    Returns a ``(proceed, comments, clarifai_tags)`` tuple.
    """
    # FIX: the docstring used to sit *inside* the try block, where it was a
    # no-op string statement rather than the function's docstring.
    try:
        clarifai_api = ClarifaiApp(api_key=clarifai_api_key)
        clarifai_tags = []

        if proxy is not None:
            clarifai_api.api.session.proxies = {"https": proxy}

        # Set req image or video source URL to given one or get it from
        # current page
        if picture_url is None:
            source_link = get_source_link(browser)
        else:
            source_link = [picture_url]

        # No image in page
        if not source_link:
            return True, [], []

        # Check image using workflow if provided. If no workflow,
        # check image using model(s)
        if workflow:
            clarifai_workflow = Workflow(clarifai_api.api,
                                         workflow_id=workflow[0])
            # If source is video, checks keyframe against models as video
            # inputs not supported when using workflows
            if source_link[0].endswith("mp4"):
                clarifai_response = clarifai_workflow.predict_by_url(
                    source_link[1])
            else:
                clarifai_response = clarifai_workflow.predict_by_url(
                    source_link[0])
            for response in clarifai_response["results"][0]["outputs"]:
                results = get_clarifai_tags(response, probability)
                clarifai_tags.extend(results)
        else:
            for model in clarifai_models:
                clarifai_response = get_clarifai_response(
                    clarifai_api, model, source_link, check_video)
                results = get_clarifai_tags(clarifai_response["outputs"][0],
                                            probability)
                clarifai_tags.extend(results)

        logger.info("source_link {} got predicted result(s):\n{}".format(
            source_link, clarifai_tags))

        # Will not comment on an image if any of the tags in
        # img_tags_skip_if_contain are matched
        if given_tags_in_result(img_tags_skip_if_contain, clarifai_tags):
            logger.info(
                'Not Commenting, image contains concept(s): "{}".'.format(
                    ", ".join(
                        list(set(clarifai_tags) &
                             set(img_tags_skip_if_contain)))))
            return False, [], clarifai_tags

        for (tags, should_comment, comments) in img_tags:
            if should_comment and given_tags_in_result(tags, clarifai_tags,
                                                       full_match):
                return True, comments, clarifai_tags
            elif given_tags_in_result(tags, clarifai_tags, full_match):
                logger.info(
                    'Not Commenting, image contains concept(s): "{}".'.format(
                        ", ".join(list(set(clarifai_tags) & set(tags)))))
                return False, [], clarifai_tags

        return True, [], clarifai_tags

    except Exception as err:
        logger.error("Image check error: {}".format(err))
        # FIX: previously fell through and returned None, which breaks
        # callers that unpack the 3-tuple. Fail open with the same value as
        # the "no image in page" branch.
        return True, [], []
#/v2/workflows/AMGeneral/results
import csv
import json

from clarifai.rest import ClarifaiApp
from clarifai.rest import Image as ClImage
from clarifai.rest import Workflow

# SECURITY: replace the placeholder key and load it from the environment
# rather than committing it to source control.
# FIX: the app and workflow were re-created inside the loop on every CSV
# row; they are constant and are now built once up front.
app = ClarifaiApp(api_key='MYKEY')
workflow = Workflow(app.api, workflow_id="AMGeneral")

# NOTE(review): binary modes with csv.reader/writer imply Python 2.
f = open("a.csv", "rb")
r = open("output.csv", "ab")
rr = csv.writer(r)
h = csv.reader(f)

# Each row's first column is expected to be an image URL.
for row in h:
    image = ClImage(url=(row[0]))
    response = workflow.predict([image])
    # Output 0: concept predictions; output 1: dominant colors.
    concepts = response['results'][0]['outputs'][0]['data']['concepts']
    colours = response['results'][0]['outputs'][1]['data']['colors']
    l = len(colours)
    # NOTE(review): assumes at least 8 concepts are returned;
    # raises IndexError otherwise.
    t0 = (concepts[0]['name'], concepts[0]['value'])
    t1 = (concepts[1]['name'], concepts[1]['value'])
    t2 = (concepts[2]['name'], concepts[2]['value'])
    t3 = (concepts[3]['name'], concepts[3]['value'])
    t4 = (concepts[4]['name'], concepts[4]['value'])
    t5 = (concepts[5]['name'], concepts[5]['value'])
    t6 = (concepts[6]['name'], concepts[6]['value'])
    t7 = (concepts[7]['name'], concepts[7]['value'])
import csv
import json

from clarifai.rest import ClarifaiApp
from clarifai.rest import Image as ClImage
from clarifai.rest import Workflow

# The CSV file should be URL links
# SECURITY: replace the placeholder key and load it from the environment
# rather than committing it to source control.
# FIX: the app and workflow were re-created inside the loop on every CSV
# row; they are constant and are now built once up front.
app = ClarifaiApp(api_key='KEY')
workflow = Workflow(app.api, workflow_id="WORKFLOW")

# NOTE(review): binary modes with csv.reader/writer imply Python 2.
f = open("input.csv", "rb")
r = open("output.csv", "ab")
rr = csv.writer(r)
h = csv.reader(f)

# Each row's first column is expected to be an image URL.
for row in h:
    image = ClImage(url=(row[0]))
    response = workflow.predict([image])
    # FIX: `print json.dumps(...)` is a Python 2 print statement; the
    # parenthesized form works on both Python 2 and 3.
    print(json.dumps(response))
    # Output 1: concept predictions; output 0: dominant colors.
    concepts = response['results'][0]['outputs'][1]['data']['concepts']
    colours = response['results'][0]['outputs'][0]['data']['colors']
    l = len(colours)
    # NOTE(review): assumes at least 8 concepts are returned;
    # raises IndexError otherwise.
    t0 = (concepts[0]['name'], concepts[0]['value'])
    t1 = (concepts[1]['name'], concepts[1]['value'])
    t2 = (concepts[2]['name'], concepts[2]['value'])
    t3 = (concepts[3]['name'], concepts[3]['value'])
    t4 = (concepts[4]['name'], concepts[4]['value'])
    t5 = (concepts[5]['name'], concepts[5]['value'])
    t6 = (concepts[6]['name'], concepts[6]['value'])
    t7 = (concepts[7]['name'], concepts[7]['value'])