def main(): # MetaMind makes it simple to create custom classifiers for both text and images # Create and use a custom image classifier # This classifier classifies an image as 'food' or 'animal' image_classifier = create_image_classifier() print 'Custom image classifier predictions:' pprint.pprint( image_classifier.predict([blueberry_pie_url, deer_url], input_type='urls')) # Create and use a custom text classifier # This classifier classifies text as 'rural' or 'urban' text_classifier = create_text_classifier() print 'Custom text classifier predictions:' pprint.pprint( text_classifier.predict( ['We sheared the sheep yesterday.', 'The traffic is loud.'], input_type='text')) # Use builtin general image classifier print 'MetaMind builtin general image classifier predictions:' pprint.pprint( general_image_classifier.predict([apple_pie_url, zebra_url], input_type='urls')) # Use builtin food image classifier print 'MetaMind builtin food image classifier predictions:' pprint.pprint( food_image_classifier.predict([apple_pie_url, salad_url], input_type='urls')) # Use builtin twitter sentiment classifier # This classifier finds tweets by a given key word, and classifies each tweet as # 'positive', 'negative' or 'neutral' print 'MetaMind builtin twitter sentiment classifier:' pprint.pprint(twitter_text_classifier.query_and_predict('trump')[:3]) # You can create a representation of a given classifier by passing its id into the constructor. # You can explore additional public classifiers here: https://www.metamind.io/vision/explore # You can explore your private classifiers and data here: https://www.metamind.io/my_stuff # You can find more details about the classifier used below here: https://www.metamind.io/classifiers/155 print 'Public sentiment classifier with id=155:' pprint.pprint( ClassificationModel(id=155).predict( "This is such a great, wonderful sentiment", input_type="text"))
def main(): # MetaMind makes it simple to create custom classifiers for both text and images # Create and use a custom image classifier # This classifier classifies an image as 'food' or 'animal' image_classifier = create_image_classifier() print 'Custom image classifier predictions:' pprint.pprint(image_classifier.predict([ blueberry_pie_url, deer_url ], input_type='urls')) # Create and use a custom text classifier # This classifier classifies text as 'rural' or 'urban' text_classifier = create_text_classifier() print 'Custom text classifier predictions:' pprint.pprint(text_classifier.predict([ 'We sheared the sheep yesterday.', 'The traffic is loud.' ], input_type='text')) # Use builtin general image classifier print 'MetaMind builtin general image classifier predictions:' pprint.pprint(general_image_classifier.predict([apple_pie_url, zebra_url], input_type='urls')) # Use builtin food image classifier print 'MetaMind builtin food image classifier predictions:' pprint.pprint(food_image_classifier.predict([apple_pie_url, salad_url], input_type='urls')) # Use builtin twitter sentiment classifier # This classifier finds tweets by a given key word, and classifies each tweet as # 'positive', 'negative' or 'neutral' print 'MetaMind builtin twitter sentiment classifier:' pprint.pprint(twitter_text_classifier.query_and_predict('trump')[:3]) # You can create a representation of a given classifier by passing its id into the constructor. # You can explore additional public classifiers here: https://www.metamind.io/vision/explore # You can explore your private classifiers and data here: https://www.metamind.io/my_stuff # You can find more details about the classifier used below here: https://www.metamind.io/classifiers/155 print 'Public sentiment classifier with id=155:' pprint.pprint(ClassificationModel(id=155).predict("This is such a great, wonderful sentiment", input_type="text"))
import sys import os from metamind.api import ClassificationData, ClassificationModel, set_api_key, general_image_classifier warnings.filterwarnings("ignore") url = sys.argv[1] set_api_key("Xnxh4kNZgcykl9ePz4nfoRV7EVYW5BM9WDQGoMopzVObFJdvzR") # training_data = ClassificationData(private=True, data_type='image', name='training images') # training_data.add_samples([ # ('http://newsimg.bbc.co.uk/media/images/46310000/jpg/_46310103_r850082-andromeda_galaxy_(m31)-spl.jpg', 'galaxy'), # ('https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcSNB1gxHaGwKNV8r-AvYgST0PiM4t9YXU7e8XRELdTHGx50dqtUMg', 'galaxy'), # ('https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcT7KaboL9LLsjMTM346fphYW3fufnVN8zMxcG7FhvDahepCET0sXA', 'galaxy'), # ('https://maleficusamore.files.wordpress.com/2012/06/sirius20crop1.jpg', 'star'), # ('http://aetherforce.com/wp-content/uploads/2014/12/sun_stars_space_light_58237_1920x1180.jpg', 'star'), # ('http://cdn.spacetelescope.org/archives/images/publicationjpg/heic1312a.jpg', 'planet'), # ('http://orig00.deviantart.net/6290/f/2006/336/b/0/planet_stock_5_by_bareck.jpg', 'planet'), # ('https://upload.wikimedia.org/wikipedia/commons/8/85/Venus_globe.jpg', 'planet'), # ('https://stenila.files.wordpress.com/2014/08/purchased-elanon.jpg?w=569&h=367', 'star')], # input_type='urls') # # classifier = ClassificationModel(private=True, name='my classifier') # classifier.fit(training_data) print general_image_classifier.predict(url, input_type='urls') list = [general_image_classifier.predict(url, input_type='urls')] firstList = list[0] dict = firstList[0] print dict['label']
from metamind.api import set_api_key, general_image_classifier set_api_key('aXZxkB3eOMupDZSMNIZSdfD9hxv2zBDpen8qbMOOPLtzYwhx2X') print general_image_classifier.predict(['https://scontent.xx.fbcdn.net/hphotos-xft1/t31.0-8/p180x540/1799935_10153379041592500_4858712342770219261_o.jpg', 'https://scontent.xx.fbcdn.net/hphotos-xfp1/v/t1.0-9/q83/p720x720/945268_10151610992317500_1182525900_n.jpg?oh=377f8509b6dd5353d6f47205405a7df8&oe=566DD49C'], input_type='urls')
# Read an Instagram-style dump (out.json), collect the standard-resolution
# URL of every tagged image, classify the images with MetaMind's builtin
# general image classifier, and write the predictions to img.json.
import metamind
from metamind.api import set_api_key, general_image_classifier
import json

# NOTE(review): hardcoded API key committed to source — move it to an
# environment variable or config file.
apiKey = 'd2TshfVAyBuTqRnCto5aDay1XsZOTd0CEhIOZNKEPuCMRlNde0'


def _collect_tagged_image_urls(results):
    """Return standard-resolution image URLs for every item that has tags.

    `results` maps each date key to a {'data': [items]} payload, where each
    item carries 'tags' and 'images' keys — TODO confirm against the producer
    of out.json.
    """
    urls = []
    for date in results:
        for item in results[date]['data']:
            if item['tags']:  # skip untagged items
                urls.append(item['images']['standard_resolution']['url'])
    return urls


if __name__ == "__main__":
    set_api_key(apiKey)
    # `with` ensures the files are closed even on error (the original leaked
    # the read handle and closed the write handle without try/finally).
    with open('out.json', 'r') as src:
        res = json.load(src)
    imgTag = general_image_classifier.predict(
        _collect_tagged_image_urls(res), input_type='urls')
    with open('img.json', 'w') as out:
        json.dump(imgTag, out)