def compareModels(model):
    """
    Evaluate the pre-trained model against MetaMind's API on the
    sentences in `data/validation`.

    Parameters
    ----------
    model: test.MODEL
        Namedtuple containing model parameters (dictionary, tfidf
        learner and labels)
    """
    # NOTE(review): API key is hard-coded in source; should be loaded from
    # the environment or a config file instead of being committed.
    set_api_key("MohJ53r6kUvoPjHS8tStX1vnfssvN5EDetVcp2uCNISwXus2BS")
    with open('data/validation', 'r') as fin:
        validations = fin.read()
    # Ground truth: the validation file is expected to contain 9 positive
    # sentences followed by 8 negative ones (17 lines total).
    truth = [model.labels.label2class[i] for i in ['positive']*9 + ['negative']*8]
    scores_mm = []   # classes predicted by MetaMind's API
    scores_joe = []  # classes predicted by the local deep model
    for validation in validations.split('\n'):
        # NOTE(review): if the file ends with a newline this yields an empty
        # final sentence -- confirm the file has exactly 17 non-empty lines.
        mmLabel = testMetaMind(validation)[0]['label']
        scores_mm.append(model.labels.label2class[mmLabel])
        joeLabel = testDeepModel(validation, model)
        scores_joe.append(model.labels.label2class[joeLabel])
    print 'MetaMind F1 score is %s' % f1_score(truth, scores_mm)
    print 'My F1 score is %s' % f1_score(truth, scores_joe)
def train_data():
    """
    Build a MetaMind image-classification dataset from local fruit/vegetable
    images plus a few remote grocery images, then fit a classifier on it.
    """
    set_api_key('bHIqZD7ZJgRDn4oDWwMiSkDdGiuX3YvHKeCdNV1VF2WkQJO5gR')
    training_data = ClassificationData(private=True, data_type='image', name='training images')
    # Local image files: fruits and vegetables.
    training_data.add_samples([
        ('./imgs/banana.jpg', 'fruits'),
        ('./imgs/blueberries.jpg', 'fruits'),
        ('./imgs/fruit_collection2.jpg', 'fruits'),
        ('./imgs/fruit_collection.jpg', 'fruits'),
        ('./imgs/grapefruit.jpg', 'fruits'),
        ('./imgs/grapes.jpg', 'fruits'),
        ('./imgs/oranges.jpg', 'fruits'),
        ('./imgs/peaches.jpg', 'fruits'),
        ('./imgs/pears.jpg', 'fruits'),
        ('./imgs/strawberries.jpg', 'fruits'),
        ('./imgs/watermelon.jpg', 'fruits'),
        ('./imgs/carrots.jpg', 'vegetables'),
        ('./imgs/lettuce.jpg', 'vegetables'),
        ('./imgs/radish.jpg', 'vegetables')], input_type='files')
    # Remote image URLs: eggs, meat, and dairy examples.
    training_data.add_samples([
        ('http://punchbowlsocial.com/wp-content/uploads/2015/02/eyg.jpg', 'eggs'),
        ('http://media.thefowlergroup.com.s3.amazonaws.com/wp-content/uploads/2012/05/copywriting-deli-ham.jpg', 'meat'),
        ('http://www.tyson.com/~/media/Consumer/Call-Outs/fresh-package.ashx?la=en', 'meat'),
        ('http://homeguides.sfgate.com/DM-Resize/photos.demandstudios.com/gett/article/83/5/86544602_XS.jpg?w=442&h=442&keep_ratio=1', 'dairy'),
        ('http://i-store.walmart.ca/images/WMTCNPE/155/016/155016_Large_1.jpeg', 'dairy'),
        ('http://www.10tv.com/content/graphics/2014/08/29/kraft-singles-american.jpg', 'dairy')], input_type='urls')
    # NOTE(review): `fruit` is not defined anywhere in this chunk --
    # presumably a ClassificationModel created elsewhere; confirm before use.
    fruit.fit(training_data)
def __init__(self, api_key=None, classifier=None):
    """Set up the MetaMind extractor.

    Resolves the API key (explicit argument first, then the stored key),
    registers it with the MetaMind client, and selects either the stock
    general image classifier or a user-specified model.
    """
    ImageExtractor.__init__(self)
    # Fall back to a previously stored key when none is supplied.
    if api_key is None:
        api_key = get_api_key()
    if api_key is None:
        raise ValueError("A valid MetaMind API key must be passed the "
                         "first time a MetaMind extractor is initialized.")
    set_api_key(api_key, verbose=False)
    # TODO: Can add a lookup dictionary somewhere that has name --> ID
    # translation for commonly used classifiers.
    self.classifier = (general_image_classifier if classifier is None
                       else ClassificationModel(id=classifier))
import json
import sqlite3
import sys
#import word2vec
from flask import Flask, request, abort, redirect, url_for, g, render_template
from collections import OrderedDict
from collections import defaultdict
# to install metamind api
# run the command: pip install MetaMindApi --upgrade
from metamind.api import ClassificationData, ClassificationModel, set_api_key

app = Flask(__name__, static_folder='assets')

#set info for metamind
set_api_key('asAj55GwZA6r4nO9ijVGWxhbHWHuGVFcUkfxo8b8Tq6aLHqOCH')
classifier = ClassificationModel(id='40493')

# Module-level caches populated elsewhere in the file.
ingredientsToRecipes = {}
recipeData = {}

# Executes a SQL query and returns a list of dictionaries (of each result row)
# Source: http://kevcoxe.github.io/Simple-Flask-App/
def queryDb(conn, query, args=(), one=False):
    cur = conn.execute(query, args)
    # Zip column names (cur.description) with each row's values.
    rv = [dict((cur.description[idx][0], value)
               for idx, value in enumerate(row)) for row in cur.fetchall()]
    # With one=True return the first row (or None); otherwise the full list.
    return (rv[0] if rv else None) if one else rv

def getRecipes(userIngredients):
    """NOTE(review): body truncated -- continues beyond this chunk."""
# Collect samples from the FMLX dataset into a flat list.
fmlx_data = []
ds = get_dataset(FMLX_DIR)
for key in ds.keys():
    fmlx_data.append(ds[key])
print "fmlx_data length [" + str(len(fmlx_data)) + "]"

# Collect samples from the CIS dataset the same way.
cis_data = []
ds = get_dataset(CIS_DIR)
for key in ds.keys():
    cis_data.append(ds[key])
print "cis_data length [" + str(len(cis_data)) + "]"

# setup metamind
# NOTE(review): hard-coded API key and a user-specific backup path --
# both should come from configuration.
api_key = "bZQv0loHZItIA6f6Nkw1vZCbyzSgrBb3wGSaQoPnCX0lOo0dAE"
path = "/Users/patrickhop/Desktop/metamind/backups/"
set_api_key(api_key)
cf = ClassificationModel(id=40426)

# Run each test set through the model and print a confusion matrix.
# NOTE(review): `hr_data` and `seth_data` are not defined in this chunk --
# presumably loaded earlier in the file; verify before running.
print "running hr test"
pairs = append_inferences(cf, hr_data)
confusion(pairs)
print "running seth test"
pairs = append_inferences(cf, seth_data)
confusion(pairs)
print "running formulatrix test"
pairs = append_inferences(cf, fmlx_data)
confusion(pairs)
# NOTE(review): chunk truncated here -- the CIS test body follows elsewhere.
print "running CIS test"
# NOTE(review): this chunk starts mid-function -- the enclosing "def" (and
# the loop the "+=" belongs to) are outside this view.
    string += i + '+'
    string = string[:-1]
    return string

def get_xml(query):
    # Query Wolfram|Alpha's REST API; `api_key` is defined elsewhere in the file.
    link = 'http://api.wolframalpha.com/v2/query?input={0}&appid={1}'.format(query, api_key)
    r = requests.get(link)
    return r

def parse_query(req):
    # Pull the expanded nutrition-label plaintext out of the XML response.
    soup = BeautifulSoup(req.text, "xml")
    s = None
    for l in soup.find_all('pod', {'id':'NutritionLabelSingle:ExpandedFoodData'}):
        s = l.find('plaintext')
    # NOTE(review): if no matching pod is found, s stays None and
    # `s.text` raises AttributeError -- verify callers handle that.
    return s.text

def process_result(res):
    # Split the plaintext label into one line per nutrient.
    lis = res.split('\n')
    return lis

f00d.set_api_key(meta_key)
classifier = f00d.ClassificationModel(id=41125)

# Serve forever on PORT; `myHandler` is defined elsewhere in the file.
PORT = 8000
handler = SocketServer.TCPServer(("", PORT), myHandler)
# NOTE(review): message hard-codes 8000 rather than interpolating PORT.
print "serving at port 8000"
handler.serve_forever()
"""Minimal Flask front-end for the tumor classification demo."""
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
from metamind.api import set_api_key

# Register the MetaMind API key before the app handles any request.
set_api_key("k3U0ZYw5U7BiQWnXYCAJGzKHmSk42VSNUoVebKxPC9jlchnXzk")

app = Flask('tumor_classification')


@app.route('/')
def show_entries():
    # Landing page: everything interesting happens client-side.
    return render_template('index.html')


if __name__ == '__main__':
    app.run()
"""Flask entry point for the tumor classification UI (duplicate variant)."""
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
from metamind.api import set_api_key

set_api_key("k3U0ZYw5U7BiQWnXYCAJGzKHmSk42VSNUoVebKxPC9jlchnXzk")
app = Flask('tumor_classification')


@app.route('/')
def show_entries():
    """Render the static index page."""
    return render_template('index.html')


if __name__ == '__main__':
    app.run()
from metamind.api import ClassificationData, ClassificationModel, set_api_key
from flask import Flask, request
import json
from flask.ext.cors import CORS

set_api_key('sxXhflPZoxXz3USooJB9H2hD4frYjbJF83mZ6mZRscuVxasSuv')
classifier = ClassificationModel(id='40323')

# (image URL, celebrity label) training/sample pairs.
# NOTE(review): the list is not closed in this chunk -- it continues
# (and is presumably consumed) beyond this view.
final=[
    ('http://www.mensflair.com/media/michael-jackson-style.jpg', 'michael jackson'),
    ('https://s-media-cache-ak0.pinimg.com/236x/6c/59/4b/6c594b12af3e0aca0f56282c906b9893.jpg', 'Matthew Perry'),
    ('http://celebmafia.com/wp-content/uploads/2015/06/taylor-swift-style-at-lax-airport-june-2015_5.jpg', 'Taylor swift'),
    ('http://i.dailymail.co.uk/i/pix/2015/05/12/16/2892A11600000578-0-image-m-10_1431445560818.jpg', 'David Schimmer'),
    ('https://s-media-cache-ak0.pinimg.com/736x/0d/00/fb/0d00fb105c433ff431cfc5031ffd372c.jpg','jim parson'),
    ('http://celebritiestown.com/celebritypictures/var/albums/Anne-Hathaway-style-Brooklyn/anne-hathaway-style-02.jpg', 'Anna Hathway'),
    ('http://www.itslavida.com/files/2014/07/Smart-Casual.jpg','david beckham'),
from metamind.api import set_api_key, general_image_classifier set_api_key('aXZxkB3eOMupDZSMNIZSdfD9hxv2zBDpen8qbMOOPLtzYwhx2X') print general_image_classifier.predict(['https://scontent.xx.fbcdn.net/hphotos-xft1/t31.0-8/p180x540/1799935_10153379041592500_4858712342770219261_o.jpg', 'https://scontent.xx.fbcdn.net/hphotos-xfp1/v/t1.0-9/q83/p720x720/945268_10151610992317500_1182525900_n.jpg?oh=377f8509b6dd5353d6f47205405a7df8&oe=566DD49C'], input_type='urls')
from metamind.api import ClassificationData, ClassificationModel, set_api_key
from story_teller import *
import os

# Key comes from the environment -- returns None if METAMIND_KEY is unset.
set_api_key(os.environ.get('METAMIND_KEY'))

#print getPostBetweenScores((200,300), 1)
#print getContentWithLabel(1)

# Build a two-label text dataset from Hacker News stories.
training_data = ClassificationData(private=True, data_type='text', name='hn_stories_2labels_800_samples')
#training_data = ClassificationData(id=184417)
labels = ('0To15', '150Plus')
# 400 samples per label, provided by story_teller.
samples = getContentWithLabel(400, labels)
training_data.add_samples(samples, input_type='text')

# Train a fresh classifier (or reuse an existing one via the commented id).
classifier = ClassificationModel(private=True, name='HN score predictor_2labels')
#classifier = ClassificationModel(id=27906)
classifier.fit(training_data)

# Sanity-check: predict the score bucket of one random post.
randomPost = getRandomPost()
prediction = classifier.predict(randomPost['content'], input_type='text')
print randomPost['score']
print prediction[0]['label'], prediction[0]['confidence']
#print 'prediction of score %d is %s with confidence %f' %(randomPost['score'], prediction['label'], prediction['probability'])
'''
(NOTE(review): continuation of a setup docstring -- its opening quotes
fall outside this chunk.)
1. Sign up at: https://www.metamind.io/auth/signup
2. Retrieve your api key here https://www.metamind.io/auth/my_account
3. Copy the 'API Key' into the script below
More information about MetaMind's API can be found here:
http://docs.metamind.io/docs
'''

api_key = ''  # ENTER YOUR API KEY HERE
set_api_key(api_key)

def main():
    # MetaMind makes it simple to create custom classifiers for both text and images
    # Create and use a custom image classifier
    # This classifier classifies an image as 'food' or 'animal'
    image_classifier = create_image_classifier()
    print 'Custom image classifier predictions:'
    pprint.pprint(
        image_classifier.predict([blueberry_pie_url, deer_url], input_type='urls'))
    # Create and use a custom text classifier
    # NOTE(review): main() is truncated here -- it continues beyond this chunk.
# AUTHOR ChiaLing Wang [email protected] from flask.ext.cors import CORS, cross_origin from flask import Flask, request, jsonify from flaskext.mysql import MySQL from metamind.api import set_api_key, food_image_classifier import urllib2 import os import json import time set_api_key('R71f8xzpv9EHlNH1HZbYfKFWks80ceLTQOPMoynY0mV3KR7Q0j') app = Flask(__name__) CORS(app) mysql = MySQL() app.config['MYSQL_DATABASE_USER'] = '******' app.config['MYSQL_DATABASE_PASSWORD'] = '******' app.config['MYSQL_DATABASE_DB'] = 'websysS16GB4' app.config['MYSQL_DATABASE_HOST'] = 'websys3.stern.nyu.edu' mysql.init_app(app) api_token = "j1KHV0ZxnLM43gLiaAVqNY1OjgZVOk" '''''' '''''' '''''' '''''' '''''' '''''' '' ''' Login Check ''' '''''' '''''' '''''' '''''' '''''' '''''' '' @app.route("/login/", methods=["POST"]) @cross_origin(origin='*', headers=['Content-Type', 'Authorization']) def login(): data = {}
import json
import urllib2
from application_only_auth import Client
from metamind.api import ClassificationData, ClassificationModel, set_api_key

#twitter
API_KEY = "H5lXt22xepXuRUq2Y9zIFWyTk"
API_SECRET = "aj4PVeRsmRJjvqYPDO2Jk57qeeFkofWA4n3JQisEiCQQtD78JP"
client = Client(API_KEY, API_SECRET)

#meta mind
set_api_key("dEt77byHr0OuQpqmBbn6HycesPndJ77wMpwUKXudDznYZbf70e")
classifier = ClassificationModel(id=88)

# to be filled with all matched/possible statuses
master = {}

# Build every "<me><quantifier><sentiment>" phrase combination to search for.
sentiments = ["sad", "depressed", "upset", "heartbroken"]
quants = ["", "really ", "very ", "extremely "]
me = ["I am ", "I'm ", "I'm feeling "]
sad_boys = []
for s in sentiments:
    for q in quants:
        for m in me:
            sad_boys.append(m + q + s)

i = 0
j = 0  # NOTE(review): unused in this chunk -- presumably used further down.
for s in sad_boys:
    print "query#: " + str(i) + " Using: " + s
    # URL-encode the phrase for the Twitter search API.
    # NOTE(review): loop body truncated -- continues beyond this chunk.
    query = urllib2.quote(s).encode("utf8")
#!/usr/bin/python from Slicer import Slicer from metamind.api import ClassificationData, ClassificationModel, set_api_key import cv2 import sys if len(sys.argv) < 3: print ("Usage: Tester.py <key file> <picture>") exit(1) with open(sys.argv[1], "r") as apikey: key = apikey.read() key = key.rstrip() set_api_key(key) classifier = ClassificationModel(id=25011) print ("-----") s = Slicer(sys.argv[2], "out.jpg", True) s.create_slices(150) i = 0 for slic in s.slics: cv2.imwrite("CURRENT" + str(i) + ".jpg", slic) print classifier.predict(["CURRENT" + str(i) + ".jpg"], input_type="files") i += 1
from metamind.api import set_api_key, twitter_text_classifier, ClassificationData, ClassificationModel, set_api_key set_api_key("5eqwiKI50ym253djlf84VEgQptIb5odohKFpgS1SSWOdeGDzQ3") training_data = ClassificationData(private=True, data_type="text", name="RT snippets training data") training_data.add_samples() classifier = ClassificationModel(private=True, name="RT movie classifier") classifier.fit(training_data) print classifier.predict("This company is the worst and is losing money", input_type="text")
from metamind.api import ClassificationData, ClassificationModel, set_api_key, general_image_classifier

set_api_key("uqXM0XTScBW2y46BI5BSiWpRXLYjEsyEatyYw60zyEpH76KyRf")
classifier = general_image_classifier

jpgs = []     #List of inputs to the classifier, to be populated by video grabs
results = {}  #Mapping from input (url? jpg?) to [probability, label]
labels = {}   #Mapping from label to [[inputs][probabilities]]

#Sample data
jpgs.append('../resources/hamburger.jpg')
jpgs.append('../resources/lock.jpg')
jpgs.append('../resources/fire.jpg')

#Get base64 encoding, which allows an image to work as a URL
def toBase64 (jpg):
    with open(jpg, "rb") as f:
        data = f.read()
        # Python 2 str.encode("base64"); not available on Python 3.
        data_string = data.encode("base64")
        return data_string

def loop ():
    # Drain the jpgs queue, classifying each image via a data: URL.
    while (len(jpgs) > 0):
        print "Hi!"
        input = jpgs.pop()  # NOTE(review): shadows the builtin `input`.
        base64 = toBase64(input)
        output = classifier.predict(['data:image/lock.jpg;base64,'+ base64], input_type='urls')
        probability = output[0].get(u'probability')
        label = output[0].get(u'label')
        results[input] = [probability, label]
        # NOTE(review): dict.has_key is Python 2 only; the body of this
        # branch is truncated -- it continues beyond this chunk.
        if labels.has_key(label):
# AUTHOR ChiaLing Wang [email protected] from flask.ext.cors import CORS , cross_origin from flask import Flask, request, jsonify from flaskext.mysql import MySQL from metamind.api import set_api_key, food_image_classifier import urllib2 import os import json import time set_api_key('R71f8xzpv9EHlNH1HZbYfKFWks80ceLTQOPMoynY0mV3KR7Q0j') app = Flask(__name__) CORS(app) mysql = MySQL() app.config['MYSQL_DATABASE_USER'] = '******' app.config['MYSQL_DATABASE_PASSWORD'] = '******' app.config['MYSQL_DATABASE_DB'] = 'websysS16GB4' app.config['MYSQL_DATABASE_HOST'] = 'websys3.stern.nyu.edu' mysql.init_app(app) '''''''''''''''''''''''''''''''''''''' ''' Login Check ''' '''''''''''''''''''''''''''''''''''''' @app.route("/login/" , methods=["POST"]) @cross_origin(origin='*',headers=['Content-Type','Authorization']) def login():
"""Train a 3-value Rotten Tomatoes movie-review classifier via MetaMind."""
# Fix: the original imported set_api_key twice in the same statement.
from metamind.api import set_api_key, twitter_text_classifier, ClassificationData, ClassificationModel

#Set the metamind API key for my account
set_api_key("IpdP8N0nsPmYstaqwqL1CWpPWfxxETCj5BzQWa7ANN6ChZ9PYS")

#Using the MetaMind API we can look up things
#print twitter_text_classifier.query_and_predict("comcast")

#Create the classification training data to feed into the model
training_data = ClassificationData(private=True, data_type="text", name="RT snippets 3 feature training data")
training_data.add_samples("rt.train-3.tsv", input_type="tsv")

#Train the classifier
classifier = ClassificationModel(private=True, name="RT movie 3-value classifier")
classifier.fit(training_data)

#print classifier.predict("Furious7 was the worst movie I've ever seen. Period.", input_type="text")
import urllib
import mechanize
from bs4 import BeautifulSoup
from urlparse import urlparse
from url_fetcher import fetch
from metamind.api import ClassificationData, ClassificationModel, set_api_key

k="tbYAbBvgQRgi9QfWRvs6NXAqcIFrp7p8ycYDctzT2duT8vlKkV"
set_api_key(k)

def getImageUrl(name):
    """Scrape Google Images result links for training-image URLs.

    NOTE(review): `name` is unused in the visible portion -- the search
    query is hard-coded to "cars"; presumably it should be interpolated.
    """
    browser = mechanize.Browser()
    # Ignore robots.txt and spoof a browser user agent so Google serves HTML.
    browser.set_handle_robots(False)
    browser.addheaders = [('User-agent', 'Mozilla')]
    #htmltext = browser.open("https://www.google.com/search?q=r&source=lnms&tbm=isch&sa=X&ved=0ahUKEwil0uyH8qLJAhXKcT4KHX6JCjYQ_AUIBygB&biw=1167&bih=593#tbm=isch&q=car")
    #htmltext = browser.open("https://www.google.com/search?q=fish&biw=1920&bih=916&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiN6OXs_KLJAhUJdD4KHbZ1CLAQ_AUIBigB")
    #htmltext = browser.open("https://www.google.com/search?tbm=isch&q=cat")
    #htmltext = browser.open("https://www.google.com/search?q=fish&tbm=isch")
    htmltext = browser.open("https://www.google.com/search?site=imghp&tbm=isch&source=hp&biw=1414&bih=709&q=cars&oq=cars")
    img_urls = []
    formatted_images = []
    print htmltext
    # NOTE(review): no parser argument -- BeautifulSoup will warn and pick one.
    soup = BeautifulSoup(htmltext)
    results = soup.findAll("a")
    print results
    for r in results:
        try:
            print r
            print " "
            # NOTE(review): body truncated here -- the branch (and the
            # except clause for this try) continue beyond this chunk.
            if "imgres?imgurl" in r['href']:
from __future__ import division __author__ = 'Jake' from pyechonest import config from pyechonest import song import os, csv, json, operator, twitter from metamind.api import set_api_key, ClassificationModel set_api_key("uqakkdVZiUUr62KISE5pM4GKiAZNaHXXT9B1umpPhIxlOiWZWQ") config.ECHO_NEST_API_KEY = "DLBFUV54VPZIDBJO7" def categorize_tweets_csv(): for tweetsfile in os.listdir(os.getcwd()): excitements = [] happy = 0 exclamations = 0 counter_num = 0 if tweetsfile.endswith(".csv"): print tweetsfile with open(tweetsfile, 'r') as csvfile: csvreader = csv.reader(csvfile) for tweet, sentiment, accuracy in csvreader: counter_num += 1 if sentiment == "positive" and accuracy >= 50: happy += 1 if tweet.count("!") > 1 and tweet.count(".") <= 1: exclamations += 1 exclamation_percentage = exclamations / float(counter_num) # excitement = (sum(excitements) + exclamations) / float(len(excitements))
from __future__ import division __author__ = 'Jake' from pyechonest import config from pyechonest import song import os, csv, json, operator, twitter from metamind.api import set_api_key, ClassificationModel set_api_key("uqakkdVZiUUr62KISE5pM4GKiAZNaHXXT9B1umpPhIxlOiWZWQ") config.ECHO_NEST_API_KEY = "DLBFUV54VPZIDBJO7" def categorize_tweets_csv(): for tweetsfile in os.listdir(os.getcwd()): excitements = [] happy = 0 exclamations = 0 counter_num = 0 if tweetsfile.endswith(".csv"): print tweetsfile with open(tweetsfile, 'r') as csvfile: csvreader = csv.reader(csvfile) for tweet, sentiment, accuracy in csvreader: counter_num += 1 if sentiment == "positive" and accuracy >= 50: happy += 1 if tweet.count("!") > 1 and tweet.count(".") <= 1: exclamations += 1 exclamation_percentage = exclamations / float(counter_num)
__author__ = 'Adam' import warnings import sys import os from metamind.api import ClassificationData, ClassificationModel, set_api_key, general_image_classifier warnings.filterwarnings("ignore") url = sys.argv[1] set_api_key("Xnxh4kNZgcykl9ePz4nfoRV7EVYW5BM9WDQGoMopzVObFJdvzR") # training_data = ClassificationData(private=True, data_type='image', name='training images') # training_data.add_samples([ # ('http://newsimg.bbc.co.uk/media/images/46310000/jpg/_46310103_r850082-andromeda_galaxy_(m31)-spl.jpg', 'galaxy'), # ('https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcSNB1gxHaGwKNV8r-AvYgST0PiM4t9YXU7e8XRELdTHGx50dqtUMg', 'galaxy'), # ('https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcT7KaboL9LLsjMTM346fphYW3fufnVN8zMxcG7FhvDahepCET0sXA', 'galaxy'), # ('https://maleficusamore.files.wordpress.com/2012/06/sirius20crop1.jpg', 'star'), # ('http://aetherforce.com/wp-content/uploads/2014/12/sun_stars_space_light_58237_1920x1180.jpg', 'star'), # ('http://cdn.spacetelescope.org/archives/images/publicationjpg/heic1312a.jpg', 'planet'), # ('http://orig00.deviantart.net/6290/f/2006/336/b/0/planet_stock_5_by_bareck.jpg', 'planet'), # ('https://upload.wikimedia.org/wikipedia/commons/8/85/Venus_globe.jpg', 'planet'), # ('https://stenila.files.wordpress.com/2014/08/purchased-elanon.jpg?w=569&h=367', 'star')], # input_type='urls') # # classifier = ClassificationModel(private=True, name='my classifier') # classifier.fit(training_data) print general_image_classifier.predict(url, input_type='urls') list = [general_image_classifier.predict(url, input_type='urls')] firstList = list[0] dict = firstList[0] print dict['label']
def labelMetamind(request):
    """Django view: classify the image at ?q=<url> with MetaMind's food
    classifier, then render a nutrition label.

    NOTE(review): every nutrient value below is a hard-coded placeholder
    (zero / empty) -- the prediction result is printed but never used to
    populate the label. Presumably a lookup is meant to fill these in.
    """
    if 'q' in request.GET and request.GET['q']:
        url = request.GET['q']
        set_api_key('q7dR3chgj4SLJHnnbpIzvDVXLrvyQ9ncJ6ZX2Gqt9ZyyTPr7oH')
        print food_image_classifier.predict([url], input_type='urls')
    else:
        return HttpResponse('Please submit a image url')
    # Placeholder nutrition values rendered into the label template.
    calories = 0
    carbohydrates = 0
    protein = 0
    fiber = 0
    sugar = 0
    calcium = 0
    iron = 0
    magnesium = 0
    phosphorus = 0
    pottasium = 0
    sodium = 0
    zinc = 0
    vitaminc = 0
    thiamin = 0
    riboflavin = 0
    niacin = 0
    vitaminb6 = 0
    folate = 0
    vitamina = 0
    vitamind = 0
    saturatedfat = 0
    monosaturatedfat = 0
    polyunsaturatedfat = 0
    transfat = 0
    totalfat = 0
    fatcalories = 0
    cholestrol = 0
    serving = ""
    context = {
        "serving": serving,
        "calories": calories,
        "carbohydrates": carbohydrates,
        "protein": protein,
        "fiber": fiber,
        "sugar": sugar,
        "calcium": calcium,
        "iron": iron,
        "magnesium": magnesium,
        "phosphorus": phosphorus,
        "pottasium": pottasium,
        "sodium": sodium,
        "zinc": zinc,
        "vitaminc": vitaminc,
        "thiamin": thiamin,
        "riboflavin": riboflavin,
        "niacin": niacin,
        "vitaminb6": vitaminb6,
        "folate": folate,
        "vitamina": vitamina,
        "vitamind": vitamind,
        "saturatedfat": saturatedfat,
        "monosaturatedfat": monosaturatedfat,
        "polyunsaturatedfat": polyunsaturatedfat,
        "transfat": transfat,
        "totalfat": totalfat,
        "fatcalories": fatcalories,
        "cholestrol": cholestrol,
    }
    return render(request, 'label.html', context)
# install metamind api first # run the command: pip install MetaMindApi --upgrade try: from metamind.api import ClassificationData, ClassificationModel, set_api_key except ImportError: print "Could not import metamind.api packages" # api key in your profile, might need to change it to work set_api_key( 'Authorization: Basic wC5gH0A9hi37QAQA3i5oH045ofG1jNV07FhLQ1iwe5rmIJBtET') # need classifier id, classifier has to be public classifier = ClassificationModel(id='YOUR_CLASSIFIER_ID') # change urls to image urls for ingredients we trained for print classifier.predict([ 'http://www.grubdaily.com/wp-content/uploads/2011/01/IMG_4514-copy.jpg', 'http://static.chefkoch-cdn.de/ck.de/rezepte/1/1642/103048-960x720-spaghetti-carbonara.jpg' ], input_type='urls')