Code example #1
0
def create_image_classifier():
    """Build and train a private MetaMind image classifier.

    The resulting model labels images as 'food' or 'animal'.
    Returns the trained ClassificationModel.
    """
    # Datasets live on the MetaMind servers and are assigned server-side ids;
    # an existing dataset can be wrapped locally with ClassificationData(id=123).
    # Your datasets: https://www.metamind.io/my_stuff#my-datasets
    dataset = ClassificationData(private=True,
                                 data_type='image',
                                 name='image_demo')

    # Samples can be added either by URL or from local files; see
    # http://docs.metamind.io/docs/datasets for the full set of options.
    url_samples = [(apple_pie_url, 'food'), (salad_url, 'food'),
                   (deer_url, 'animal')]
    dataset.add_samples(url_samples, input_type='urls')

    # Local images shipped next to this script.
    image_dir = os.path.dirname(os.path.realpath(__file__)) + '/images'
    file_samples = [(image_dir + '/bonsai.jpg', 'animal'),
                    (image_dir + '/dog.jpg', 'animal')]
    dataset.add_samples(file_samples, input_type='files')

    # Classifiers get server-side ids too, and can likewise be wrapped with
    # ClassificationModel(id=123).
    model = ClassificationModel(private=True, name='image_demo')
    model.fit(dataset)
    return model
Code example #2
0
def main():

    # MetaMind makes it simple to create custom classifiers for both text and images

    # Create and use a custom image classifier
    # This classifier classifies an image as 'food' or 'animal'
    image_classifier = create_image_classifier()
    print 'Custom image classifier predictions:'
    pprint.pprint(
        image_classifier.predict([blueberry_pie_url, deer_url],
                                 input_type='urls'))

    # Create and use a custom text classifier
    # This classifier classifies text as 'rural' or 'urban'
    text_classifier = create_text_classifier()
    print 'Custom text classifier predictions:'
    pprint.pprint(
        text_classifier.predict(
            ['We sheared the sheep yesterday.', 'The traffic is loud.'],
            input_type='text'))

    # Use builtin general image classifier
    print 'MetaMind builtin general image classifier predictions:'
    pprint.pprint(
        general_image_classifier.predict([apple_pie_url, zebra_url],
                                         input_type='urls'))

    # Use builtin food image classifier
    print 'MetaMind builtin food image classifier predictions:'
    pprint.pprint(
        food_image_classifier.predict([apple_pie_url, salad_url],
                                      input_type='urls'))

    # Use builtin twitter sentiment classifier
    # This classifier finds tweets by a given key word, and classifies each tweet as
    # 'positive', 'negative' or 'neutral'
    print 'MetaMind builtin twitter sentiment classifier:'
    pprint.pprint(twitter_text_classifier.query_and_predict('trump')[:3])

    # You can create a representation of a given classifier by passing its id into the constructor.
    # You can explore additional public classifiers here: https://www.metamind.io/vision/explore
    # You can explore your private classifiers and data here: https://www.metamind.io/my_stuff

    # You can find more details about the classifier used below here: https://www.metamind.io/classifiers/155
    print 'Public sentiment classifier with id=155:'
    pprint.pprint(
        ClassificationModel(id=155).predict(
            "This is such a great, wonderful sentiment", input_type="text"))
Code example #3
0
def create_text_classifier():
    """Build and train a private MetaMind text classifier that labels
    sentences as 'rural' or 'urban'.  Returns the trained model."""
    rural_sentences = ['The horse got out of the barn.',
                       'It is very quiet at night',
                       'There are 300 cattle in the field.',
                       'The roads are empty']
    urban_sentences = ['There is so much traffic today.',
                       'Newer buildings are often made of glass.',
                       'The subways are quite loud.',
                       'How many skyscrapers do you see?']

    dataset = ClassificationData(private=True,
                                 data_type='text',
                                 name='text_demo')
    dataset.add_samples(
        [(text, 'rural') for text in rural_sentences] +
        [(text, 'urban') for text in urban_sentences],
        input_type='text')

    model = ClassificationModel(private=True, name='text_demo')
    model.fit(dataset)
    return model
Code example #4
0
File: test.py  Project: TeamNeuron/classifier-code
# install metamind api first
# run the command: pip install MetaMindApi --upgrade

try:
    from metamind.api import ClassificationData, ClassificationModel, set_api_key
except ImportError:
    print "Could not import metamind.api packages"

# api key in your profile, might need to change it to work
set_api_key(
    'Authorization: Basic wC5gH0A9hi37QAQA3i5oH045ofG1jNV07FhLQ1iwe5rmIJBtET')

# need classifier id, classifier has to be public
classifier = ClassificationModel(id='YOUR_CLASSIFIER_ID')

# change urls to image urls for ingredients we trained for
print classifier.predict([
    'http://www.grubdaily.com/wp-content/uploads/2011/01/IMG_4514-copy.jpg',
    'http://static.chefkoch-cdn.de/ck.de/rezepte/1/1642/103048-960x720-spaghetti-carbonara.jpg'
],
                         input_type='urls')
Code example #5
0
File: logic.py  Project: jakew32/bitcamp15
def pick_song(predict_list):
    mood_counts = {
        "sad": 0.0,
        "excited": 0.0,
        "happy": 0.0,
        "motivated": 0.0,
        "angry": 0.0,
        "energetic": 0.0
    }
    for input in predict_list["content"]["statuses"]:
        class_result = ClassificationModel(id=25073).predict(input["text"],
                                                             input_type="text")
        jsonres = json.loads(json.dumps(class_result[0]))
        mood = jsonres['label'].lower()
        mood_counts[mood] += 1.0

    moods = max(mood_counts.iteritems(), key=operator.itemgetter(1))
    total = sum(mood_counts.values())
    proportion = {
        key: (mood_counts[key] / float(total))
        for key in mood_counts.keys()
    }
    sad = {
        "max_danceability": .3,
        "max_tempo": 110.0,
        "min_acousticness": .3,
        "min_speechiness": .3
    }
    excited = {
        "min_danceability": .3,
        "min_tempo": 100.0,
        "min_energy": .5,
        "max_acousticness": .4
    }
    happy = {"max_danceability": .5, "max_energy": .6}
    motivated = {
        "min_danceability": .4,
        "min_energy": .5,
        "max_acousticness": .4,
        "max_speechiness": .5,
        "min_tempo": 100.0
    }
    angry = {"min_energy": .5, "max_acousticness": .3}
    energetic = {
        "min_energy": .65,
        "min_tempo": 110.0,
        "max_acousticness": .5,
        "max_speechiness": .6
    }
    sad = {key: (sad[key] * proportion["sad"]) for key in sad.iterkeys()}
    sad["proportion"] = proportion["sad"]
    excited = {
        key: (excited[key] * proportion["excited"])
        for key in excited.iterkeys()
    }
    excited["proportion"] = proportion["excited"]
    happy = {
        key: (happy[key] * proportion["happy"])
        for key in happy.iterkeys()
    }
    happy["proportion"] = proportion["happy"]
    motivated = {
        key: (motivated[key] * proportion["motivated"])
        for key in motivated.iterkeys()
    }
    motivated["proportion"] = proportion["motivated"]
    angry = {
        key: (angry[key] * proportion["angry"])
        for key in angry.iterkeys()
    }
    angry["proportion"] = proportion["angry"]
    energetic = {
        key: (energetic[key] * proportion["energetic"])
        for key in energetic.iterkeys()
    }
    energetic["proportion"] = proportion["energetic"]
    stuff = [sad, excited, happy, motivated, angry, energetic]
    newlist = sorted(stuff, key=operator.itemgetter("proportion"))
    result = newlist[5]

    songs_results = song.search(
        artist_min_familiarity=.6,
        style="pop",
        artist_start_year_after="1999",
        max_tempo=result.get("max_tempo", 160),
        min_tempo=result.get("min_tempo", 0),
        max_danceability=result.get("max_danceability", 1),
        min_danceability=result.get("min_danceability", 0),
        max_speechiness=result.get("max_speechiness", 1),
        min_speechiness=result.get("min_speechiness", 0),
        max_energy=result.get("max_energy", 1),
        min_energy=result.get("min_energy", 0),
        max_acousticness=result.get("max_acousticness", 1),
        min_acousticness=result.get("min_acousticness", 0))

    oursong = songs_results[
        0]  # is a slammin screen door, stayin out late, sneakin out your window
    print oursong.title + " - " + oursong.artist_name
    return oursong
Code example #6
0
from metamind.api import set_api_key, ClassificationData, ClassificationModel
import pymongo
from pymongo import MongoClient
import pickle
import time

### metamind setup
set_api_key("k3U0ZYw5U7BiQWnXYCAJGzKHmSk42VSNUoVebKxPC9jlchnXzk")
training_data = ClassificationData(private=True, data_type='text', name="text snippets")
clf = ClassificationModel(private=True, name='SCF_category_classifier')

### mongodb setup
client = MongoClient()
db = client.nh1

### extract samples
# Pickle files are binary: open in 'rb' (the original used 'r', which breaks
# on Python 3 and on platforms with newline translation).
with open('/home/allan/Desktop/new_haven_seeclickfix_visualization/data/wrangling/training_samples.pkl', 'rb') as f:  # load the samples...
    training_samples = pickle.load(f)

# Join each sample's two text fields into one snippet and pair it with its
# label (sample looks like (text_a, text_b, label) — presumably title/body;
# confirm against the pickling code).  Replaces a manual append loop that
# also kept an unused `count` variable.
training_samples_2 = [(sample[0] + ' ' + sample[1], sample[2])
                      for sample in training_samples]
training_data.add_samples(training_samples_2, input_type='text')  # add them to the training data.


clf.fit(training_data)  # train the classifier...

## put all cleaning operations under a single function
Code example #7
0
from metamind.api import ClassificationData, ClassificationModel, set_api_key
from story_teller import *
import os

set_api_key(os.environ.get('METAMIND_KEY'))

#print getPostBetweenScores((200,300), 1)
#print getContentWithLabel(1)

training_data = ClassificationData(private=True, data_type='text', name='hn_stories_2labels_800_samples')
#training_data = ClassificationData(id=184417)
labels = ('0To15', '150Plus')
samples = getContentWithLabel(400, labels)

training_data.add_samples(samples, input_type='text')
classifier = ClassificationModel(private=True, name='HN score predictor_2labels')
#classifier = ClassificationModel(id=27906)

classifier.fit(training_data)

randomPost = getRandomPost()
prediction = classifier.predict(randomPost['content'], input_type='text')
print randomPost['score']
print prediction[0]['label'], prediction[0]['confidence']
#print 'prediction of score %d is %s with confidence %f' %(randomPost['score'], prediction['label'], prediction['probability'])
Code example #8
0
File: image_recog.py  Project: yjkimjunior/cheerio
import metamind.api
from metamind.api import set_api_key, ClassificationData, ClassificationModel, food_image_classifier
#import time
import json
from datetime import date

# Module-level handle for the custom food-group classifier; it is trained
# lazily by train_data() and queried by classify().
fruit = ClassificationModel(private=True, name='fruits')

def train_data():
    """Upload the sample images and train the module-level `fruit` classifier.

    Local files are labelled 'fruits'/'vegetables'; remote URLs add
    'eggs', 'meat' and 'dairy' examples.
    """
    set_api_key('bHIqZD7ZJgRDn4oDWwMiSkDdGiuX3YvHKeCdNV1VF2WkQJO5gR')
    training_data = ClassificationData(private=True, data_type='image', name='training images')

    local_samples = [
        ('./imgs/banana.jpg', 'fruits'),
        ('./imgs/blueberries.jpg', 'fruits'),
        ('./imgs/fruit_collection2.jpg', 'fruits'),
        ('./imgs/fruit_collection.jpg', 'fruits'),
        ('./imgs/grapefruit.jpg', 'fruits'),
        ('./imgs/grapes.jpg', 'fruits'),
        ('./imgs/oranges.jpg', 'fruits'),
        ('./imgs/peaches.jpg', 'fruits'),
        ('./imgs/pears.jpg', 'fruits'),
        ('./imgs/strawberries.jpg', 'fruits'),
        ('./imgs/watermelon.jpg', 'fruits'),
        ('./imgs/carrots.jpg', 'vegetables'),
        ('./imgs/lettuce.jpg', 'vegetables'),
        ('./imgs/radish.jpg', 'vegetables'),
    ]
    training_data.add_samples(local_samples, input_type='files')

    url_samples = [
        ('http://punchbowlsocial.com/wp-content/uploads/2015/02/eyg.jpg', 'eggs'),
        ('http://media.thefowlergroup.com.s3.amazonaws.com/wp-content/uploads/2012/05/copywriting-deli-ham.jpg', 'meat'),
        ('http://www.tyson.com/~/media/Consumer/Call-Outs/fresh-package.ashx?la=en', 'meat'),
        ('http://homeguides.sfgate.com/DM-Resize/photos.demandstudios.com/gett/article/83/5/86544602_XS.jpg?w=442&h=442&keep_ratio=1', 'dairy'),
        ('http://i-store.walmart.ca/images/WMTCNPE/155/016/155016_Large_1.jpeg', 'dairy'),
        ('http://www.10tv.com/content/graphics/2014/08/29/kraft-singles-american.jpg', 'dairy'),
    ]
    training_data.add_samples(url_samples, input_type='urls')

    fruit.fit(training_data)
#train_data()

def classify(in_img, ingredients):
	train_data()
	specific_descript = food_image_classifier.predict(in_img, input_type='files')
	print(specific_descript)
	general_descript = fruit.predict(in_img, input_type='files')
	entry = date(2015, 10, 11)
	if(general_descript[0]['label'] == 'meat'):
		expiary = date(entry.year, entry.month, entry.day + 5)
	elif(general_descript[0]['label'] == 'vegetables'):
		expiary = date(entry.year, entry.month, entry.day + 14)
	elif(general_descript[0]['label'] == 'fruits'):