def put(self, tweets):
        """Normalize the given tweets and bulk-insert them into the tweet collection.

        Retweets (documents containing the ``tweet.RETWEETED_STATUS`` key —
        presumably the marker for a retweet; confirm against the tweet module)
        are dropped. The remaining tweets are passed through
        ``tweet.to_ascii`` and ``tweet.to_date`` before insertion.

        Duplicate-key errors from the insert are deliberately ignored:
        re-ingesting already-stored tweets is expected and harmless.
        """
        # Use a list comprehension instead of filter()/map(): in Python 3
        # those return iterators, and the original len() check on the map
        # object raised TypeError. A list also lets us test emptiness and
        # hand a concrete sequence to the driver.
        normalized_tweets = [
            tweet.to_date(tweet.to_ascii(t))
            for t in tweets
            if tweet.RETWEETED_STATUS not in t
        ]

        if not normalized_tweets:
            return

        try:
            # continue_on_error: keep inserting the rest of the batch even
            # when individual documents collide on a key.
            self._tweet_coll.insert(normalized_tweets, continue_on_error=True)
        except DuplicateKeyError:
            pass  # Duplicates are expected on re-ingest; ignore them.
    # Path to the pickled classifier; overridable via -c.
    classifier_path = "classifier.pickle"

    # -h help, -c classifier path, -d database, -k keyword (repeatable),
    # -s start date, -e end date (both YYYY-MM-DD).
    opts, args = getopt.getopt(sys.argv[1:], "hc:d:k:s:e:")
    for o, a in opts:
        if o == "-d":
            db = a
        elif o == "-c":
            classifier_path = a
        elif o == "-k":
            keywords.append(a)
        elif o == "-s":
            # %m is the month directive; the original "%Y-%M-%d" used %M
            # (minute), silently misparsing YYYY-MM-DD dates.
            start = datetime.strptime(a, "%Y-%m-%d")
        elif o == "-e":
            end = datetime.strptime(a, "%Y-%m-%d")
        else:
            # Covers -h and anything unrecognized.
            usage()
            sys.exit(0)

    classifier = Classifier.load(classifier_path)
    aggregator = RetweetWeightedAggregator()

    # Classify each matching tweet, print its sentiment, and feed it to the
    # retweet-weighted aggregate.
    ts = TweetStore(db)
    for t in ts.get(keywords, start, end):
        s = classifier.classify(t)
        print("%s -- sentiment: %s" % (tweet.to_ascii(t)[tweet.TEXT], "positive" if (s == 1) else "negative"))
        aggregator.add(t, s)

    print("Aggregated sentiment: %f" % aggregator.get_sentiment())
    print("ID of last tweet: %d" % aggregator.get_last_id())
    print("Total number of tweets: %d" % aggregator.get_num())