def learning():
    """Handle a classification request for one tweet.

    Parses the JSON tweet payload from the HTTP request body, runs it
    through the parallel classifier, and returns the detected emotions
    as a JSON response.
    """
    # Build the feature processors needed by the classifier.
    lexicon_feat, embed_feat = initFeatureProcessors()
    # Example payload to try: 'lucky @USERID ! good luck @USERID & see you soon :) @USERID @USERID'
    tweet = json.loads(request.data)
    classified = parallelClassifier([tweet], lexicon_feat, embed_feat)
    # parallelClassifier returns one result per input tweet; we sent one.
    return jsonify(classified[0]['emotions'])
import json, os
import codecs, sys
from argparse import ArgumentParser
from Classifier import parallelClassifier, initFeatureProcessors

# Module-level feature-processor slots. NOTE(review): the return value of
# initFeatureProcessors() is discarded, so these stay None unless that call
# populates module state itself -- confirm against Classifier.
lexicon_feat = None
embed_feat = None
initFeatureProcessors()


def read_tweets(inputdir):
    """Load all tweets from the *.json files in *inputdir*.

    Each file is expected to contain one JSON object per line
    (line-delimited JSON). Returns the parsed objects as a list.
    """
    files = [f for f in os.listdir(inputdir) if f.endswith(".json")]
    tweets = []
    for filename in files:
        input_filename = os.path.join(inputdir, filename)
        with codecs.open(input_filename, encoding="utf8") as tweet_file:
            for line in tweet_file:
                tweets.append(json.loads(line))
    return tweets


def write_results(outputdir, results):
    """Write *results* as line-delimited JSON to <outputdir>/results.json.

    Bug fix: the file was opened in binary mode ("wb"), but json.dump and
    the trailing newline write produce str on Python 3, raising TypeError
    on the first record. Open in text mode with an explicit UTF-8 encoding,
    build the path with os.path.join, and use a context manager so the
    handle is closed even if a record fails to serialize.
    """
    output_path = os.path.join(outputdir, "results.json")
    with open(output_path, "w", encoding="utf8") as fp:
        for result in results:
            json.dump(result, fp)
            fp.write("\n")
import json, os
import codecs, sys
from argparse import ArgumentParser
from Classifier import parallelClassifier, initFeatureProcessors

# Module-level feature-processor slots. NOTE(review): the return value of
# initFeatureProcessors() is discarded, so these stay None unless that call
# populates module state itself -- confirm against Classifier.
lexicon_feat = None
embed_feat = None
initFeatureProcessors()


def read_tweets(inputdir):
    """Load all tweets from the *.json files in *inputdir*.

    Each file is expected to contain one JSON object per line
    (line-delimited JSON). Returns the parsed objects as a list.
    """
    files = [f for f in os.listdir(inputdir) if f.endswith('.json')]
    tweets = []
    for filename in files:
        input_filename = os.path.join(inputdir, filename)
        with codecs.open(input_filename, encoding='utf8') as tweet_file:
            for line in tweet_file:
                tweets.append(json.loads(line))
    return tweets


def write_results(outputdir, results):
    """Write *results* as line-delimited JSON to <outputdir>/results.json.

    Bug fix: the file was opened in binary mode ('wb'), but json.dump and
    the trailing newline write produce str on Python 3, raising TypeError
    on the first record. Open in text mode with an explicit UTF-8 encoding,
    build the path with os.path.join, and use a context manager so the
    handle is closed even if a record fails to serialize.
    """
    output_path = os.path.join(outputdir, 'results.json')
    with open(output_path, 'w', encoding='utf8') as fp:
        for result in results:
            json.dump(result, fp)
            fp.write('\n')