Example #1
def report():
    # Assumes Flask's request/render_template, a redis client `r`, the `ast`
    # module and a `model` module are available in the enclosing application.
    report = request.args.get("report")
    report = report[-16:]
    try:
        # Cache hit: the report was analyzed before and stored in redis.
        analyzed = r.hgetall(report)
        boss_list = ast.literal_eval(analyzed["kills"])
        rankings = ast.literal_eval(analyzed["details"])
        return render_template("report.html",
                               boss_list=boss_list,
                               rankings=rankings,
                               report=report)
    except (KeyError, ValueError, SyntaxError):
        # Cache miss: analyze the report, then store the result in redis.
        analyzed = model.analyze(report)
        boss_list = analyzed["kills"]
        rankings = analyzed["details"]
        r.hmset(report, analyzed)
        return render_template("report.html",
                               boss_list=boss_list,
                               rankings=rankings,
                               report=report)
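The route only shows the caching side. Judging from how the return value is used here and in the refresh_report example below, model.analyze(report) is expected to return a dict with "kills" and "details" entries; a hypothetical stand-in with that shape (not the project's real analyzer):

# Hypothetical stand-in for model.analyze with the shape the routes expect.
# hmset stores the values as strings, which is why the cache-hit path
# parses them back with ast.literal_eval.
def analyze(report_code):
    # A real implementation would fetch and analyze the log identified by report_code.
    return {"kills": [], "details": {}}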
Example #2
def tagDownload(tag):
    # Assumes `client_auth`, `post_data`, `headers` and `analyze` are defined
    # elsewhere in the module.
    tag = tag + " review"
    tag = urllib.quote(tag.encode("utf-8"))
    reviews = {'data': []}
    # Acquire an OAuth access token for the reddit API.
    response = requests.post("https://www.reddit.com/api/v1/access_token",
                             auth=client_auth, data=post_data, headers=headers)
    response = response.json()
    access_token = response['access_token']
    token_type = response['token_type']
    new_headers = {"Authorization": token_type + " " + access_token,
                   "User-Agent": "MovieReviewer/0.1 by michaeljavy"}
    # Call the reddit search API, sort the results by relevance and fetch only
    # the first 10 posts.
    response = requests.get("https://oauth.reddit.com/r/all/search?q=" + tag +
                            "&sort=relevance&limit=10", headers=new_headers)
    if response.status_code == 200:
        redditData = response.json()["data"]["children"]
        # Loop over all of the posts and scrape their comments with depth 1.
        for post in redditData:
            post_id = post["data"]["id"]
            response = requests.get(
                "https://oauth.reddit.com/r/all/comments/" + post_id + "?depth=1",
                headers=new_headers)
            if response.status_code == 200:
                # Drop the trailing "more comments" stub returned by the API.
                d = response.json()[1]["data"]["children"][:-1]
                for comment in d:
                    # Store the upvote count, the comment body and the permalink
                    # for later use.
                    reviews['data'].append({
                        "up": comment["data"]["ups"],
                        "review": comment["data"]["body"],
                        "url": "https://www.reddit.com" + comment["data"]["permalink"],
                    })
    else:
        return None
    return analyze(tag, reviews)
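The snippet relies on client_auth, post_data and headers being defined elsewhere in the module. A minimal sketch of what they usually look like for reddit's script-type OAuth password grant (all credential values below are placeholders):

import requests.auth

# Placeholders -- use the client id/secret from your reddit app settings
# and the account credentials of the bot user.
client_auth = requests.auth.HTTPBasicAuth("CLIENT_ID", "CLIENT_SECRET")
post_data = {"grant_type": "password",
             "username": "REDDIT_USERNAME",
             "password": "REDDIT_PASSWORD"}
headers = {"User-Agent": "MovieReviewer/0.1 by michaeljavy"}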
Example #3
def refresh_report():
    try:
        report = request.args.get("report")
        report = report[-16:]
        # Drop the cached analysis and re-analyze the report from scratch.
        r.delete(report)
        analyzed = model.analyze(report)
        boss_list = analyzed["kills"]
        rankings = analyzed["details"]
        r.hmset(report, analyzed)
        return render_template("report.html",
                               boss_list=boss_list,
                               rankings=rankings,
                               report=report)
    except Exception:
        # Anything going wrong (missing parameter, unknown report, failed analysis)
        # sends the user to an error page.
        return render_template("badlog.html")
Example #4
def application(environ, start_response):
    start_response('200 OK', [
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS'),
        ('Content-Type', 'application/json')
    ])

    raw = environ['wsgi.input'].read(int(environ.get('CONTENT_LENGTH') or 0))
    if len(raw) > 0:
        log.logit(raw)

        try:
            content = json.loads(raw)
        except ValueError:
            return [json.dumps({"error": "I need JSON, with your input as the value to the data key"})]

        # The challenge makes sure that UUIDs don't get stomped
        # by people who want to hijack an existing comment.
        #
        # Both the publicly accessible uuid and the privately
        # stored challenge have to match before anything is updated.
        my_challenge = 0

        if 'uid' in content and content['uid'] != 0:
            my_guid = content['uid']

            if 'c' in content:
                my_challenge = content['c']
        else:
            my_guid = myrand()
            my_challenge = myrand()

        if 'id' in content and content['id'] == 'fave':
            log.faveit(raw)
            res = {}  # a fave is only logged; respond with the identifiers alone
        else:
            res = model.analyze(content['data'])

        res['uid'] = my_guid
        res['c'] = my_challenge

        return [json.dumps(res)]

    return [json.dumps({"error": "I need some POST input to analyze, dumbfuck."})]
Example #5
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
import sys

import log
import model

# Read the raw input from stdin, log it, then print the analysis as JSON.
content = sys.stdin.read()
log.logit(content)
print(json.dumps(model.analyze(content)))
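Since the script reads stdin and writes JSON to stdout, it composes with shell pipelines; a hypothetical invocation (the file name analyze_stdin.py is a placeholder):

echo "some text to analyze" | python analyze_stdin.py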
Example #6
import argparse

import model

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run experiments of language analysis.')
    parser.add_argument('path',
                        help='path to csv file with dataset')
    parser.add_argument('alg', choices=['lasso', 'l1', 'ridge', 'l2', 'svm'],
                        help='learning algorithm')
    parser.add_argument('score',
                        choices=['tr_f1', 'tr_prec', 'tr_rec', 'tr_auc',
                                 'val_f1', 'val_prec', 'val_rec', 'val_auc'],
                        help='scoring function to validate')
    parser.add_argument('-t', '--limit', type=int, default=0,
                        help='maximum number of tweets recovered (0 for all)')
    parser.add_argument('-k', '--kfolds', type=int, default=10,
                        help='k for k-fold cross-validation')
    parser.add_argument('-s', '--stopwords', action='store_true',
                        help='remove stopwords if active')
    parser.add_argument('-r', '--keepwordsrank', type=int,
                        help='number of features to be used, ranked by number of appearances')
    # nargs='+' always yields a list, so the default should be a list as well.
    parser.add_argument('-a', '--alpha', type=float, nargs='+', default=[1.0],
                        help='regularization parameters to try')

    args = parser.parse_args()
    print('path:', args.path)
    print('alg:', args.alg)
    print('score:', args.score)
    print('limit:', args.limit)
    print('kfolds:', args.kfolds)
    print('stopwords:', args.stopwords)
    print('keepwordsrank:', args.keepwordsrank)
    print('alpha:', args.alpha)

    model.analyze(args.path, args.alg, args.score, args.limit, args.kfolds,
                  args.stopwords, args.keepwordsrank, args.alpha)
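A hypothetical invocation (the script and dataset file names are placeholders):

python run_experiments.py tweets.csv lasso val_f1 -k 5 -s -a 0.1 1.0 10.0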
Example #7
'''
Main file which takes arguments from encrypt.py and executes further functions as documented in the file.
'''
import sys

import imgpros
import model
import results
import utilities

# Initialize the inputs taken from encrypt.py and store them for further use.
utilities.intializer(sys.argv)
# Display a preview of the initial 50 frames to help the user crop the rotating cell.
utilities.preview()
# Let the user select a free-size rectangular portion of the first frame
# containing a cell for further analysis.
imgpros.crop()
# Perform a linear regression analysis on the cropped portion of all the frames
# and output the centre of mass (COM) data and a plot of the COM for further use.
model.analyze()
# Calculate the frequency, the change in angle per frame, the total
# clockwise/counter-clockwise time interval and number of frames, and all other
# necessary outputs. Output the final analysis as CSV files and graphs.
model.compute()
# Save the graphs and CSV files obtained from the analysis in the required folder.
results.save()
Example #8
    # (Excerpt from inside a loop over each station of each city.)
    station_index += 1
    print('Processing %s - %s (%d/%d)' %
          (city, station, station_index, len(stations)))
    data = list(zip(*city_data[station]))
    for i in range(len(predict_arr)):
        # If fewer than a third of the values are non-zero, the model cannot fit;
        # emit zeros for this pollutant instead of a prediction.
        not_zero = 0
        for j in data[predict_arr[i]]:
            if j > 0:
                not_zero += 1
        if not_zero < len(data[predict_arr[i]]) / 3:
            for index in range(48):
                submission_data[station][index][i] = 0
            continue

        predict = model.analyze(data[predict_arr[i]], data[0], predict_start,
                                predict_end)
        # Keep only the predictions that fall inside the submission window,
        # clamping negative values to zero.
        index = 0
        for j in range(len(predict.index)):
            if predict.index[j] >= submission_start:
                submission_data[station][index][i] = max(predict[j], 0)
                index += 1

# Write the collected predictions to the submission file.
f = open('submission.csv', 'w', newline='')
writer = csv.writer(f)
writer.writerow(['test_id', 'PM2.5', 'PM10', 'O3'])

for station in submission_data:
    for i in range(len(submission_data[station])):
        writer.writerow([station + '#' + str(i)] + submission_data[station][i])
f.close()