def get(self):
    args = parser.parse_args()
    if not args['page']:
        args['page'] = 1
    query = Frame.select().order_by(Frame.id)
    count = query.count()
    frames = map(lambda x: get_dictionary_from_model(x), query)  # query.paginate(args['page'], 20)
    return {'meta': {'count': count}, 'data': frames}
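# A minimal sketch of the module-level request parser the handler above relies
# on; the 'page' argument name comes from args['page'], everything else here is
# an assumption:
from flask_restful import reqparse  # flask.ext.restful on older installs

parser = reqparse.RequestParser()
parser.add_argument('page', type=int, required=False, location='values')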
def post(self, name, description, generation, seed_word, word_count, word_string):
    # Frame.create() both instantiates and persists the new row.
    frame = Frame.create(name=name, description=description, generation=generation,
                         seed_word=seed_word, word_count=word_count,
                         word_string=word_string)
    return {'meta': None, 'data': get_dictionary_from_model(frame)}
def post(self):
    """Compute analysis. Place in persistent storage."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, required=False, location='json')
    parser.add_argument('frame', type=int, required=True, location='json')
    parser.add_argument('phrase', type=str, required=True, location='json')
    parser.add_argument('start_date', type=str, required=False, location='json')
    parser.add_argument('end_date', type=str, required=False, location='json')
    # subgroup a specific args
    parser.add_argument('states_a', type=list, required=False, location='json')
    parser.add_argument('party_a', type=str, required=True, location='json')  # D or R
    # subgroup b specific args
    parser.add_argument('states_b', type=list, required=False, location='json')
    parser.add_argument('party_b', type=str, required=True, location='json')  # D or R
    args = parser.parse_args()
    # if args['start_date']:
    #     args['start_date'] = dateparser.parse(args['start_date']).date()
    #     args['end_date'] = dateparser.parse(args['end_date']).date()
    analysis_obj = Analysis.compute_analysis(
        id=args.get('id'),
        phrase=args.get('phrase'),
        frame=args.get('frame'),
        start_date=args.get('start_date'),
        end_date=args.get('end_date'),
        # subgroups to compare
        states_a=args.get('states_a'),
        party_a=args.get('party_a'),
        states_b=args.get('states_b'),
        party_b=args.get('party_b'),
        to_update=False)
    data = get_dictionary_from_model(analysis_obj)
    return {'meta': None, 'data': data}
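# A hypothetical request against the handler above. The field names come from
# the parser arguments; the URL and port are assumptions:
#
#   curl -X POST http://localhost:5000/analysis \
#        -H 'Content-Type: application/json' \
#        -d '{"frame": 1, "phrase": "minimum wage",
#             "states_a": ["CA", "NY"], "party_a": "D",
#             "states_b": ["TX"], "party_b": "R"}'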
def hello():
    before = dateparse(request.args.get('before')).date()
    after = dateparse(request.args.get('after')).date()
    ret = []
    for contrib in ContributionChanges.select().where(ContributionChanges.date == before):
        before_contrib_dict = get_dictionary_from_model(contrib)
        after_contrib_dict = get_dictionary_from_model(
            Contribution.get(sub_id=contrib.sub_id).get_on_date(after))
        before_contrib_dict.pop('id')
        before_contrib_dict.pop('contribution')
        after_contrib_dict.pop('id')
        # Map coded FEC fields to their human-readable labels in both dicts.
        label_maps = {
            'transaction_pgi': transaction_pgi_dict,
            'ammendment_id': ammendment_id_dict,
            'transaction_tp': transaction_tp_dict,
            'report_type': report_type_dict,
        }
        for key, mapping in label_maps.items():
            for d in (before_contrib_dict, after_contrib_dict):
                d[key] = mapping[d[key]] if d[key] else None
        ret.append({
            "before": before_contrib_dict,
            "after": after_contrib_dict,
            "changes": list(set(
                x[1][0][0] for x in diff(before_contrib_dict, after_contrib_dict).diffs
                if x[0] not in ["equal", "context_end_container"]
                and x[1][0][0] not in ['contribution', 'date', 'id'])),
        })
    before_sub_ids = set(x.sub_id for x in ContributionHistory.select().where(ContributionHistory.date == before))
    after_sub_ids = set(x.sub_id for x in ContributionHistory.select().where(ContributionHistory.date == after))
    # Rows present in the earlier snapshot but missing from the later one were deleted.
    for sub_id in (before_sub_ids - after_sub_ids):
        ret.append({
            "before": get_dictionary_from_model(Contribution.get(sub_id=sub_id)),
            "after": None,
            "changes": None,
        })
    return render_template('diff.html', ret=ret, before=before, after=after)
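# The view above expects "before" and "after" query parameters in a format
# dateutil can parse; a hypothetical request (the route name is an assumption):
#
#   GET /diff?before=2016-01-01&after=2016-01-08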
def ingest(filepath):
    '''Ingest file into database'''
    print "Ingesting %s" % filepath
    rows = parse_fec_file(filepath)
    # check history table to see if this file is done
    with db.transaction():
        for idx, row in enumerate(rows):
            print "Checking row %d of %d from %s" % (idx, len(rows), filepath)
            try:
                contribution_in_db = Contribution.get(cycle=row['cycle'], sub_id=row['sub_id'])
            except Contribution.DoesNotExist:
                contribution_in_db = None
            # If the row isn't already there, insert it
            if not contribution_in_db:
                print t.cyan("\tInserting new row %d of %s" % (idx, filepath))
                new_contribution = Contribution.create(**row)
                ContributionHistory.create(contribution=new_contribution.id, date=row['date'],
                                           cycle=row['cycle'], sub_id=row['sub_id'])
            # If the row is there, check for modifications
            else:
                contribution_in_db_dict = get_dictionary_from_model(contribution_in_db)
                db_fields = {k: v for k, v in contribution_in_db_dict.iteritems() if k not in ("date", "id")}
                row_fields = {k: v for k, v in row.iteritems() if k != "date"}
                # If it has not been modified, simply add a ContributionHistory object
                if db_fields == row_fields:
                    print t.white("\tNo changes found in row %d of %s" % (idx, filepath))
                    ContributionHistory.create(contribution=contribution_in_db.id, date=row['date'],
                                               cycle=row['cycle'], sub_id=row['sub_id'])
                # If it has been modified, archive the old values in
                # ContributionChanges, then update the row in place
                else:
                    print t.magenta("\tDetected change in row %d of %s" % (idx, filepath))
                    ContributionChanges.create(contribution=contribution_in_db.id,
                                               **{k: v for k, v in contribution_in_db_dict.iteritems() if k != "id"})
                    for k, v in row.iteritems():
                        if v != getattr(contribution_in_db, k):
                            setattr(contribution_in_db, k, v)
                    contribution_in_db.save()
                    ContributionHistory.create(contribution=contribution_in_db.id, date=row['date'],
                                               cycle=row['cycle'], sub_id=row['sub_id'])
    myfile, _ = File.get_or_create(
        name=os.path.basename(filepath),
        # .group() extracts the matched "YYYY_YYYY" string rather than the match object
        years=re.search(r'\d{4}_\d{4}', os.path.basename(filepath)).group(),
        sha1=sha1OfFile(filepath),
        updated=dateparse(os.path.dirname(filepath).split("/")[-1]
                          .replace("downloaded_", "").replace("_", "-")).date(),
        ingested=True)
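# A minimal driver sketch for ingest(). The "downloaded_YYYY_MM_DD" directory
# layout is inferred from the date parsing above; walking every file under the
# root is an assumption:
import os

def ingest_directory(root):
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            ingest(os.path.join(dirpath, name))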
def get(self):
    """Search persistent storage for analyses matching argument parameters."""
    parser = reqparse.RequestParser()
    parser.add_argument('page', type=int, required=False, location='values')
    args = parser.parse_args()
    if not args['page']:
        args['page'] = 1
    query = Analysis.select().order_by(Analysis.id)
    count = query.count()
    analyses = map(lambda x: get_dictionary_from_model(x), query)  # query.paginate(args['page'], 20)
    return {'meta': {'count': count}, 'data': analyses}
def get(self, id): """ Return percent complete (meta). Return either empty json or completed frame and topic plot (text). """ analysis_obj = Analysis.get(Analysis.id == id) info = analysis_obj.check_if_complete() data = get_dictionary_from_model(analysis_obj) data['topic_plot'] = eval(data['topic_plot']) if data['topic_plot'] else None data['frame_plot'] = eval(data['frame_plot']) if data['frame_plot'] else None data['wordcount_plot'] = eval(data['wordcount_plot']) if data['wordcount_plot'] else None return { 'meta': info, 'data': data }
def get(self, id): """ Return percent complete (meta). Return either empty json or completed frame and topic plot (text). """ analysis_obj = Analysis.get(Analysis.id == id) info = analysis_obj.check_if_complete() data = get_dictionary_from_model(analysis_obj) data['topic_plot'] = eval( data['topic_plot']) if data['topic_plot'] else None data['frame_plot'] = eval( data['frame_plot']) if data['frame_plot'] else None data['wordcount_plot'] = eval( data['wordcount_plot']) if data['wordcount_plot'] else None return {'meta': info, 'data': data}
def put(self, id, word_string):
    frame = Frame.get(Frame.id == id)
    frame.word_string = word_string
    frame.save()  # persist the updated word_string
    return {'meta': None, 'data': get_dictionary_from_model(frame)}
def get(self, id):
    frame = Frame.get(Frame.id == id)
    return {'meta': None, 'data': get_dictionary_from_model(frame)}
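# A hypothetical wiring sketch for the Frame handlers above with Flask-RESTful;
# the resource class names and URL rules are assumptions:
#
#   api.add_resource(FrameListResource, '/frames')
#   api.add_resource(FrameResource, '/frames/<int:id>')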
def get_dictionary(obj, additional_fields):
    # Note that this will fail miserably if any of these additional fields
    # refer to model objects.
    data = get_dictionary_from_model(obj)
    for field in additional_fields:
        data[field] = getattr(obj, field)
    return data
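# A hypothetical usage sketch: serialize a Frame plus one computed attribute.
# "summary" is an assumed plain-value @property on Frame, not a real field:
#
#   frame = Frame.get(Frame.id == 1)
#   payload = get_dictionary(frame, ['summary'])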
def serialize_object(self, obj, fields=None, exclude=None):
    data = get_dictionary_from_model(obj, fields, exclude)
    return self.clean_data(data)
def process_events(events):
    for event in events:
        print event
        pushbullet_reactor(config, get_dictionary_from_model(event))
        consume_event(event)
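# A minimal polling-loop sketch around process_events(). The Event model and
# its "consumed" flag are assumptions about how unprocessed events are queried:
import time

def run_forever(interval=30):
    while True:
        process_events(Event.select().where(Event.consumed == False))
        time.sleep(interval)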