def construct_global_ctx(self):
    """Populate ``self.gtx`` with builtins, helper callables, and the
    grades/courses/assignments collections used by the templates."""
    self.gtx = gtx = {}
    rc = self.rc
    # Python builtins the templates are allowed to call.
    for builtin in (len, sum, zip, sorted, range):
        gtx[builtin.__name__] = builtin
    gtx["True"], gtx["False"], gtx["None"] = True, False, None
    # Helper callables shared with the templates.
    gtx["groupby"] = groupby
    gtx["gets"] = gets
    gtx["id_key"] = lambda x: x["_id"]
    gtx["date_key"] = date_key
    gtx["doc_date_key"] = doc_date_key
    gtx["level_val"] = level_val
    gtx["category_val"] = category_val
    gtx["rfc822now"] = rfc822now
    gtx["date_to_rfc822"] = date_to_rfc822
    gtx["month_and_year"] = month_and_year
    gtx["latex_safe"] = latex_safe
    gtx["all_docs_from_collection"] = all_docs_from_collection
    # Materialize the collections the grading templates iterate over.
    for collname in ("grades", "courses", "assignments"):
        gtx[collname] = list(all_docs_from_collection(rc.client, collname))
def class_email(rc):
    """Build an email to every student in each active course.

    Parameters
    ----------
    rc : RunControl
        Must provide ``client``, ``subject``, ``body`` and ``attachments``.

    Returns
    -------
    list
        One message (built by :func:`make_message`) per enrolled student.
    """
    # Map student id -> email address once, up front.
    # (The unused ``gradedir`` local from the original was removed.)
    addresses = {x['_id']: x['email']
                 for x in all_docs_from_collection(rc.client, 'students')}
    messages = []
    for course in all_docs_from_collection(rc.client, 'courses'):
        # Courses default to active when the flag is absent.
        if not course.get('active', True):
            continue
        course_id = course['_id']
        subject = '[{0}] {1}'.format(course_id, rc.subject)
        for student_id in course['students']:
            messages.append(make_message(rc, addresses[student_id],
                                         subject=subject, body=rc.body,
                                         attachments=rc.attachments))
    return messages
def filter_publications(self, authors, reverse=False):
    """Return citations sharing at least one author with *authors*.

    Results are sorted chronologically by ``doc_date_key``; pass
    ``reverse=True`` for newest-first ordering.
    """
    rc = self.rc
    matched = [pub
               for pub in all_docs_from_collection(rc.client, 'citations')
               if set(pub['author']) & authors]
    matched.sort(key=doc_date_key, reverse=reverse)
    return matched
def construct_global_ctx(self):
    """Populate ``self.gtx`` with helpers plus the jobs and people lists."""
    self.gtx = gtx = {}
    rc = self.rc
    # Builtins exposed to the templates.
    gtx['len'], gtx['sorted'] = len, sorted
    gtx['True'], gtx['False'], gtx['None'] = True, False, None
    # Shared helper callables.
    gtx['groupby'] = groupby
    gtx['gets'] = gets
    gtx['date_key'] = date_key
    gtx['doc_date_key'] = doc_date_key
    gtx['level_val'] = level_val
    gtx['category_val'] = category_val
    gtx['rfc822now'] = rfc822now
    gtx['date_to_rfc822'] = date_to_rfc822
    # Collections pulled from the database.
    gtx['jobs'] = list(all_docs_from_collection(rc.client, 'jobs'))
    gtx['people'] = list(all_docs_from_collection(rc.client, 'people'))
    gtx['all_docs_from_collection'] = all_docs_from_collection
def filter_projects(self, authors, reverse=False):
    """Return projects with at least one team member in *authors*.

    Each returned project is a shallow copy whose ``team`` list is
    narrowed to the matching members; results are sorted by ``_id``.
    """
    rc = self.rc
    selected = []
    for raw in all_docs_from_collection(rc.client, 'projects'):
        if not (set(gets(raw['team'], 'name')) & authors):
            continue
        proj = dict(raw)
        proj['team'] = [member for member in raw['team']
                        if member['name'] in authors]
        selected.append(proj)
    selected.sort(key=id_key, reverse=reverse)
    return selected
def blog(self):
    """Render each blog post, the blog index, and the RSS feed."""
    rc = self.rc
    os.makedirs(os.path.join(self.bldir, 'blog'), exist_ok=True)
    # Newest posts first.
    posts = sorted(all_docs_from_collection(rc.client, 'blog'),
                   key=ene_date_key, reverse=True)
    for entry in posts:
        self.render('blog_post.html',
                    os.path.join('blog', entry['_id'] + '.html'),
                    post=entry, title=entry['title'])
    self.render('blog_index.html', os.path.join('blog', 'index.html'),
                title='Blog', posts=posts)
    self.render('rss.xml', os.path.join('blog', 'rss.xml'), items=posts)
def grade_email(rc):
    """Build grade-report emails, one per student per active course.

    Raises
    ------
    RuntimeError
        If a student's grade-report PDF has not been built yet.
    """
    gradedir = os.path.join(rc.builddir, GradeReportBuilder.btype)
    addresses = {s['_id']: s['email']
                 for s in all_docs_from_collection(rc.client, 'students')}
    messages = []
    for course in all_docs_from_collection(rc.client, 'courses'):
        # Courses default to active when the flag is absent.
        if not course.get('active', True):
            continue
        course_id = course['_id']
        for student_id in course['students']:
            base = GradeReportBuilder.basename(student_id, course_id) + '.pdf'
            fname = os.path.join(gradedir, base)
            # The PDF must already have been produced by the grade builder.
            if not os.path.isfile(fname):
                raise RuntimeError(fname + ' does not exist, please run '
                                   '"regolith build grade" prior to emailing '
                                   'grades.')
            messages.append(make_message(
                rc, addresses[student_id],
                subject="Current grades for " + course_id,
                body='Please see the attached PDF and '
                     'please report any errors.',
                attachments=[fname]))
    return messages
def filter_publications(self, authors, reverse=False):
    """Return citations involving *authors*, with their names bolded.

    Matching author names are wrapped in ``\\textbf{...}``.  The
    publications are sorted chronologically by ``doc_date_key``.

    Parameters
    ----------
    authors : set
        Author name strings to match against each citation's authors.
    reverse : bool, optional
        Sort newest-first when True.
    """
    rc = self.rc
    pubs = []
    for pub in all_docs_from_collection(rc.client, 'citations'):
        if len(set(pub['author']) & authors) == 0:
            continue
        # Copy before mutating: the original wrote the bolded names back
        # into the shared citation document, corrupting it for any other
        # builder run in the same process (the sibling deepcopy variant
        # of this method already does this).
        pub = deepcopy(pub)
        pub['author'] = ['\\textbf{' + a + '}' if a in authors else a
                        for a in pub['author']]
        pubs.append(pub)
    pubs.sort(key=doc_date_key, reverse=reverse)
    return pubs
def construct_global_ctx(self):
    """Constructs the global context"""
    super().construct_global_ctx()
    gtx = self.gtx
    rc = self.rc
    # The PI id is only needed when the groups collection is in play.
    if "groups" in self.needed_dbs:
        rc.pi_id = get_pi_id(rc)
    rc.coll = f"{TARGET_COLL}"
    rc.database = rc.databases[0]["name"]
    gtx[rc.coll] = sorted(all_docs_from_collection(rc.client, rc.coll),
                          key=_id_key)
    gtx["all_docs_from_collection"] = all_docs_from_collection
    # Builtins needed by the templates.
    gtx["float"], gtx["str"], gtx["zip"] = float, str, zip
def filter_publications(self, authors, reverse=False):
    """Return citations involving *authors*, with their names bolded.

    Matching author names are wrapped in ``\\textbf{...}``.  The
    publications are sorted chronologically by ``doc_date_key``.

    Parameters
    ----------
    authors : set
        Author name strings to match against each citation's authors.
    reverse : bool, optional
        Sort newest-first when True.
    """
    rc = self.rc
    pubs = []
    for pub in all_docs_from_collection(rc.client, "citations"):
        if len(set(pub["author"]) & authors) == 0:
            continue
        # Copy before mutating: the original wrote the bolded names back
        # into the shared citation document, corrupting it for any other
        # builder run in the same process (the sibling deepcopy variant
        # of this method already does this).
        pub = deepcopy(pub)
        pub["author"] = ["\\textbf{" + a + "}" if a in authors else a
                        for a in pub["author"]]
        pubs.append(pub)
    pubs.sort(key=doc_date_key, reverse=reverse)
    return pubs
def blog(self):
    """Render the blog and rss"""
    rc = self.rc
    blog_dir = os.path.join(self.bldir, 'blog')
    os.makedirs(blog_dir, exist_ok=True)
    # Sort posts newest-first before rendering.
    posts = sorted(all_docs_from_collection(rc.client, 'blog'),
                   key=ene_date_key, reverse=True)
    for post in posts:
        target = os.path.join('blog', post['_id'] + '.html')
        self.render('blog_post.html', target, post=post,
                    title=post['title'])
    self.render('blog_index.html', os.path.join('blog', 'index.html'),
                title='Blog', posts=posts)
    self.render('rss.xml', os.path.join('blog', 'rss.xml'), items=posts)
def format_last_first_instutition_names(rc, ppl_names, excluded_inst_name=None):
    """Get the last name, first name and institution name.

    Parameters
    ----------
    rc : RunControl
        Provides the database client.
    ppl_names : iterable of tuple
        Tuples of ``(full name, institution key, extra)``.
    excluded_inst_name : str, optional
        Skip people whose institution resolves to this name.

    Yields
    ------
    tuple
        ``(last, "first middle", institution key, " ", extra)``.

    Notes
    -----
    The original built an unused ``ppl`` list and ended with
    ``return ppl`` — dead code inside a generator (a generator's return
    value is discarded), removed here.
    """
    for ppl_tup in ppl_names:
        inst = fuzzy_retrieval(
            all_docs_from_collection(rc.client, "institutions"),
            ['aka', 'name', '_id'], ppl_tup[1], case_sensitive=False)
        inst_name = inst.get("name", "") if inst else ppl_tup[1]
        # remove all people who are in the institution of the person
        if inst_name != excluded_inst_name:
            name = HumanName(ppl_tup[0])
            yield (name.last, " ".join([name.first, name.middle]),
                   ppl_tup[1], " ", ppl_tup[2])
def filter_publications(self, authors, reverse=False):
    """Return citations involving *authors*, author names bolded.

    Operates on deep copies so the cached citation documents stay
    pristine; results are sorted chronologically by ``doc_date_key``.
    """
    rc = self.rc
    pubs = []
    for raw in all_docs_from_collection(rc.client, 'citations'):
        if not (set(raw['author']) & authors):
            continue
        pub = deepcopy(raw)
        pub['author'] = ['\\textbf{' + a + '}' if a in authors else a
                        for a in pub['author']]
        pubs.append(pub)
    pubs.sort(key=doc_date_key, reverse=reverse)
    return pubs
def query_ppl(self, target, **filters):
    """Query the data base for the target's collaborators' information.

    Parameters
    ----------
    target : str
        Name/alias/id of the person to look up in the people collection.
    **filters
        Optional ``since_date`` restricts publications considered.

    Returns
    -------
    dict
        Keys: ``person_info``, ``ppl_tab1``, ``ppl_tab3``, ``ppl_tab4``,
        ``ppl_tab5``, ``ppl_3tups`` (NSF-collaborator-style tables).
    """
    rc = self.rc
    gtx = self.gtx
    person = fuzzy_retrieval(all_docs_from_collection(rc.client, "people"),
                             ['aka', 'name', '_id'], target,
                             case_sensitive=False)
    if not person:
        # NOTE(review): the message is passed to RuntimeError as bytes
        # (.encode) — looks unintentional; confirm before changing.
        raise RuntimeError("Person {} not found in people.".format(
            target).encode('utf-8'))
    pubs = get_person_pubs(gtx["citations"], person)
    if 'since_date' in filters:
        since_date = filters.get('since_date')
        pubs = filter_since_date(pubs, since_date)
    # rc.verbose may be absent entirely; treat that like "not verbose".
    try:
        if rc.verbose:
            for pub in pubs:
                print(f"{pub.get('title')}, ({pub.get('year')})")
    except AttributeError:
        pass
    # Gather coauthors, advisors and advisees as (last, first, inst) tuples.
    my_collabs = get_coauthors_from_pubs(pubs, person)
    people, institutions = query_people_and_institutions(rc, my_collabs)
    ppl_names = set(zip(people, institutions))
    collab_3tups = set(format_last_first_instutition_names(rc, ppl_names))
    advisors_3tups = set(get_advisors_name_inst(person, rc))
    advisees_3tups = set(get_advisees_name_inst(gtx["people"], person, rc))
    ppl_3tups = sorted(list(collab_3tups | advisors_3tups | advisees_3tups))
    person_3tups = make_person_3tups(person, rc)
    coeditors_info = find_coeditors(person, rc)
    # Format each group with its NSF table prefix (G: advisors,
    # T: advisees, A: collaborators, E: coeditors).
    ppl_tab1 = format_to_nsf(person_3tups, '')
    ppl_tab3 = format_to_nsf(advisors_3tups, 'G:') + format_to_nsf(
        advisees_3tups, 'T:')
    ppl_tab4 = format_to_nsf(collab_3tups, 'A:')
    ppl_tab5 = format_to_nsf(coeditors_info, 'E:')
    results = {
        'person_info': person,
        'ppl_tab1': ppl_tab1,
        'ppl_tab3': ppl_tab3,
        'ppl_tab4': ppl_tab4,
        'ppl_tab5': ppl_tab5,
        'ppl_3tups': ppl_3tups
    }
    return results
def construct_global_ctx(self):
    """Constructs the global context"""
    super().construct_global_ctx()
    gtx = self.gtx
    rc = self.rc
    if "groups" in self.needed_colls:
        rc.pi_id = get_pi_id(rc)
    rc.coll = f"{TARGET_COLL}"
    # Load every needed collection, sorted by _id, into the context.
    for collname in self.needed_colls:
        gtx[collname] = sorted(
            all_docs_from_collection(rc.client, collname), key=_id_key)
    gtx["all_docs_from_collection"] = all_docs_from_collection
    gtx["float"] = float
    gtx["str"] = str
    gtx["zip"] = zip
def get_inst_name(person, rc):
    """Return the institution name from the person's latest employment.

    Falls back to the ``institution`` field, then to an empty string.
    If the abbreviation cannot be resolved against the institutions
    collection, a warning is printed and the raw value is returned.
    """
    if 'employment' in person:
        person_inst_abbr = get_recent_org(person)
    elif 'institution' in person:
        person_inst_abbr = person.get('institution')
    else:
        person_inst_abbr = ''
    match = fuzzy_retrieval(
        all_docs_from_collection(rc.client, "institutions"),
        ["name", "aka", "_id"], person_inst_abbr, case_sensitive=False)
    if match is None:
        print(f"WARNING: {person_inst_abbr} is not found in institutions.")
        return person_inst_abbr
    return match.get("name")
def construct_global_ctx(self):
    """Populate ``self.gtx`` with helpers and the ranked people list."""
    self.gtx = gtx = {}
    rc = self.rc
    # Builtins available inside the templates.
    gtx['len'], gtx['sorted'] = len, sorted
    gtx['True'], gtx['False'], gtx['None'] = True, False, None
    # Helper callables shared with the templates.
    gtx['groupby'] = groupby
    gtx['gets'] = gets
    gtx['date_key'] = date_key
    gtx['doc_date_key'] = doc_date_key
    gtx['level_val'] = level_val
    gtx['category_val'] = category_val
    gtx['rfc822now'] = rfc822now
    gtx['date_to_rfc822'] = date_to_rfc822
    gtx['month_and_year'] = month_and_year
    gtx['latex_safe'] = latex_safe
    # People ordered by position, most senior first.
    gtx['people'] = sorted(all_docs_from_collection(rc.client, 'people'),
                           key=position_key, reverse=True)
    gtx['all_docs_from_collection'] = all_docs_from_collection
def blog(self):
    """Render the blog and rss"""
    rc = self.rc
    os.makedirs(os.path.join(self.bldir, "blog"), exist_ok=True)
    # Newest posts first.
    posts = sorted(all_docs_from_collection(rc.client, "blog"),
                   key=ene_date_key, reverse=True)
    for entry in posts:
        self.render("blog_post.html",
                    os.path.join("blog", entry["_id"] + ".html"),
                    post=entry, title=entry["title"])
    self.render("blog_index.html", os.path.join("blog", "index.html"),
                title="Blog", posts=posts)
    self.render("rss.xml", os.path.join("blog", "rss.xml"), items=posts)
def construct_global_ctx(self):
    """Constructs the global context"""
    super().construct_global_ctx()
    gtx = self.gtx
    rc = self.rc
    rc.pi_id = get_pi_id(rc)
    rc.coll = f"{TARGET_COLL}"
    # A payee is mandatory; fall back to the configured default user.
    if not rc.payee:
        if not rc.default_user_id:
            raise RuntimeError(" No default_user_id set. Please specify this "
                               "either in the ~/.conf/regolith/user.json or in"
                               " regolithrc.json")
        rc.payee = rc.default_user_id
    if not rc.database:
        rc.database = rc.databases[0]["name"]
    gtx[rc.coll] = sorted(all_docs_from_collection(rc.client, rc.coll),
                          key=_id_key)
    gtx["all_docs_from_collection"] = all_docs_from_collection
    gtx["float"] = float
    gtx["str"] = str
    gtx["zip"] = zip
def construct_global_ctx(self):
    """Constructs the global context"""
    super().construct_global_ctx()
    gtx = self.gtx
    rc = self.rc
    if "groups" in self.needed_dbs:
        rc.pi_id = get_pi_id(rc)
    rc.coll = f"{TARGET_COLL}"
    # Best-effort default-database selection: rc may lack the attribute
    # entirely, or the databases list may be empty/malformed.
    try:
        if not rc.database:
            rc.database = rc.databases[0]["name"]
    except Exception:
        # Was ``except BaseException``, which also swallowed
        # KeyboardInterrupt and SystemExit; Exception is broad enough.
        pass
    # Load every needed collection, sorted by _id, into the context.
    colls = [
        sorted(all_docs_from_collection(rc.client, collname), key=_id_key)
        for collname in self.needed_dbs
    ]
    for db, coll in zip(self.needed_dbs, colls):
        gtx[db] = coll
    gtx["all_docs_from_collection"] = all_docs_from_collection
    gtx["float"] = float
    gtx["str"] = str
    gtx["zip"] = zip
def get_advisees_name_inst(coll, advisor, rc):
    """Get advisor's advisees. Yield (last name, first name, institutions)"""
    # Any alias, the full name, or the id counts as a match for the advisor.
    advisor_names = advisor.get('aka', []) + [advisor.get('name'),
                                              advisor.get('_id')]
    for person in coll:
        for edu in person.get("education", []):
            if 'advisor' not in edu or edu['advisor'] not in advisor_names:
                continue
            person_name = HumanName(person.get("name"))
            inst_name = edu.get("institution")
            inst = fuzzy_retrieval(
                all_docs_from_collection(rc.client, "institutions"),
                ['aka', 'name', '_id'], inst_name, case_sensitive=False)
            first_name = " ".join([person_name.first, person_name.middle])
            if inst is None:
                print("WARNING: {} not in institutions".format(inst_name))
                yield person_name.last, first_name, inst_name
            else:
                yield person_name.last, first_name, inst.get('name', "")
            # Only the first matching education entry per person counts.
            break
def latex(self):
    """Render latex template

    Builds the Columbia annual-report LaTeX document for the person named
    in ``rc.people[0]`` over the period ``rc.from_date``..``rc.to_date``
    (default: one year), then compiles it to PDF.
    """
    rc = self.rc
    if not rc.people:
        raise RuntimeError("ERROR: please rerun specifying --people name")
    if not rc.from_date:
        raise RuntimeError("ERROR: please rerun specifying --from")
    build_target = get_id_from_name(
        all_docs_from_collection(rc.client, "people"), rc.people[0])
    begin_year = int(rc.from_date.split("-")[0])
    begin_period = date_parser.parse(rc.from_date).date()
    pre_begin_period = begin_period - relativedelta(years=1)
    # Reporting window: explicit --to, else one year from --from.
    if rc.to_date:
        to_date = date_parser.parse(rc.to_date).date()
        end_period = to_date
        post_end_period = to_date + relativedelta(years=1)
    else:
        end_period = begin_period + relativedelta(years=1) - relativedelta(
            days=1)
        post_end_period = begin_period + relativedelta(
            years=2) - relativedelta(days=1)
    me = [p for p in self.gtx["people"] if p["_id"] == build_target][0]
    me["begin_period"] = dt.date.strftime(begin_period, "%m/%d/%Y")
    # NOTE(review): the line above is duplicated verbatim — harmless but
    # presumably unintentional.
    me["begin_period"] = dt.date.strftime(begin_period, "%m/%d/%Y")
    me["pre_begin_period"] = dt.date.strftime(pre_begin_period, "%m/%d/%Y")
    me["end_period"] = dt.date.strftime(end_period, "%m/%d/%Y")
    me["post_end_period"] = dt.date.strftime(post_end_period, "%m/%d/%Y")
    # NOTE(review): the group id "bg" is hard-coded here.
    projs = filter_projects(self.gtx["projects"], set([build_target]),
                            group="bg")
    #########
    # highlights
    #########
    for proj in projs:
        if proj.get('highlights'):
            proj["current_highlights"] = False
            for highlight in proj.get('highlights'):
                highlight_date = dt.date(
                    highlight.get("year"),
                    month_to_int(highlight.get("month", 1)), 1)
                if highlight_date > begin_period and highlight_date < end_period:
                    highlight["is_current"] = True
                    proj["current_highlights"] = True
    #########
    # current and pending
    #########
    pi = fuzzy_retrieval(self.gtx["people"], ["aka", "name", "_id"],
                         build_target)
    # pi['initials'] = "SJLB"
    grants = merge_collections_superior(self.gtx["proposals"],
                                        self.gtx["grants"], "proposal_id")
    # Normalize team-member names against the people collection.
    for g in grants:
        for person in g["team"]:
            rperson = fuzzy_retrieval(self.gtx["people"], ["aka", "name"],
                                      person["name"])
            if rperson:
                person["name"] = rperson["name"]
        if g.get('budget'):
            amounts = [i.get('amount') for i in g.get('budget')]
            g['subaward_amount'] = sum(amounts)
    current_grants = [dict(g) for g in grants if is_current(g)]
    current_grants, _, _ = filter_grants(current_grants, {pi["name"]},
                                         pi=False, multi_pi=True)
    pending_grants = [
        g for g in self.gtx["proposals"] if g["status"] == "pending"
    ]
    for g in pending_grants:
        for person in g["team"]:
            rperson = fuzzy_retrieval(self.gtx["people"], ["aka", "name"],
                                      person["name"])
            if rperson:
                person["name"] = rperson["name"]
    pending_grants, _, _ = filter_grants(pending_grants, {pi["name"]},
                                         pi=False, multi_pi=True)
    grants = pending_grants + current_grants
    for grant in grants:
        grant.update(
            award_start_date="{2}/{1}/{0}".format(
                grant["begin_day"],
                month_to_int(grant["begin_month"]),
                grant["begin_year"],
            ),
            award_end_date="{2}/{1}/{0}".format(
                grant["end_day"],
                month_to_int(grant["end_month"]),
                grant["end_year"],
            ),
        )
    # Drop current grants whose cpp_info has no cppflag set.
    badids = [
        i["_id"] for i in current_grants
        if not i['cpp_info'].get('cppflag', "")
    ]
    # NOTE(review): ``iter`` shadows the builtin of the same name.
    iter = copy(current_grants)
    for grant in iter:
        if grant["_id"] in badids:
            current_grants.remove(grant)
    #########
    # end current and pending
    #########
    #########
    # advising
    #########
    undergrads = filter_employment_for_advisees(self.gtx["people"],
                                                begin_period, "undergrad")
    masters = filter_employment_for_advisees(self.gtx["people"],
                                             begin_period, "ms")
    currents = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period, "phd")
    # Graduated students are searched over a five-year lookback window.
    graduateds = filter_employment_for_advisees(
        self.gtx["people"], begin_period.replace(year=begin_year - 5),
        "phd")
    postdocs = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period, "postdoc")
    visitors = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period,
                                              "visitor-unsupported")
    # Separate still-active students from graduated ones.
    iter = deepcopy(graduateds)
    for g in iter:
        if g.get("active"):
            graduateds.remove(g)
    iter = deepcopy(currents)
    for g in iter:
        if not g.get("active"):
            currents.remove(g)
    ######################
    # service
    #####################
    # A fresh deepcopy of ``me`` is taken before each filter because the
    # filter_* helpers appear to consume/modify their input.
    mego = deepcopy(me)
    dept_service = filter_service([mego], begin_period, "department")
    mego = deepcopy(me)
    school_service = filter_service([mego], begin_period, "school")
    mego = deepcopy(me)
    uni_service = filter_service([mego], begin_period, "university")
    uni_service.extend(school_service)
    mego = deepcopy(me)
    prof_service = filter_service([mego], begin_period, "profession")
    mego = deepcopy(me)
    outreach = filter_service([mego], begin_period, "outreach")
    mego = deepcopy(me)
    lab = filter_facilities([mego], begin_period, "research")
    mego = deepcopy(me)
    shared = filter_facilities([mego], begin_period, "shared")
    mego = deepcopy(me)
    fac_other = filter_facilities([mego], begin_period, "other")
    mego = deepcopy(me)
    fac_teaching = filter_facilities([mego], begin_period, "teaching")
    mego = deepcopy(me)
    fac_wishlist = filter_facilities([mego], begin_period,
                                     "research_wish", verbose=False)
    mego = deepcopy(me)
    tch_wishlist = filter_facilities([mego], begin_period, "teaching_wish")
    mego = deepcopy(me)
    curric_dev = filter_activities([mego], begin_period, "teaching")
    mego = deepcopy(me)
    other_activities = filter_activities([mego], begin_period, "other")
    ##########################
    # Presentation list
    ##########################
    keypres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"], build_target,
                                   types=["award", "plenary", "keynote"],
                                   since=begin_period, before=end_period,
                                   statuses=["accepted"])
    invpres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"], build_target,
                                   types=["invited"],
                                   since=begin_period, before=end_period,
                                   statuses=["accepted"])
    sempres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"], build_target,
                                   types=["colloquium", "seminar"],
                                   since=begin_period, before=end_period,
                                   statuses=["accepted"])
    declpres = filter_presentations(self.gtx["people"],
                                    self.gtx["presentations"],
                                    self.gtx["institutions"], build_target,
                                    types=["all"],
                                    since=begin_period, before=end_period,
                                    statuses=["declined"])
    #########################
    # Awards
    #########################
    ahs = awards(me, since=begin_period)
    ########################
    # Publications
    ########################
    names = frozenset(me.get("aka", []) + [me["name"]])
    pubs = filter_publications(all_docs_from_collection(
        rc.client, "citations"), names, reverse=True, bold=False,
        since=begin_period)
    bibfile = make_bibtex_file(pubs, pid=me["_id"], person_dir=self.bldir)
    # NOTE(review): ``in "article"`` is a substring test, not an equality
    # test — any entrytype that is a substring of "article" matches.
    # Likely intended ``== "article"``; confirm before changing.
    articles = [prc for prc in pubs if prc.get("entrytype") in "article"]
    nonarticletypes = [
        "book", "inbook", "proceedings", "inproceedings", "incollection",
        "unpublished", "phdthesis", "misc"
    ]
    nonarticles = [
        prc for prc in pubs if prc.get("entrytype") in nonarticletypes
    ]
    peer_rev_conf_pubs = [prc for prc in pubs if prc.get("peer_rev_conf")]
    # NOTE(review): this loop looks broken — ``peer_rev_conf_pubs = prc``
    # overwrites the list built above with a single dict, and
    # ``pubs.pop(prc)`` passes a dict where list.pop expects an integer
    # index (TypeError if ever reached).  Needs a real fix upstream.
    pubiter = deepcopy(pubs)
    for prc in pubiter:
        if prc.get("peer_rev_conf"):
            peer_rev_conf_pubs = prc
            pubs.pop(prc)
    ##############
    # TODO: add Current Projects to Research summary section
    ##############
    #############
    # IP
    #############
    patents = filter_patents(self.gtx["patents"], self.gtx["people"],
                             build_target, since=begin_period)
    licenses = filter_licenses(self.gtx["patents"], self.gtx["people"],
                               build_target, since=begin_period)
    #############
    # hindex
    #############
    # Latest h-index entry (chronologically last after sorting).
    hindex = sorted(me["hindex"], key=doc_date_key).pop()
    #########################
    # render
    #########################
    self.render(
        "columbia_annual_report.tex",
        "billinge-ann-report.tex",
        pi=pi,
        p=me,
        projects=projs,
        pending=pending_grants,
        current=current_grants,
        undergrads=undergrads,
        masters=masters,
        currentphds=currents,
        graduatedphds=graduateds,
        postdocs=postdocs,
        visitors=visitors,
        dept_service=dept_service,
        uni_service=uni_service,
        prof_service=prof_service,
        outreach=outreach,
        lab=lab,
        shared=shared,
        facilities_other=fac_other,
        fac_teaching=fac_teaching,
        fac_wishlist=fac_wishlist,
        tch_wishlist=tch_wishlist,
        curric_dev=curric_dev,
        other_activities=other_activities,
        keypres=keypres,
        invpres=invpres,
        sempres=sempres,
        declpres=declpres,
        sentencecase=sentencecase,
        monthstyle=month_fullnames,
        ahs=ahs,
        pubs=articles,
        nonarticles=nonarticles,
        peer_rev_conf_pubs=peer_rev_conf_pubs,
        bibfile=bibfile,
        patents=patents,
        licenses=licenses,
        hindex=hindex,
    )
    self.pdf("billinge-ann-report")
def sout(self):
    """Print the assigned person's todo list (and, optionally, their
    outstanding manuscript/proposal reviews) to stdout.

    Reads filters from ``rc``: ``assigned_to``, ``date``, ``stati``,
    ``filter``, ``short``, ``tags``, ``assigned_by``,
    ``outstandingreview``.
    """
    rc = self.rc
    # Default the target person to the configured user, if any.
    if not rc.assigned_to:
        try:
            rc.assigned_to = rc.default_user_id
        except AttributeError:
            print(
                "Please set default_user_id in '~/.config/regolith/user.json', or you need to enter your group id "
                "in the command line")
            return
    # NOTE(review): bare ``except:`` swallows everything, including
    # KeyboardInterrupt — should be narrowed upstream.
    try:
        person = document_by_value(
            all_docs_from_collection(rc.client, "todos"), "_id",
            rc.assigned_to)
        gather_todos = person.get("todos", [])
    except:
        print("The id you entered can't be found in todos.yml.")
        return
    if not rc.date:
        today = dt.date.today()
    else:
        today = date_parser.parse(rc.date).date()
    if rc.stati == ["started"]:
        rc.stati = PROJECTUM_ACTIVE_STATI
    # Fold active projectum milestones into the todo list as synthetic
    # "started" items, indexed from 9900 upward.
    running_index = 0
    for projectum in self.gtx["projecta"]:
        if projectum.get('lead') != rc.assigned_to:
            continue
        if "checklist" in projectum.get('deliverable').get('scope'):
            continue
        projectum["deliverable"].update({"name": "deliverable"})
        gather_miles = [projectum["deliverable"]]
        if projectum.get("kickoff"):
            projectum['kickoff'].update({"type": "meeting"})
            gather_miles = [projectum["kickoff"], projectum["deliverable"]]
        gather_miles.extend(projectum["milestones"])
        for ms in gather_miles:
            if projectum["status"] in PROJECTUM_ACTIVE_STATI:
                if ms.get('status') in PROJECTUM_ACTIVE_STATI:
                    due_date = get_due_date(ms)
                    ms.update({
                        'status': "started",
                        'id': projectum.get('_id'),
                        'due_date': due_date,
                        'assigned_by': projectum.get('pi_id'),
                        'importance': 2,
                        'duration': 3600,
                        'running_index': 9900 + running_index
                    })
                    running_index += 1
                    ms.update({
                        'description':
                            f'milestone: {ms.get("name")} ({ms.get("id")})'
                    })
                    gather_todos.append(ms)
    # Apply the optional command-line filters.
    if rc.filter:
        gather_todos = key_value_pair_filter(gather_todos, rc.filter)
    if rc.short:
        # Iterate a reversed slice so removal is safe during iteration.
        for todo in gather_todos[::-1]:
            if todo.get('duration') is None or float(
                    todo.get('duration')) > float(rc.short):
                gather_todos.remove(todo)
    if rc.tags:
        for todo in gather_todos[::-1]:
            takeme = False
            for tag in rc.tags:
                if tag in todo.get('tags', []):
                    takeme = True
            if not takeme:
                gather_todos.remove(todo)
    if rc.assigned_by:
        if rc.assigned_by == "default_id":
            rc.assigned_by = rc.default_user_id
        for todo in gather_todos[::-1]:
            if todo.get('assigned_by') != rc.assigned_by:
                gather_todos.remove(todo)
    len_of_started_tasks = 0
    milestones = 0
    for todo in gather_todos:
        if 'milestone: ' in todo['description']:
            milestones += 1
        elif todo["status"] == 'started':
            len_of_started_tasks += 1
    len_of_tasks = len(gather_todos)  #- milestones
    for todo in gather_todos:
        _format_todos(todo, today)
    # Sort in three overlapping slices: tasks by status/importance/order,
    # then the finished tail, then the milestones beyond len_of_tasks.
    gather_todos[:len_of_tasks] = sorted(
        gather_todos[:len_of_tasks],
        key=lambda k: (k['status'], k['importance'], k['order'], -k.get(
            'duration', 10000)))
    gather_todos[len_of_started_tasks:len_of_tasks] = sorted(
        gather_todos[len_of_started_tasks:len_of_tasks],
        key=lambda k: (-k["sort_finished"]))
    gather_todos[len_of_tasks:] = sorted(
        gather_todos[len_of_tasks:],
        key=lambda k: (k['status'], k['order'], -k.get('duration', 10000)))
    print(
        "If the indices are far from being in numerical order, please renumber them by running regolith helper u_todo -r"
    )
    print(
        "(index) action (days to due date|importance|expected duration (mins)|tags|assigned by)"
    )
    print("-" * 80)
    if len(gather_todos) != 0:
        print_task(gather_todos, stati=rc.stati)
    # Optionally append outstanding manuscript/proposal reviews.
    if rc.outstandingreview:
        prop = self.gtx['proposalReviews']
        man = self.gtx['refereeReports']
        outstanding_todo = []
        for manuscript in man:
            if manuscript.get("reviewer") != rc.assigned_to:
                continue
            if manuscript.get("status") in STATI:
                out = f"Manuscript by {manuscript.get('first_author_last_name')} in {manuscript.get('journal')} " \
                      f"is due on {manuscript.get('due_date')}"
                outstanding_todo.append((out, manuscript.get('due_date'),
                                         manuscript.get("status")))
        for proposal in prop:
            if proposal.get("reviewer") != rc.assigned_to:
                continue
            if proposal.get("status") in STATI:
                if isinstance(proposal.get('names'), str):
                    name = HumanName(proposal.get('names'))
                else:
                    name = HumanName(proposal.get('names')[0])
                # NOTE(review): no space before "is due on" in the joined
                # f-string — output reads ")...is due on"; looks like a typo.
                out = f"Proposal by {name.last} for {proposal.get('agency')} ({proposal.get('requester')})" \
                      f"is due on {proposal.get('due_date')}"
                outstanding_todo.append((out, proposal.get('due_date'),
                                         proposal.get("status")))
        if len(outstanding_todo) != 0:
            print("-" * 30)
            print("Outstanding Reviews:")
            print("-" * 30)
            outstanding_todo = sorted(outstanding_todo,
                                      key=lambda k: str(k[1]))
            # Group the printed reviews by status, in STATI order.
            for stati in STATI:
                if stati in [output[2] for output in outstanding_todo]:
                    print(f'{stati}:')
                else:
                    continue
                for output in outstanding_todo:
                    if output[2] == stati:
                        print(output[0])
    return
def latex(self):
    """Render latex template

    Builds the per-group Columbia annual-report LaTeX document for the
    person in ``rc.people[0]`` over ``rc.from_date``..``rc.to_date``
    (default: one year), then compiles it to PDF.
    """
    rc = self.rc
    group = fuzzy_retrieval(self.gtx['groups'], ["_id", "aka", "name"],
                            rc.groupname)
    if not rc.people:
        raise RuntimeError("ERROR: please rerun specifying --people name")
    if not rc.from_date:
        raise RuntimeError("ERROR: please rerun specifying --from")
    build_target = get_id_from_name(
        all_docs_from_collection(rc.client, "people"), rc.people[0])
    begin_year = int(rc.from_date.split("-")[0])
    begin_period = date_parser.parse(rc.from_date).date()
    pre_begin_period = begin_period - relativedelta(years=1)
    # Reporting window: explicit --to, else one year from --from.
    if rc.to_date:
        to_date = date_parser.parse(rc.to_date).date()
        end_period = to_date
        post_end_period = to_date + relativedelta(years=1)
    else:
        end_period = begin_period + relativedelta(years=1) - relativedelta(
            days=1)
        post_end_period = begin_period + relativedelta(
            years=2) - relativedelta(days=1)
    me = [p for p in self.gtx["people"] if p["_id"] == build_target][0]
    me["begin_period"] = dt.date.strftime(begin_period, "%m/%d/%Y")
    # NOTE(review): the line above is duplicated verbatim — harmless but
    # presumably unintentional.
    me["begin_period"] = dt.date.strftime(begin_period, "%m/%d/%Y")
    me["pre_begin_period"] = dt.date.strftime(pre_begin_period, "%m/%d/%Y")
    me["end_period"] = dt.date.strftime(end_period, "%m/%d/%Y")
    me["post_end_period"] = dt.date.strftime(post_end_period, "%m/%d/%Y")
    projs = filter_projects(self.gtx["projects"], set([build_target]),
                            group=group["_id"])
    ########
    # Recommendation Letters count
    ########
    recletts = self.gtx['recletts']
    num_recletts = len([
        reclett["_id"] for reclett in recletts
        if get_dates(reclett).get("end_date") >= begin_period
    ])
    ########
    # Proposal review count
    ########
    proprevs = self.gtx['proprevs']
    num_proprevs = len([
        proprev["_id"] for proprev in proprevs
        if get_dates(proprev).get("end_date") >= begin_period
        and proprev.get('status') == 'submitted'
    ])
    ########
    # Manuscript review count
    ########
    manrevs = self.gtx['manrevs']
    num_manrevs = len([
        manrev["_id"] for manrev in manrevs
        if manrev.get("status") == "submitted"
        and get_dates(manrev, date_field_prefix="submitted").get(
            "submitted_date", dt.date(1971, 1, 1)) is not None
        and get_dates(manrev, date_field_prefix="submitted").get(
            "submitted_date", dt.date(1971, 1, 1)) >= begin_period
    ])
    #########
    # highlights
    #########
    for proj in projs:
        if proj.get('highlights'):
            proj["current_highlights"] = False
            for highlight in proj.get('highlights'):
                highlight_date = get_dates(highlight)
                if highlight_date.get("end_date") >= begin_period:
                    highlight["is_current"] = True
                    proj["current_highlights"] = True
    #########
    # current and pending
    #########
    pi = fuzzy_retrieval(self.gtx["people"], ["aka", "name"],
                         group["pi_name"])
    # Derive the PI's initials from the name words.
    pinames = pi["name"].split()
    piinitialslist = [i[0] for i in pinames]
    pi['initials'] = "".join(piinitialslist).upper()
    grants = merge_collections_all(self.gtx["proposals"],
                                   self.gtx["grants"], "proposal_id")
    for g in grants:
        g['end_date'] = get_dates(g).get('end_date')
        g['begin_date'] = get_dates(g).get('begin_date',
                                           dt.date(1900, 1, 2))
        g['award_start_date'] = "{}/{}/{}".format(
            g.get("begin_date").month,
            g.get("begin_date").day,
            g.get("begin_date").year,
        )
        g['award_end_date'] = "{}/{}/{}".format(
            g.get("end_date").month,
            g.get("end_date").day,
            g.get("end_date").year)
        # Normalize team-member names against the people collection.
        for person in g.get("team", []):
            rperson = fuzzy_retrieval(self.gtx["people"], ["aka", "name"],
                                      person["name"])
            if rperson:
                person["name"] = rperson["name"]
        if g.get('budget'):
            amounts = [i.get('amount') for i in g.get('budget')]
            g['subaward_amount'] = sum(amounts)
    current_grants = [dict(g) for g in grants if is_current(g)]
    current_grants, _, _ = filter_grants(current_grants, {pi["name"]},
                                         pi=False, multi_pi=True)
    current_grants = [
        g for g in current_grants if g.get("status") != "declined"
    ]
    for g in current_grants:
        if g.get('budget'):
            amounts = [i.get('amount') for i in g.get('budget')]
            g['subaward_amount'] = sum(amounts)
    pending_grants = [
        g for g in self.gtx["proposals"] if is_pending(g["status"])
    ]
    for g in pending_grants:
        for person in g["team"]:
            rperson = fuzzy_retrieval(self.gtx["people"], ["aka", "name"],
                                      person["name"])
            if rperson:
                person["name"] = rperson["name"]
    pending_grants, _, _ = filter_grants(pending_grants, {pi["name"]},
                                         pi=False, multi_pi=True)
    # Current grants without a cppflag are dropped below.
    badids = [
        i["_id"] for i in current_grants
        if not i.get('cpp_info').get('cppflag', "")
    ]
    declined_proposals = [
        g for g in self.gtx["proposals"] if is_declined(g["status"])
    ]
    for g in declined_proposals:
        for person in g["team"]:
            rperson = fuzzy_retrieval(self.gtx["people"], ["aka", "name"],
                                      person["name"])
            if rperson:
                person["name"] = rperson["name"]
    declined_proposals, _, _ = filter_grants(declined_proposals,
                                             {pi["name"]}, pi=False,
                                             multi_pi=True)
    declined_proposals = [
        proposal for proposal in declined_proposals
        if get_dates(proposal).get('begin_date') >= begin_period
        and get_dates(proposal, date_field_prefix="submitted").get(
            'submitted_date', end_period) <= end_period
    ]
    # NOTE(review): ``iter`` shadows the builtin of the same name.
    iter = copy(current_grants)
    for grant in iter:
        if grant["_id"] in badids:
            current_grants.remove(grant)
    #########
    # end current and pending
    #########
    #########
    # advising
    #########
    undergrads = filter_employment_for_advisees(self.gtx["people"],
                                                begin_period, "undergrad",
                                                rc.people[0])
    masters = filter_employment_for_advisees(self.gtx["people"],
                                             begin_period, "ms",
                                             rc.people[0])
    currents = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period, "phd",
                                              rc.people[0])
    # Graduated students are searched over a five-year lookback window.
    graduateds = filter_employment_for_advisees(
        self.gtx["people"], begin_period.replace(year=begin_year - 5),
        "phd", rc.people[0])
    postdocs = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period, "postdoc",
                                              rc.people[0])
    visitors = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period,
                                              "visitor-unsupported",
                                              rc.people[0])
    # Separate still-active students from graduated ones.
    iter = deepcopy(graduateds)
    for g in iter:
        if g.get("active"):
            graduateds.remove(g)
    iter = deepcopy(currents)
    for g in iter:
        if not g.get("active"):
            currents.remove(g)
    ######################
    # service
    #####################
    # A fresh deepcopy of ``me`` is taken before each filter because the
    # filter_* helpers appear to consume/modify their input.
    mego = deepcopy(me)
    dept_service = filter_service(mego, begin_period, "department")
    mego = deepcopy(me)
    school_service = filter_service(mego, begin_period, "school")
    mego = deepcopy(me)
    uni_service = filter_service(mego, begin_period, "university")
    uni_service.extend(school_service)
    if num_recletts > 0:
        uni_service.append({
            "name": f"Wrote recommendation letters for {num_recletts} "
                    f"people this period"
        })
    mego = deepcopy(me)
    prof_service = filter_service(mego, begin_period, "profession")
    if num_proprevs > 0:
        prof_service.append({
            "name": f"Reviewed {num_proprevs} funding proposals for "
                    f"national agencies this period"
        })
    if num_manrevs > 0:
        prof_service.append({
            "name": f"Reviewed {num_manrevs} manuscripts for "
                    f"peer reviewed journals this period"
        })
    mego = deepcopy(me)
    phd_defenses = filter_committees(mego, begin_period, "phddefense")
    phd_proposals = filter_committees(mego, begin_period, "phdproposal")
    phd_orals = filter_committees(mego, begin_period, "phdoral")
    mego = deepcopy(me)
    outreach = filter_service(mego, begin_period, "outreach")
    mego = deepcopy(me)
    lab = filter_facilities([mego], begin_period, "research")
    mego = deepcopy(me)
    shared = filter_facilities([mego], begin_period, "shared")
    mego = deepcopy(me)
    fac_other = filter_facilities([mego], begin_period, "other")
    mego = deepcopy(me)
    fac_teaching = filter_facilities([mego], begin_period, "teaching")
    mego = deepcopy(me)
    fac_wishlist = filter_facilities([mego], begin_period,
                                     "research_wish", verbose=False)
    mego = deepcopy(me)
    tch_wishlist = filter_facilities([mego], begin_period, "teaching_wish")
    mego = deepcopy(me)
    curric_dev = filter_activities([mego], begin_period, "teaching")
    mego = deepcopy(me)
    other_activities = filter_activities([mego], begin_period, "other")
    ##########################
    # Presentation list
    ##########################
    keypres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"], build_target,
                                   types=["award", "plenary", "keynote"],
                                   since=begin_period, before=end_period,
                                   statuses=["accepted"])
    invpres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"], build_target,
                                   types=["invited"],
                                   since=begin_period, before=end_period,
                                   statuses=["accepted"])
    sempres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"], build_target,
                                   types=["colloquium", "seminar"],
                                   since=begin_period, before=end_period,
                                   statuses=["accepted"])
    declpres = filter_presentations(self.gtx["people"],
                                    self.gtx["presentations"],
                                    self.gtx["institutions"], build_target,
                                    types=["all"],
                                    since=begin_period, before=end_period,
                                    statuses=["declined"])
    #########################
    # Awards
    #########################
    ahs = awards(me, since=begin_period)
    ########################
    # Publications
    ########################
    names = frozenset(me.get("aka", []) + [me["name"]])
    pubs = filter_publications(all_docs_from_collection(
        rc.client, "citations"), names, reverse=True, bold=False,
        since=begin_period)
    #remove unpublished papers
    # unpubs = [pub for pub in pubs if len(pub.get("doi") == 0)]
    # NOTE(review): ``len(pub.get("doi"))`` raises TypeError when a pub
    # has no "doi" key (len(None)); confirm every citation carries one.
    pubed = [pub for pub in pubs if len(pub.get("doi")) > 0]
    non_arts = [pub for pub in pubs if pub.get("entrytype") != "article"]
    pubs = pubed + non_arts
    bibfile = make_bibtex_file(pubs, pid=me["_id"], person_dir=self.bldir)
    articles = [
        prc for prc in pubs if prc.get("entrytype") == "article"
        and not prc.get("peer_rev_conf")
    ]
    NONARTICLETYPES = [
        "book", "inbook", "proceedings", "inproceedings", "incollection",
        "unpublished", "phdthesis", "misc"
    ]
    nonarticles = [
        prc for prc in pubs if prc.get("entrytype") in NONARTICLETYPES
    ]
    peer_rev_conf_pubs = [prc for prc in pubs if prc.get("peer_rev_conf")]
    ##############
    # TODO: add Current Projects to Research summary section
    ##############
    #############
    # IP
    #############
    patents = filter_patents(self.gtx["patents"], self.gtx["people"],
                             build_target, since=begin_period)
    licenses = filter_licenses(self.gtx["patents"], self.gtx["people"],
                               build_target, since=begin_period)
    #############
    # hindex
    #############
    if not me.get("miscellaneous"):
        me["miscellaneous"] = {"metrics_for_success": []}
    # NOTE(review): if me has no "hindex", ``hindex`` is never bound and
    # the render call below raises NameError — needs a default upstream.
    if me.get("hindex"):
        hindex = sorted(me["hindex"], key=doc_date_key).pop()
    #########################
    # render
    #########################
    self.render(
        "columbia_annual_report.tex",
        f"{pi['_id']}-ann-report.tex",
        pi=pi,
        p=me,
        projects=projs,
        pending=pending_grants,
        current=current_grants,
        declined=declined_proposals,
        undergrads=undergrads,
        masters=masters,
        currentphds=currents,
        graduatedphds=graduateds,
        postdocs=postdocs,
        visitors=visitors,
        phd_defenses=phd_defenses,
        phd_proposals=phd_proposals,
        phd_orals=phd_orals,
        dept_service=dept_service,
        uni_service=uni_service,
        prof_service=prof_service,
        outreach=outreach,
        lab=lab,
        shared=shared,
        facilities_other=fac_other,
        fac_teaching=fac_teaching,
        fac_wishlist=fac_wishlist,
        tch_wishlist=tch_wishlist,
        curric_dev=curric_dev,
        other_activities=other_activities,
        keypres=keypres,
        invpres=invpres,
        sempres=sempres,
        declpres=declpres,
        sentencecase=sentencecase,
        monthstyle=month_fullnames,
        ahs=ahs,
        pubs=articles,
        nonarticles=nonarticles,
        peer_rev_conf_pubs=peer_rev_conf_pubs,
        bibfile=bibfile,
        patents=patents,
        licenses=licenses,
        hindex=hindex,
    )
    # NOTE(review): renders to f"{pi['_id']}-ann-report.tex" but compiles
    # the hard-coded "billinge-ann-report" — mismatch for any other PI id.
    self.pdf("billinge-ann-report")
def construct_global_ctx(self):
    """Construct the global context used by the templates.

    Extends the base context with sorted copies of the collections this
    builder renders, plus a few Python builtins the templates call.
    """
    super().construct_global_ctx()
    gtx = self.gtx
    rc = self.rc
    # People are ordered by seniority (position_key), most senior first.
    gtx["people"] = sorted(
        all_docs_from_collection(rc.client, "people"),
        key=position_key,
        reverse=True,
    )
    gtx["institutions"] = sorted(all_docs_from_collection(
        rc.client, "institutions"), key=_id_key)
    # FIX: previously loaded the "institutions" collection again
    # (copy-paste of the line above); contacts come from "contacts".
    gtx["contacts"] = sorted(all_docs_from_collection(
        rc.client, "contacts"), key=_id_key)
    gtx["groups"] = sorted(all_docs_from_collection(rc.client, "groups"),
                           key=_id_key)
    gtx["grants"] = sorted(all_docs_from_collection(rc.client, "grants"),
                           key=_id_key)
    gtx["proposals"] = sorted(all_docs_from_collection(
        rc.client, "proposals"), key=_id_key)
    gtx["projects"] = sorted(all_docs_from_collection(
        rc.client, "projects"), key=_id_key)
    gtx["presentations"] = sorted(all_docs_from_collection(
        rc.client, "presentations"), key=_id_key)
    gtx["proprevs"] = sorted(all_docs_from_collection(
        rc.client, "proposalReviews"), key=_id_key)
    gtx["manrevs"] = sorted(all_docs_from_collection(
        rc.client, "refereeReports"), key=_id_key)
    gtx["recletts"] = sorted(all_docs_from_collection(
        rc.client, "recletts"), key=_id_key)
    gtx["patents"] = sorted(all_docs_from_collection(rc.client, "patents"),
                            key=_id_key)
    # Expose helpers/builtins so templates can call them directly.
    gtx["all_docs_from_collection"] = all_docs_from_collection
    gtx["float"] = float
    gtx["str"] = str
    gtx["zip"] = zip
def people(self):
    """Render people, former members, and each person.

    Builds the people/ and former/ output directories, then renders one
    page per person plus the two index pages.  Mutates the person docs
    in self.gtx["people"] in place (positions, service dates).
    """
    rc = self.rc
    peeps_dir = os.path.join(self.bldir, "people")
    former_peeps_dir = os.path.join(self.bldir, "former")
    os.makedirs(peeps_dir, exist_ok=True)
    os.makedirs(former_peeps_dir, exist_ok=True)
    peeps = self.gtx["people"]
    for p in peeps:
        # All names this person publishes under (aka + canonical name).
        names = frozenset(p.get("aka", []) + [p["name"]])
        pubs = filter_publications(
            all_docs_from_collection(rc.client, "citations"),
            names,
            reverse=True,
            bold=False,
        )
        bibfile = make_bibtex_file(
            pubs, pid=p["_id"], person_dir=peeps_dir
        )
        # Employment entries flagged not_in_cv are dropped from the page.
        emps = p.get("employment", [])
        emps = [em for em in emps if not em.get("not_in_cv", False)]
        for e in emps:
            # Prefer the long-form title when one is recorded.
            e['position'] = e.get('position_full',
                                  e.get('position').title())
        # Combined employment + education timeline, newest first.
        ene = emps + p.get("education", [])
        ene.sort(key=ene_date_key, reverse=True)
        for e in ene:
            dereference_institution(e, all_docs_from_collection(
                rc.client, "institutions"))
        projs = filter_projects(
            all_docs_from_collection(rc.client, "projects"), names
        )
        for serve in p.get("service", []):
            # Pick the most specific date available: date > end > begin.
            serve_dates = get_dates(serve)
            date = serve_dates.get("date")
            if not date:
                date = serve_dates.get("end_date")
            if not date:
                date = serve_dates.get("begin_date")
            # NOTE(review): if none of the three dates exist this raises
            # AttributeError on None — assumes every service entry is
            # dated; confirm against the schema.
            serve["year"] = date.year
            serve["month"] = date.month
        sns = p.get("service", [])
        sns.sort(key=ene_date_key, reverse=True)
        p["service"] = sns
        self.render(
            "person.html",
            os.path.join("people", p["_id"] + ".html"),
            p=p,
            title=p.get("name", ""),
            pubs=pubs,
            names=names,
            bibfile=bibfile,
            education_and_employment=ene,
            projects=projs,
        )
    self.render(
        "people.html", os.path.join("people", "index.html"), title="People"
    )
    self.render(
        "former.html", os.path.join("former", "index.html"),
        title="Former Members",
    )
def meetings(self):
    """Render projects"""
    # Despite the docstring, this renders the group-meetings page:
    # dereferences lead/scribe/presenter names, resolves jclub DOIs,
    # then splits meetings into past and future for the template.
    rc = self.rc
    mtgsi = all_docs_from_collection(rc.client, "meetings")
    pp_mtgs, f_mtgs = [], []
    for mtg in mtgsi:
        if not mtg.get('lead'):
            print("{} missing a meeting lead".format(mtg["_id"]))
        if not mtg.get('scribe'):
            print("{} missing a meeting scribe".format(mtg["_id"]))
        lead = fuzzy_retrieval(
            all_docs_from_collection(rc.client, "people"),
            ["_id", "name", "aka"], mtg.get("lead"))
        if not lead:
            print("{} lead {} not found in people".format(
                mtg["_id"], mtg.get("lead")))
        # NOTE(review): if lead was not found this raises TypeError right
        # after printing the warning — the warning never prevents the
        # crash; confirm whether a fallback name is intended.
        mtg["lead"] = lead["name"]
        scribe = fuzzy_retrieval(
            all_docs_from_collection(rc.client, "people"),
            ["_id", "name", "aka"], mtg.get("scribe"))
        if not scribe:
            print("{} scribe {} not found in people".format(
                mtg["_id"], mtg.get("scribe")))
        # NOTE(review): same crash-after-warning pattern as lead above.
        mtg["scribe"] = scribe["name"]
        if mtg.get("journal_club"):
            prsn = fuzzy_retrieval(
                all_docs_from_collection(rc.client, "people"),
                ["_id", "name", "aka"],
                mtg["journal_club"].get("presenter"))
            if not prsn:
                print("{} Jclub presenter {} not found in people".format(
                    mtg["_id"], mtg["journal_club"].get("presenter")))
            mtg["journal_club"]["presenter"] = prsn["name"]
            # "tbd" (any case) means no DOI yet; otherwise replace the
            # DOI string with a formatted crossref reference.
            if mtg["journal_club"].get("doi", "tbd").casefold() != 'tbd':
                ref, _ = get_formatted_crossref_reference(
                    mtg["journal_club"].get("doi"))
                mtg["journal_club"]["doi"] = ref
        if mtg.get("presentation"):
            prsn = fuzzy_retrieval(
                all_docs_from_collection(rc.client, "people"),
                ["_id", "name", "aka"],
                mtg["presentation"].get("presenter"))
            # "hold" is a placeholder presenter, not a person record.
            if mtg["presentation"].get("presenter") == "hold":
                prsn = {}
                prsn["name"] = "Hold"
            if not prsn:
                print("{} presenter {} not found in people".format(
                    mtg["_id"], mtg["presentation"].get("presenter")))
            mtg["presentation"]["presenter"] = prsn["name"]
            mtg["presentation"]["link"] = mtg["presentation"].get(
                "link", "tbd")
        mtg['date'] = dt.date(mtg.get("year"), mtg.get("month"),
                              mtg.get("day"))
        mtg['datestr'] = mtg['date'].strftime('%m/%d/%Y')
        today = dt.date.today()
        # Today's meeting counts as "future" (>=).
        if mtg['date'] >= today:
            f_mtgs.append(mtg)
        else:
            pp_mtgs.append(mtg)
    # Past meetings newest-first, future meetings soonest-first.
    pp_mtgs = sorted(pp_mtgs, key=lambda x: x.get('date'), reverse=True)
    f_mtgs = sorted(f_mtgs, key=lambda x: x.get('date'))
    self.render("grpmeetings.html", "grpmeetings.html",
                title="Group Meetings",
                ppmeetings=pp_mtgs,
                fmeetings=f_mtgs)
def sout(self):
    """Print the assigned person's todo list to stdout.

    Gathers todos from the people collection plus unfinished projectum
    milestones led by the person, filters them per the run-control
    options, sorts them into started / finished / milestone segments,
    and prints them with print_task.
    """
    rc = self.rc
    # Fall back to the configured default user when no id was given.
    if not rc.assigned_to:
        try:
            rc.assigned_to = rc.default_user_id
        except AttributeError:
            print(
                "Please set default_user_id in '~/.config/regolith/user.json', or you need to enter your group id "
                "in the command line")
            return
    try:
        person = document_by_value(
            all_docs_from_collection(rc.client, "people"), "_id",
            rc.assigned_to)
        gather_todos = person.get("todos", [])
    except:
        # NOTE(review): bare except also hides non-lookup errors.
        print("The id you entered can't be found in people.yml.")
        return
    # "Today" can be overridden for reproducible output/testing.
    if not rc.certain_date:
        today = dt.date.today()
    else:
        today = date_parser.parse(rc.certain_date).date()
    if rc.stati == ["started"]:
        rc.stati = ACTIVE_STATI
    # Turn every unfinished milestone of projecta this person leads into
    # a synthetic todo entry.
    for projectum in self.gtx["projecta"]:
        if projectum.get('lead') != rc.assigned_to:
            continue
        projectum["deliverable"].update({"name": "deliverable"})
        gather_miles = [projectum["kickoff"], projectum["deliverable"]]
        gather_miles.extend(projectum["milestones"])
        for ms in gather_miles:
            if projectum["status"] not in ["finished", "cancelled"]:
                if ms.get('status') not in \
                        ["finished", "cancelled"]:
                    due_date = get_due_date(ms)
                    ms.update({
                        'id': projectum.get('_id'),
                        'due_date': due_date,
                        'assigned_by': projectum.get('pi_id')
                    })
                    # Second update so 'id' is already set when the
                    # description is built.
                    ms.update({
                        'description':
                            f'milestone: {ms.get("name")} ({ms.get("id")})'
                    })
                    gather_todos.append(ms)
    if rc.filter:
        gather_todos = key_value_pair_filter(gather_todos, rc.filter)
    # --short: drop tasks longer than the cutoff (or with no duration).
    # Iterate a reversed view so removal is safe while looping.
    if rc.short:
        for todo in gather_todos[::-1]:
            if todo.get('duration') is None or float(
                    todo.get('duration')) > float(rc.short):
                gather_todos.remove(todo)
    if rc.assigned_by:
        if rc.assigned_by == "default_id":
            rc.assigned_by = rc.default_user_id
        for todo in gather_todos[::-1]:
            if todo.get('assigned_by') != rc.assigned_by:
                gather_todos.remove(todo)
            elif rc.assigned_to == rc.assigned_by:
                # NOTE(review): this drops self-assigned todos whenever
                # assignee == assigner — confirm this is intended.
                gather_todos.remove(todo)
    # Count segments: milestones print separately at the end; started
    # tasks sort ahead of finished ones within the people-tasks segment.
    len_of_started_tasks = 0
    milestones = 0
    for todo in gather_todos:
        if 'milestone: ' in todo['description']:
            milestones += 1
        elif todo["status"] == 'started':
            len_of_started_tasks += 1
    len_of_tasks = len(gather_todos) - milestones
    for todo in gather_todos:
        if not todo.get('importance'):
            todo['importance'] = 1
        if type(todo["due_date"]) == str:
            todo["due_date"] = date_parser.parse(todo["due_date"]).date()
        if type(todo.get("end_date")) == str:
            todo["end_date"] = date_parser.parse(todo["end_date"]).date()
        todo["days_to_due"] = (todo.get('due_date') - today).days
        # Days since epoch-1900 of the end date; used to sort finished
        # tasks most-recently-finished first.
        todo["sort_finished"] = (
            todo.get("end_date", dt.date(1900, 1, 1)) -
            dt.date(1900, 1, 1)).days
        # Ranking score: importance, a logistic bump near the due date,
        # and a -10 penalty for tasks more than a week overdue.
        try:
            todo["order"] = todo['importance'] + 1 / (
                1 + math.exp(abs(todo["days_to_due"] - 0.5))) - (
                todo["days_to_due"] < -7) * 10
        except OverflowError:
            todo["order"] = float('inf')
    # Sort the people-tasks segment, then re-sort its finished tail,
    # then sort the milestones segment.
    gather_todos[:len_of_tasks] = sorted(
        gather_todos[:len_of_tasks],
        key=lambda k: (k['status'], k['order'], -k.get('duration', 10000)),
        reverse=True)
    gather_todos[len_of_started_tasks:len_of_tasks] = sorted(
        gather_todos[len_of_started_tasks:len_of_tasks],
        key=lambda k: (-k["sort_finished"]))
    gather_todos[len_of_tasks:] = sorted(
        gather_todos[len_of_tasks:],
        key=lambda k: (k['status'], k['order'], -k.get('duration', 10000)),
        reverse=True)
    print(
        "If the indices are far from being in numerical order, please reorder them by running regolith helper u_todo -r"
    )
    print(
        "(index) action (days to due date|importance|expected duration (mins)|assigned by)"
    )
    print("-" * 81)
    if len_of_tasks != 0:
        print("tasks from people collection:")
        print("-" * 30)
        print_task(gather_todos[:len_of_tasks], stati=rc.stati)
    if milestones != 0:
        print("-" * 42)
        print("tasks from projecta and other collections:")
        print("-" * 42)
        print_task(gather_todos[len_of_tasks:], stati=rc.stati,
                   index=False)
    print("-" * 81)
    return
def sout(self):
    """Print the assigned person's todo list to stdout.

    Gathers todos from the people collection plus unfinished projectum
    milestones led by the person, ranks them by importance and due-date
    proximity, and prints the unfinished ones (plus, with --all, the
    finished/cancelled ones).
    """
    rc = self.rc
    # Fall back to the configured default user when no id was given.
    if not rc.assigned_to:
        try:
            rc.assigned_to = rc.default_user_id
        except AttributeError:
            print(
                "Please set default_user_id in '~/.config/regolith/user.json', or you need to enter your group id "
                "in the command line")
            return
    try:
        person = document_by_value(
            all_docs_from_collection(rc.client, "people"), "_id",
            rc.assigned_to)
        gather_todos = person.get("todos", [])
    except:
        print("The id you entered can't be found in people.yml.")
        return
    # "Today" can be overridden for reproducible output/testing.
    if not rc.certain_date:
        today = dt.date.today()
    else:
        today = date_parser.parse(rc.certain_date).date()
    # Turn every unfinished milestone of projecta this person leads into
    # a synthetic todo entry.
    for projectum in self.gtx["projecta"]:
        if projectum.get('lead') != rc.assigned_to:
            continue
        projectum["deliverable"].update({"name": "deliverable"})
        gather_miles = [projectum["kickoff"], projectum["deliverable"]]
        gather_miles.extend(projectum["milestones"])
        for ms in gather_miles:
            if projectum["status"] not in ["finished", "cancelled"]:
                if ms.get('status') not in \
                        ["finished", "cancelled"]:
                    due_date = get_due_date(ms)
                    ms.update({
                        'id': projectum.get('_id'),
                        'due_date': due_date,
                    })
                    # Second update so 'id' is already set when the
                    # description is built.
                    ms.update({
                        'description':
                            f'milestone: {ms.get("name")} ({ms.get("id")})'
                    })
                    gather_todos.append(ms)
    for todo in gather_todos:
        if not todo.get('importance'):
            todo['importance'] = 1
        if type(todo["due_date"]) == str:
            todo["due_date"] = date_parser.parse(todo["due_date"]).date()
        todo["days_to_due"] = (todo.get('due_date') - today).days
        # Ranking score: importance plus a logistic bump that peaks as
        # the due date approaches.
        todo["order"] = todo['importance'] + 1 / (
            1 + math.exp(abs(todo["days_to_due"])))
    gather_todos = sorted(gather_todos, key=lambda k: (-k['order']))
    # --short_tasks: drop tasks longer than the cutoff (or with no
    # duration).  Iterate a reversed copy so removal is safe.
    if rc.short_tasks:
        for todo in gather_todos[::-1]:
            if todo.get('duration') is None or float(
                    todo.get('duration')) > float(rc.short_tasks):
                # FIX: was gather_todos.remove(t) — `t` is undefined
                # here and raised NameError on the first removal.
                gather_todos.remove(todo)
    num = 1
    print("-" * 50)
    print(
        " action (days to due date|importance|expected duration(mins))")
    for todo in gather_todos:
        if todo.get('status') not in ["finished", "cancelled"]:
            print(
                f"{num:>2}. {todo.get('description')}({todo.get('days_to_due')}|{todo.get('importance')}|{str(todo.get('duration'))})"
            )
            if todo.get('notes'):
                for note in todo.get('notes'):
                    print(f" - {note}")
            num += 1
    if rc.all:
        for todo in person.get("todos", []):
            if todo.get('status') in ["finished", "cancelled"]:
                print(
                    f"{num:>2}. ({todo.get('status')}) {todo.get('description')}"
                )
                if todo.get('notes'):
                    for note in todo.get('notes'):
                        print(f" - {note}")
                num += 1
    print("-" * 50)
    return
def query_ppl(self, target):
    """Query the data base for the target's collaborators' information.

    Resolves *target* against the people collection, collects coauthors
    from the person's publications, merges in advisors and advisees
    (deduplicating by first+last name and keeping the most recent
    interaction date), and returns a dict with 'person_info' and the
    sorted 'collabs' list.

    Raises RuntimeError if the target cannot be found.
    """
    rc = self.rc
    gtx = self.gtx
    person = fuzzy_retrieval(all_docs_from_collection(rc.client, "people"),
                             ['aka', 'name', '_id'],
                             target,
                             case_sensitive=False)
    if not person:
        raise RuntimeError("Person {} not found in people.".format(
            target).encode('utf-8'))
    pubs = get_person_pubs(gtx["citations"], person)
    pubs = filter_since_date(pubs, rc)
    # rc.verbose may not exist on every run-control; treat absence as
    # "not verbose".
    try:
        if rc.verbose:
            for pub in pubs:
                print(f"{pub.get('title')}, ({pub.get('year')})")
    except AttributeError:
        pass
    my_collabs = get_coauthors_from_pubs(rc, pubs, person)
    advisors = get_advisors_name_inst(person, rc)
    advisees = get_advisees_name_inst(
        all_docs_from_collection(rc.client, "people"), person, rc)
    collabs = []
    adviseors = advisors + advisees
    # Keep a coauthor only if they are not already an advisor/advisee;
    # when they are, refresh that record's interaction_date if the
    # coauthorship is more recent.
    for collab in my_collabs:
        col_bool = True
        for advis in adviseors:
            if collab.get("name").last == advis.get("name").last \
                    and collab.get("name").first == advis.get("name").first:
                col_bool = False
                if collab.get("interaction_date") > advis.get(
                        "interaction_date"):
                    advis.update({
                        "interaction_date": collab.get("interaction_date")
                    })
        if col_bool == True:
            collabs.append(collab)
    collabs.extend(advisees)
    collabs.extend(advisors)
    # Names are HumanName objects; sort alphabetically by last name.
    collabs.sort(key=lambda d: d['name'].last)
    if rc.verbose:
        output = [
            f"{my_collab.get('name').last}, "
            f"{my_collab.get('name').first}, "
            f"{my_collab.get('institution')}, "
            f"{my_collab.get('interaction_date')}, "
            f"{my_collab.get('advis_type', '')}, "
            f"{my_collab.get('type')}\n" for my_collab in collabs
        ]
        print(*output)
    person["name"] = HumanName(person.get("name"))
    results = {
        'person_info': person,
        'collabs': collabs
    }
    return results
def db_updater(self):
    """Update (or reorder/list) a person's todos in the database.

    Three modes, driven by the run-control:
      * rc.reorder — renumber running_index across all databases so
        started tasks count up from 1 and finished ones down from -1.
      * no rc.index — sort and print the todo list so the user can pick
        an index to update.
      * rc.index — apply the requested field changes to that todo and
        write it back to every database that contains it.
    """
    rc = self.rc
    # Fall back to the configured default user when no id was given.
    if not rc.assigned_to:
        try:
            rc.assigned_to = rc.default_user_id
        except AttributeError:
            print(
                "Please set default_user_id in '~/.config/regolith/user.json', or you need to enter your group id "
                "in the command line")
            return
    filterid = {'_id': rc.assigned_to}
    # First reorder pass: assign a fresh sequential running_index across
    # every database's copy of the todo list.
    if rc.reorder:
        index = 1
        for i in range(0, len(rc.databases)):
            db_name = rc.databases[i]["name"]
            person_idx = rc.client.find_one(db_name, rc.coll, filterid)
            if isinstance(person_idx, dict):
                todolist_idx = person_idx.get("todos", [])
            else:
                continue
            if len(todolist_idx) == 0:
                continue
            else:
                for todo in todolist_idx:
                    todo["running_index"] = index
                    index += 1
    person = document_by_value(
        all_docs_from_collection(rc.client, "people"), "_id",
        rc.assigned_to)
    if not person:
        raise TypeError(
            f"Id {rc.assigned_to} can't be found in people collection")
    todolist = person.get("todos", [])
    if len(todolist) == 0:
        print(f"{rc.assigned_to} doesn't have todos in people collection.")
        return
    # "Today" can be overridden for reproducible output/testing.
    if not rc.certain_date:
        today = dt.date.today()
    else:
        today = date_parser.parse(rc.certain_date).date()
    if not rc.index:
        # List mode: rank todos then print them so the user can pick one.
        started_todo = 0
        for todo in todolist:
            if todo["status"] == 'started':
                started_todo += 1
            if not todo.get('importance'):
                todo['importance'] = 1
            if type(todo["due_date"]) == str:
                todo["due_date"] = date_parser.parse(
                    todo["due_date"]).date()
            if type(todo.get("end_date")) == str:
                todo["end_date"] = date_parser.parse(
                    todo["end_date"]).date()
            todo["days_to_due"] = (todo.get('due_date') - today).days
            # Days since 1900-01-01 of the end date; sorts finished
            # tasks most-recently-finished first.
            todo["sort_finished"] = (
                todo.get("end_date", dt.date(1900, 1, 1)) -
                dt.date(1900, 1, 1)).days
            # Ranking score: importance, a logistic bump near the due
            # date, and a -10 penalty when > 7 days overdue.
            todo["order"] = todo['importance'] + 1 / (
                1 + math.exp(abs(todo["days_to_due"] - 0.5))) - (
                todo["days_to_due"] < -7) * 10
        todolist = sorted(
            todolist,
            key=lambda k: (k['status'], k['order'],
                           -k.get('duration', 10000)),
            reverse=True)
        todolist[started_todo:] = sorted(todolist[started_todo:],
                                         key=lambda k:
                                         (-k["sort_finished"]))
        index_match = {}
        # Second reorder pass: map old running_index -> new one
        # (started: 1, 2, ...; finished: -1, -2, ...) and write the new
        # numbering back to every database.
        if rc.reorder:
            new_index_started = 1
            new_index_finished = -1
            for todo in todolist[:started_todo]:
                index_match[todo["running_index"]] = new_index_started
                new_index_started += 1
            for todo in todolist[started_todo:]:
                index_match[todo["running_index"]] = new_index_finished
                new_index_finished += -1
            for i in range(0, len(rc.databases)):
                db_name = rc.databases[i]["name"]
                person_idx = rc.client.find_one(db_name, rc.coll,
                                                filterid)
                if isinstance(person_idx, dict):
                    todolist_idx = person_idx.get("todos", [])
                else:
                    continue
                if len(todolist_idx) != 0:
                    for todo in todolist_idx:
                        index = index_match[todo["running_index"]]
                        todo["running_index"] = index
                    rc.client.update_one(db_name, rc.coll,
                                         {'_id': rc.assigned_to},
                                         {"todos": todolist_idx},
                                         upsert=True)
                    print(
                        f"Indices in {db_name} for {rc.assigned_to} have been updated."
                    )
            return
        if rc.assigned_by:
            if rc.assigned_by == "default_id":
                rc.assigned_by = rc.default_user_id
            # Iterate a reversed view so removal is safe while looping.
            for todo in todolist[::-1]:
                if todo.get('assigned_by') != rc.assigned_by:
                    print(todo.get('assigned_by'))
                    todolist.remove(todo)
                elif rc.assigned_to == rc.assigned_by:
                    # NOTE(review): drops self-assigned todos whenever
                    # assignee == assigner — confirm this is intended.
                    todolist.remove(todo)
        if rc.filter:
            todolist = key_value_pair_filter(todolist, rc.filter)
        if rc.stati == ["started"]:
            rc.stati = ACTIVE_STATI
        print(
            "If the indices are far from being in numerical order, please reorder them by running regolith helper u_todo -r"
        )
        print("Please choose from one of the following to update:")
        print(
            "(index) action (days to due date|importance|expected duration (mins)|assigned by)"
        )
        print("-" * 81)
        print_task(todolist, stati=rc.stati)
        print("-" * 81)
    else:
        # Update mode: apply the requested changes to the todo with the
        # given running_index, then persist it to every database.
        match_todo = [
            i for i in todolist if i.get("running_index") == rc.index
        ]
        if len(match_todo) == 0:
            raise RuntimeError("Please enter a valid index.")
        else:
            todo = match_todo[0]
            if rc.description:
                todo["description"] = rc.description
            if rc.due_date:
                # An integer due date means "N days from today".
                try:
                    relative_day = int(rc.due_date)
                    due_date = today + relativedelta(days=relative_day)
                except ValueError:
                    due_date = date_parser.parse(rc.due_date).date()
                todo["due_date"] = due_date
            if rc.estimated_duration:
                todo["duration"] = rc.estimated_duration
            # Importance 0 is a valid value, hence the explicit check.
            if rc.importance or rc.importance == 0:
                if rc.importance in ALLOWED_IMPORTANCE:
                    todo["importance"] = rc.importance
                else:
                    raise ValueError(
                        f"Importance should be chosen from{ALLOWED_IMPORTANCE}."
                    )
            if rc.status:
                if rc.status in ALLOWED_STATI:
                    todo["status"] = rc.status
                else:
                    raise ValueError(
                        f"Status should be chosen from {ALLOWED_STATI}.")
            if rc.notes:
                # Create the notes list on first use.
                try:
                    todo["notes"].extend(rc.notes)
                except KeyError:
                    todo["notes"] = []
                    todo["notes"].extend(rc.notes)
            if rc.begin_date:
                todo["begin_date"] = date_parser.parse(
                    rc.begin_date).date()
            if rc.end_date:
                todo["end_date"] = date_parser.parse(rc.end_date).date()
            for i in range(0, len(rc.databases)):
                db_name = rc.databases[i]["name"]
                person_update = rc.client.find_one(db_name, rc.coll,
                                                   filterid)
                todolist_update = person_update.get("todos", [])
                if len(todolist_update) != 0:
                    for i, todo_u in enumerate(todolist_update):
                        if rc.index == todo_u.get("running_index"):
                            todolist_update[i] = todo
                            rc.client.update_one(
                                db_name, rc.coll,
                                {'_id': rc.assigned_to},
                                {"todos": todolist_update}, upsert=True)
                            print(
                                f"The task \"({todo_u['running_index']}) {todo_u['description'].strip()}\" in {db_name} for {rc.assigned_to} has been updated."
                            )
                            return
    return
def latex(self):
    """Render the current-and-pending-support latex template.

    For each group: merges proposals with grants, dereferences team
    member names against the people collection, splits the result into
    current and pending lists, sums sub-award budgets, drops current
    grants with no cpp_info flag, and renders/compiles one
    current-pending PDF per group PI.
    """
    gtx = self.gtx
    rc = self.rc
    for group in self.gtx["groups"]:
        gtx["grants"] = list(sorted(
            all_docs_from_collection(rc.client, "grants"), key=_id_key
        ))
        gtx["proposals"] = list(sorted(
            all_docs_from_collection(rc.client, "proposals"), key=_id_key
        ))
        grp = group["_id"]
        pi = fuzzy_retrieval(
            self.gtx["people"], ["aka", "name"], group["pi_name"]
        )
        pinames = pi["name"].split()
        piinitialslist = [i[0] for i in pinames]
        pi['initials'] = "".join(piinitialslist).upper()
        grants = merge_collections_all(self.gtx["proposals"],
                                       self.gtx["grants"],
                                       "proposal_id")
        for g in grants:
            g['end_date'] = get_dates(g).get('end_date')
            g['begin_date'] = get_dates(g).get('begin_date',
                                               dt.date(1900, 1, 2))
            # Replace team-member names with their canonical form.
            for person in g.get("team", []):
                rperson = fuzzy_retrieval(
                    self.gtx["people"], ["aka", "name"], person["name"]
                )
                if rperson:
                    person["name"] = rperson["name"]
            if g.get('budget'):
                amounts = [i.get('amount') for i in g.get('budget')]
                g['subaward_amount'] = sum(amounts)
        current_grants = [
            dict(g) for g in grants if is_current(g)
        ]
        current_grants, _, _ = filter_grants(
            current_grants, {pi["name"]}, pi=False, multi_pi=True
        )
        current_grants = [g for g in current_grants
                          if g.get("status") != "declined"]
        for g in current_grants:
            if g.get('budget'):
                amounts = [i.get('amount') for i in g.get('budget')]
                g['subaward_amount'] = sum(amounts)
        pending_grants = [
            g for g in self.gtx["proposals"] if is_pending(g["status"])
        ]
        for g in pending_grants:
            for person in g["team"]:
                rperson = fuzzy_retrieval(
                    self.gtx["people"], ["aka", "name"], person["name"]
                )
                if rperson:
                    person["name"] = rperson["name"]
        pending_grants, _, _ = filter_grants(
            pending_grants, {pi["name"]}, pi=False, multi_pi=True
        )
        summed_grants = pending_grants + current_grants
        # Template-friendly m/d/y strings for each grant's dates.
        for grant in summed_grants:
            grant.update(
                award_start_date="{}/{}/{}".format(
                    grant.get("begin_date").month,
                    grant.get("begin_date").day,
                    grant.get("begin_date").year,
                ),
                award_end_date="{}/{}/{}".format(
                    grant.get("end_date").month,
                    grant.get("end_date").day,
                    grant.get("end_date").year,
                ),
            )
        # Drop current grants without a truthy cpp_info.cppflag.
        # FIX: was i.get('cpp_info').get(...), which raised
        # AttributeError whenever cpp_info was missing; such grants are
        # now treated as unflagged and removed like any other.
        badids = [i["_id"] for i in current_grants
                  if not i.get('cpp_info', {}).get('cppflag', "")]
        # Iterate a copy so removal from current_grants is safe
        # (also avoids shadowing the builtin `iter`).
        for grant in copy(current_grants):
            if grant["_id"] in badids:
                current_grants.remove(grant)
        piname = HumanName(pi["name"])
        outfile = "current-pending-{}-{}".format(grp, piname.last.lower())
        self.render(
            "current_pending.tex",
            outfile + ".tex",
            pi=pi,
            pending=pending_grants,
            current=current_grants,
            pi_upper=pi["name"].upper(),
            group=group,
        )
        self.pdf(outfile)
def latex(self):
    """Render latex template

    Builds one CV PDF per person: publications, employment/education,
    projects, grants (PI and co-I), honors, service, presentations and
    advisees, then renders cv.tex and compiles it.
    """
    rc = self.rc
    for p in self.gtx["people"]:
        # so we don't modify the dbs when de-referencing
        names = frozenset(p.get("aka", []) + [p["name"]] + [p["_id"]])
        # Far-past cutoff so advisee filters effectively include
        # everything.
        begin_period = date(1650, 1, 1)
        pubs = filter_publications(
            all_docs_from_collection(rc.client, "citations"),
            names,
            reverse=True,
        )
        bibfile = make_bibtex_file(pubs, pid=p["_id"],
                                   person_dir=self.bldir)
        emp = p.get("employment", [])
        for e in emp:
            # Prefer the long-form title when one is recorded.
            e['position'] = e.get('position_full',
                                  e.get('position').title())
        emp.sort(key=ene_date_key, reverse=True)
        edu = p.get("education", [])
        edu.sort(key=ene_date_key, reverse=True)
        teach = p.get("teaching", [])
        for t in teach:
            t['position'] = t.get('position').title()
        projs = filter_projects(
            all_docs_from_collection(rc.client, "projects"), names)
        just_grants = list(all_docs_from_collection(rc.client, "grants"))
        just_proposals = list(
            all_docs_from_collection(rc.client, "proposals"))
        grants = merge_collections_superior(just_proposals, just_grants,
                                            "proposal_id")
        presentations = filter_presentations(self.gtx["people"],
                                             self.gtx["presentations"],
                                             self.gtx["institutions"],
                                             p.get("_id"),
                                             statuses=["accepted"])
        for grant in grants:
            for member in grant.get("team"):
                dereference_institution(member, self.gtx["institutions"])
        pi_grants, pi_amount, _ = filter_grants(grants, names, pi=True)
        coi_grants, coi_amount, coi_sub_amount = filter_grants(grants,
                                                               names,
                                                               pi=False)
        aghs = awards_grants_honors(p, "honors")
        service = awards_grants_honors(p, "service", funding=False)
        # TODO: pull this out so we can use it everywhere
        for ee in [emp, edu]:
            for e in ee:
                dereference_institution(e, self.gtx["institutions"])
        undergrads = filter_employment_for_advisees(
            self.gtx["people"], begin_period, "undergrad")
        masters = filter_employment_for_advisees(self.gtx["people"],
                                                 begin_period, "ms")
        currents = filter_employment_for_advisees(self.gtx["people"],
                                                  begin_period, "phd")
        graduateds = filter_employment_for_advisees(
            self.gtx["people"], begin_period, "phd")
        postdocs = filter_employment_for_advisees(self.gtx["people"],
                                                  begin_period, "postdoc")
        visitors = filter_employment_for_advisees(self.gtx["people"],
                                                  begin_period,
                                                  "visitor-unsupported")
        # Split phd advisees: graduateds keeps only inactive, currents
        # only active.  Iterate deep copies so removal is safe.
        iter = deepcopy(graduateds)
        for g in iter:
            if g.get("active"):
                graduateds.remove(g)
        iter = deepcopy(currents)
        for g in iter:
            if not g.get("active"):
                currents.remove(g)
        self.render(
            "cv.tex",
            p["_id"] + ".tex",
            p=p,
            title=p.get("name", ""),
            aghs=aghs,
            service=service,
            undergrads=undergrads,
            masters=masters,
            currentphds=currents,
            graduatedphds=graduateds,
            postdocs=postdocs,
            visitors=visitors,
            pubs=pubs,
            names=names,
            bibfile=bibfile,
            education=edu,
            employment=emp,
            presentations=presentations,
            sentencecase=sentencecase,
            monthstyle=month_fullnames,
            projects=projs,
            pi_grants=pi_grants,
            pi_amount=pi_amount,
            coi_grants=coi_grants,
            coi_amount=coi_amount,
            coi_sub_amount=coi_sub_amount,
        )
        self.pdf(p["_id"])
def latex(self):
    """Render latex template

    Builds a one-year annual-report PDF for the person given by
    --people, covering the period starting at --from.

    NOTE(review): several defects are flagged inline below; this looks
    like an older revision of the annual-report builder and may not run
    to completion as written.
    """
    rc = self.rc
    if not rc.people:
        raise RuntimeError("ERROR: please rerun specifying --people name")
    if not rc.from_date:
        raise RuntimeError("ERROR: please rerun specifying --from")
    build_target = get_id_from_name(
        all_docs_from_collection(rc.client, "people"), rc.people[0])
    begin_year = int(rc.from_date.split("-")[0])
    begin_month = int(rc.from_date.split("-")[1])
    pre_begin_year = begin_year - 1
    end_year = begin_year + 1
    # NOTE(review): if begin_month == 1 this makes end_month == 0 and
    # dt.date(end_year, 0, 28) below raises ValueError — confirm the
    # intended month arithmetic.
    end_month = begin_month - 1
    post_end_year = begin_year + 2
    begin_period = dt.date(begin_year, begin_month, 1)
    pre_begin_period = dt.date(pre_begin_year, begin_month, 1)
    end_period = dt.date(end_year, end_month, 28)
    post_end_period = dt.date(post_end_year, end_month, 28)
    me = [p for p in self.gtx["people"] if p["_id"] == build_target][0]
    me["begin_period"] = dt.date.strftime(begin_period, "%m/%d/%Y")
    # NOTE(review): duplicate of the line above.
    me["begin_period"] = dt.date.strftime(begin_period, "%m/%d/%Y")
    me["pre_begin_period"] = dt.date.strftime(pre_begin_period,
                                              "%m/%d/%Y")
    me["end_period"] = dt.date.strftime(end_period, "%m/%d/%Y")
    me["post_end_period"] = dt.date.strftime(post_end_period, "%m/%d/%Y")
    projs = filter_projects(
        self.gtx["projects"], set([build_target]), group="bg"
    )
    #########
    # highlights
    #########
    # Mark project highlights that fall inside the reporting period.
    for proj in projs:
        if proj.get('highlights'):
            proj["current_highlights"] = False
            for highlight in proj.get('highlights'):
                highlight_date = dt.date(highlight.get("year"),
                                         month_to_int(
                                             highlight.get("month", 1)),
                                         1)
                if highlight_date > begin_period and \
                        highlight_date < end_period:
                    highlight["is_current"] = True
                    proj["current_highlights"] = True
    #########
    # current and pending
    #########
    # NOTE(review): this section is empty, yet `pending_grants` and
    # `current_grants` (and `pi`) are referenced in self.render below —
    # that call will raise NameError as written.
    #########
    # end current and pending
    #########
    #########
    # advising
    #########
    undergrads = filter_employment_for_advisees(self.gtx["people"],
                                                begin_period, "undergrad")
    masters = filter_employment_for_advisees(self.gtx["people"],
                                             begin_period, "ms")
    currents = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period, "phd")
    # Graduated PhDs are searched over a 5-year lookback window.
    graduateds = filter_employment_for_advisees(self.gtx["people"],
                                                begin_period.replace(
                                                    year=begin_year - 5),
                                                "phd")
    postdocs = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period, "postdoc")
    visitors = filter_employment_for_advisees(self.gtx["people"],
                                              begin_period,
                                              "visitor-unsupported")
    # Split phd advisees: graduateds keeps only inactive, currents only
    # active.  Iterate deep copies so removal is safe.
    iter = deepcopy(graduateds)
    for g in iter:
        if g.get("active"):
            graduateds.remove(g)
    iter = deepcopy(currents)
    for g in iter:
        if not g.get("active"):
            currents.remove(g)
    ######################
    # service
    #####################
    # filter_service mutates its input, so work on fresh deep copies of
    # the person record each time.
    mego = deepcopy(me)
    dept_service = filter_service([mego], begin_period, "department")
    mego = deepcopy(me)
    school_service = filter_service([mego], begin_period, "school")
    mego = deepcopy(me)
    uni_service = filter_service([mego], begin_period, "university")
    uni_service.extend(school_service)
    mego = deepcopy(me)
    prof_service = filter_service([mego], begin_period, "profession")
    mego = deepcopy(me)
    outreach = filter_service([mego], begin_period, "outreach")
    mego = deepcopy(me)
    lab = filter_facilities([mego], begin_period, "research")
    mego = deepcopy(me)
    shared = filter_facilities([mego], begin_period, "shared")
    mego = deepcopy(me)
    fac_other = filter_facilities([mego], begin_period, "other")
    mego = deepcopy(me)
    fac_teaching = filter_facilities([mego], begin_period, "teaching")
    mego = deepcopy(me)
    fac_wishlist = filter_facilities([mego], begin_period,
                                     "research_wish", verbose=False)
    mego = deepcopy(me)
    tch_wishlist = filter_facilities([mego], begin_period,
                                     "teaching_wish")
    mego = deepcopy(me)
    curric_dev = filter_activities([mego], begin_period, "teaching")
    mego = deepcopy(me)
    other_activities = filter_activities([mego], begin_period, "other")
    ##########################
    # Presentation list
    ##########################
    keypres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"],
                                   build_target,
                                   types=["award", "plenary", "keynote"],
                                   since=begin_period,
                                   before=end_period,
                                   statuses=["accepted"])
    invpres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"],
                                   build_target,
                                   types=["invited"],
                                   since=begin_period,
                                   before=end_period,
                                   statuses=["accepted"])
    sempres = filter_presentations(self.gtx["people"],
                                   self.gtx["presentations"],
                                   self.gtx["institutions"],
                                   build_target,
                                   types=["colloquium", "seminar"],
                                   since=begin_period,
                                   before=end_period,
                                   statuses=["accepted"])
    declpres = filter_presentations(self.gtx["people"],
                                    self.gtx["presentations"],
                                    self.gtx["institutions"],
                                    build_target,
                                    types=["all"],
                                    since=begin_period,
                                    before=end_period,
                                    statuses=["declined"])
    #########################
    # Awards
    #########################
    ahs = awards(me, since=begin_period)
    ########################
    # Publications
    ########################
    names = frozenset(me.get("aka", []) + [me["name"]])
    pubs = filter_publications(
        all_docs_from_collection(rc.client, "citations"),
        names,
        reverse=True,
        bold=False,
        since=begin_period
    )
    bibfile = make_bibtex_file(
        pubs, pid=me["_id"], person_dir=self.bldir
    )
    # NOTE(review): `in "article"` is a substring test, not equality —
    # e.g. entrytype "art" would match; confirm == was intended.
    articles = [prc for prc in pubs if prc.get("entrytype") in "article"]
    nonarticletypes = ["book", "inbook", "proceedings", "inproceedings",
                       "incollection", "unpublished", "phdthesis",
                       "misc"]
    nonarticles = [prc for prc in pubs
                   if prc.get("entrytype") in nonarticletypes]
    peer_rev_conf_pubs = [prc for prc in pubs
                          if prc.get("peer_rev_conf")]
    pubiter = deepcopy(pubs)
    for prc in pubiter:
        if prc.get("peer_rev_conf"):
            # NOTE(review): this rebinds the list built above to a
            # single dict, and list.pop expects an index, so
            # pubs.pop(prc) raises TypeError — this loop looks broken
            # and redundant with the comprehension above.
            peer_rev_conf_pubs = prc
            pubs.pop(prc)
    ##############
    # TODO: add Current Projects to Research summary section
    ##############
    #############
    # IP
    #############
    patents = filter_patents(self.gtx["patents"], self.gtx["people"],
                             build_target, since=begin_period)
    licenses = filter_licenses(self.gtx["patents"], self.gtx["people"],
                               build_target, since=begin_period)
    #############
    # hindex
    #############
    # Most recent h-index entry.  NOTE(review): KeyError if the person
    # has no "hindex" field — the newer revision guards this.
    hindex = sorted(me["hindex"], key=doc_date_key).pop()
    #########################
    # render
    #########################
    # "C:/Users/simon/scratch/billinge-ann-report.tex",
    self.render(
        "columbia_annual_report.tex",
        "billinge-ann-report.tex",
        pi=pi,
        p=me,
        projects=projs,
        pending=pending_grants,
        current=current_grants,
        undergrads=undergrads,
        masters=masters,
        currentphds=currents,
        graduatedphds=graduateds,
        postdocs=postdocs,
        visitors=visitors,
        dept_service=dept_service,
        uni_service=uni_service,
        prof_service=prof_service,
        outreach=outreach,
        lab=lab,
        shared=shared,
        facilities_other=fac_other,
        fac_teaching=fac_teaching,
        fac_wishlist=fac_wishlist,
        tch_wishlist=tch_wishlist,
        curric_dev=curric_dev,
        other_activities=other_activities,
        keypres=keypres,
        invpres=invpres,
        sempres=sempres,
        declpres=declpres,
        sentencecase=sentencecase,
        monthstyle=month_fullnames,
        ahs=ahs,
        pubs=articles,
        nonarticles=nonarticles,
        peer_rev_conf_pubs=peer_rev_conf_pubs,
        bibfile=bibfile,
        patents=patents,
        licenses=licenses,
        hindex=hindex,
    )
    self.pdf("billinge-ann-report")
def db_updater(self):
    """Mark a todo as finished, or list unfinished todos.

    Two modes, selected by ``rc.index``:

    * no index — print the (filtered, sorted) list of started todos for
      ``rc.assigned_to`` so the user can pick one;
    * index given — mark the matching todo as finished (setting its
      ``end_date``) and write the updated todo list back to every
      database that holds a record for this person.

    Reads from rc: index, assigned_to, default_user_id, date, filter,
    end_date, databases, coll, client.  Returns None in all paths.

    Raises
    ------
    TypeError
        If ``rc.assigned_to`` is not found in the todos collection.
    RuntimeError
        If ``rc.index`` matches no todo.
    """
    rc = self.rc
    if rc.index:
        # Indices >= 9900 are reserved for milestones; refuse to touch them.
        if rc.index >= 9900:
            print("WARNING: indices >= 9900 are used for milestones which "
                  "should be finished using u_milestone and not f_todo")
            return
    if not rc.assigned_to:
        # Fall back to the configured default user; rc may not define it.
        try:
            rc.assigned_to = rc.default_user_id
        except AttributeError:
            print(
                "Please set default_user_id in '~/.config/regolith/user.json', or you need to enter your group id "
                "in the command line")
            return
    person = document_by_value(
        all_docs_from_collection(rc.client, "todos"), "_id", rc.assigned_to)
    filterid = {'_id': rc.assigned_to}
    if not person:
        raise TypeError(
            f"Id {rc.assigned_to} can't be found in todos collection")
    todolist = person.get("todos", [])
    if len(todolist) == 0:
        print(f"{rc.assigned_to} doesn't have todos in todos collection.")
        return
    now = dt.date.today()
    if not rc.index:
        # Listing mode: show started todos sorted by urgency so the user
        # can choose an index to finish.
        if not rc.date:
            today = now
        else:
            today = date_parser.parse(rc.date).date()
        if rc.filter:
            todolist = key_value_pair_filter(todolist, rc.filter)
        for todo in todolist:
            # Dates may be stored as ISO strings; normalize to date objects.
            if isinstance(todo["due_date"], str):
                todo["due_date"] = date_parser.parse(
                    todo["due_date"]).date()
            todo["days_to_due"] = (todo.get('due_date') - today).days
            # Logistic weight: todos near their due date sort first.
            todo["order"] = 1 / (1 + math.exp(abs(todo["days_to_due"] - 0.5)))
        todolist = sorted(todolist,
                          key=lambda k: (k['status'], k['importance'],
                                         k['order'],
                                         -k.get('duration', 10000)))
        print(
            "If the indices are far from being in numerical order, please renumber them by running regolith helper u_todo -r"
        )
        print("Please choose from one of the following to update:")
        print(
            "(index) action (days to due date|importance|expected duration (mins)|tags|assigned by)"
        )
        print("-" * 80)
        print_task(todolist, stati=['started'])
    else:
        # Finish mode: locate the todo by its running index.
        match_todo = [t for t in todolist
                      if t.get("running_index") == rc.index]
        if len(match_todo) == 0:
            raise RuntimeError("Please enter a valid index.")
        todo = match_todo[0]
        todo["status"] = "finished"
        if not rc.end_date:
            end_date = now
        else:
            end_date = date_parser.parse(rc.end_date).date()
        todo["end_date"] = end_date
        # Propagate the finished todo to the first database that holds
        # a matching record, then stop.
        for database in rc.databases:
            db_name = database["name"]
            person_update = rc.client.find_one(db_name, rc.coll, filterid)
            if not person_update:
                continue
            todolist_update = person_update.get("todos", [])
            if len(todolist_update) != 0:
                for idx, todo_u in enumerate(todolist_update):
                    if rc.index == todo_u.get("running_index"):
                        todolist_update[idx] = todo
                        rc.client.update_one(
                            db_name, rc.coll, {'_id': rc.assigned_to},
                            {"todos": todolist_update}, upsert=True)
                        print(
                            f"The task \"({todo_u['running_index']}) {todo_u['description'].strip()}\" in {db_name} for {rc.assigned_to} has been marked as finished."
                        )
                        return
    return
def projects(self): rc = self.rc projs = all_docs_from_collection(rc.client, 'projects') self.render('projects.html', 'projects.html', title='Projects', projects=projs)