def get_set_orientation(set_path):
    """Determine whether a set has direct orientation.

    Reads the first two points of the set and checks that their
    ``point_index`` order agrees with their ``start_time`` order.

    Parameters
    ----------
    set_path : str
        Path to the set, relative to the ``LAN10_DATA_PATH``
        environment directory.

    Returns
    -------
    bool
        True when index order and time order agree (direct
        orientation), False otherwise.
    """
    points = _get_points_in_set(join(environ['LAN10_DATA_PATH'], set_path))

    _, meta_a, _ = dfparser.parse_from_file(points[0], nodata=True)
    _, meta_b, _ = dfparser.parse_from_file(points[1], nodata=True)

    idx_a = int(meta_a['external_meta']['point_index'])
    idx_b = int(meta_b['external_meta']['point_index'])
    start_a = tparse(meta_a['params']['start_time'])
    start_b = tparse(meta_b['params']['start_time'])

    # Direct orientation means both comparisons point the same way:
    # the later-indexed point also started later.
    return (idx_a > idx_b) == (start_a > start_b)
def _recalc(voltage, fill, sets, l_bord, r_bord):
    """Build the Plotly scatter figure of per-point count rates.

    Parameters
    ----------
    voltage : str
        Number-sequence expression (pynumparser syntax) of accepted
        HV1 voltages.
    fill : str
        Comma-separated fill-name substrings to match in point paths.
    sets : str
        Number-sequence expression of accepted set numbers.
    l_bord, r_bord
        Amplitude borders forwarded to ``get_points_cr``.

    Returns
    -------
    dict
        ``{'data': traces, 'layout': {...}}`` ready for Plotly.
    """
    voltages = pynumparser.NumberSequence().parse(voltage)
    fills = fill.split(', ')
    set_nums = pynumparser.NumberSequence().parse(sets)

    def filter_name(fp):
        # Path must mention one of the fills and one of the set dirs.
        return (any(f in fp for f in fills) and
                any('set_{}/'.format(num) in fp for num in set_nums))

    def filter_meta(meta):
        hv_value = int(meta['external_meta']['HV1_value'])
        return any(int(v) == hv_value for v in voltages)

    points = filter_points(filter_name, filter_meta)
    rates = get_points_cr((p[0] for p in points), l_bord, r_bord)

    combined = sorted(
        ({'name': pt[0], 'meta': pt[1], 'cr': rate}
         for pt, rate in zip(points, rates)),
        key=lambda rec: tparse(rec['meta']["params"]["start_time"]))

    traces = []
    # NOTE: `combined` is ordered by start time, not by directory, so
    # groupby yields one trace per consecutive run of points sharing a
    # set directory (interleaved sets would produce multiple traces).
    for set_dir, members in groupby(combined,
                                    lambda rec: dirname(rec['name'])):
        xs, ys, labels, names = [], [], [], []
        for rec in members:
            xs.append(tparse(rec['meta']['params']['start_time']))
            ys.append(rec['cr'])
            labels.append(
                'set: {};<br>point index: {};<br>hv: {};'.format(
                    set_dir,
                    rec['meta']['external_meta']['point_index'],
                    rec['meta']['external_meta']['HV1_value']))
            names.append(rec['name'])
        traces.append({
            'name': set_dir,
            'x': xs,
            'y': ys,
            'text': labels,
            'customdata': names,
            'mode': 'markers',
            'type': 'scatter',
            'showlegend': True,
        })

    return {'data': traces, 'layout': {'title': FIG_1_TITLE}}
def create_media(doc):
    """Render the talk list as HTML.

    Sorts the entries in place, newest first (ties broken by title,
    also descending), and emits one ``talk_container`` div per entry
    with a stable CRC32-based anchor id.

    Parameters
    ----------
    doc : list of dict
        Talk entries; each is expected to carry 'date', 'title',
        'presenters' and 'materials'. The list is sorted in place.

    Returns
    -------
    unicode
        Concatenated HTML for all talks.
    """
    doc.sort(key=lambda t: (tparse(t['date']), t['title']), reverse=True)
    content = u''
    for e in doc:
        dt = tparse(e['date'])
        # Deterministic anchor id: CRC32 of "<title>_<time>", masked to
        # an unsigned 32-bit value so the hex formatting is stable
        # across Python versions/platforms.
        entry_id = u"entry{:08x}".format(
            zlib.crc32(u"{0}_{1}".format(e['title'], mktime(dt)).encode(
                'utf-8')) & 0xffffffff)
        appendix = []
        add_misc_links(appendix, e["materials"])
        # Build the comma-separated presenter markup; linked when the
        # presenter record has an 'href'.
        authors = u''
        for p in e["presenters"]:
            if authors:
                authors += u"<span>, </span>"  # separator between names
            if chk(p, "href"):
                authors += u"""<a href="{0}">{1}</a>""".format(
                    p["href"], p["name"])
            else:
                authors += u"""<span>{0}</span>""".format(p["name"])
        entry = u""" <h2> <a href="#{0}" class="anchor" aria-hidden="true"><i class="fa fa-thumb-tack fa-1" aria-hidden="true"></i></a> {1} </h2> <div class="talk_info"> <h3>{2}</h3> <div class="presenter_info"> {3} </div> <div class="materials"> {4} </div> </div> """.format(
            entry_id,
            e['title'],
            dt.strftime("%a, %B %d %Y"),
            authors,
            " ".join(appendix) if appendix else "",
        )
        content += u""" <div class="talk_container" id="{0}"> {1} </div> """.format(entry_id, entry)
    return content
def _convert_time(strtime):
    """Parse *strtime* and return it as a POSIX timestamp (float)."""
    parsed = tparse(strtime)
    return parsed.timestamp()
def _convert_time(strtime):
    """Parse *strtime* and return it as a POSIX timestamp.

    Fallback variant: uses the ``backports`` shim because
    ``datetime.timestamp()`` does not exist on Python 2.
    """
    # Import locally so the backport is only required when this
    # variant is actually selected.
    from backports.datetime_timestamp import timestamp
    parsed = tparse(strtime)
    return timestamp(parsed)
# NOTE(review): this chunk begins mid-script; `filtered.append(point)` is
# the tail of a filtering loop whose header lies above this excerpt.
filtered.append(point)

# Collect per-point amplitude statistics for every filtered point.
peaks = []
for point in tqdm(filtered):
    try:
        # nodata=False: raw channel data is needed to extract amplitudes.
        _, meta, data = dfparser.parse_from_file(point, nodata=False)
        amps = _madc_amps(data)
        # Point number comes from the parent directory name: drop the
        # 4-char prefix (presumably "set_" — confirm) and a possible
        # "_bad" suffix.
        num = path.basename(path.dirname(point))[4:]
        if num.endswith('_bad'):
            num = num[:-4]
        peak = {
            'point_index': meta['external_meta']['point_index'],
            # NOTE(review): other code in this file reads
            # meta['params']['start_time']; here meta['start_time'][0]
            # is indexed instead — verify against the MADC metadata
            # layout.
            'time': tparse(meta['start_time'][0]),
            'peak': _amp_hist_max(amps),
            'mean': _amp_mean(amps),
            'std': _amp_std(amps),
            'cr': _amp_cr(amps),
            # Alternating color for even/odd point numbers.
            'color': int(num) % 2
        }
        peaks.append(peak)
    except RuntimeError as err:
        # Best-effort: skip unreadable points but keep processing.
        print(err)

# Plot preparation: points in chronological order.
peaks = sorted(peaks, key=lambda v: v['time'])
palette = sns.color_palette()
_, ax = plt.subplots()
def create_media(pref, types, docs, dry_run):
    """Render the publication list as HTML and emit side artifacts.

    Groups *docs* by their 'type', renders one <h3> section per
    non-empty type (entries newest first), writes a .bib file per entry
    with bibtex, writes ``material/timeline.json`` with all events, and
    generates auto-pages for docs flagged 'autopage'. File writes are
    skipped when *dry_run* is set.

    Parameters
    ----------
    pref : str or None
        Output directory prefix; '.' is used when None.
    types : list of dict
        Type descriptors with 'type' and 'name'; each dict gets a
        'docs' list attached in place.
    docs : list of dict
        Publication entries; 'type' selects the section.
    dry_run : bool
        When True, no files are written.

    Returns
    -------
    unicode
        The concatenated HTML content.
    """
    # NOTE(review): the loop variable `type` shadows the builtin
    # throughout this function.
    type_lookup = {}
    for type in types:
        type_lookup[type['type']] = type
        type['docs'] = []
    # Distribute docs into their type buckets (in insertion order).
    for doc in docs:
        type = type_lookup[doc['type']]
        type['docs'].append(doc)
    event_times = {}  # month-key -> set of timeline ids already used
    events = []
    content = ''
    auto_pages = []
    for type in types:
        if not type['docs']:
            continue
        content += u'<h3 id="{0}">{1}</h3>'.format(type['type'], type['name'])
        # Newest first; ties broken by title, also descending.
        type['docs'].sort(
            key=lambda t: (tparse(t['date']), t['title']), reverse=True)
        for doc in type['docs']:
            # Deterministic anchor id: CRC32 of type/title/time, masked
            # to an unsigned 32-bit value for stable hex formatting.
            entry_id = u"entry{:08x}".format(
                zlib.crc32(u"{0}_{1}_{2}".format(type['name'],
                                                 doc['title'],
                                                 mktime(tparse(doc['date']))
                                                 ).encode('utf-8'))
                & 0xffffffff)
            appendix = []
            if 'href' in doc and doc['href']:
                if chk(doc, 'autopage'):
                    auto_pages.append(doc)
                appendix.append(
                    u"""<a href="{0}">[page]</a>""".format(doc['href']))
            add_misc_links(appendix, doc)
            if chk(doc, 'bibtex'):
                bibtex = doc['bibtex'].strip()
                bibtex_link = u"bibtex/{0}.bib".format(entry_id)
                bibtex_filename = os.path.join(
                    pref if pref is not None else ".", bibtex_link)
                if not dry_run:
                    if not os.path.exists(os.path.dirname(bibtex_filename)):
                        os.makedirs(os.path.dirname(bibtex_filename))
                    with io.open(bibtex_filename, 'w',
                                 encoding='utf-8') as f:
                        print(bibtex, file=f)
                # The link is emitted even on dry runs.
                appendix.append(
                    u"""<a href="{0}" rel="nofollow">[bibtex]</a>"""
                    .format(bibtex_link))
            # Highlight the site owner's name in the author list.
            authors = doc['authors'].replace(
                "Josua Krause",
                "<span style=\"text-decoration: underline;\">" +
                "Josua Krause</span>")
            astr = u"<img src=\"img/badge.png\" style=\"height: 1em;\" " + \
                u"alt=\"{0}\" title=\"{0}\">"
            awards = [
                astr.format(award) for award in doc['awards']
            ] if chk(doc, 'awards') else []
            body = u""" <h4 class="media-heading"> {1} <a href="#{0}" class="anchor" aria-hidden="true"> <i class="fa fa-thumb-tack fa-1" aria-hidden="true"></i> </a><br/> <small>{2}</small> </h4> <em>{3} — {4}</em>{5}{6} """.format(
                entry_id,
                doc['title'],
                authors,
                doc['conference'],
                doc['date'] if doc['published'] else u"to be published…",
                u"<br/>\n{0}".format(" ".join(appendix)) if
                appendix else "",
                u"{0}{1}".format(
                    " " if appendix else u"<br/>\n",
                    " ".join(awards)
                ) if awards else "",
            )
            entry = u""" <a class="pull-left" href="#{0}"> <img class="media-object" src="{1}" title="{2}" alt="{3}" style="width: 64px;"> </a> <div class="media-body"> {4} </div> """.format(
                entry_id,
                doc['logo'] if chk(doc, 'logo') else "img/nologo.png",
                doc['title'],
                doc['short-title'] if chk(doc, 'short-title')
                else doc['title'],
                body,
            )
            content += u""" <div class="media" id="{0}"> {1} </div> """.format(entry_id, entry)
            # Timeline event id: conference name, deduplicated per month
            # by appending " (2)", " (3)", ... as needed.
            otid = doc['short-conference'] \
                if chk(doc, 'short-conference') \
                else doc['conference']
            tid = otid
            mtime = monthtime(tparse(doc['date']))
            if mtime not in event_times:
                event_times[mtime] = set()
            num = 1
            while tid in event_times[mtime]:
                num += 1
                tid = "{0} ({1})".format(otid, num)
            event_times[mtime].add(tid)
            event = {
                "id": tid,
                "group": type['type'],
                "name": doc['title'],
                "time": mktime(tparse(doc['date'])),
                "link": u"#{0}".format(entry_id),
            }
            events.append(event)
    if not dry_run:
        timeline_fn = os.path.join(
            pref if pref is not None else ".", "material/timeline.json")
        if not os.path.exists(os.path.dirname(timeline_fn)):
            os.makedirs(os.path.dirname(timeline_fn))
        # NOTE(review): 'wb' + print and json.dumps(encoding='utf-8')
        # are Python-2-only patterns; the encoding kwarg was removed in
        # Python 3 and printing str to a binary file raises TypeError —
        # confirm the targeted interpreter before porting.
        with open(timeline_fn, 'wb') as tl:
            type_names = {}
            for type in types:
                type_names[type['type']] = type['name']
            print(json.dumps({
                "events": events,
                "type_names": type_names,
            }, sort_keys=True, indent=2, encoding='utf-8'), file=tl)
    if auto_pages:
        with io.open("page.tmpl", 'r', encoding='utf-8') as tf:
            page_tmpl = tf.read()
        for doc in auto_pages:
            create_autopage(
                page_tmpl, doc,
                os.path.join(
                    pref if pref is not None else ".", doc['href']))
    return content