def arp_table():
    '''Print a formatted table of ARP entries: name, IP, MAC, and vendor.

    Reads entries from ``get_arp()`` (each presumably a dict with 'name',
    'ip', 'mac', and nested 'info.company' keys — TODO confirm schema),
    left-justifies each column to its widest value, and prints the result.
    '''
    log.info('Getting ARP info')
    arp_items = get_arp()
    # Extract one 4-tuple per host, then transpose into per-column tuples.
    # NOTE(review): if get_arp() returns no entries, zip(*items) yields
    # nothing and this unpack raises ValueError — confirm that is acceptable.
    names, ips, macs, companies = _.pipe(
        arp_items,
        _.map(lambda i: (
            i.get('name', ''),
            i.get('ip', ''),
            i.get('mac', ''),
            i.get('info', {}).get('company', ''),
        )),
        lambda items: zip(*items),
    )
    # Column widths: length of the longest value in each column.
    max_n, max_i, max_m, max_c = _.pipe(
        [names, ips, macs, companies],
        _.map(lambda l: max(l, key=len)),
        _.map(len),
        tuple,
    )
    # Header row plus a dashed underline sized to each column.
    header = [
        ['Name', 'IP', 'MAC', 'Company'],
        ['-' * max_n, '-' * max_i, '-' * max_m, '-' * max_c],
    ]
    # Pad every cell to its column width, join cells and rows, and print.
    _.pipe(
        _.concatv(header, zip(names, ips, macs, companies)),
        __.vmap(lambda n, i, m, c: (
            n.ljust(max_n), i.ljust(max_i), m.ljust(max_m), c.ljust(max_c)
        )),
        _.map(' '.join),
        '\n'.join,
        print,
    )
def kw_str(search_kw: dict) -> str:
    '''Render a keyword dict as a comma-separated "key=value" string.

    Pairs are sorted by key for deterministic output.

    Args:
        search_kw: mapping of search keywords to values.

    Returns:
        e.g. ``kw_str({'b': 2, 'a': 1})`` -> ``'a=1, b=2'``; an empty
        dict yields ``''``.
    '''
    # BUG FIX: the original used the plain literal '{k}={v}' (missing the
    # f-prefix), so every pair rendered literally as "k=v".
    return ', '.join(
        f'{k}={v}' for k, v in sorted(search_kw.items())
    )
def group_data_from_yaml_content(course: IdResourceEndpoint, yaml_data: str):
    '''Parse assignment-group dicts from YAML text for the given course.

    Expects the YAML to contain an 'assignment' key holding a sequence of
    group dicts; each dict gets a 1-based 'position' added, then is
    normalized via ``group_data_from_dict(course)``.

    Returns:
        Tuple of group-data dicts, or None if any pipeline stage yields
        a falsy/None value (maybe_pipe short-circuits).
    '''
    return maybe_pipe(
        yaml_data,
        # SECURITY/COMPAT FIX: yaml.load on external text can execute
        # arbitrary Python tags, and PyYAML >= 6 requires an explicit
        # Loader argument (the original call raises TypeError there).
        # safe_load parses plain data only — confirm no custom tags are
        # needed by existing course YAML files.
        yaml.safe_load,
        getitem('assignment'),
        enumerate,
        # 'position' is 1-based per the enumerate index + 1.
        vmap(lambda i, d: merge(d, {'position': i + 1})),
        map(group_data_from_dict(course)),
        tuple,
    )
def fig(course_root: str, path: str, caption: str = '', *, style: dict = None):
    '''Build an HTML figure snippet for an image beneath the course root.

    Args:
        course_root: base directory used to resolve ``path``.
        path: image path, resolved relative to ``course_root``.
        caption: optional caption text inserted into the FIG template.
        style: optional mapping of CSS property -> value rendered as an
            inline ``style="..."`` attribute; omitted entirely when falsy.

    Returns:
        The FIG template filled with style attribute, base64 image data,
        and caption.
    '''
    path = resolve_path(course_root, path)
    if style:
        css = '; '.join(f"{k}: {v}" for k, v in style.items())
        style_attr = 'style="{}"'.format(css)
    else:
        style_attr = ''
    data = image_base64(path)
    return FIG.format(style=style_attr, data=data, caption=caption)
def parse_course_metadata(regexes: list, course_dict: dict) -> dict:
    r'''Merge named-group regex matches over course_dict values into one
    metadata dict.

    Each entry in ``regexes`` selects a value from ``course_dict`` —
    either directly via 'key' or via a nested 'keys' path — and provides
    a 'regex' with named groups. All matching entries contribute their
    group dicts; earlier entries take precedence on key collisions. A
    'year' group is coerced to int.

    Examples:

    >>> regexes = [
    ...     {'key': 'name',
    ...      'regex': r'^(?P<code>\S+) (?P<name>.*?) (?P<section>\S+)$'},
    ...     {'key': 'course_code',
    ...      'regex': r'^(?P<code>\S+) (?P<name>.*?) (?P<section>\S+)$'}
    ... ]
    >>> course_dict = {
    ...     'name': 'CS102 CompSci II S01',
    ... }
    >>> parse_course_metadata(regexes, course_dict) == {
    ...     'code': 'CS102', 'name': 'CompSci II', 'section': 'S01',
    ... }
    True
    '''
    # NOTE: the original docstring example had a typo ('regex: instead of
    # 'regex':) that made the doctest a SyntaxError; fixed above.

    def get_course_value(regex_dict):
        # Resolve the course_dict value named by 'key', or walk the
        # nested path in 'keys'; returns None when absent.
        if 'key' in regex_dict:
            if regex_dict['key'] in course_dict:
                return course_dict[regex_dict['key']]
        elif 'keys' in regex_dict:
            value = course_dict
            for key in regex_dict['keys']:
                try:
                    value = value[key]
                except (KeyError, IndexError, TypeError):
                    return None
            return value

    def transform_year(d):
        # Coerce a captured 'year' group to int, leaving other keys as-is.
        if 'year' in d:
            return {**d, 'year': int(d['year'])}
        return d

    parsed = []
    for regex_dict in regexes:
        value = get_course_value(regex_dict)
        if not value:
            continue
        match = re.search(regex_dict['regex'], value)
        if match:
            parsed.append(transform_year(match.groupdict()))

    # Merge with earlier entries winning: apply dicts last-to-first so the
    # first match's keys are written last (mirrors the original
    # reversed + merge pipeline).
    merged = {}
    for groups in reversed(parsed):
        merged.update(groups)
    return merged
def save_attachments(submission: IdResourceEndpoint, output_dir: Union[str, Path], map_func=thread_map(max_workers=5)):
    '''Download a submission's attachments into output_dir.

    Each attachment dict must provide 'filename' and 'url'; entries
    missing either are skipped. Downloads run through ``map_func``
    (default: a thread pool of 5 workers — note this default is built
    once at import time and shared across calls). Only responses with a
    2xx status are written to disk.

    Returns:
        Tuple of Paths written, or None if the submission has no
        'attachments' key (maybe_pipe short-circuits on None).
    '''
    output_dir_path = Path(output_dir).expanduser()
    if not output_dir_path.exists():
        output_dir_path.mkdir(parents=True, exist_ok=True)
    return maybe_pipe(
        submission.data.get('attachments'),
        # Pull (filename, url) pairs; drop entries missing either value.
        map(lcommon.get_many_t(['filename', 'url'])),
        filter(all),
        # Fetch each URL (concurrently via map_func), pairing the
        # destination path with the HTTP response.
        map_func(
            lcommon.vcall(lambda f, u: (Path(output_dir, f).expanduser(),
                                        requests.get(u)))),
        # Keep only successful (2xx) downloads.
        lcommon.vfilter(lambda p, r: r.status_code in range(200, 300)),
        # Side effect: write the body to disk; keep the path.
        lcommon.vmap(lambda p, r: (p, p.write_bytes(r.content))),
        map(first),
        tuple,
    )
def content_paths(path: Union[str, Path]) -> dict:
    '''Map each content key in CONTENT_DIRS to its absolute directory.

    Args:
        path: base directory under which the content subdirectories live.

    Returns:
        Dict of {content key: resolved absolute Path} for every entry in
        CONTENT_DIRS.
    '''
    # FIX: the original annotated path as the tuple (str, Path), which is
    # not a valid type annotation; Union[str, Path] matches the
    # convention used elsewhere in this module.
    return {
        key: Path(path, subdir).expanduser().resolve()
        for key, subdir in CONTENT_DIRS.items()
    }
def sync_assignment_groups_from_path(course: IdResourceEndpoint,
                                     path: Union[str, Path]):
    '''Synchronize a course's assignment groups with a YAML definition.

    Creates groups present in the YAML but absent from the course,
    updates groups whose content hash changed, and demotes (never
    deletes) course groups no longer in the YAML — those must be removed
    manually.

    Args:
        course: course endpoint whose assignment groups are synced.
        path: path to the YAML file defining the groups.
    '''
    group_data = group_data_from_yaml_path(course, path)
    group_names = [g['name'] for g in group_data]

    @curry
    def do_log(logger, msg):
        # Prefix every log line with this function's name for traceability.
        return logger('[sync_assignment_groups_from_path] ' + msg)
    log_info = do_log(log.info)
    log_error = do_log(log.error)

    group_eps = assignment_groups(course, do_memo=False)
    group_ep_names = [g.data['name'] for g in group_eps]

    # Don't delete already created assignment groups. Report them to
    # be deleted manually, and push them to the end of the ordering.
    unneeded = set(group_ep_names) - set(group_names)
    if unneeded:
        log_error(
            'The following assignment groups need to be removed manually:\n'
            f'{", ".join(sorted(unneeded))}')
        log_error('... setting position(s) to 999')
        pipe(
            group_eps,
            filter(lambda ep: ep.data['name'] in unneeded),
            # BUG FIX: was {'position': 99}, contradicting the log message
            # above; 999 matches the stated intent.
            do(lambda ep: update_endpoint(ep, {'position': 999})),
            tuple,
        )

    missing_names = set(group_names) - set(group_ep_names)
    # Create missing assignment groups
    if missing_names:
        log_info('The following assignment groups will be created:\n'
                 f'{", ".join(sorted(missing_names))}')
        # Executed for its side effects (group creation + hash metadata);
        # the resulting tuple itself is not used afterwards.
        new_data = pipe(
            group_data,
            filter(lambda d: d['name'] in missing_names),
            map(do(lambda d: log_info(f'... creating group: {d["name"]}'))),
            map(lambda d: (d, hash_from_dict(d),
                           new_assignment_group(course, d))),
            map(vdo(lambda d, h, ep: log_info(f'... setting hash: {h}'))),
            map(vdo(lambda d, h, ep: set_metadata(ep, {'hash': h}))),
            tuple,
        )

    # NOTE(review): group_hashes is computed but never read below —
    # retained in case hash_from_dict has side effects; confirm and remove.
    group_hashes = pipe(
        group_data,
        map(hash_from_dict),
        tuple,
    )

    # Re-fetch endpoints unmemoized so newly created groups are included.
    name_to_ep = pipe(
        assignment_groups(course, do_memo=False),
        map(lambda ep: (ep, get_metadata(ep).get('hash'))),
        vmap(lambda ep, h: (ep.data['name'], (ep, h))),
        dict,
    )
    # Update any group whose stored hash differs from the YAML content.
    for data in group_data:
        ep, h = name_to_ep[data['name']]
        data_hash = hash_from_dict(data)
        if h != data_hash:
            log_info(f'Updating group: {data["name"]}')
            update_endpoint(ep, data)
            set_metadata(ep, {'hash': data_hash})