def list_users(args, l, rc):
    """Print a table of every 'user'-type account in the library."""
    from ambry.util import drop_empty
    from tabulate import tabulate

    header = 'Id User Type Secret'.split()

    rows = []
    for key in l.accounts.keys():
        account = l.account(key)
        if account.major_type != 'user':
            continue
        try:
            secret = account.secret
        except Exception as exc:
            # A secret that can't be read is displayed as its error text,
            # e.g. "<corrupt secret>".
            secret = str(exc)
        rows.append([account.account_id, account.user_id,
                     account.minor_type, secret])

    if not rows:
        return

    table = drop_empty([header] + rows)
    prt(tabulate(table[1:], table[0]))
def bundle_sources(vid, ct):
    """Render the sources page for a bundle.

    :param vid: bundle version id to look up in the renderer's library
    :param ct: content type, passed through to the renderer
    :return: the rendered 'bundle/sources.html' page
    """
    from ambry.util import drop_empty

    r = aac().renderer.cts(ct)
    b = r.library.bundle(vid)

    # First row is the header (the dict keys of the first source row);
    # every row contributes its values.
    # FIX: dropped unused `enumerate` index -- the loop never used `i`.
    sources = []
    for row in b.sources:
        if not sources:
            sources.append(list(row.dict.keys()))
        sources.append(list(row.dict.values()))

    # Remove columns that are empty in every row.
    sources = drop_empty(sources)

    cxt = dict(
        vid=vid,
        b=b,
        sources=sources[1:] if sources else [],
        sources_header=['name', 'source_table_name', 'ref'],
        **r.cc())

    return r.render('bundle/sources.html', **cxt)
def objects_to_record(self):
    """Serialize the schema (every table's columns) into the file record.

    Builds a rectangular row set -- one header row plus one row per
    column, with blank separator rows between tables -- drops columns
    that are empty everywhere, and stores the result msgpack-encoded
    in the backing file record.
    """
    initial_rows = []
    headers = []

    for table in self._dataset.tables:
        for col in table.columns:
            row = col.row
            initial_rows.append(row)
            # Accumulate the union of keys in first-seen order; this
            # should put all of the data fields at the end of the headers.
            for k in iterkeys(row):
                if k not in headers:
                    headers.append(k)

    rows = list()

    # Move description to the end
    if 'description' in headers:
        headers.remove('description')
        headers.append('description')

    if initial_rows:
        rows.append(headers)
        name_index = headers.index('column')
    else:
        name_index = None

    for row in initial_rows:
        # Every row is the same length, with combined set of headers
        this_row = [row.get(h, None) for h in headers]

        # FIX: was `if name_index and ...`, which is falsy when the
        # 'column' header sits at index 0 and would silently skip the
        # table-separator rows. The None sentinel shows `is not None`
        # was the intended test.
        if name_index is not None and this_row[name_index] == 'id':
            # Blank to separate tables, but transpose trick fails if
            # rows not all same size
            rows.append([None for e in this_row])

        rows.append(this_row)

    # Transpose trick to remove empty columns
    if rows:
        rows_before_transpose = len(rows)
        rows = list(drop_empty(rows))
        # The transpose trick removes all of the rows if anything goes wrong
        assert rows_before_transpose == len(rows)
    else:
        # No contents, so use the default file
        rows = list(csv.reader(self.default.splitlines()))

    bsfile = self.record
    bsfile.update_contents(msgpack.packb(rows), 'application/msgpack')
def objects_to_record(self):
    """Serialize the schema (every table's columns) into the file record.

    Builds a rectangular row set -- one header row plus one row per
    column, with blank separator rows between tables -- drops columns
    that are empty everywhere, and stores the result msgpack-encoded
    in the backing file record.
    """
    initial_rows = []
    headers = []

    for table in self._dataset.tables:
        for col in table.columns:
            row = col.row
            initial_rows.append(row)
            # Accumulate the union of keys in first-seen order; this
            # should put all of the data fields at the end of the headers.
            for k in iterkeys(row):
                if k not in headers:
                    headers.append(k)

    rows = list()

    # Move description to the end
    if 'description' in headers:
        headers.remove('description')
        headers.append('description')

    if initial_rows:
        rows.append(headers)
        name_index = headers.index('column')
    else:
        name_index = None

    for row in initial_rows:
        # Every row is the same length, with combined set of headers
        this_row = [row.get(h, None) for h in headers]

        # FIX: was `if name_index and ...`, which is falsy when the
        # 'column' header sits at index 0 and would silently skip the
        # table-separator rows. The None sentinel shows `is not None`
        # was the intended test.
        if name_index is not None and this_row[name_index] == 'id':
            # Blank to separate tables, but transpose trick fails if
            # rows not all same size
            rows.append([None for e in this_row])

        rows.append(this_row)

    # Transpose trick to remove empty columns
    if rows:
        rows_before_transpose = len(rows)
        rows = list(drop_empty(rows))
        # The transpose trick removes all of the rows if anything goes wrong
        assert rows_before_transpose == len(rows)
    else:
        # No contents, so use the default file
        rows = list(csv.reader(self.default.splitlines()))

    bsfile = self.record
    bsfile.update_contents(msgpack.packb(rows), 'application/msgpack')
def bundle_process_logs(self, show_all=None):
    """Build a table (list of rows) of recent process-log records.

    :param show_all: if truthy, include finished and old records;
        otherwise only non-'done' records modified in the last 2 minutes.
    :return: result of drop_empty(); first row is the header row.
    """
    import time
    from collections import OrderedDict
    from sqlalchemy.sql import and_
    from ambry.util import drop_empty

    records = []

    def append(pr, edit=None):
        # Normalize ORM objects to plain dicts.
        if not isinstance(pr, dict):
            pr = pr.dict

        # Keep only the interesting fields, truncating each value to 60 chars.
        d = OrderedDict((k, str(v).strip()[:60]) for k, v in pr.items()
                        if k in ['id', 'group', 'state', 'd_vid', 's_vid',
                                 'hostname', 'pid', 'phase', 'stage',
                                 'modified', 'item_count', 'message'])

        # Show modification time as a seconds offset from now
        # (negative for times in the past).
        d['modified'] = round(float(d['modified']) - time.time(), 1)

        # Apply optional per-field post-processing callables.
        if edit:
            for k, v in edit.items():
                d[k] = v(d[k])

        # The first appended record also contributes the header row.
        if not records:
            records.append(d.keys())

        records.append(d.values())

    q = self.query.order_by(Process.modified.desc())

    for pr in q.all():
        # Don't show reports that are done or older than 2 minutes.
        if show_all or (pr.state != 'done' and pr.modified > time.time() - 120):
            append(pr)

    # Add old running rows, which may indicate a dead process.
    q = (self.query.filter(Process.s_vid != None)
         .filter(and_(Process.state == 'running',
                      Process.modified < time.time() - 60))
         .filter(Process.group != None))

    for pr in q.all():
        # Tag stale 'running' entries so the reader can spot them.
        append(pr, edit={'modified': lambda e: (str(e) + ' (dead?)')})

    records = drop_empty(records)

    return records
def _repr_html_(self):
    """HTML representation: an <h2> title plus an HTML table of columns."""
    from tabulate import tabulate
    from ambry.util import drop_empty

    rows = []
    for column in self.columns:
        record = column.row
        if not rows:
            # Header row comes from the first record's keys.
            rows.append(record.keys())
        rows.append(record.values())

    rows = drop_empty(rows)

    title = "<h2>{}</h2>".format(self.name)
    return title + tabulate(rows[1:], headers=rows[0], tablefmt="html")
def bundle_process_logs(self, show_all=None):
    """Build a table (list of rows) of recent process-log records.

    :param show_all: if truthy, include finished and old records;
        otherwise only non-'done' records modified in the last 2 minutes.
    :return: result of drop_empty(); first row is the header row.
    """
    import time
    from collections import OrderedDict
    from sqlalchemy.sql import and_
    from ambry.util import drop_empty

    records = []

    def append(pr, edit=None):
        # Normalize ORM objects to plain dicts.
        if not isinstance(pr, dict):
            pr = pr.dict

        # Keep only the interesting fields, truncating each value to 60 chars.
        d = OrderedDict((k, str(v).strip()[:60]) for k, v in pr.items()
                        if k in ['id', 'group', 'state', 'd_vid', 's_vid',
                                 'hostname', 'pid', 'phase', 'stage',
                                 'modified', 'item_count', 'message'])

        # Show modification time as a seconds offset from now
        # (negative for times in the past).
        d['modified'] = round(float(d['modified']) - time.time(), 1)

        # Apply optional per-field post-processing callables.
        if edit:
            for k, v in edit.items():
                d[k] = v(d[k])

        # The first appended record also contributes the header row.
        if not records:
            records.append(d.keys())

        records.append(d.values())

    q = self.query.order_by(Process.modified.desc())

    for pr in q.all():
        # Don't show reports that are done or older than 2 minutes.
        if show_all or (pr.state != 'done' and pr.modified > time.time() - 120):
            append(pr)

    # Add old running rows, which may indicate a dead process.
    q = (self.query.filter(Process.s_vid != None)
         .filter(and_(Process.state == 'running',
                      Process.modified < time.time() - 60))
         .filter(Process.group != None))

    for pr in q.all():
        # Tag stale 'running' entries so the reader can spot them.
        append(pr, edit={'modified': lambda e: (str(e) + ' (dead?)')})

    records = drop_empty(records)

    return records
def _repr_html_(self):
    """Render this table's columns as an HTML heading plus an HTML table."""
    from tabulate import tabulate
    from ambry.util import drop_empty

    def row_iter():
        first = True
        for c in self.columns:
            r = c.row
            if first:
                first = False
                yield r.keys()  # header row from the first record
            yield r.values()

    table = drop_empty(list(row_iter()))

    return "<h2>{}</h2>".format(self.name) + tabulate(
        table[1:], headers=table[0], tablefmt="html")
def _repr_html_(self):
    """Render the bundle's partitions as an HTML heading plus a table."""
    from tabulate import tabulate
    from ambry.util import drop_empty

    header = ['vid', 'vname', 'table', 'time', 'space', 'grain',
              'description', 'sub-desc']

    # Header row is only emitted when there is at least one partition,
    # matching the original generator's i == 0 behavior.
    rows = []
    for p in self:
        if not rows:
            rows.append(header)
        rows.append([p.vid, p.vname, p.table.name, p.time, p.space,
                     p.grain, p.description, p.display.sub_description])

    rows = drop_empty(rows)

    return "<h2>Partitions in {} </h2>".format(self.bundle.identity.name) \
        + tabulate(rows[1:], headers=rows[0], tablefmt="html")
def objects_to_record(self):
    """Serialize the dataset's sources into the msgpack file record.

    Rows are sorted so 'ref' entries come first, entries with no
    reftype come last, and all other reftypes fall in between; ties
    are broken by name.
    """

    # FIX: named lambda replaced with a def (PEP 8 E731); logic unchanged.
    def sorter(r):
        # 'ref' sorts first ('A'), a missing reftype last ('z').
        group = ('A' if r['reftype'] == 'ref'
                 else 'z' if r['reftype'] is None
                 else r['reftype'])
        return (group, r['name'])

    rows = sorted([s.row for s in self._dataset.sources], key=sorter)

    if rows:
        # Header row from the first record's keys, then the value rows.
        rows = [list(rows[0].keys())] + [list(r.values()) for r in rows]

        # Transpose trick to remove empty columns
        rows = list(drop_empty(rows))
    else:
        # No contents, so use the default file
        rows = list(csv.reader(self.default.splitlines()))

    bsfile = self.record
    bsfile.update_contents(msgpack.packb(rows), 'application/msgpack')
def objects_to_record(self):
    """Serialize the dataset's sources into the msgpack file record.

    Rows are sorted so 'ref' entries come first, entries with no
    reftype come last, and all other reftypes fall in between; ties
    are broken by name.
    """

    # FIX: named lambda replaced with a def (PEP 8 E731); logic unchanged.
    def sorter(r):
        # 'ref' sorts first ('A'), a missing reftype last ('z').
        group = ('A' if r['reftype'] == 'ref'
                 else 'z' if r['reftype'] is None
                 else r['reftype'])
        return (group, r['name'])

    rows = sorted([s.row for s in self._dataset.sources], key=sorter)

    if rows:
        # Header row from the first record's keys, then the value rows.
        rows = [list(rows[0].keys())] + [list(r.values()) for r in rows]

        # Transpose trick to remove empty columns
        rows = list(drop_empty(rows))
    else:
        # No contents, so use the default file
        rows = list(csv.reader(self.default.splitlines()))

    bsfile = self.record
    bsfile.update_contents(msgpack.packb(rows), 'application/msgpack')
def _repr_html_(self):
    """HTML view: heading plus a table with one row per partition."""
    from tabulate import tabulate
    from ambry.util import drop_empty

    def gen_rows():
        emitted_header = False
        for part in self:
            if not emitted_header:
                emitted_header = True
                # Header appears only when there is at least one partition.
                yield ['vid', 'vname', 'table', 'time', 'space', 'grain',
                       'description', 'sub-desc']
            yield [part.vid, part.vname, part.table.name, part.time,
                   part.space, part.grain, part.description,
                   part.display.sub_description]

    table = drop_empty(list(gen_rows()))

    heading = "<h2>Partitions in {} </h2>".format(self.bundle.identity.name)
    return heading + tabulate(table[1:], headers=table[0], tablefmt="html")
def record_to_objects(self):
    """Create config records to match the file metadata.

    Reads the unpacked spreadsheet contents from the file record,
    normalizes legacy column names, and creates or updates one source
    record per data row, committing at the end.
    """
    from ambry.orm.exc import NotFoundError

    fr = self.record

    contents = fr.unpacked_contents

    if not contents:
        return

    # Zip transposes an array when in the form of a list of lists, so this transposes so
    # each row starts with the heading and the rest of the row are the values
    # for that row. The bool and filter return false when none of the values
    # are non-empty. Then zip again to transpose to original form.
    non_empty_rows = drop_empty(contents)

    s = self._dataset._database.session

    for i, row in enumerate(non_empty_rows):

        if i == 0:
            header = row
        else:
            d = dict(six.moves.zip(header, row))

            # Normalize legacy spreadsheet column names to current ones.
            if 'widths' in d:
                del d['widths']  # Obsolete column in old spreadsheets.

            if 'table' in d:
                d['dest_table_name'] = d['table']
                del d['table']

            if 'order' in d:
                d['stage'] = d['order']
                del d['order']

            if 'dest_table' in d:
                d['dest_table_name'] = d['dest_table']
                del d['dest_table']

            if 'source_table' in d:
                d['source_table_name'] = d['source_table']
                del d['source_table']

            d['d_vid'] = self._dataset.vid
            d['state'] = 'synced'

            try:
                ds = self._dataset.source_file(str(d['name']))
                ds.update(**d)

            except NotFoundError:
                name = d['name']
                del d['name']
                try:
                    ds = self._dataset.new_source(name, **d)
                # FIX: bare `except:` narrowed to `except Exception:` so
                # KeyboardInterrupt/SystemExit aren't intercepted; the debug
                # print-then-reraise behavior is unchanged for real errors.
                except Exception:
                    print(name, d)
                    import pprint
                    pprint.pprint(d)
                    raise
            # FIX: same narrowing as above.
            except Exception:
                # Odd error with 'none' in keys for d
                print('!!!', header)
                print('!!!', row)
                raise

            s.merge(ds)

    self._dataset._database.commit()
def root_info(args, l, rc):
    """Print library status: version, schema, directories, database and
    remotes; optionally dump configs (--configs) and accounts (--accounts).
    """
    from ..cli import prt
    from ..dbexceptions import ConfigurationError
    from tabulate import tabulate
    from ambry.library.filesystem import LibraryFilesystem
    from ambry.util.text import ansicolors
    from ambry.util import drop_empty
    from ambry.orm import Account
    from ambry.orm.database import SCHEMA_VERSION
    import ambry

    if args.config_path:
        prt(rc.loaded[0])
        return

    prt('Version: {}', ambry._meta.__version__)
    prt('Schema: {}', SCHEMA_VERSION)
    prt('Root dir: {}', rc.library.filesystem_root)

    try:
        if l.filesystem.source():
            prt('Source : {}', l.filesystem.source())
    # FIX: dropped the unused `as e` binding.
    except (ConfigurationError, AttributeError):
        prt('Source : No source directory')

    prt('Config: {}', rc.loaded[0])
    prt('Accounts: {}', rc.accounts.loaded[0])

    if l:
        prt('Library: {}', l.database.dsn)
        prt('Remotes: {}',
            ', '.join([str(r.short_name) for r in l.remotes]) if l.remotes else '')
    else:
        fs = LibraryFilesystem(rc)
        prt('Library: {} {}(Inaccessible!){}', fs.database_dsn,
            ansicolors.FAIL, ansicolors.ENDC)

    if args.configs:
        ds = l.database.root_dataset

        prt("Configs:")
        records = []
        for config in ds.configs:
            # Can't use prt() b/c it tries to format the {} in the config.value
            records.append((config.dotted_key, config.value))

        # FIX: parenthesized the Python-2-only print statement; identical
        # output under Python 2, and now also valid Python 3.
        print(tabulate(sorted(records, key=lambda e: e[0]), headers=['key', 'value']))

    if args.accounts:
        headers = 'Id Service User Access Url'.split()

        records = []
        for k in l.accounts.keys():
            acct = l.account(k)
            records.append([acct.account_id, acct.major_type, acct.user_id,
                            acct.access_key, acct.url])

        # FIX: removed the unused `accounts = [v for k, v in l.accounts.items()]`
        # local; nothing read it.

        if not records:
            return

        records = drop_empty([headers] + records)

        print(tabulate(sorted(records[1:]), records[0]))
def root_info(args, l, rc):
    """Print library status: version, schema, directories, database and
    remotes; optionally dump configs (--configs) and accounts (--accounts).
    """
    from ..cli import prt
    from ..dbexceptions import ConfigurationError
    from tabulate import tabulate
    from ambry.library.filesystem import LibraryFilesystem
    from ambry.util.text import ansicolors
    from ambry.util import drop_empty
    from ambry.orm import Account
    from ambry.orm.database import SCHEMA_VERSION
    import ambry

    if args.config_path:
        prt(rc.loaded[0])
        return

    prt('Version: {}', ambry._meta.__version__)
    prt('Schema: {}', SCHEMA_VERSION)
    prt('Root dir: {}', rc.library.filesystem_root)

    try:
        if l.filesystem.source():
            prt('Source : {}', l.filesystem.source())
    # FIX: dropped the unused `as e` binding.
    except (ConfigurationError, AttributeError):
        prt('Source : No source directory')

    prt('Config: {}', rc.loaded[0])
    prt('Accounts: {}', rc.accounts.loaded[0])

    if l:
        prt('Library: {}', l.database.dsn)
        prt('Remotes: {}',
            ', '.join([str(r.short_name) for r in l.remotes]) if l.remotes else '')
    else:
        fs = LibraryFilesystem(rc)
        prt('Library: {} {}(Inaccessible!){}', fs.database_dsn,
            ansicolors.FAIL, ansicolors.ENDC)

    if args.configs:
        ds = l.database.root_dataset

        prt("Configs:")
        records = []
        for config in ds.configs:
            # Can't use prt() b/c it tries to format the {} in the config.value
            records.append((config.dotted_key, config.value))

        # FIX: parenthesized the Python-2-only print statement; identical
        # output under Python 2, and now also valid Python 3.
        print(tabulate(sorted(records, key=lambda e: e[0]), headers=['key', 'value']))

    if args.accounts:
        headers = 'Id Service User Access Url'.split()

        records = []
        for k in l.accounts.keys():
            acct = l.account(k)
            records.append([acct.account_id, acct.major_type, acct.user_id,
                            acct.access_key, acct.url])

        # FIX: removed the unused `accounts = [v for k, v in l.accounts.items()]`
        # local; nothing read it.

        if not records:
            return

        records = drop_empty([headers] + records)

        print(tabulate(sorted(records[1:]), records[0]))
def record_to_objects(self):
    """Create config records to match the file metadata.

    Reads the unpacked spreadsheet contents from the file record,
    normalizes legacy column names, and creates or updates one source
    record per data row, committing at the end.
    """
    from ambry.orm.exc import NotFoundError

    fr = self.record

    contents = fr.unpacked_contents

    if not contents:
        return

    # Zip transposes an array when in the form of a list of lists, so this transposes so
    # each row starts with the heading and the rest of the row are the values
    # for that row. The bool and filter return false when none of the values
    # are non-empty. Then zip again to transpose to original form.
    non_empty_rows = drop_empty(contents)

    s = self._dataset._database.session

    for i, row in enumerate(non_empty_rows):

        if i == 0:
            header = row
        else:
            d = dict(six.moves.zip(header, row))

            # Normalize legacy spreadsheet column names to current ones.
            if 'widths' in d:
                del d['widths']  # Obsolete column in old spreadsheets.

            if 'table' in d:
                d['dest_table_name'] = d['table']
                del d['table']

            if 'order' in d:
                d['stage'] = d['order']
                del d['order']

            if 'dest_table' in d:
                d['dest_table_name'] = d['dest_table']
                del d['dest_table']

            if 'source_table' in d:
                d['source_table_name'] = d['source_table']
                del d['source_table']

            d['d_vid'] = self._dataset.vid
            d['state'] = 'synced'

            try:
                ds = self._dataset.source_file(str(d['name']))
                ds.update(**d)

            except NotFoundError:
                name = d['name']
                del d['name']
                try:
                    ds = self._dataset.new_source(name, **d)
                # FIX: bare `except:` narrowed to `except Exception:` so
                # KeyboardInterrupt/SystemExit aren't intercepted; the debug
                # print-then-reraise behavior is unchanged for real errors.
                except Exception:
                    print(name, d)
                    import pprint
                    pprint.pprint(d)
                    raise
            # FIX: same narrowing as above.
            except Exception:
                # Odd error with 'none' in keys for d
                print('!!!', header)
                print('!!!', row)
                raise

            s.merge(ds)

    self._dataset._database.commit()