Example #1
def test_2():

    table1 = tables.Table(str_table1)
    table2 = tables.Table(str_table2)

    assert table1.columns == table2.columns
    assert not table1.columns != table2.columns
Example #2
def test_3():

    table1 = tables.Table(str_table1)
    table2 = tables.Table(str_table2)
    table2.columns.append(tables.Column(name='aaaa', type='text'))

    assert not table1.columns == table2.columns
    assert table1.columns != table2.columns
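The str_table1 and str_table2 fixtures are not shown in this listing; judging by the YAML format used later in Example #17, they are presumably table definitions along these lines (the table and column names below are made up for illustration):

# Hypothetical fixtures for the two tests above; the real str_table1 and
# str_table2 are defined elsewhere and only need to describe identical tables.
str_table1 = """
table: my.table
columns:
    - col1: text
    - col2: int
primary_key: [col1]
"""
str_table2 = str_table1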
Example #3
def getTableStats(table_name, alias='oradb'):
    """
    Function to get statistics on an Oracle table
    :param table_name:
    :param alias: Db alias
    """
    table_name = table_name.upper()
    alias = alias.upper()
    engine = getDbConnection(alias, asEngine=True)
    conn = engine.connect()

    if alias.startswith('ORA'):
        tableStatsSql = '''
        select owner, table_name, 
        --partition_name, 
        object_type, num_rows, avg_row_len, last_analyzed
        from all_tab_statistics where TABLE_NAME = :tbl'''

        df = pd.read_sql(tableStatsSql, conn, params={'tbl': table_name})
        if len(df) == 0:
            print(
                'Table {} not found in all_tab_statistics!'.format(table_name))
            return
        num_rows = df.num_rows[0]
        df.columns = df.columns.str.replace('_', ' ').str.title()
        config = tables.Config(border=True)
        columns = [tables.Column('Property'), tables.Column('Value')]
        table = tables.Table(config, columns)
        for rec in df.T.to_records(convert_datetime64=True):
            table.addRow(rec)
        print(table.asString())

        tableColsStatsSql = '''
        select c.column_id, s.COLUMN_NAME, s.NUM_DISTINCT, s.NUM_NULLS, s.AVG_COL_LEN 
        from all_tab_col_statistics s, all_tab_cols c
        where s.TABLE_NAME = :tbl 
        and s.table_name = c.table_name
        and s.column_name = c.column_name
        and s.column_name not like 'SYS_%'
        '''

        df = pd.read_sql(tableColsStatsSql,
                         conn,
                         params={'tbl': table_name},
                         index_col='column_id')
        df.insert(1, 'num_rows', num_rows)
        df.insert(3, 'not_nulls', df.num_rows - df.num_nulls)
        df.columns = df.columns.str.replace('_', ' ').str.title()
        df.sort_index(ascending=True, inplace=True)
        columns = [tables.Column(c) for c in df.columns]
        table = tables.Table(config, columns)
        for rec in df.to_records(index=False):
            table.addRow(rec)
        print(table.asString())

    conn.close()
Example #4
def getPackages(package_name, alias='oradb'):
    """
    Function to search for PL/SQL packages in a given Oracle DB
    :param package_name:
    :param alias:
    :return:
    """
    package_name = package_name.upper()
    alias = alias.upper()

    conn = getDbConnection(alias)
    cur = conn.cursor()

    sql = '''
    select distinct owner, object_name 
    from all_procedures 
    where object_type = 'PACKAGE' and owner not like '%SYS' and owner not like 'XDB' 
    and object_name like '%{}%'
    '''.format(package_name)
    config = tables.Config(border=True)
    r = cur.execute(sql)
    columns = [tables.Column(c[0]) for c in r.description]
    table = tables.Table(config, columns)

    for row in r:
        table.addRow((row[0], row[1].lower()))

    print(table.asString())
    conn.close()
Example #5
def printUserCounts(stat, topUsers):

    print("User Counts")
    table = tables.Table()
    table.addHeader(("User", "Total", "Percentage"), )
    table.setDefaultColumnFormats((
        tables.Table.ColumnFormat("%s",
                                  tables.Table.ColumnFormat.LEFT_JUSTIFY),
        tables.Table.ColumnFormat("%d",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%.1f%%",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
    ))

    total = sum(stat["uid"].values())
    for uid, value in sorted(stat["uid"].items(),
                             key=lambda x: x[1],
                             reverse=True)[:topUsers]:
        table.addRow((
            uid,
            value,
            safeDivision(value, total, 100.0),
        ))
    os = StringIO()
    table.printTable(os=os)
    print(os.getvalue())
Example #6
def add_column_table(table, ColClass, col_label, values):
    """
    Add a column to a PyTables Table

    Parameters
    ----------
    table: `tables.table.Table`
    ColClass: `tables.atom.MetaAtom`
    col_label: str
    values: list or `numpy.ndarray`

    Returns
    -------
    `tables.table.Table`
    """
    # Step 1: Adjust table description
    d = table.description._v_colobjects.copy()  # original description
    d[col_label] = ColClass()  # add column

    # Step 2: Create new temporary table:
    newtable = tables.Table(table._v_file.root, '_temp_table', d, filters=table.filters)  # new table
    table.attrs._f_copy(newtable)  # copy attributes
    # Copy table rows, also add new column values:
    for row, value in zip(table, values):
        newtable.append([tuple(list(row[:]) + [value])])
    newtable.flush()

    # Step 3: Move temporary table to original location:
    parent = table._v_parent  # original table location
    name = table._v_name  # original table name
    table.remove()  # remove original table
    newtable.move(parent, name)  # move temporary table to original location

    return newtable
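A minimal usage sketch for add_column_table, assuming PyTables; the file name, table layout, and derived column values below are illustrative and not part of the original example:

import tables

# Throwaway in-memory HDF5 file (illustrative only).
h5 = tables.open_file('demo.h5', 'w', driver='H5FD_CORE',
                      driver_core_backing_store=0)
tab = h5.create_table('/', 'points', {'x': tables.Float64Col()})
tab.append([(1.0,), (2.0,), (3.0,)])
tab.flush()

# Append a Float64 column 'y' holding the squares of the existing 'x' values.
tab = add_column_table(tab, tables.Float64Col, 'y',
                       [row['x'] ** 2 for row in tab.iterrows()])
print(tab.colnames)  # ['x', 'y']
h5.close()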
Example #7
def getDbAliases(filter_=None, asDataFrame=False):
    file = os.environ['HOME'] + '/config/.dbaccess'
    df = pd.read_csv(file,
                     sep='|',
                     skiprows=5,
                     usecols=[0, 1, 2, 3],
                     header=None,
                     comment='#')

    if filter_:
        filter_ = str(filter_)
        df = df[df[0].str.contains(filter_.upper())]
    df = df.fillna('')
    if asDataFrame:
        return df
    else:
        config = tables.Config(border=True)
        columns = [tables.Column('Alias'), tables.Column('Details')]
        table = tables.Table(config, columns)

        for row in df.to_records(index=False):
            alias = row[0]
            details = '{}@{}:{}'.format(row[3], row[1], row[2])
            if details.endswith(':'):
                details = details[:-1]
            if details.endswith('.0'):
                details = details[:-2]
            table.addRow([alias, details])
        print(table.asString())
Example #8
    def __delitem__(self, uid):
        """ Delete the row.

        """
        if not hasattr(uid, 'hex'):
            raise KeyError('{} is not a uuid.UUID'.format(uid))

        table = self._items
        for row in table.where(
                'uid == value', condvars={'value': uid.hex}):
            if table.nrows == 1:
                record = table.description
                # PyTables, due to HDF5 limitations, does not
                # support removing the last row of a table, so we
                # delete the table and create a new empty table
                # in this situation.
                table.remove()
                self._items = tables.Table(self._group, 'items', record)
                del self._data[uid]
            else:
                table.remove_row(row.nrow)
            break
        else:
            raise KeyError(
                'Record (id={id}) does not exist'.format(id=uid))
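A self-contained sketch of the last-row workaround described in the comment above, assuming PyTables; the file name and record layout are illustrative:

import tables

with tables.open_file('items.h5', 'w') as h5:
    tab = h5.create_table('/', 'items', {'uid': tables.StringCol(32)})
    tab.append([(b'abc',)])
    tab.flush()

    if tab.nrows == 1:
        # The table cannot be emptied row by row, so drop it and
        # recreate an empty table with the same description.
        description = tab.description
        tab.remove()
        tab = tables.Table(h5.root, 'items', description)
    else:
        tab.remove_row(0)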
Example #9
def printAgentCounts(stat):

    print("User-Agent Counts")
    table = tables.Table()
    table.addHeader(
        ("User-Agent", "Total", "Percentage"),
    )
    table.setDefaultColumnFormats(
        (
            tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%.1f%%", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        )
    )

    total = sum(stat["user-agent"].values())
    for ua in sorted(stat["user-agent"].keys()):
        table.addRow((
            ua,
            stat["user-agent"][ua],
            safeDivision(stat["user-agent"][ua], total, 100.0),
        ))
    os = StringIO()
    table.printTable(os=os)
    print(os.getvalue())
Example #10
def getTables(table_name, alias='oradb'):
    tableName = '%{}%'.format(table_name)
    alias = alias.upper()
    conn = getDbConnection(alias)

    if alias.startswith('ORA'):
        sql = '''select table_name, owner, last_analyzed from all_tables where table_name like upper(:tab) order by 1'''
        cur = conn.cursor()
        cur.execute(sql, {'tab': tableName})
    elif alias.startswith('MYSQL'):
        sql = '''select table_name, table_schema, update_time from information_schema.tables where table_name like upper(%s) order by 1'''
        cur = conn.cursor()
        cur.execute(sql, (tableName, ))

    config = tables.Config(border=True)
    columns = list()
    columns.append(tables.Column('Table Name'))
    columns.append(tables.Column('Owner'))
    columns.append(tables.Column('Last Analyzed'))

    table = tables.Table(config, columns)

    for row in cur.fetchall():
        table.addRow(row)

    print(table.asString())
    conn.close()
Example #11
    def printCallTimeTotals(self, sortIndex):

        table = tables.Table()

        table.setDefaultColumnFormats((
            tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
            tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        ))

        table.addHeader(("File", "Name", "Count", "Inclusive", "Exclusive", "Children",))
        for key, value in sorted(self.calltimes.items(), key=lambda x: x[1][sortIndex], reverse=True):
            table.addRow((
                key[0],
                key[1],
                value[0],
                value[2],
                "%s (%6.3f%%)" % (value[1], (100.0 * value[1]) / self.exclusiveTotal),
                value[2] - value[1],
            ))
        table.addRow()
        table.addRow((
            "Total:",
            "",
            "",
            "",
            self.exclusiveTotal,
            "",
        ))

        table.printTable()
        print("")
Example #12
def printHistogramSummary(stat, index):

    print("%s average response histogram" % (index, ))
    table = tables.Table()
    table.addHeader(
        ("", "<10ms", "10ms<->100ms", "100ms<->1s", "1s<->10s", "10s<->30s",
         "30s<->60s", ">60s", "Over 1s", "Over 10s"), )
    table.setDefaultColumnFormats((
        tables.Table.ColumnFormat("%s",
                                  tables.Table.ColumnFormat.LEFT_JUSTIFY),
        tables.Table.ColumnFormat("%d (%.1f%%)",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%d (%.1f%%)",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%d (%.1f%%)",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%d (%.1f%%)",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%d (%.1f%%)",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%d (%.1f%%)",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%d (%.1f%%)",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%.1f%%",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%.1f%%",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
    ))
    for i in (
            "T",
            "T-RESP-WR",
    ):
        table.addRow((
            "Overall Response" if i == "T" else "Response Write",
            (stat[i]["<10ms"],
             safeDivision(stat[i]["<10ms"], stat["requests"], 100.0)),
            (stat[i]["10ms<->100ms"],
             safeDivision(stat[i]["10ms<->100ms"], stat["requests"], 100.0)),
            (stat[i]["100ms<->1s"],
             safeDivision(stat[i]["100ms<->1s"], stat["requests"], 100.0)),
            (stat[i]["1s<->10s"],
             safeDivision(stat[i]["1s<->10s"], stat["requests"], 100.0)),
            (stat[i]["10s<->30s"],
             safeDivision(stat[i]["10s<->30s"], stat["requests"], 100.0)),
            (stat[i]["30s<->60s"],
             safeDivision(stat[i]["30s<->60s"], stat["requests"], 100.0)),
            (stat[i][">60s"],
             safeDivision(stat[i][">60s"], stat["requests"], 100.0)),
            safeDivision(stat[i]["Over 1s"], stat["requests"], 100.0),
            safeDivision(stat[i]["Over 10s"], stat["requests"], 100.0),
        ))
    os = StringIO()
    table.printTable(os=os)
    print(os.getvalue())
Example #13
    def show_connection(self, alias):
        t_headers = ['Alias', 'user', 'host', 'port']
        t_rows = []
        if alias:
            conn = self.connections.get(alias)
            if conn:
                t_rows.append((conn.alias, conn.user, conn.host, conn.port))

        table = tables.Table(t_headers, t_rows)
        table.output()
        exit(0)
Example #14
    def tohtable(self, root: tb.Group, tabname: str) -> tb.Table:
        desc = type(
            '_struct_array', (tb.IsDescription, ),
            {n: tb.Col.from_dtype(v.dtype)
             for n, v in self._arrs.items()})
        tabl = tb.Table(root, tabname, desc)
        tabl.append([self._arrs[n] for n in tabl.colnames])  # desc.columns is an un-ordered dict
        tabl.attrs.names = self.names
        tabl.attrs.kinds = [v.dtype.str for v in self.arrays]
        return tabl
Example #15
    def __init__(self):
        self.table = tables.Table()
        self.__grid_lines = self.table.table_lines
        self.__grid_hide_lines = self.table.table_hide_lines
        self.__game_over = False
        self.__time_start = time.time()
        self.__time_wait_start = 0
        self.__selected_list = []
        self.__betrate = 1
        self.__win_num = 'unknow'
        self.__score = 0
Example #16
    def __init__(self):
        self.filename = os.path.join("tables", "MagicItems.json")
        # magic_item_name_fields = [
        #     M_ItemName.NOUN,
        #     M_ItemName.ADJECTIVE,
        #     M_ItemName.WIZARD_NAME_PRE,
        #     M_ItemName.WIZARD_NAME_POST]

        # gen.PerilGenerator.__init__(self, self.filename, magic_item_name_fields)
        gen.PerilGenerator.__init__(self, self.filename, M_ItemName)

        self.item_types = tables.Table(list(M_Item), M_Item_Weights)
        self.items = self.init_item_tables()
Example #17
def test_4():

    st1 = """
table: my.table
columns:
    - col1: text
    - col2: int
    - col3: text
primary_key: [col2]
indexes:
    - idx1: col1
"""

    st2 = """
table: my.table
columns:
    - col1: text
    - col2: text
    - col4:
        type: date
        description: some date
primary_key: [col1, col2]
indexes:
    - idx2: col2
"""

    expected = """ALTER TABLE my.table ALTER COLUMN col2 TYPE text;
ALTER TABLE my.table DROP COLUMN IF EXISTS col3;
ALTER TABLE my.table ADD COLUMN col4 date;
COMMENT ON COLUMN my.table.col4 IS 'some date';
DROP INDEX CONCURRENTLY IF EXISTS my.idx1;
CREATE INDEX CONCURRENTLY idx2 ON my.table USING btree
    (col2);
ALTER TABLE my.table DROP table_pkey;
ALTER TABLE my.table ADD PRIMARY KEY (col1, col2);
"""
    t1 = tables.Table(st1)
    t2 = tables.Table(st2)
    assert expected == t1.alter_to(t2)
Example #18
def getPackageFunctions(package_name, alias='oradb'):
    """
    Function to get all functions for a given package
    :param package_name:
    :param alias:
    :return:
    """
    package_name = package_name.upper()
    alias = alias.upper()

    engine = getDbConnection(alias, asEngine=True)
    conn = engine.connect()

    sql = '''
    select p.owner, p.object_name, p.PROCEDURE_NAME, a.POSITION, a.ARGUMENT_NAME||' '||a.IN_OUT||' '||a.DATA_TYPE arg
    from all_procedures p, all_arguments a
    where p.procedure_name = a.object_name(+)
    and p.object_name = a.package_name
    and p.OWNER not like '%SYS' and p.OWNER  not like 'XDB' 
    and p.object_type = 'PACKAGE' 
    and p.OBJECT_NAME = '{}'
    and a.ARGUMENT_NAME is not null
    order by 3
    '''.format(package_name)

    config = tables.Config(border=True)
    df = pd.read_sql(sql, conn)
    if len(df) == 0:
        return
    df = df.pivot_table(index=['owner', 'object_name', 'procedure_name'],
                        columns=['position'],
                        values=['arg'],
                        aggfunc='first')
    # del(df[0])
    df = df.apply(lambda x: ', '.join([xx for xx in x if xx is not None]),
                  axis=1)
    df = df.reset_index()
    df[0] = '(' + df[0] + ')'
    df.rename(columns={
        0: 'arguments',
        'object_name': 'package_name'
    },
              inplace=True)
    columns = [tables.Column(c) for c in df.columns]
    table = tables.Table(config, columns)

    for row in df.to_records(index=False):
        table.addRow([r.lower() for r in row])

    print(table.asString())
    conn.close()
Example #19
    def prune_results(self, condition='like1>=0.0'):
        """
        Prunes the result table
        :param condition: A Python condition to select behaving runs
        :return:
        """
        new_filename = self.result_filename.replace('.h5', '.pruned.h5')
        pruned_data = self.data.read_where(condition)
        with tables.open_file(new_filename, 'w') as o:
            tab = tables.Table(o.root, self.name, self.data.description)
            tab.append(pruned_data)
        self.result_filename = new_filename
        self.data_file = tables.open_file(self.result_filename)
        self.data = self.data_file.get_node(f'/{self.model}')
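The read_where/append pattern used by prune_results, reduced to a self-contained sketch; the file names, node name, column, and condition below are assumptions for illustration:

import tables

# Build a tiny source file to prune (the contents are made up).
with tables.open_file('results.h5', 'w') as src:
    runs = src.create_table('/', 'runs', {'like1': tables.Float64Col()})
    runs.append([(-1.0,), (0.5,), (2.0,)])
    runs.flush()

# Same pattern as prune_results: select the behaving rows and copy them
# into a table with the same description in a new file.
with tables.open_file('results.h5', 'r') as src, \
        tables.open_file('results.pruned.h5', 'w') as dst:
    runs = src.get_node('/runs')
    kept = runs.read_where('like1 >= 0.0')  # structured array of matching rows
    pruned = tables.Table(dst.root, 'runs', runs.description)
    pruned.append(kept)
    pruned.flush()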
Example #20
def printRequestSummary(stats):
    table = tables.Table()
    table.addHeader(
        ("Period", "Requests", "Av. Requests", "Av. Response", "Av. Response", "Max. Response", "Slot", "CPU", "500's"),
    )
    table.addHeader(
        ("", "", "per second", "(ms)", "no write(ms)", "(ms)", "Average", "Average", ""),
    )
    table.setDefaultColumnFormats(
        (
            tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%.2f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%.1f%%", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        )
    )

    for key, seconds in (("current", 60,), ("1m", 60,), ("5m", 5 * 60,), ("1h", 60 * 60,),):

        stat = stats[key]
        table.addRow((
            key,
            stat["requests"],
            safeDivision(float(stat["requests"]), seconds),
            safeDivision(stat["t"], stat["requests"]),
            safeDivision(stat["t"] - stat["t-resp-wr"], stat["requests"]),
            stat["T-MAX"],
            safeDivision(float(stat["slots"]), stat["requests"]),
            safeDivision(stat["cpu"], stat["requests"]),
            stat["500"],
        ))

    os = StringIO()
    table.printTable(os=os)
    print(os.getvalue())

    stat = stats["1m"]
    return SummaryValues(
        requests=safeDivision(float(stat["requests"]), seconds),
        response=safeDivision(stat["t"], stat["requests"]),
        slots=safeDivision(float(stat["slots"]), stat["requests"]),
        cpu=safeDivision(stat["cpu"], stat["requests"]),
        errors=stat["500"],
    )
Example #21
    def _savepkl(i, pkl):
        dns, dts = zip(*pkl.dtable.dtype.descr)
        desc = type('_struct_array', (ptb.IsDescription,), {dn: ptb.Col.from_dtype(np.dtype(dt)) for dn,dt in zip(dns,dts)})
        dset = ptb.Table(f.root, pkl.ID, desc)

        dset.append([pkl.get_attribute(n, flagged_only = False) for n in dset.colnames])

        dset.attrs.data_class = 'PeakList'
        dset.attrs.order = i
        dset.attrs.dtable_names = dns
        dset.attrs.dtable_types = dts
        dset.attrs.flag_attrs = np.array(pkl.flag_attributes)

        dset.attrs.tags = np.array([(t or 'None', v) for v,t in pkl.tags.to_list()])
        for k, v in pkl.metadata.items(): setattr(dset.attrs, 'metadata_' + k, _packMeta(v))
Example #22
    def list_connections(self):
        t_headers = ['Aliases', 'Type', 'user', 'host', 'port']
        t_rows = []
        keys = self.connections.get_aliases()
        keys.sort()
        for key in keys:
            conn = self.connections.get(key)
            if (type(conn) is not mcm.common.connections.Vnc
                    and type(conn) is not mcm.common.connections.Rdp):
                t_rows.append((conn.alias, conn.get_type(), conn.user,
                               conn.host, conn.port))

        table = tables.Table(t_headers, t_rows)
        table.output()
        print("=" * 80)
Example #23
def findColumns(col_name, alias='oradb'):

    alias = alias.upper()
    conn = getDbConnection(alias)

    if alias.startswith('ORA'):
        sql = '''select owner, table_name, column_name, NULLABLE, data_type, DATA_LENGTH 
                 from all_tab_columns where column_name like upper('%'||:col||'%') 
                 order by owner, table_name, column_id'''
        cur = conn.cursor()
        cur.execute(sql, {'col': col_name})
    elif alias.startswith('MYSQL'):
        sql = '''select table_schema owner, table_name, column_name, is_nullable, column_type, null data_length 
                  from information_schema.columns where column_name like upper(concat('%', %s, '%'))'''
        cur = conn.cursor()
        cur.execute(sql, (col_name, ))

    config = tables.Config(border=True)
    columns = list()
    columns.append(tables.Column('Schema'))
    columns.append(tables.Column('Table Name'))
    columns.append(tables.Column('Column Name'))
    columns.append(tables.Column('Null?'))
    columns.append(tables.Column('Data Type'))

    table = tables.Table(config, columns)

    for row in cur.fetchall():
        schemaName, tableName, columnName, notNull, columnType, columnSize = row
        if alias.startswith('ORA'):
            columnTypeSize = '{}({})'.format(columnType, columnSize)
            if notNull == 'N':
                notNull = 'NOT NULL'
            else:
                notNull = ''
        elif alias.startswith('MYSQL'):
            columnTypeSize = columnType
            if notNull == 'NO':
                notNull = 'NOT NULL'
            else:
                notNull = ''
        table.addRow(
            (schemaName, tableName, columnName, notNull, columnTypeSize))

    print(table.asString())
    conn.close()
Example #24
    def test_no_wrapper_with_index(self):
        f = t.open_file('bla2',
                        'a',
                        driver="H5FD_CORE",
                        driver_core_backing_store=0)
        table = t.Table(f.get_node('/'),
                        'test', {
                            'c1': t.Int32Col(),
                            'c2': t.Int16Col()
                        },
                        title='test')
        table.row['c1'] = 0
        table.row.append()
        table.flush()
        table.cols.c1.create_index()
        [x.fetch_all_fields() for x in table.where('c1 == 0')]
        f.close()
Example #25
def getTableColumns(table_name, alias='oradb'):

    alias = alias.upper()
    conn = getDbConnection(alias)

    if alias.startswith('ORA'):
        sql = '''select table_name, column_name, NULLABLE, data_type, DATA_LENGTH, LAST_ANALYZED
        from all_tab_columns where table_name like upper(:tab) order by table_name, column_id'''
        cur = conn.cursor()
        cur.execute(sql, {'tab': table_name})
    elif alias.startswith('MYSQL'):
        sql = '''select table_name, column_name, is_nullable, column_type, null data_length, null last_analyzed 
        from information_schema.columns where table_name like upper(%s)'''
        cur = conn.cursor()
        cur.execute(sql, (table_name, ))

    config = tables.Config(border=True)
    columns = list()
    columns.append(tables.Column('Table Name'))
    columns.append(tables.Column('Column Name'))
    columns.append(tables.Column('Null?'))
    columns.append(tables.Column('Data Type'))
    columns.append(tables.Column('Last Analyzed'))

    table = tables.Table(config, columns)

    for row in cur.fetchall():
        tableName, columnName, notNull, columnType, columnSize, lastAnalayzed = row
        if alias.startswith('ORA'):
            columnTypeSize = '{}({})'.format(columnType, columnSize)
            if notNull == 'N':
                notNull = 'NOT NULL'
            else:
                notNull = ''
        elif alias.startswith('MYSQL'):
            columnTypeSize = columnType
            if notNull == 'NO':
                notNull = 'NOT NULL'
            else:
                notNull = ''
        table.addRow(
            (tableName, columnName, notNull, columnTypeSize, lastAnalayzed))

    print(table.asString())
    conn.close()
Example #26
    def _update_chromosomes_info(self):
        try:
            self._chromosomes_info = self._group.chromosomes
        except t.NoSuchNodeError:
            try:
                self._chromosomes_info = t.Table(
                    self._group,
                    'chromosomes',
                    RegionsTable.ChromosomeDescription,
                    expectedrows=100)
            except (t.FileModeError, t.HDF5ExtError):
                self._chromosomes_info = None

        if self._chromosomes_info is not None:
            try:
                self._chromosomes_info.remove_rows(0)
                self._chromosomes_info.flush()
            except t.HDF5ExtError:
                logger.error(
                    "File not open for writing, cannot update chromosome table!"
                )
                return

            chromosomes_info = []
            for row in self._regions.iterrows():
                if (len(chromosomes_info) == 0
                        or row['chromosome'] != chromosomes_info[-1][1]):
                    chromosomes_info.append([
                        len(chromosomes_info), row['chromosome'], row['ix'],
                        row['ix'], row['end']
                    ])
                else:
                    chromosomes_info[-1][3] = row['ix']
                    chromosomes_info[-1][4] = row['end']

            row = self._chromosomes_info.row
            for info in chromosomes_info:
                row['ix'] = info[0]
                row['name'] = info[1]
                row['start_bin'] = info[2]
                row['end_bin'] = info[3]
                row['size'] = info[4]
                row.append()
            self._chromosomes_info.flush()
Example #27
def printMethodCounts(stat):

    print("Method Counts")
    table = tables.Table()
    table.addHeader(
        ("Method", "Count", "%", "Av. Response", "%", "Total Resp. %"), )
    table.addHeader(("", "", "", "(ms)", "", ""), )
    table.setDefaultColumnFormats((
        tables.Table.ColumnFormat("%s",
                                  tables.Table.ColumnFormat.LEFT_JUSTIFY),
        tables.Table.ColumnFormat("%d",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%.1f%%",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%.1f",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%.1f%%",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
        tables.Table.ColumnFormat("%.1f%%",
                                  tables.Table.ColumnFormat.RIGHT_JUSTIFY),
    ))

    response_average = {}
    for method in stat["method"].keys():
        response_average[method] = (
            stat["method-t"][method] / stat["method"][method])

    total_count = sum(stat["method"].values())
    total_avresponse = sum(response_average.values())
    total_response = sum(stat["method-t"].values())

    for method in sorted(stat["method"].keys()):
        table.addRow((
            method,
            stat["method"][method],
            safeDivision(stat["method"][method], total_count, 100.0),
            response_average[method],
            safeDivision(response_average[method], total_avresponse, 100.0),
            safeDivision(stat["method-t"][method], total_response, 100.0),
        ))
    os = StringIO()
    table.printTable(os=os)
    print(os.getvalue())
Example #28
def build_tables(json_filename, fields):
    """
    Build tables from a text file, sorting each entry into categories based on fields.

    The text file contains the entries from a dice roll table in the following format:
    [number] [entry for field 1] [entry for field 2] [entry for field 3] etc.
    Example:
    42 Globe History Gyrating I- -kang
    """
    # Each field will get its own blank table. This variable maps enum members to
    # Table objects.
    enum_tables = {table_name: tables.Table() for table_name in fields}

    # Strip the JSON extension from the filename and add a TXT extension.
    # We do this so that build_tables can be called with the same filename used for
    # load_tables.
    text_filename = json_filename[:-5]
    text_filename += ".txt"

    try:
        with open(text_filename, "r") as file:
            for line in file:
                cleaned_line = line.strip()
                # Split the set of entries by field.
                entry = cleaned_line.split(" ")
                # Get the weight for this set of entries.
                # For ranges, weight = top - bottom + 1. For single numbers, weight = 1.
                weight = number_to_weight(entry[0])

                # Match each entry to its corresponding field.
                for table, item in zip(enum_tables.values(), entry[1:]):
                    table.entries.append(item)
                    table.weights.append(weight)

    except FileNotFoundError:
        raise FileNotFoundError(
            "Couldn't find a text file named {} with table data!".format(
                text_filename))

    save_tables(enum_tables, json_filename)
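build_tables relies on a number_to_weight helper that is not shown in this listing; a hypothetical version matching the rule in the comment above (ranges weigh top - bottom + 1, single numbers weigh 1) could look like this:

def number_to_weight(token):
    """Hypothetical helper: '42' -> 1, '40-45' -> 6 (top - bottom + 1)."""
    if '-' in token:
        bottom, top = (int(part) for part in token.split('-', 1))
        return top - bottom + 1
    return 1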
Example #29
def load_tables(filename, fields):
    """
    Load tables from a JSON file, adjusting their keys to be enum members and their
    values to be Table objects.
    """
    try:
        with open(filename, "r") as file:
            json_tables = json.load(file)
    except FileNotFoundError:
        raise FileNotFoundError("No JSON file named {}!".format(filename))

    # Loading JSON will always give us string keys, and we need integers to get the
    # enum members from fields.
    labeled = {fields(int(num)): table for num, table in json_tables.items()}

    # Build Table objects with the entries and weights for each field.
    enum_tables = {}
    for label, inner_table in labeled.items():
        enum_tables[label] = tables.Table(inner_table['entries'],
                                          inner_table['weights'])

    return enum_tables
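The JSON layout that load_tables expects can be read off the code above: the keys are the string form of each field's integer enum value, and each value carries parallel entries/weights lists. An illustrative, made-up snippet as a Python dict:

example_json_tables = {
    "1": {"entries": ["Globe", "Ring"], "weights": [1, 2]},
    "2": {"entries": ["History", "Fire"], "weights": [1, 1]},
}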
Example #30
    def __delitem__(self, uid):
        """ Delete the row.

        """
        table = self._table
        for row in table.where(
                'index == value', condvars={'value': uid.hex}):
            if table.nrows == 1:
                name = table._v_name
                record = table.description
                # PyTables, due to HDF5 limitations, does not
                # support removing the last row of a table, so we
                # delete the table and create a new empty table
                # in this situation.
                table.remove()
                parent = self._parent
                self._table = tables.Table(parent, name, record)
            else:
                table.remove_row(row.nrow)
            break
        else:
            raise KeyError(
                'Record (id={id}) does not exist'.format(id=uid))