Example #1
import sys

def format_table(table, format='csv', outputstream=sys.stdout, **extra_options):
    """table can be a table from dict_to_table() or a dictionary.
    The dictionary can have either a single value as a key (for a
    one-dimensional table) or 2-tuples (for two-dimensional tables).
    format is currently one of csv, tsv, tex, texbitmap, or asciiart.
    Values for texbitmap should be floats between 0 and 1 and the output
    will be the TeX code for a large-pixeled bitmap."""
    if isinstance(table, dict):
        table = dict_to_table(table)

    if format in ('csv', 'tsv'):
        import csv
        dialect = {'csv': csv.excel, 'tsv': csv.excel_tab}[format]
        writer = csv.writer(outputstream, dialect=dialect)
        for row in table:
            writer.writerow(row)
    elif format == 'tex':
        import TeXTable
        print(TeXTable.texify(table, has_header=True), file=outputstream)
    elif format == 'texbitmap':
        import TeXTable
        extra_options.setdefault('has_header', True)
        print(TeXTable.make_tex_bitmap(table, **extra_options), file=outputstream)
    elif format == 'asciiart':
        from texttable import Texttable
        texttable = Texttable(**extra_options)
        texttable.add_rows(table)
        print(texttable.draw(), file=outputstream)
    else:
        raise ValueError("Unsupported format: %r (supported formats: %s)" %
                         (format, ' '.join(supported_formats)))
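A minimal usage sketch (not part of the original snippet): passing a list of rows needs nothing beyond the function itself, while a dict argument would additionally rely on the dict_to_table() helper (and the 'tex' formats on the TeXTable module, and the error branch on supported_formats) that this snippet assumes are defined elsewhere.

import sys

rows = [["name", "count"], ["alpha", 3], ["beta", 7]]
format_table(rows, format='csv', outputstream=sys.stdout)  # writes CSV rows to stdout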
Example #2
def print_deploy_header():
    table = Texttable(200)
    table.set_cols_dtype(["t", "t", "t", "t", "t", "t", "t", "t", "t"])
    table.header(
        ["Deployment name", "Deployment ID", "Cloud provider", "Region", "Hostname", "Source type", "Source ID",
         "Source name", "Status"])
    return table
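A possible follow-up, with invented values, is to append one row per deployment to the returned header-only table and render it:

table = print_deploy_header()
# every field value below is a placeholder
table.add_row(["demo-deploy", "d-0001", "aws", "eu-west-1", "db1.internal",
               "backup", "b-42", "nightly", "running"])
print(table.draw())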
Example #3
    def output_table_list(tables):
        terminal_size = get_terminal_size()[1]
        widths = []
        for tab in tables:
            for i in range(0, len(tab.columns)):
                current_width = len(tab.columns[i].label)
                if len(widths) < i + 1:
                    widths.insert(i, current_width)
                elif widths[i] < current_width:
                    widths[i] = current_width
                for row in tab.data:
                    current_width = len(resolve_cell(row, tab.columns[i].accessor))
                    if current_width > widths[i]:
                        widths[i] = current_width

        if sum(widths) != terminal_size:
            widths[-1] = terminal_size - sum(widths[:-1]) - len(widths) * 3

        for tab in tables:
            table = Texttable(max_width=terminal_size)
            table.set_cols_width(widths)
            table.set_deco(0)
            table.header([i.label for i in tab.columns])
            table.add_rows([[AsciiOutputFormatter.format_value(resolve_cell(row, i.accessor), i.vt) for i in tab.columns] for row in tab.data], False)
            six.print_(table.draw() + "\n")
Example #4
def test_texttable_header():
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype([
        't',  # text
        'f',  # float (decimal)
        'e',  # float (exponent)
        'i',  # integer
        'a',  # automatic
    ])
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([
        ["text",    "float", "exp", "int", "auto"],
        ["abcd",    "67",    654,   89,    128.001],
        ["efghijk", 67.5434, .654,  89.6,  12800000000000000000000.00023],
        ["lmn",     5e-78,   5e-78, 89.4,  .000000000000128],
        ["opqrstu", .023,    5e+78, 92.,   12800000000000000000000],
    ])
    assert clean(table.draw()) == dedent('''\
         text     float       exp      int     auto
        ==============================================
        abcd      67.000   6.540e+02    89   128.001
        efghijk   67.543   6.540e-01    90   1.280e+22
        lmn        0.000   5.000e-78    89   0.000
        opqrstu    0.023   5.000e+78    92   1.280e+22
    ''')
def getAccuracy(train, testSet, k):
    totalCount = len(testSet)
    correctCount = 0.0

    # Init ConfusionMatrix
    confusionMatrix = {}
    for i in featuresList:
        for j in featuresList:
            confusionMatrix[(i, j)] = 0

    for i in range(len(testSet)):
        prediction = getPrediction(getDistancesOfKSimilarSets(train, testSet[i], k))
        if prediction == testSet[i][-1]:
            correctCount += 1
        confusionMatrix[testSet[i][-1], prediction] += 1

    print("Confusion Matrix")
    from texttable import Texttable
    table = []
    row = [""]
    row.extend(featuresList)
    table.append(row)
    for i in featuresList:
        row = [i]
        for j in featuresList:
            row.append(confusionMatrix[(i, j)])
        table.append(row)
    T = Texttable()
    T.add_rows(table)
    print(T.draw())

    return correctCount / totalCount
Example #6
 def __repr__(self):
   t = Texttable()
   
   for rowId in range(0,self.size[0]):
     rowDetails = []
     for cellId in range(0,self.size[1]):
       cell = self.cellAtLocation(cellId,rowId)
       
       color = {
         "free":   bcolors.WHITE,
         "mine":   bcolors.PURPLE,
         "theirs": bcolors.RED
       }[cell.getState()]
       
       rowDetails.append(
         get_color_string(color, cell)
       )
     
     t.add_row(rowDetails)
   
   return "\n".join([
     t.draw(),
     self.board,
     self.state
   ])
Example #7
def images_to_ascii_table(images):
    """Just a method that formats the images to ascii table.
    Expects dictionary {host: [images]}
    and prints multiple tables
    """
    with closing(StringIO()) as out:
        for host, values in images.iteritems():
            out.write(str(host) + "\n")
            t = TextTable()
            t.set_deco(TextTable.HEADER)
            t.set_cols_dtype(['t'] * 5)
            t.set_cols_align(["l"] * 5)
            rows = []
            rows.append(['Repository', 'Tag', 'Id', 'Created', 'Size'])
            for image in values:
                rows.append([
                    image.repository or '<none>',
                    image.tag or '<none>',
                    image.id[:12],
                    time_ago(image.created),
                    human_size(image.size)
                ])
            t.add_rows(rows)
            out.write(t.draw() + "\n\n")
        return out.getvalue()
def _create_app(args):
    api = _login(args)
    response = api.create_app(args.name, args.type, args.autostart, args.extra_info)
    print('App has been created:')
    table = Texttable(max_width=140)
    table.add_rows([['Param', 'Value']] + [[key, value] for key, value in response.items()])
    print(table.draw())
Example #9
def render_instruments_as_table(instruments, display_heading=True):
    """
    Returns ASCII table view of instruments.

    :param instruments: The instruments to be rendered.
    :type instruments: :class:`mytardisclient.models.resultset.ResultSet`
    :param display_heading: Setting `display_heading` to True ensures
        that the meta information returned by the query is summarized
        in a 'heading' before displaying the table.  This meta
        information can be used to determine whether the query results
        have been truncated due to pagination.
    """
    heading = "\n" \
        "Model: Instrument\n" \
        "Query: %s\n" \
        "Total Count: %s\n" \
        "Limit: %s\n" \
        "Offset: %s\n\n" \
        % (instruments.url, instruments.total_count,
           instruments.limit, instruments.offset) if display_heading else ""

    table = Texttable(max_width=0)
    table.set_cols_align(["r", 'l', 'l'])
    table.set_cols_valign(['m', 'm', 'm'])
    table.header(["ID", "Name", "Facility"])
    for instrument in instruments:
        table.add_row([instrument.id, instrument.name, instrument.facility])
    return heading + table.draw() + "\n"
Example #10
def render_schemas_as_table(schemas, display_heading=True):
    """
    Returns ASCII table view of schemas.

    :param schemas: The schemas to be rendered.
    :type schemas: :class:`mytardisclient.models.resultset.ResultSet`
    :param display_heading: Setting `display_heading` to True ensures
        that the meta information returned by the query is summarized
        in a 'heading' before displaying the table.  This meta
        information can be used to determine whether the query results
        have been truncated due to pagination.
    """
    heading = "\n" \
        "Model: Schema\n" \
        "Query: %s\n" \
        "Total Count: %s\n" \
        "Limit: %s\n" \
        "Offset: %s\n\n" \
        % (schemas.url, schemas.total_count,
           schemas.limit, schemas.offset) if display_heading else ""

    table = Texttable(max_width=0)
    table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l'])
    table.set_cols_valign(['m', 'm', 'm', 'm', 'm', 'm', 'm'])
    table.header(["ID", "Name", "Namespace", "Type", "Subtype", "Immutable",
                  "Hidden"])
    for schema in schemas:
        table.add_row([schema.id, schema.name, schema.namespace,
                       schema.type, schema.subtype or '',
                       str(bool(schema.immutable)), str(bool(schema.hidden))])
    return heading + table.draw() + "\n"
Example #11
def show_pretty_versions():
    result_list = list()
    header_list = ["IP", "Role", "Version", "Name", "Streamcast version", ]
    result_list.append(header_list)
    print("Versions installed:")
    ips = get_ips()
    for ip in ips:
        line = retrieve(sshcmd + ip + " cat /var/raumfeld-1.0/device-role.json")
        if "true" in line:
            moreinfo = "host"
        else:
            moreinfo = "slave"
        renderer_name = RfCmd.get_device_name_by_ip(ip)
        line = retrieve(sshcmd + ip + " cat /etc/raumfeld-version")
        line_streamcast = retrieve(sshcmd + ip + " streamcastd --version")
        single_result = list()
        single_result.append(ip)
        single_result.append(moreinfo)
        single_result.append(line.rstrip())
        single_result.append(renderer_name)
        single_result.append(line_streamcast.rstrip())
        result_list.append(single_result)
    t = Texttable(250)
    t.add_rows(result_list)
    print(t.draw())
Example #12
def render_datasets_as_table(datasets, display_heading=True):
    """
    Returns ASCII table view of datasets.

    :param datasets: The datasets to be rendered.
    :type datasets: :class:`mytardisclient.models.resultset.ResultSet`
    :param display_heading: Setting `display_heading` to True ensures
        that the meta information returned by the query is summarized
        in a 'heading' before displaying the table.  This meta
        information can be used to determine whether the query results
        have been truncated due to pagination.
    """
    heading = "\n" \
        "Model: Dataset\n" \
        "Query: %s\n" \
        "Total Count: %s\n" \
        "Limit: %s\n" \
        "Offset: %s\n\n" \
        % (datasets.url, datasets.total_count,
           datasets.limit, datasets.offset) if display_heading else ""

    table = Texttable(max_width=0)
    table.set_cols_align(["r", 'l', 'l', 'l'])
    table.set_cols_valign(['m', 'm', 'm', 'm'])
    table.header(["Dataset ID", "Experiment(s)", "Description", "Instrument"])
    for dataset in datasets:
        table.add_row([dataset.id, "\n".join(dataset.experiments),
                       dataset.description, dataset.instrument])
    return heading + table.draw() + "\n"
Example #13
def _create_website(args):
    api = _login(args)
    if len(args.site_apps) % 2:
        print('Error: invalid site applications array')
        print('Array items should be pairs of application name and URL path')
        print('Example: django_app / django_app_media /media')
        return
    else:
        site_apps = zip(args.site_apps[::2], args.site_apps[1::2])
        for site_app in site_apps:
            app_name, app_url = site_app
            if not VALID_SYMBOLS.match(app_name):
                print('Error: %s is not a valid app name' % app_name)
                print('use A-Z a-z 0-9 or underscore symbols only')
                return

            if not VALID_URL_PATHS.match(app_url):
                print('Error: %s is not a valid URL path' % app_url)
                print('must start with / and only regular characters, . and -')
                return

        response = api.create_website(args.website_name, args.ip, args.https, \
            args.subdomains, *site_apps)

        print('Web site has been created:')
        table = Texttable(max_width=140)
        table.add_rows([['Param', 'Value']] + [[key, value] for key, value in response.items()])
        print(table.draw())
Example #14
def display_two_columns(cls, table_dict=None):
    if table_dict:
        ignore_fields = ['_cls', '_id', 'date_modified', 'date_created', 'password', 'confirm']
        table = Texttable(max_width=100)
        rows = [['Property', 'Value']]
        for key, value in table_dict.items():
            if key not in ignore_fields:
                items = [key.replace('_', ' ').title()]
                if isinstance(value, list):
                    if value:
                        if key == "projects":
                            project_entry = ""
                            for itm in value:
                                user_project = Project.objects(id=ObjectId(itm.get('$oid'))) \
                                    .only('title', 'project_id').first()
                                project_entry = project_entry + user_project.title + ", "
                            project_entry = project_entry.strip(', ')
                            items.append(project_entry)
                        else:
                            items.append(' , '.join(value))
                    else:
                        items.append('None')
                else:
                    items.append(value)
                rows.append(items)
        try:
            if rows:
                table.add_rows(rows)
        except:
            print(sys.exc_info()[0])
        print(table.draw())
Example #15
    def draw(self):
        t = Texttable()
        t.add_rows([["TEAM","RUNS","HITS","LOB","ERRORS"],
                    [self.away_team.team_name, self.away_runs, self.away_hits, self.away_LOB, self.away_errors],
                    [self.home_team.team_name, self.home_runs, self.home_hits, self.home_LOB, self.home_errors]])

        print(t.draw())
Example #16
    def output_table(tab):
        max_width = get_terminal_size()[1]
        table = Texttable(max_width=max_width)
        table.set_deco(0)
        table.header([i.label for i in tab.columns])
        widths = []
        number_columns = len(tab.columns)
        remaining_space = max_width
        # set maximum column width based on the amount of terminal space minus the 3 pixel borders
        max_col_width = (remaining_space - number_columns * 3) / number_columns
        for i in range(0, number_columns):
            current_width = len(tab.columns[i].label)
            tab_cols_acc = tab.columns[i].accessor
            max_row_width = max(
                    [len(str(resolve_cell(row, tab_cols_acc))) for row in tab.data ]
                    )
            current_width = max_row_width if max_row_width > current_width else current_width
            if current_width < max_col_width:
                widths.insert(i, current_width)
                # reclaim space not used
                remaining_columns = number_columns - i - 1
                remaining_space = remaining_space - current_width - 3
                if remaining_columns != 0:
                    max_col_width = (remaining_space - remaining_columns * 3)/ remaining_columns
            else:
                widths.insert(i, max_col_width)
                remaining_space = remaining_space - max_col_width - 3
        table.set_cols_width(widths)

        table.add_rows([[AsciiOutputFormatter.format_value(resolve_cell(row, i.accessor), i.vt) for i in tab.columns] for row in tab.data], False)
        print(table.draw())
Example #17
def print_stats_table(
    header, data, columns, default_alignment="l", custom_alignment=None
):
    """Print out a list of dictionaries (or objects) as a table.

    If given a list of objects, will print out the contents of objects'
    `__dict__` attributes.

    :param header: Header that will be printed above table.
    :type header:  `str`
    :param data:   List of dictionaries (or objects)
    """
    print("# %s" % header)
    table = Texttable(max_width=115)
    table.header(columns)
    table.set_cols_align(default_alignment * len(columns))
    if not isinstance(data, list):
        data = [data]
    for row in data:
        # Treat all non-list/tuple objects like dicts to make life easier
        if not isinstance(row, (list, tuple, dict)):
            row = vars(row)
        if isinstance(row, dict):
            row = [row.get(key, "MISSING") for key in columns]
        table.add_row(row)
    if custom_alignment:
        table.set_cols_align(
            [custom_alignment.get(column, default_alignment) for column in columns]
        )
    print(table.draw())
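A hypothetical call with made-up data (any key missing from a row is printed as "MISSING"):

stats = [
    {"name": "api", "requests": 1042, "errors": 3},
    {"name": "worker", "requests": 87},  # no "errors" key -> "MISSING"
]
print_stats_table("Service stats", stats,
                  columns=["name", "requests", "errors"],
                  custom_alignment={"requests": "r", "errors": "r"})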
Example #18
    def command_password(self, params):
        '''[set/get/list] <pdu> <port> <password>

        password set <pdu> <port> <password>
        Set port password on pdu
        e.g.
        - Set password "A01" for port 1 on pdu 1
        password set 1 1 A01

        password get <pdu> <port>
        Get port password on pdu
        e.g.
        - Get password for port 1 on pdu 1
        password get 1 1

        password list <pdu>
        Display password of all ports on pdu
        e.g.
        - Display all ports password on pdu 1
        password list 1

        '''
        subcommand = params[0]
        if subcommand == 'set':
            if len(params) != 4:
                self.writeresponse("Invalid parameters.")
                return
            pdu = int(params[1])
            port = int(params[2])
            passwd = params[3]
            password.write_password(pdu, port, passwd)
        elif subcommand == 'get':
            if len(params) != 3:
                self.writeresponse("Invalid parameters.")
                return
            pdu = int(params[1])
            port = int(params[2])
            password_str = password.read_password(pdu, port)
            if password_str == "":
                self.writeresponse("Not found password.")
                return
            response = "Password is: " + password_str
            self.writeresponse(response)
        elif subcommand == 'list':
            if len(params) != 2:
                self.writeresponse("Invalid parameters.")
                return

            pdu = int(params[1])
            table = Texttable()
            table.header(["Port", "Password"])
            table.set_cols_dtype(['d', 't'])
            table.set_cols_align(['c', 'c'])
            for port_index in range(24):
                passwd = password.read_password(pdu, port_index + 1)
                if passwd == "":
                    continue
                table.add_row([port_index + 1, passwd])
            self.writeresponse(table.draw())
Example #19
 def show_items(self, items):
     table = Texttable(max_width=0)
     table.header(['#', 'item'])
     table.set_deco(Texttable.HEADER | Texttable.VLINES)
     table.set_cols_dtype(['i', 't'])
     table.add_rows([(i, item) for i, item in enumerate(items)],
                    header=False)
     print(table.draw())
 def _make_confusion_table(self, labels, predictions):
     header = self._make_classes_header()
     C = confusion_matrix(labels, predictions)
     t = Texttable()
     t.add_row(['-'] + header)
     for i, h in enumerate(header):
         t.add_row([h] + C[i].tolist())
     return t
Example #21
 def handle(self):
     table = Texttable(max_width=0)
     table.set_deco(
         Texttable.BORDER | Texttable.HEADER | Texttable.VLINES)
     table.header(("Processor", "Groups"))
     table.add_row(["Steps", ":".join(run.steps_groups())])
     table.add_row(["Alerts", ":".join(run.alerts_groups()) or "-"])
     print(table.draw())
Example #22
 def execute( self ):
     count = self.server.db.fetchone( "SELECT count(*) FROM %s;" % self.type )
     items = self.server.db.fetch( "SELECT * FROM %s;" % self.type, headers=True )
     table = Texttable()
     table.add_rows( items )
     print table.draw()
     print " -- %d records." % count
     print
Example #23
def _render_table(nodes):
    table = Texttable()
    rows = [["ID", "State", "Tags", "Environment", "Address"]]
    for node in nodes:
        environment_description = "%s\nDef: %s" % (node.environment_name(), node.environment_definition_name())
        rows.append([node.id(), node.state(), node.tags(), environment_description, node.address()])

    table.add_rows(rows)
    print(table.draw())
Example #24
def drawTable(game, showDealersCards=False):
    atmp = os.system('clear')
    
    table = Texttable()
    
    header = []
    row_wager = []
    row_cards = []
    row_totals = []
    row_status = []
    
    #
    # Add dealer's hand
    #
    header.append("Dealer")
    row_wager.append("")
    
    if showDealersCards:
        dealersCards = "{} of {}\n{} of {}".format(game.dealer.hands[0].cards_in_hand[0].face.name, game.dealer.hands[0].cards_in_hand[0].suit.name, game.dealer.hands[0].cards_in_hand[1].face.name, game.dealer.hands[0].cards_in_hand[1].suit.name)
        dealersTotal = game.dealer.hands[0].total()
    else:
        dealersCards = "X\n{} of {}".format(game.dealer.hands[0].cards_in_hand[1].face.name, game.dealer.hands[0].cards_in_hand[1].suit.name)
        dealersTotal = ""
    
    row_cards.append(dealersCards)
    row_totals.append(dealersTotal)
    row_status.append("")
    #
    # /dealer
    #
    
    for player in game.players:
        for hand in player.hands:
            header.append(player.name)
            row_wager.append("Bet: $" + str(hand.wager))
            row_cards.append("{} of {}\n{} of {}".format(hand.cards_in_hand[0].face.name, hand.cards_in_hand[0].suit.name, hand.cards_in_hand[1].face.name, hand.cards_in_hand[1].suit.name))
            row_totals.append("Total: {}".format(hand.total()))
            
            if hand.status == HandStatus.Blackjack:
                handStatus = "Blackjack!!"
            elif hand.status == HandStatus.Bust:
                handStatus = "Bust"
            else:
                handStatus = ""
                
            row_status.append(handStatus)
            
    table.header(header)
    table.add_row(row_wager)
    table.add_row(row_cards)
    table.add_row(row_totals)
    table.add_row(row_status)
    
    table.set_deco(Texttable.BORDER | Texttable.HEADER | Texttable.VLINES)
    
    print(table.draw())
def main():
    all = get_results()
    for keys in all.keys():
        print('Simplified', keys[0], 'Hard difficulty', keys[1], 'Black border', keys[2], 'Extended dataset', keys[3])
        values = all[keys]
        values = sorted(values)
        table = Texttable()
        cols = ['Val Err', 'Compon.', 'C', 'Gamma', 'Kernel', 'Time', 'Index']
        table.add_rows([cols] + values)
        print table.draw()
Example #26
 def _draw_table(self, targ, metric, rows):
     parts = [util.hline("%s Metrics on Target '%s'" % (metric, targ['name']), 'cyan')]
     table = Texttable(logger.LINE_WIDTH)
     table.set_cols_align(['r', 'l'])
     name_width = max([len(row[0]) for row in rows])
     table.set_cols_width([name_width+1, logger.LINE_WIDTH-name_width-4])
     table.set_deco(Texttable.HEADER | Texttable.VLINES)
     table.add_rows(rows)
     parts.extend([table.draw(), ''])
     return parts
Example #27
 def __str__(self):
     from texttable import Texttable
     table = Texttable(max_width=180)
     matrix = [[left + ' -> ' + self.parse_table[t][left]
             if left in self.parse_table[t] else ' '
                 for t in self.parse_table if not t == 'eps']
                     for left in self.N]
     matrix.insert(0, [t for t in self.parse_table])
     table.add_rows(matrix)
     return table.draw() + '\n'
Example #28
def print_current_standings(players_db):
    print()
    table = Texttable()
    table.header(['Player', 'Wins'])
    table.set_deco(Texttable.HEADER)
    table.set_cols_align(['l', 'r'])
    for name, wins in players_db.iter_players_wins():
        table.add_row([name, wins])
    print(table.draw())
    print()
Example #29
def _list_query(args):
    what = args.module
    api = _login(args)
    rows = getattr(api, 'list_%s' % what)()
    if not rows:
        print('No %s found' % what)
        return
    table = Texttable(max_width=140)
    table.add_rows([rows[0].keys()] + [row.values() for row in rows])
    print(table.draw())
Example #30
def main():
    all = get_results()
    for keys in all.keys():
        print('Simplified', keys[0], 'Hard difficulty', keys[1], 'Black border', keys[2], 'Extended dataset', keys[3])
        values = all[keys]
        values = sorted(values)
        table = Texttable()
        cols = ['Val Err', 'Epochs', 'Fc1', 'Optimizer', 'Index']
        table.add_rows([cols] + values)
        print table.draw()
Example #31
def pboard(Title=''):
    print('\n' * 300)
    from texttable import Texttable
    ##Player's Table
    a = Texttable()
    a.add_rows([['Card', 'Suit', 'Colour', 'Value']])
    for i, x in enumerate(player_cards):
        a.add_row([
            player_cards[i].card, player_cards[i].suit, player_cards[i].colour,
            player_cards[i].value
        ])

    # Dealers Table
    b = Texttable()
    b.add_rows([['Card', 'Suit', 'Colour', 'Value']])
    if turn == 'player':
        b.add_row(['Covered', 'Covered', 'Covered', 'Covered'])
        # skip the dealer's covered hole card, then show the rest
        for card in dealer_cards[1:]:
            b.add_row([card.card, card.suit, card.colour, card.value])
    else:
        for card in dealer_cards:
            b.add_row([card.card, card.suit, card.colour, card.value])

    pbalance()
    print('...............%s................' % playersname)
    print(a.draw())
    print('\n')
    print('................Dealer.................')
    print(b.draw())
    print('\n')
Example #32
 def __init__(self):
     Texttable.__init__(self)
     # set class attributes so that it'll be more like TRex standard output
     self.set_chars(['-', '|', '-', '-'])
     self.set_deco(Texttable.HEADER | Texttable.VLINES)
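The same decoration settings, sketched on a plain Texttable (the snippet above is the __init__ of a Texttable subclass whose name is not shown; the row data here is invented):

t = Texttable()
t.set_chars(['-', '|', '-', '-'])
t.set_deco(Texttable.HEADER | Texttable.VLINES)
t.add_rows([["port", "tx pps"], [0, 1000], [1, 2000]])  # first row becomes the header
print(t.draw())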
Example #33
def main():
    is_lin = False
    is_mac = False
    is_win = False
    log_start_time = None
    log_end_time = None
    host = None
    user = None
    port = None

    if platform.system() == 'Linux':
        is_lin = True
    elif platform.system() == 'Darwin':
        is_mac = True
    elif platform.system() == 'Windows':
        is_win = True

    parsing_start_time = time.time()
    log = Log()
    log.get_entries()
    parsing_end_time = time.time()

    dt = datetime.datetime.strptime(log.actions[0].time,
                                    '%a %b %d %H:%M:%S %Y')
    log_start_time = '{:%m/%d/%Y %H:%M:%S}'.format(dt)
    dt = datetime.datetime.strptime(log.actions[len(log.actions) - 1].time,
                                    '%a %b %d %H:%M:%S %Y')
    log_end_time = '{:%m/%d/%Y %H:%M:%S}'.format(dt)

    sort_start_time = time.time()
    log.actions.sort(key=operator.attrgetter(args.sortby),
                     reverse=args.reverse)
    sort_end_time = time.time()

    table = Texttable(0)
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['i', 't', 't', 't', 't', 't', 'f', 'i', 'i'])
    table.header([
        'ID', 'Start Time', 'End Time', 'User', 'Raw Cmd', 'Table Name',
        'Run time', 'Start Line', 'Total Lines'
    ])

    for a in log.actions[:args.howmany]:
        runtime = None

        if user is None:
            user = a.server_user

        if host is None:
            host = a.server_host

        if port is None:
            port = a.server_port

        end_datetime_object = datetime.datetime.strptime(
            a.time, '%a %b %d %H:%M:%S %Y')
        start_datetime_object = end_datetime_object - datetime.timedelta(
            seconds=a.runtime)

        start_time = '{:%H:%M:%S}'.format(start_datetime_object)
        end_time = '{:%H:%M:%S}'.format(end_datetime_object)

        # if is_lin or is_mac or args.coloring:
        #     if in_range(0, 15, a.runtime):
        #         runtime = get_color_string(bcolors.GREEN, a.runtime)
        #     elif in_range(16, 30, a.runtime):
        #         runtime = get_color_string(bcolors.LIGHT_YELLOW, a.runtime)
        #     elif in_range(31, 60, a.runtime):
        #         runtime = get_color_string(bcolors.YELLOW, a.runtime)
        #     elif in_range(61, 2147483647, a.runtime):
        #         runtime = get_color_string(bcolors.RED, a.runtime)
        #     else:
        #         runtime = a.runtime

        #    table.add_row([a.id, start_time , end_time, a.user, a.rawcmd, a.tablename, a.runtime, a.startline, a.totallines])
        # else:

        # if args.tablename is not None:
        #    print(args.tablename)

        # if args.action is not None:
        #    print(args.action)

        # if args.user is not None:
        #    print(args.user)

        table.add_row([
            a.id, start_time, end_time, a.user, a.rawcmd, a.tablename,
            a.runtime, a.startline, a.totallines
        ])

    print(table.draw())
    print("\nParse time:",
          "{0:.3f}".format(parsing_end_time - parsing_start_time))
    print("Sort time:", "{0:.3f}".format(sort_end_time - sort_start_time))
    print("Server Host: " + host)
    print("Server Port: " + port)
    print("Server User: "******"Log Start Time: " + log_start_time)
    print("Log End Time: " + log_end_time)
    print("Total actions: " + str(len(log.actions)))
Example #34
def main(args):
    if args.gevent:
        import gevent.monkey
        gevent.monkey.patch_all()
    if not args.strict:
        args.name = ['*' + name + '*' for name in args.name]
    table = Texttable(args.width)
    table.set_cols_dtype(['t'] * len(args.fields))
    table.set_cols_align([args.align] * len(args.fields))
    if not args.quiet:
        table.add_row([pretify_field(field) for field in args.fields])
    table.set_deco(0)
    instances = []
    for required_name in args.name:
        instances.extend(get_instances_by_id_or_name(required_name, projects=args.projects,
                                                     raw=args.raw, regions=args.regions, clouds=args.clouds))
    if args.active:
        instances = [instance for instance in instances if instance.state == 'running']
    if not instances:
        sys.exit(1)
    instances.sort(key=lambda instance: [getattr(instance, field) for field in args.sort_by])
    table.add_rows([[getattr(instance, field) for field in args.fields]
                    for instance in instances], header=False)
    print table.draw()
    mean = score_nb.mean() * 100
    std = score_nb.std() * 100 / 2
    accuracy = "%0.2f (+/- %0.2f)" % (mean, std)
    return accuracy, mean

def TreeFunction(x, y, cvt):
    dtree = tree.DecisionTreeClassifier()
    score_tree = cross_val_score(dtree, x, y.ravel(), cv=cvt)
    mean = score_tree.mean() * 100
    std = score_tree.std() * 100 / 2
    accuracy = "%0.2f (+/- %0.2f)" % (mean, std)
    return accuracy, mean


# ----------------- Initialization --------------------
t = Texttable()

scoresnb = []
scorestree = []
sdf = ["Iris", "Echocardiogram", "Mushroom", "Breats", "Credit", "Pima", "Hepatitis", "Wine", "Voting", "Car",
       "Dermatology", "Glass"]

# np.set_printoptions(threshold=np.nan)
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
cv = KFold(n_splits=10, random_state=8)
min_max_scaler = preprocessing.MinMaxScaler()

# --------------------- Iris -------------------------
dataset_iris = pd.read_csv('Dataset/Iris.csv')
dataset_iris.label = pd.factorize(dataset_iris.label)[0]
D_iris = dataset_iris.as_matrix()
Example #36
def percentage_table(data: dict) -> str:
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.add_row(("feature", "percentage supported"))

    tlsv = {
        "TLSv1.0": 0,
        "TLSv1.1": 0,
        "TLSv1.2": 0,
        "TLSv1.3": 0,
        "SSLv2": 0,
        "SSLv3": 0,
    }

    d = {
        "plain http": 0,
        "https redirect": 0,
        "hsts": 0,
        "ipv6": 0,
    }

    for entry in data.values():
        for k in tlsv.keys():
            if k in entry["tls_versions"]:
                tlsv[k] += 1

        if entry.get("insecure_http"):
            d["plain http"] += 1

        if entry.get("redirect_to_https"):
            d["https redirect"] += 1

        if entry.get("hsts"):
            d["hsts"] += 1

        if entry.get("ipv6_addresses"):
            d["ipv6"] += 1

    l = len(data)
    if l == 0:
        return table.draw()

    for k, v in tlsv.items():
        percentage = round(v / l * 100, 2)
        table.add_row((k, f"{percentage}%"))

    for k, v in d.items():
        percentage = round(v / l * 100, 2)
        table.add_row((k, f"{percentage}%"))

    return table.draw()
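A hypothetical input dict, shaped like the entries the function reads (only the keys it touches are shown):

scan = {
    "example.org": {
        "tls_versions": ["TLSv1.2", "TLSv1.3"],
        "insecure_http": False,
        "redirect_to_https": True,
        "hsts": True,
        "ipv6_addresses": ["2001:db8::1"],
    },
}
print(percentage_table(scan))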
Example #37

if len(sys.argv) < 2:
    print(f"Usage: {sys.argv[0]} <IP_Address>")
    sys.exit(0)

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")

browser = webdriver.Chrome(chrome_options=chrome_options)
browser.get('https://censys.io')
get_sleepy()

browser.find_element_by_class_name('btn-search-censys').click()

t = Texttable()
t.add_row(['IP ADDRESS', 'PORTS'])

with open(sys.argv[1], 'r') as f:

    for cnt, line in enumerate(f):

        open_ports = []

        browser.find_element_by_id('q').send_keys(line.strip())
        get_sleepy()

        browser.find_element_by_id('q').send_keys(Keys.ENTER)
        get_sleepy()

        protocol_details = browser.find_elements_by_class_name(
Example #38
 def __print_texttable(self, data_array):
     t = Texttable()
     t.add_rows(data_array)
     print(t.draw())
def show_price(tr_code, dfh):
    while True:

        df = ts.get_realtime_quotes(tr_code)
        dfr = pd.DataFrame(df,
                           columns=('code', 'name', 'open', 'pre_close',
                                    'price', 'high', 'low'))
        #dfr['涨跌']=pd.concat([(df['high']-df['low']),(df['high'] - df['pre_close'].shift(1)).abs(),(df['low'] - df['pre_close'].shift(1))], axis=1).max(axis=1)
        dfr['涨跌%'] = df.apply(lambda x: ((float(x['price']) - float(x[
            'pre_close'])) / float(x['pre_close'])) * 100,
                              axis=1)
        dfr['持仓价'] = dfh['hold_price']
        dfr['持股数'] = dfh['hold_cnt']
        dfr['当日盈亏'] = dfr.apply(
            lambda x:
            (float(x['price']) - float(x['pre_close'])) * float(x['持股数']),
            axis=1)
        dfr['总盈亏'] = dfr.apply(
            lambda x: (float(x['price']) - float(x['持仓价'])) * float(x['持股数']),
            axis=1)
        total_twl = dfr['当日盈亏'].sum()
        total_wl = dfr['总盈亏'].sum()
        dfr = dfr.sort_values(by='总盈亏', ascending=False)
        tb = Texttable()
        tb.set_cols_align(
            ['l', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r'])
        tb.set_cols_dtype(
            ['t', 't', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'i', 'f', 'f'])
        tb.set_cols_width([8, 12, 10, 10, 10, 10, 10, 10, 10, 10, 12, 12])
        tb.header(list(dfr))
        tb.add_rows(dfr.values, header=False)
        print(tb.draw())
        print('当日总盈亏 %.2f,总盈亏 %.2f' % (total_twl, total_wl))
        total_twl = 0
        total_wl = 0
        time.sleep(1)
Example #40
    def __init__(self):
        '''Search algorithms, depth search, width search and minimum path using Dijkstra

        '''
        print('''*******************Search algorithms********************
               Author: Oraldo Jacinto Simon
               Professor: M.I. Jesus Roberto López Santillán
               ''')
        list_edges_weight = []
        fichero1 = open("graph1.txt", 'r')
        fichero2 = open("graph2.txt", 'r')
        lines1 = fichero1.read()
        lines2 = fichero2.read()
        lines1 = lines1.rsplit('\n')
        lines2 = lines2.rsplit('\n')
        list_edges_weight1 = [edge.split(',') for edge in lines1 if edge != '']
        list_edges_weight2 = [edge.split(',') for edge in lines2 if edge != '']
        fichero1.close()
        fichero2.close()
        obj_graph1 = Graph()
        obj_graph2 = Graph()
        graph1 = obj_graph1.add_from_edge(list_edges_weight1, directed=True)
        adjacents = graph1.get_adjacents()
        width = graph1.search_in_width('a')
        width_recursive = graph1.search_width_recursive('a',
                                                        queue=deque(),
                                                        visited=[])
        depth = graph1.depth_search('a')
        graph2 = obj_graph2.add_from_edge(list_edges_weight2, directed=False)
        adjacents2 = graph2.get_adjacents()

        table = Texttable()
        table.set_cols_align(["c", "c", "c"])
        table.set_cols_valign(["t", "m", "b"])
        table.set_cols_width([20, 70, 40])
        head = ["Method", "Graph", "Answer"]
        rows = []
        rows.append(head)
        search_in_width = [
            'Search in width from the vertex (a)',
            str(adjacents),
            str(width_recursive)
        ]
        rows.append(search_in_width)
        depth_search = [
            'Depth Search from the vertex (a)',
            str(adjacents),
            str(depth)
        ]
        rows.append(depth_search)
        kruskal = [
            'Kruskal',
            str(adjacents2),
            '(Minimum Spanning Tree) => ' + str(graph2.kruskal())
        ]
        rows.append(kruskal)
        dijkstra = ['Dijkstra', str(adjacents2), 'See trace below']
        rows.append(dijkstra)
        table.add_rows(rows)
        print(table.draw() + "\n")
        graph2.dijkstra_search('a', 'e')
Example #41
# Main program
if __name__ == '__main__':
    itemTemp = getMovieList(
        "D:/PycharmProjects/reMov/artists.item")  # load the item ID -> item list
    fileTemp = readFile(
        "D:/PycharmProjects/reMov/user_artists.data")  # read the user -> item IDs
    contextTemp = readFile(
        "D:/PycharmProjects/reMov/user_taggedartists-timestamps.data"
    )  # read the user-item-context records
    user_dic, movie_dic = createDict(fileTemp)  # build the dictionaries
    context_dic = createDictContext(contextTemp)  # build the context dictionary

    #numpy.savetxt('user_dict.txt', user_dic)
    print("Dictionaries created")
    user_id = 66
    movieTemp = recommondation(user_id, user_dic, 80)  # rank the movie recommendations
    #movieTemp_txt = pandas.Series(movieTemp)
    #movieTemp_txt.to_csv('movieTemp_txt.csv')
    print("Dictionaries created 2")
    rows = []
    table = Texttable()  # create the table and display it
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t', 'f', 'a'])
    table.set_cols_align(["l", "l", "l"])
    rows.append(["user name", "recommondation_movie", "from userid"])
    for i in movieTemp:
        rows.append([user_id, itemTemp[i[0]][0], ""])  # the item corresponding to the item ID
    table.add_rows(rows)
    print(table.draw())
Example #42
def access_points(ctx, gateway, interface=None):
    """
        Get WiFi access points visible to the gateway
    """
    # TODO implement table for ext wifi

    if gateway is None:
        gateway = get_default_gateway(ctx)

    session = ctx.obj.session
    resp = session.get_wifi_access_points(gateway, interface)
    resp.raise_for_status()
    seen = set()

    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t', 't', 'i'])
    table.set_cols_align(['l', 'l', 'r'])
    table.header(['ssid', 'security', 'strength'])
    for ap in resp.json().get('access_points', []):
        if ap['ssid'] not in seen:
            table.add_row([ap['ssid'], ap['security'], ap['strength']])
        seen.add(ap['ssid'])

    click.echo(table.draw())
Example #43
def get_regstat(fixed_image, moving_image, out_stats, out_tm, out_qc, tm):
    x = [[]]

    time1 = time.time()
    # loading images
    target = plt.imread(fixed_image)
    #tar=cv2.imread(fixed_image)
    target_name = os.path.split(fixed_image)[1]
    # print(target_name)
    # target=cv2.resize(target,(5705,5705))
    # tar=cv2.resize(tar,(5705,5705))
    moving = plt.imread(moving_image)
    #mov=cv2.imread(moving_image)
    # moving=cv2.resize(moving,(5705,5705))
    # mov=cv2.resize(mov,(5705,5705))
    moving_name = os.path.split(moving_image)[-1]
    # print(moving_name)

    # registration
    #a=target.shape[0]
    b=target.shape[1]
    #moving=cv2.resize(moving,(b,b))
    mov_avg=np.mean(moving.astype("float"))
    print(target.shape)
    print(moving.shape)
    res=ird.similarity(target,moving)
    def_moving=res['timg']
    scale=res['scale']
    angle=res['angle']
    (t0,t1)=res['tvec']
    #def_moving, scale, angle, (t0, t1) = imreg.similarity(target, moving)
    def_mov_array=imreg.similarity_matrix(scale,angle,(t0,t1))
    np.save(out_tm, def_mov_array)
    def_avg=np.mean(def_moving.astype("float"))
    # def_moving=cv2.resize(def_moving,(5705,5705))

    time2 = time.time()
    ti = time2-time1
    # statistical results
    cc = np.corrcoef(moving.flat, target.flat)
    r1 = cc[0, 1]

    score1, diff = compare_ssim(target, moving, full=True, multichannel=True)

    loss_before = 0.5*(1-r1) + 0.5*(1-score1)

    cc = np.corrcoef(def_moving.flat, target.flat)
    r2 = cc[0, 1]

    score2, diff = compare_ssim(def_moving, target, full=True)

    loss_after = 0.5*(1-r2) + 0.5*(1-score2)

    mi_bef = mutual_info_score(moving.ravel(), target.ravel())

    mi_aft = mutual_info_score(target.ravel(), def_moving.ravel())

    mae = np.sum(np.absolute(target.astype("float") - moving.astype("float")))
    u1 = np.sum(target.astype("float") + moving.astype("float"))
    mean_before = np.divide(mae, u1)
    u2 = np.sum(target.astype("float") + def_moving.astype("float"))
    mae = np.sum(np.absolute(target.astype(
        "float") - def_moving.astype("float")))
    mean_after = np.divide(mae, u2)
    t = Texttable()
    #print(def_moving.shape)
    if mi_aft > mi_bef:
        plt.imsave(tm, def_moving,cmap='gray')
    else:
        plt.imsave(tm, moving,cmap='gray')
    # plt.imsave(tm,def_moving,cmap='gray')
    x.append([target_name, moving_name, r1, r2, score1, score2, loss_before,
              loss_after, mi_bef, mi_aft, mean_before, mean_after,mov_avg,def_avg, ti])

    t.add_rows(x)
    t.set_cols_align(['r', 'r', 'r', 'r', 'r', 'r',
                      'r', 'r', 'r', 'r', 'r', 'r', 'r','r', 'r'])
    t.header(['TARGET', 'MOVING', 'PCC_BEFORE', 'PCC_AFTER', 'SSIM_BEFORE', 'SSIM_AFTER',
              'loss_BEFORE', 'loss_AFTER', 'MI_BEFORE', 'MI_AFTER', 'MEAN_BEFORE', 'MEAN_AFTER', 'AVERAGE_INTENSITY_BEFORE', 'AVERAGE_INTENSITY_AFTER','TIME'])
    def_mov1 = cv2.imread(tm,0)

    # plotting the results

    transform = AsinhStretch() + AsymmetricPercentileInterval(0.1, 99.99)
    targ= transform(target)
    #targ=cv2.cvtColor(targ.astype(np.uint8),cv2.COLOR_GRAY2RGB)
    #targ_im=Image.fromarray(targ)

    plt.imsave('target.tiff', targ, cmap='gray')
    #plt.imsave(r'/media/u0132399/L-drive/GBW-0075_MILAN_Multiplex/u0140317/registration_comparison/imreg/output_images/target.tiff', targ, cmap='gray')
    movi = transform(moving)
    #movi=cv2.cvtColor(movi.astype(np.uint8),cv2.COLOR_GRAY2BGR)
    plt.imsave('moving.tiff', movi, cmap='gray')
    #movi_im=Image.fromarray(movi)
    #plt.imsave(r'/media/u0132399/L-drive/GBW-0075_MILAN_Multiplex/u0140317/registration_comparison/imreg/output_images/moving.tiff', movi, cmap='gray')
    targ = cv2.imread('target.tiff')
    #targ=Image.open(targ_im)
    movi= cv2.imread('moving.tiff')
    #movi=Image.open(movi_im)
    #targ = cv2.imread( r'/media/u0132399/L-drive/GBW-0075_MILAN_Multiplex/u0140317/registration_comparison/imreg/output_images/target.tiff')
    #movi = cv2.imread(r'/media/u0132399/L-drive/GBW-0075_MILAN_Multiplex/u0140317/registration_comparison/imreg/output_images/moving.tiff')
    
    # def_mov1=cv2.resize(def_mov1,(5705,5705))
    def_mov1 = transform(def_mov1)
    #print(def_mov2.shape)
    
    #def_mov1=cv2.cvtColor(def_mov1.astype(np.uint8),cv2.COLOR_GRAY2BGR)
    plt.imsave('def_moving.tiff', def_mov1, cmap='gray')
    #def_mov1=Image.fromarray(def_mov1)
    #plt.imsave('/media/u0132399/L-drive/GBW-0075_MILAN_Multiplex/u0140317/registration_comparison/imreg/output_images/def_moving.tiff', def_mov1, cmap='gray')
    def_mov1 = cv2.imread('def_moving.tiff')
    #def_mov1=Image.open(def_mov1)
   
    #def_mov1 = cv2.imread(r'/media/u0132399/L-drive/GBW-0075_MILAN_Multiplex/u0140317/registration_comparison/imreg/output_images/def_moving.tiff')
    #targ = targ.astype(np.uint8)
    #movi = movi.astype(np.uint8)
    #def_mov1 = def_mov1.astype(np.uint8)
    
    tar1 = np.zeros(targ.shape)
    mov1 = np.zeros(movi.shape)
    def_mov = np.zeros(def_mov1.shape)
    tar1[:, :, 0] = targ[:, :, 0]
    mov1[:, :, 2] = movi[:, :, 2]
    def_mov[:, :, 2] = def_mov1[:, :, 2]
    tar1 = tar1.astype(np.uint8)
    mov1 = mov1.astype(np.uint8)
    def_mov = def_mov.astype(np.uint8)
  
    # mov[np.where((mov>[15]).all(axis=2))]=[255,0,0]
    # def_mov1[np.where((def_mov1>[15]).all(axis=2))]=[255,0,0]
    alpha = 1
    beta = 1
    gamma = 0
    t1 = cv2.addWeighted(tar1, alpha, mov1, beta, gamma)
    
    # filename=target_name+moving_name+'unreg_imreg.tif'
    # cv2.imwrite(,t1)
    
    
    t2 = cv2.addWeighted(tar1, alpha, def_mov, beta, gamma)
    
    # filename=target_name+moving_name+'reg_imreg.tif'
    # cv2.imwrite(filename,t2)
    if mi_aft > mi_bef:
        cv2.imwrite(out_qc, t2)
        
    else:
        cv2.imwrite(out_qc, t1)
        
    # plt.figure(figsize=(10,10))
    # plt.axis('off')
    # plt.imshow(t1,cmap='gray')
    # plt.show(block=False)
    # plt.pause(5)
    # plt.close()
    # plt.figure(figsize=(10,10))
    # plt.axis('off')
    # plt.imshow(t2,cmap='gray')
    # plt.show(block=False)
    # plt.pause(5)
    # plt.close()

    df = pd.DataFrame(x, columns=['TARGET', 'MOVING', 'PCC_BEFORE', 'PCC_AFTER', 'SSIM_BEFORE', 'SSIM_AFTER',
                                  'loss_BEFORE', 'loss_AFTER', 'MI_BEFORE', 'MI_AFTER', 'MEAN_BEFORE', 'MEAN_AFTER','AVERAGE_INTENSITY_BEFORE', 'AVERAGE_INTENSITY_AFTER', 'TIME'])
    #df[columns]= ['PCC_BEFORE','PCC_AFTER','SSIM_BEFORE' ,'SSIM_AFTER','loss_BEFORE','loss_AFTER','MI_BEFORE','MI_AFTER','MEAN_BEFORE','MEAN_AFTER']
    #writer = pd.ExcelWriter(out_stats)
    df.to_csv(out_stats, index=None, header=True)
    # writer.save()
    #print(t.draw())

    return t
Example #44
    def pformat(self, sort_by: List[int] = []) -> pformatDict:
        """Pretty format as a table. Uses fetchall()."""

        # Disabling max_width - output might be uglier if wide, but it will not be worse than beeline and will not
        # exception out.
        table = Texttable(max_width=0)
        # The only other decoration is the horizontal line between rows, which I do not want.
        table.set_deco(Texttable.BORDER | Texttable.HEADER | Texttable.VLINES)

        headers = self.headers()
        table.header(headers)

        # Convert some annoying types. Ints and friends will now not use the scientific notation anymore.
        # Note: the default will be 'a': auto.
        hive2table_types = {
            'bigint_type': 'i',
            'int_type': 'i',
            # Forcing to text prevents rounding and padding.
            'decimal_type': 't',
            'double_type': 't'
        }
        # self.description is a tuple with as 2nd element the data type.
        table.set_cols_dtype([
            hive2table_types.get(x[1].lower(), 'a') for x in self.description
        ])

        cnt = 0
        try:
            for r in sorted(self.fetchall(), key=operator.itemgetter(
                    *sort_by)) if sort_by else self.fetchall():
                cnt += 1
                table.add_row(r)
        except IndexError:
            raise TableFormattingError(
                f"You tried to sort by columns '{sort_by}' but at least one does not exist."
            )

        return {'table': table.draw(), 'rowcount': cnt}
Example #45
def mostrarBBDD(bbdd):
    t = Texttable(0)
    t.header(cable.atributosIdcable())
    for i in range(len(bbdd)):
        t.add_row(bbdd[i].cableToList())
    print(t.draw())
Example #46
def render_schema_as_table(schema):
    """
    Returns ASCII table view of schema.

    :param schema: The schema to be rendered.
    :type schema: :class:`mtclient.models.schema.Schema`
    """
    schema_parameter_names = ""

    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(['m', 'm'])
    table.header(["Schema field", "Value"])
    table.add_row(["ID", schema.id])
    table.add_row(["Name", schema.name])
    table.add_row(["Namespace", schema.namespace])
    table.add_row(["Type", schema.type])
    table.add_row(["Subtype", schema.subtype])
    table.add_row(["Immutable", str(bool(schema.immutable))])
    table.add_row(["Hidden", str(bool(schema.hidden))])
    schema_parameter_names += table.draw() + "\n"

    if not schema.parameter_names:
        return schema_parameter_names

    schema_parameter_names += "\n"
    table = Texttable(max_width=0)
    table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l', 'l', 'l', 'l'])
    table.set_cols_valign(['m', 'm', 'm', 'm', 'm', 'm', 'm', 'm', 'm', 'm'])
    table.header([
        "ParameterName ID", "Full Name", "Name", "Data Type", "Units",
        "Immutable", "Is Searchable", "Order", "Choices", "Comparison Type"
    ])
    for parameter_name in schema.parameter_names:
        table.add_row([
            parameter_name.id,
            parameter_name.full_name.encode('utf8', 'ignore'),
            parameter_name.name, parameter_name.data_type,
            parameter_name.units.encode('utf8', 'ignore'),
            str(bool(parameter_name.immutable)),
            str(bool(parameter_name.is_searchable)), parameter_name.order,
            parameter_name.choices, parameter_name.comparison_type
        ])
    schema_parameter_names += table.draw() + "\n"

    return schema_parameter_names
Example #47
        #f-measure
        fm = f1_score(y_test, y_pred, average='micro')
        #presisi
        presisi = precision_score(y_test, y_pred)
        #recall
        recall = recall_score(y_test, y_pred)
        #confussion matrix
        # label = np.sort(np.unique(y_test))[::-1]
        label = np.unique(y_test)
        tn, fp, fn, tp = confusion_matrix(y_test, y_pred, labels=label).ravel()

        #scoring
        skor = gaus.score(X_test, y_test)
        scores = cross_val_score(clf, X_test, y_test, cv=3,
                                 scoring='accuracy')  #10fold cross validation
        f1_macro = cross_val_score(
            clf, X_test, y_test, cv=3,
            scoring='f1_macro')  #10fold cross validation
        #data_table.append(['kRNN Oversampling+SMOTE',str(X_smote.shape),str(X_train.shape), "%0.4f" % (akurasi*100),0,"%0.4f" % (fm*100),"%0.4f" % (presisi*100),"%0.4f" % (recall*100) , tp, fp , tn ,fn ])

        #DRAW TABLE-----------------------------------------------------------------------------------------------------------------------
        table = Texttable()
        # table.set_deco(Texttable.HEADER)
        table.set_cols_dtype(
            ['t', 't', 't', 't', 't', 't', 't', "t", "t", "t", "t",
             "t"])  # automatic
        table.set_cols_align(
            ["l", "r", "r", "r", "r", 'r', "r", "r", "r", "r", "r", "r"])
        table.add_rows(data_table)
        print table.draw()
values = all[parm]
values = sorted(values)
#indices = [0, 3,4,5,6, 7, 10, 11, 13]
indices = [0, 1, 2, 4]
cols = ['Val Err', 'Epochs', 'Fc1', 'Optimizer', 'Index']
values = cnn_results_analysis.select(values, indices)
cols = cnn_results_analysis.select([cols], indices)
#cols = [cols]
cols[0].append('or i')

i=0
for v in values:
    v.append(i)
    i+=1

table = Texttable()
print len(values[0])
table.add_rows(cols + values)
print table.draw()
indicies = [0]
indicies = sorted(indicies)
values_selected = []
for x in indicies:
    values_selected += [values[x]]

values_selected = values[0:]

print('\\begin{center}')
print('\\begin{table}')
print('\\begin{tabular}{ | l | l | l | l | l | l | l | l | l | l |}')
print('\\hline')
Example #49
def info_txt(alist, filename_txt="zf_protein_info.txt", create_txt=True):
    """
    Creates a text file containing the information from a list of genes.

    Parameter alist: the list to create the file from.
    Precondition: alist is a list

    Parameter filename_txt: a string to name the info file ending in .txt
    Precondition: filename_txt is a string ending in .txt

    Parameter create_txt: A boolean that says whether or not to create a new
    text file.
    Precondition: create_txt is a bool
    """
    filename = helper.edit_filename(filename_txt,
                                    "sibling",
                                    foldername='output')
    if not os.path.exists(filename) or create_txt:
        full_dict = genetics.Protein.full_protein_dict()
        x = Texttable()
        today = date.today()
        month = str(today.month)
        day = str(today.day)
        if len(month) < 2:
            month = "0" + month
        if len(day) < 2:
            day = "0" + day
        with open(filename, "w+", encoding='utf-8') as f:
            stats = count.zf_protein_stats(alist, full_dict)
            f.write(
                "This text file was generated by the Python module " +
                os.path.basename(__file__) + "." + "\n\n" +
                "This file contains information " +
                "about genes, including their \ngene IDs, their common names, and the list of "
                +
                "proteins they \ncode for. This file also contains information about the \n"
                +
                "proteins including the number of ZF domains in each protein, \ntheir core "
                + "sequences, and statistics about the proteins." +
                "\n\nAuthor: Austin Starks\nDate: " + month + "/" + day + "/" +
                str(today.year))
            f.write(stats + "Gene info: \n\nNote: A core sequence of '????' " +
                    "indicates that domain is likely invalid.\n\n")
            categories = [
                "Gene name", "Protein ID", "Core Seqs", "# ZFs", "Diseases"
            ]
            x.add_row(categories)
            for gene in alist:
                protein_list = gene.protein_list()
                for protein in protein_list:
                    row = [
                        gene.get_gene_name(),
                        protein.get_protein_id(),
                        protein.core_sequences(),
                        protein.num_domains(),
                        gene.disease_list_string()
                    ]
                    x.add_row(row)
            f.write(x.draw())
        print(filename_txt + " created successfully!")
 def generateMessage(self, state, isFlowReverse=True):
     stateIctsInfo = self.getIctsInfoForState(state, isFlowReverse)
     if stateIctsInfo.shape[0] == 0:
         return ''
     # https://stackoverflow.com/questions/15705630/get-the-rows-which-have-the-max-value-in-groups-using-groupby
     ictRows = stateIctsInfo.sort_values(by=['station_name']).apply(
         lambda b: [b.station_name, b.dev_num, '{0:.2f}'.format(b['data'])], axis=1).tolist()
     messageStr = 'MVAR flow is from LV side to HV side in the following ICTs of {0} substations: \n'.format(
         stateNames[state])
     messageStr += 'Number of ICTs = {0}\n'.format(len(ictRows))
     messageStr += '\n'
     table = Texttable()
     table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)
     table.set_cols_dtype(['t', 't', 't'])
     table.set_cols_align(["l", "l", "l"])
     ictRows.insert(0, ["Substation", "Device Name", "MVAR"])
     table.add_rows(ictRows)
     messageStr += table.draw()
     return messageStr
Example #51
def render_schemas_as_table(schemas, display_heading=True):
    """
    Returns ASCII table view of schemas.

    :param schemas: The schemas to be rendered.
    :type schemas: :class:`mtclient.models.resultset.ResultSet`
    :param display_heading: Setting `display_heading` to True ensures
        that the meta information returned by the query is summarized
        in a 'heading' before displaying the table.  This meta
        information can be used to determine whether the query results
        have been truncated due to pagination.
    """
    heading = "\n" \
        "Model: Schema\n" \
        "Query: %s\n" \
        "Total Count: %s\n" \
        "Limit: %s\n" \
        "Offset: %s\n\n" \
        % (schemas.url, schemas.total_count,
           schemas.limit, schemas.offset) if display_heading else ""

    table = Texttable(max_width=0)
    table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l'])
    table.set_cols_valign(['m', 'm', 'm', 'm', 'm', 'm', 'm'])
    table.header(
        ["ID", "Name", "Namespace", "Type", "Subtype", "Immutable", "Hidden"])
    for schema in schemas:
        table.add_row([
            schema.id, schema.name, schema.namespace, schema.type,
            schema.subtype or '',
            str(bool(schema.immutable)),
            str(bool(schema.hidden))
        ])
    return heading + table.draw() + "\n"
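
A minimal usage sketch (not part of the original example): render_schemas_as_table only needs an iterable with url/total_count/limit/offset attributes whose items expose the schema fields read above, so a duck-typed stand-in is enough to try it out.

from types import SimpleNamespace

# Stand-ins for mtclient's ResultSet and Schema models (assumption: only the
# attributes accessed by render_schemas_as_table are required).
class FakeResultSet(list):
    url = "https://mytardis.example.org/api/v1/schema/?format=json"
    total_count = 1
    limit = 20
    offset = 0

fake_schema = SimpleNamespace(id=1, name="Example Schema",
                              namespace="http://example.org/schema/1",
                              type=1, subtype=None, immutable=1, hidden=0)

print(render_schemas_as_table(FakeResultSet([fake_schema])))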
Example #52
0
from bson.son import SON
from pymongo import MongoClient
from texttable import Texttable
t = Texttable()

client = MongoClient("mongodb://127.0.0.1:27017/")
database = client["Project"]
collection = database["players"]


def query4():
    print("List of top 10 best goalkeeper:")
    pipeline = [{
        u"$match": {
            u"player_positions": u"GK"
        }
    }, {
        u"$project": {
            u"short_name": 1.0,
            u"Score": {
                u"$avg": [
                    u"$gk_diving", u"$gk_handling", u"$gk_kicking",
                    u"$gk_reflexes"
                ]
            }
        }
    }, {
        u"$sort": SON([(u"Score", -1)])
    }, {
        u"$limit": 10.0
    }]
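    # Not part of the original snippet: a sketch that runs the aggregation and
    # prints it with the Texttable created above; the field names follow the
    # $project stage and are assumed to be present on every matched document.
    rows = [["short_name", "Score"]]
    for doc in collection.aggregate(pipeline):
        rows.append([doc.get("short_name"), doc.get("Score")])
    t.add_rows(rows)
    print(t.draw())


# Example invocation (not in the original snippet)
query4()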
Example #53
0
def rtt_range(data: dict) -> str:
    class RTTEntry(NamedTuple):
        min: int
        max: int
        url: str

    lst: List[RTTEntry] = []
    timeout_url: List[str] = []

    for url, entry in data.items():
        if "rtt_range" not in entry:
            continue

        if not entry["rtt_range"][0]:
            timeout_url.append(url)
            continue

        lst.append(RTTEntry(*entry["rtt_range"], url))

    lst.sort()

    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.add_row(("url", "rtt_min", "rtt_max"))

    for rttentry in lst:
        table.add_row((rttentry.url, rttentry.min, rttentry.max))

    for url in timeout_url:
        table.add_row((url, "Timeout", "Timeout"))

    return table.draw()
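
A small usage sketch (not from the original source) showing the input shape rtt_range expects: a mapping from URL to a record whose "rtt_range" value is a (min, max) pair, where a falsy minimum is reported as a timeout. It assumes the module-level typing and texttable imports the function relies on.

demo_data = {
    "https://a.example.org": {"rtt_range": (12, 40)},
    "https://b.example.org": {"rtt_range": (0, 0)},     # falsy min -> reported as Timeout
    "https://c.example.org": {"note": "no rtt_range"},  # skipped entirely
}
print(rtt_range(demo_data))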
Example #54
0
def print_prediction(predictions, metric_parameters):
    """Visualize the predictions.
  
  Args:
    - predictions: predictions of each patient
    - metric_parameters: parameters for the problem and labels
    
  Returns:    
    - For online predictions, returns graphs
    - For one-shot predictions, returns table
  """
    # Parameters
    label_sets = metric_parameters['label_name']
    problem = metric_parameters['problem']
    graph_format = ['bo-', 'r+--', 'gs-.', 'cp:', 'm*-']

    # For one-shot prediction setting
    if problem == 'one-shot':
        # Initialize table
        perf_table = Texttable()
        first_row = ['id/label'] + label_sets
        perf_table.set_cols_align(["c" for _ in range(len(first_row))])
        multi_rows = [first_row]

        for i in range(predictions.shape[0]):
            curr_row = [str(i + 1)]

            # For each label
            for j in range(len(label_sets)):
                label_name = label_sets[j]
                curr_row = curr_row + [predictions[i, j]]

            multi_rows = multi_rows + [curr_row]

        perf_table.add_rows(multi_rows)
        # Print table
        print(perf_table.draw())
        # Return table
        return perf_table.draw()

    # For online prediction setting
    elif problem == 'online':
        # Initialize graph
        figs = []

        for i in range(predictions.shape[0]):
            fig = plt.figure(i + 10, figsize=(8, 5))
            legend_set = []

            # For each label
            for j in range(len(label_sets)):
                label_name = label_sets[j]
                curr_perf = predictions[i][:, j]
                legend_set = legend_set + [label_name]
                plt.plot(range(len(curr_perf) - 1), curr_perf[:-1],
                         graph_format[j])

            plt.xlabel('Sequence Length', fontsize=10)
            plt.ylabel('Predictions', fontsize=10)
            plt.legend(legend_set, fontsize=10)
            plt.title('ID: ' + str(i + 1), fontsize=10)
            plt.grid()
            # Print graph
            plt.show()

            fig.patch.set_facecolor('#f0f2f6')
            figs.append(fig)
        # Return graph
        return figs
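
A minimal call sketch for the one-shot branch (not from the original source; the shapes are inferred from the indexing above): predictions is an array of shape (n_patients, n_labels) and metric_parameters carries the label names and the problem type. The label names below are hypothetical.

import numpy as np

demo_predictions = np.array([[0.82, 0.10],
                             [0.45, 0.91]])
demo_parameters = {'label_name': ['label_a', 'label_b'], 'problem': 'one-shot'}
print_prediction(demo_predictions, demo_parameters)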
Example #55
0
    def pformat_extended(self, sort_by: List[int] = []):
        cnt = 0
        headers = self.headers()
        output = ''
        try:
            for r in sorted(self.fetchall(), key=operator.itemgetter(
                    *sort_by)) if sort_by else self.fetchall():
                cnt += 1
                table = Texttable(max_width=0)
                table.set_deco(Texttable.VLINES)
                table.set_cols_align(['r', 'l'])
                table.set_chars(
                    ['-', ':', '+', '=']
                )  # Just replacing vertical line with ':' instead of default '|'
                for i, h in enumerate(headers):
                    table.add_row([h, r[i]])
                output += f'row {cnt}:\n'
                output += table.draw()
                output += '\n\n'

        except IndexError:
            raise TableFormattingError(
                f"You tried to sort by columns '{sort_by}' but at least one does not exist."
            )

        return {'table': output, 'rowcount': cnt}
Example #56
0
    def showTable(self):
        neighbors_id = [i[1] for i in self.neighbors]
        table = Texttable()
        table.set_deco(Texttable.HEADER)
        table.set_cols_dtype(["t", "t", "t", "t"])
        table.set_cols_align(["l", "l", "l", "l"])
        rows = []
        rows.append([u"movie ID", u"Name", u"release", u"from userID"])
        for item in self.recommandList:
            fromID = []
            for i in self.movies:
                if i[0] == item[1]:
                    movie = i
                    break
            for i in self.ItemUser[item[1]]:
                if i in neighbors_id:
                    fromID.append(i)
            movie.append(fromID)
            rows.append(movie)
        table.add_rows(rows)
        print(table.draw())
Example #57
0
#Write the predicted negative reviews to a file
with open(outputDir + "/negativeReviews.txt", "w") as fileNeg:
	for i in negPredIndex:
		for k in range(len(reviewContent[i])):
			fileNeg.write(reviewContent[i][k])
		fileNeg.write("\n")

#Write the predicted neutral reviews to a file
with open(outputDir + "/neutralReviews.txt", "w") as fileNeut:
	for i in neutPredIndex:
		for k in range(len(reviewContent[i])):
			fileNeut.write(reviewContent[i][k])
		fileNeut.write("\n")

#Write the feature scores to a file
with open(outputDir + "/featureScore.txt", "w") as fileFeat:
	t = Texttable()
	lst = [["Feature", "Score"]]
	for tup in avgFeatScore:
		lst.append([tup[0], tup[1]])
	t.add_rows(lst)
	fileFeat.write(str(t.draw()))

print("The files are successfully created in the dir '" + outputDir + "'")

#Evaluation metric
PP = len(set(posActIndex).intersection(set(posPredIndex)))
PNe = len(set(posActIndex).intersection(set(negPredIndex)))
PN = len(set(posActIndex).intersection(set(neutPredIndex)))

NeP = len(set(negActIndex).intersection(set(posPredIndex)))
NeNe = len(set(negActIndex).intersection(set(negPredIndex)))
Example #58
0
from texttable import Texttable
table = Texttable()
jawab = "y"
no = 0
nama = []
nim = []
nilai_tugas = []
nilai_uts = []
nilai_uas = []
while(jawab == "y"):
    nama.append(input("Masukkan Nama : "))
    nim.append(input("Masukkan Nim : "))
    nilai_tugas.append(input("Nilai Tugas : "))
    nilai_uts.append(input("Nilai UTS : "))
    nilai_uas.append(input("Nilai UAS : "))
    jawab = input("Tambah data (y/t)? ")
    no += 1
# Add the header once, then one row per student, and print the finished table
table.header(['No', 'Nama', 'NIM', 'TUGAS', 'UTS', 'UAS', 'AKHIR'])
for i in range(no):
    tugas = int(nilai_tugas[i])
    uts = int(nilai_uts[i])
    uas = int(nilai_uas[i])
    akhir = (tugas * 30 / 100) + (uts * 35 / 100) + (uas * 35 / 100)
    table.add_row([i + 1, nama[i], nim[i], nilai_tugas[i], nilai_uts[i],
                   nilai_uas[i], akhir])
print(table.draw())
Example #59
0
def generate_report(proj_conf):

    d = {
        'runname': proj_conf['run'],
        'project_id': proj_conf['id'],
        'samplenames': ' '.join(proj_conf['samples']),
        'latex_opt': "",
        'uppnex': "",
        'mapping': "",
        'dup_rem': "",
        'read_count': "",
        'quantifyer': "",
        'gene_body_cov': "",
        'FPKM_heatmap': "",
        'FPKM_PCAplot': "",
        'Mapping_statistics': "",
        'Read_Distribution': "",
        'rRNA_table': ""
    }

    ## Latex option (no of floats per page)
    floats_per_page = '.. raw:: latex\n\n   \\setcounter{totalnumber}{8}'
    d['latex_opt'] = floats_per_page


    ## Metadata fetched from the 'Genomics project list' on Google Docs
    try:
        proj_data = ProjectMetaData(proj_conf['id'], proj_conf['config'])
        uppnex_proj = proj_data.uppnex_id
    except:
        uppnex_proj = "b201YYXX"
        print "No uppnex ID fetched"
        pass
    if not uppnex_proj:
        uppnex_proj = "b201YYXX"
        print "No uppnex ID fetched"
    d['uppnex'] = uppnex_proj


    ## RNA-seq tools fetched from config file post_process.yaml
    try:
        tools = proj_conf['config']['custom_algorithms']['RNA-seq analysis']
        d['mapping'] = os.path.join(tools['aligner'], tools['aligner_version'])
        d['dup_rem'] = os.path.join(tools['dup_remover'], tools['dup_remover_version'])
        d['read_count'] = os.path.join(tools['counts'], tools['counts_version'])
        d['quantifyer'] = os.path.join(tools['quantifyer'], tools['quantifyer_version'])
    except:
        print "Could not fetch RNA-seq tools from config file post_process.yaml"
        d['mapping'] = "X"
        d['dup_rem'] = "X"
        d['read_count'] = "X"
        d['quantifyer'] = "X"
        pass


    ## Mapping Statistics
    tab = Texttable()
    tab.set_cols_dtype(['t', 't', 't', 't'])
    tab.add_row(['Sample', 'Tot # read pairs', 'Uniquely mapped reads (%)',
                 'Uniquely mapped reads (%)\n dups removed'])
    statistics = {}
    try:
        for sample_name in proj_conf['samples']:
            try:
                f = open('tophat_out_' + sample_name + '/logs/prep_reads.log', 'r')
                tot_NO_read_pairs = f.readlines()[2].split()[3]
                f.close()
                f = open('tophat_out_' + sample_name + '/stat' + sample_name, 'r')
                dict = make_stat(f, tot_NO_read_pairs)
                tab.add_row([sample_name, tot_NO_read_pairs,
                             dict['bef_dup_rem']['%uniq_mapped'],
                             dict['aft_dup_rem']['%uniq_mapped']])
                statistics[sample_name] = dict
            except:
                print 'Could not make mapping statistics for sample ' + sample_name

        d['Mapping_statistics'] = tab.draw()
        json = open('stat.json', 'w')
        print >> json, statistics
        json.close()
    except:
        print "Could not make Mapping Statistics table"
        pass

    ## Read Distribution
    try:
        tab = Texttable()
        tab.set_cols_dtype(['t', 't', 't', 't', 't', 't', 't', 't'])
        tab.add_row(["Sample", "CDS Exon", "5'UTR Exon", "3'UTR Exon", "Intron",
                     "TSS up 1kb", "TES down 1kb", "mRNA frac"])
        read_dist = {}
        for i in range(len(proj_conf['samples'])):
            sample_name = proj_conf['samples'][i]
            dict = {}
            try:
                f = open('RSeQC_rd_' + sample_name + '.out', 'r')
                dict = read_RSeQC_rd233(f)
                row = [sample_name, dict['CDS_Exons']['Tags/Kb'],
                       dict["5'UTR_Exons"]['Tags/Kb'], dict["3'UTR_Exons"]['Tags/Kb'],
                       dict['Introns']['Tags/Kb'], dict['TSS_up_1kb']['Tags/Kb'],
                       dict['TES_down_1kb']['Tags/Kb'], dict['mRNA_frac']]
                tab.add_row(row)
                read_dist[sample_name] = dict
            except:
                print "Could not make read distribution for sample " + sample_name
                pass
        json = open('RSeQC_rd.json', 'w')
        print >> json, read_dist
        json.close()
        d['Read_Distribution'] = tab.draw()
    except:
        print "Could not make Read Distribution table"
        pass

    ## FPKM_PCAplot, FPKM_heatmap
    if os.path.exists("FPKM_PCAplot.pdf") and os.path.exists("FPKM_heatmap.pdf"):
        d['FPKM_PCAplot'] = image("FPKM_PCAplot.pdf", width="100%")
        d['FPKM_heatmap'] = image("FPKM_heatmap.pdf", width="100%")
    else:
	print "could not make FPKM PCAplot and FPKM heatmap"


    ## rRNA_table
    try:
        tab = Texttable()
        tab.set_cols_dtype(['t', 't'])
        tab.add_row(["Sample", "rRNA"])
        f = open('rRNA.quantification', 'r')
        D = {}
        for line in f:
            D[str(line.split('\t')[0].strip())] = str(line.split('\t')[1].strip())
        for sample_name in proj_conf['samples']:
            if D.has_key(sample_name):
                tab.add_row([sample_name, D[sample_name]])
        d['rRNA_table'] = tab.draw()
        f.close()
    except:
        print "Could not generate rRNA table"
        pass
 
    return d
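
For orientation only, a sketch of the proj_conf structure inferred from the keys the function reads above (the values are placeholders, not from the original project):

# Hypothetical example; key names are taken from the accesses above,
# the values are invented placeholders.
proj_conf = {
    'run': '120101_AB001ABXX',
    'id': 'J.Doe_12_01',
    'samples': ['sample_1', 'sample_2'],
    'config': {'custom_algorithms': {'RNA-seq analysis': {
        'aligner': 'tophat', 'aligner_version': '2.0',
        'dup_remover': 'picard', 'dup_remover_version': '1.29',
        'counts': 'htseq', 'counts_version': '0.5',
        'quantifyer': 'cufflinks', 'quantifyer_version': '1.2'}}},
}
report_data = generate_report(proj_conf)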
Example #60
0
from texttable import Texttable
import xlrd
import csv

t = Texttable()

filepath_1 = 'Salon Wise Service Price.xlsx'
filepath_2 = 'salon_code_mapping.xlsx'
csvfilepath = 'mapped_appointment.csv'

wb_1 = xlrd.open_workbook(filepath_1)
sheet_1 = wb_1.sheet_by_index(0)
sheet_1.cell_value(0, 0)

master_fields = []
master_rows = []

# Adding fields
for i in range(sheet_1.ncols):
    master_fields.append(sheet_1.cell_value(0, i))

# Adding rows
for i in range(1, sheet_1.nrows):
    master_rows.append(sheet_1.row_values(i))

wb_2 = xlrd.open_workbook(filepath_2)
sheet_2 = wb_2.sheet_by_index(0)
sheet_2.cell_value(0, 0)

map_fields = []
map_rows = []