def dump(relation):
  width,height = term_size()
  table = Texttable(width)


  sample, iterator = tee(relation)


  table.add_rows(take(1000,sample))
  table._compute_cols_width()
  del sample
  
  table.reset()

  table.set_deco(Texttable.HEADER)
  table.header([f.name for f in relation.schema.fields])



  rows = take(height-3, iterator)

  try:
    while rows:
      table.add_rows(rows, header=False)
      print table.draw()
      rows = take(height-3, iterator)
      if rows:
        raw_input("-- enter for more ^c to quit --")
  except KeyboardInterrupt:
    print
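
The take() and term_size() helpers used by dump() above are defined elsewhere in its module; a minimal sketch of plausible stand-ins (assumptions, not the project's own code — dump() also relies on itertools.tee and texttable.Texttable):

from itertools import islice
import shutil

def take(n, iterable):
    # Standard itertools recipe: return at most n items from the iterator as a list.
    return list(islice(iterable, n))

def term_size():
    # (columns, rows) of the controlling terminal (Python 3's shutil).
    size = shutil.get_terminal_size()
    return size.columns, size.lines
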
def getAccuracy( train, testSet, k ):
	totalCount = len(testSet)
	correctCount = 0.0

	# Init ConfusionMatrix
	confusionMatrix = { }
	for i in featuresList:
		for j in featuresList:
			confusionMatrix[ (i,j) ] = 0

	for i in range(len(testSet)):
		prediction = getPrediction( getDistancesOfKSimilarSets( train, testSet[i], k ) )
		if prediction == testSet[i][-1]:
			correctCount += 1
		confusionMatrix[ testSet[i][-1], prediction ] += 1

	print "Confusion Matrix"
	from texttable import Texttable
	table=[]
	row=[""]
	row.extend(featuresList)
	table.append(row)
	for i in featuresList:
		row=[i]
		for j in featuresList:
			row.append( confusionMatrix[ (i,j) ])
		table.append(row)
	T = Texttable()
	T.add_rows(table)
	print T.draw()

	return correctCount * 1.0 / totalCount
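
featuresList in getAccuracy() above is presumably a module-level list of class labels used to key the confusion matrix, and getPrediction()/getDistancesOfKSimilarSets() are kNN helpers from the same module. An illustrative assumption:

featuresList = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
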
Example #3
def test_texttable():
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([
        ["Name", "Age", "Nickname"],
        ["Mr\nXavier\nHuon", 32, "Xav'"],
        ["Mr\nBaptiste\nClement", 1, "Baby"],
        ["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"],
    ])
    assert clean(table.draw()) == dedent('''\
        +----------+-----+----------+
        |   Name   | Age | Nickname |
        +==========+=====+==========+
        | Mr       |     |          |
        | Xavier   |  32 |          |
        | Huon     |     |   Xav'   |
        +----------+-----+----------+
        | Mr       |     |          |
        | Baptiste |   1 |          |
        | Clement  |     |   Baby   |
        +----------+-----+----------+
        | Mme      |     |   Lou    |
        | Louise   |  28 |          |
        | Bourgeau |     |   Loue   |
        +----------+-----+----------+
    ''')
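
dedent comes from the standard library; clean is a test helper that is not reproduced in this listing. A sketch of what it plausibly does (an assumption: drop the trailing spaces Texttable pads rows with so the comparison ignores line-end whitespace):

import re
from textwrap import dedent

def clean(text):
    # Strip trailing spaces on every line and end with exactly one newline,
    # matching the dedented expected strings used in the asserts.
    return re.sub(r' +$', '', text, flags=re.MULTILINE).rstrip('\n') + '\n'
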
Example #4
def test_texttable_header():
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype([
        't',  # text
        'f',  # float (decimal)
        'e',  # float (exponent)
        'i',  # integer
        'a',  # automatic
    ])
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([
        ["text",    "float", "exp", "int", "auto"],
        ["abcd",    "67",    654,   89,    128.001],
        ["efghijk", 67.5434, .654,  89.6,  12800000000000000000000.00023],
        ["lmn",     5e-78,   5e-78, 89.4,  .000000000000128],
        ["opqrstu", .023,    5e+78, 92.,   12800000000000000000000],
    ])
    assert clean(table.draw()) == dedent('''\
         text     float       exp      int     auto
        ==============================================
        abcd      67.000   6.540e+02    89   128.001
        efghijk   67.543   6.540e-01    90   1.280e+22
        lmn        0.000   5.000e-78    89   0.000
        opqrstu    0.023   5.000e+78    92   1.280e+22
    ''')
Example #5
	def getDescOfEachNoSystemTable(self):
		'''
		returns a String for print
		'''
		outputString = ""
		logging.debug("Getting all no system tables accessible with the current user")
		tablesAccessible = self.__execQuery__(query=self.REQ_GET_ALL_NO_SYSTEM_TABLES, ld=['owner', 'table_name'])
		if isinstance(tablesAccessible,Exception):
			logging.warning("Impossible to execute the request '{0}': {1}".format(self.REQ_GET_ALL_NO_SYSTEM_TABLES, tablesAccessible.generateInfoAboutError(self.REQ_GET_ALL_NO_SYSTEM_TABLES)))
			return ""
		else:
			nbTables = len(tablesAccessible)
			colNb = nbTables
			if colNb>0 : 
				pbar,currentColNum = self.getStandardBarStarted(colNb), 0
			for aTable in tablesAccessible:
				if colNb>0:
					currentColNum += 1
					pbar.update(currentColNum)
				request = self.REQ_GET_COLUMNS_FOR_TABLE.format(aTable['table_name'], aTable['owner'])
				columnsAndTypes = self.__execQuery__(query=request, ld=['column_name', 'data_type'])
				if isinstance(columnsAndTypes,Exception):
					logging.warning("Impossible to execute the request '{0}': {1}".format(request, columnsAndTypes.generateInfoAboutError(request)))
				outputString += "\n[+] {0}.{1} ({2}/{3})\n".format(aTable['owner'], aTable['table_name'], currentColNum, colNb)
				resultsToTable = [('column_name', 'data_type')]
				for aLine in columnsAndTypes:
					resultsToTable.append((aLine['column_name'], aLine['data_type']))
				table = Texttable(max_width=getScreenSize()[0])
				table.set_deco(Texttable.HEADER)
				table.add_rows(resultsToTable)
				outputString += table.draw()
				outputString += '\n'
			if colNb>0 : pbar.finish()
		return outputString
    def handle(self, *args, **options):
        d = date.today()
        if options['datestring']:
            try:
                d = datetime.strptime(options['datestring'], '%Y-%m-%d').date()
            except ValueError:
                raise CommandError('Invalid date value: {} (use format YYYY-MM-DD)'.format(options['datestring']))

        emails = None
        if options['emails']:
            try:
                emails = options['emails'].split(',')
            except ValueError:
                raise CommandError('Invalid emails value: {} (use comma-separated string)'.format(options['emails']))

        week_start = datetime.combine(d, datetime.min.time()).astimezone(timezone(settings.TIME_ZONE))
        week_end = week_start + timedelta(days=7)
        rfcs = ChangeRequest.objects.filter(planned_start__range=[week_start, week_end]).order_by('planned_start')

        # Construct the HTML / plaintext email content to send.
        context = {
            'start': week_start,
            'object_list': rfcs,
            'domain': Site.objects.get_current().domain,
        }
        html_content = render_to_string('registers/email_cab_rfc_calendar.html', context)

        table = Texttable(max_width=0)
        table.set_cols_dtype(['i', 't', 't', 't', 't', 't', 't', 't'])
        rows = [['Change ref', 'Title', 'Change type', 'Status', 'Requester', 'Endorser', 'Implementer', 'Planned start & end']]
        for rfc in rfcs:
            rows.append(
                [
                    rfc.pk,
                    rfc.title,
                    rfc.get_change_type_display(),
                    rfc.get_status_display(),
                    rfc.requester.get_full_name(),
                    rfc.endorser.get_full_name(),
                    rfc.implementer.get_full_name(),
                    '{}\n{}'.format(rfc.planned_start.strftime('%A, %d-%b-%Y %H:%M'), rfc.planned_end.strftime('%A, %d-%b-%Y %H:%M'))
                ]
            )
        table.add_rows(rows, header=True)
        text_content = table.draw()

        # Email the CAB members group.
        if not Group.objects.filter(name='CAB members').exists():
            raise CommandError('"CAB members" group does not exist.')
        cab = Group.objects.get(name='CAB members')
        subject = 'Weekly change calendar starting {}'.format(week_start.strftime('%A, %d %b %Y'))
        recipients = list(User.objects.filter(groups__in=[cab], is_active=True).values_list('email', flat=True))

        # Optional additional email recipients.
        if emails:
            recipients = recipients + emails

        msg = EmailMultiAlternatives(subject, text_content, settings.NOREPLY_EMAIL, recipients)
        msg.attach_alternative(html_content, 'text/html')
        msg.send()
Example #7
 def displayText(self):
     column = 0
     nbRow = 1
     t = Texttable()
     t.set_cols_align(["l", "c", "c", "c"])
     title = ["/", "Tic", "Tac", "Toe"]
     endLine = ["\\", 1, 2, 3]
     adding = []
     adding.append(nbRow)
     for i in self.plateau.flat:  # reads row by row
         if i == 0:
             adding.append("X")
         elif i == 1:
             adding.append("O")
         else:
             adding.append("")
         column += 1
         if column == self.lineCount:
             t.add_rows([title, adding])
             column = 0
             nbRow += 1
             adding = []
             adding.append(nbRow)
     t.add_rows([title, endLine])
     print t.draw()
Example #8
def images_to_ascii_table(images):
    """Just a method that formats the images to ascii table.
    Expects dictionary {host: [images]}
    and prints multiple tables
    """
    with closing(StringIO()) as out:
        for host, values in images.iteritems():
            out.write(str(host) + "\n")
            t = TextTable()
            t.set_deco(TextTable.HEADER)
            t.set_cols_dtype(['t'] * 5)
            t.set_cols_align(["l"] * 5)
            rows = []
            rows.append(['Repository', 'Tag', 'Id', 'Created', 'Size'])
            for image in values:
                rows.append([
                    image.repository or '<none>',
                    image.tag or '<none>',
                    image.id[:12],
                    time_ago(image.created),
                    human_size(image.size)
                ])
            t.add_rows(rows)
            out.write(t.draw() + "\n\n")
        return out.getvalue()
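
The expected input is a mapping from host to image objects exposing repository, tag, id, created and size. A hypothetical call (time_ago() and human_size() come from the same module and are not shown here):

from collections import namedtuple

Image = namedtuple("Image", "repository tag id created size")
images = {
    "docker-host-1": [
        Image("ubuntu", "20.04", "1d622ef86b13f00aa1fd", 1588000000, 73900000),
    ],
}
# print(images_to_ascii_table(images))
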
    def output_table_list(tables):
        terminal_size = get_terminal_size()[1]
        widths = []
        for tab in tables:
            for i in range(0, len(tab.columns)):
                current_width = len(tab.columns[i].label)
                if len(widths) < i + 1:
                    widths.insert(i, current_width)
                elif widths[i] < current_width:
                    widths[i] = current_width
                for row in tab.data:
                    current_width = len(resolve_cell(row, tab.columns[i].accessor))
                    if current_width > widths[i]:
                        widths[i] = current_width

        if sum(widths) != terminal_size:
            widths[-1] = terminal_size - sum(widths[:-1]) - len(widths) * 3

        for tab in tables:
            table = Texttable(max_width=terminal_size)
            table.set_cols_width(widths)
            table.set_deco(0)
            table.header([i.label for i in tab.columns])
            table.add_rows([[AsciiOutputFormatter.format_value(resolve_cell(row, i.accessor), i.vt) for i in tab.columns] for row in tab.data], False)
            six.print_(table.draw() + "\n")
def _create_website(args):
    api = _login(args)
    if len(args.site_apps) % 2:
        print('Error: invalid site applications array')
        print('Array items should be pairs of application name and URL path')
        print('Example: django_app / django_app_media /media')
        return
    else:
        site_apps = zip(args.site_apps[::2], args.site_apps[1::2])
        for site_app in site_apps:
            app_name, app_url = site_app
            if not VALID_SYMBOLS.match(app_name):
                print('Error: %s is not a valid app name' % app_name)
                print('use A-Z a-z 0-9 or underscore symbols only')
                return

            if not VALID_URL_PATHS.match(app_url):
                print('Error: %s is not a valid URL path' % app_url)
                print('must start with / and only regular characters, . and -')
                return

        response = api.create_website(args.website_name, args.ip, args.https, \
            args.subdomains, *site_apps)

        print('Web site has been created:')
        table = Texttable(max_width=140)
        table.add_rows([['Param', 'Value']] + [[key, value] for key, value in response.items()])
        print(table.draw())
Example #11
 def display_two_columns(cls, table_dict=None):
     if table_dict:
         ignore_fields = ['_cls', '_id', 'date_modified', 'date_created', 'password', 'confirm']
         table = Texttable(max_width=100)
         rows = [['Property', 'Value']]
         for key, value in table_dict.iteritems():
             if key not in ignore_fields:
                 items = [key.replace('_', ' ').title()]
                 if isinstance(value, list):
                     if value:
                         if key == "projects":
                             project_entry = ""
                             for itm in value:
                                 user_project = Project.objects(id=ObjectId(itm.get('$oid'))) \
                                     .only('title', 'project_id').first()
                                 project_entry = project_entry + user_project.title + ", "
                             project_entry = project_entry.strip(', ')
                             items.append(project_entry)
                         else:
                             items.append(' , '.join(value))
                     else:
                         items.append('None')
                 else:
                     items.append(value)
                 rows.append(items)
         try:
             if rows:
                 table.add_rows(rows)
         except:
             print sys.exc_info()[0]
         print table.draw()
     pass
Example #12
    def draw(self):
        t = Texttable()
        t.add_rows([["TEAM","RUNS","HITS","LOB","ERRORS"],
                    [self.away_team.team_name, self.away_runs, self.away_hits, self.away_LOB, self.away_errors],
                    [self.home_team.team_name, self.home_runs, self.home_hits, self.home_LOB, self.home_errors]])

        print(t.draw())
Example #13
    def output_table(tab):
        max_width = get_terminal_size()[1]
        table = Texttable(max_width=max_width)
        table.set_deco(0)
        table.header([i.label for i in tab.columns])
        widths = []
        number_columns = len(tab.columns)
        remaining_space = max_width
        # set maximum column width based on the amount of terminal space minus the 3-character column borders
        max_col_width = (remaining_space - number_columns * 3) / number_columns
        for i in range(0, number_columns):
            current_width = len(tab.columns[i].label)
            tab_cols_acc = tab.columns[i].accessor
            max_row_width = max(
                    [len(str(resolve_cell(row, tab_cols_acc))) for row in tab.data ]
                    )
            current_width = max_row_width if max_row_width > current_width else current_width
            if current_width < max_col_width:
                widths.insert(i, current_width)
                # reclaim space not used
                remaining_columns = number_columns - i - 1
                remaining_space = remaining_space - current_width - 3
                if remaining_columns != 0:
                    max_col_width = (remaining_space - remaining_columns * 3)/ remaining_columns
            else:
                widths.insert(i, max_col_width)
                remaining_space = remaining_space - max_col_width - 3
        table.set_cols_width(widths)

        table.add_rows([[AsciiOutputFormatter.format_value(resolve_cell(row, i.accessor), i.vt) for i in tab.columns] for row in tab.data], False)
        print(table.draw())
Example #14
	def show_interfaces(self):

		### build table with interfaces
		table = Texttable()
		table.set_cols_align(["c", "l", "l", "l", "l", "l"])
		data = [["ifIndex","ifDescr","ifAlias","IPadd","AS","OS"]]
		for line,interface in enumerate(self.snmpDevice[self.host]['interfaces']):
			### check if the interface has an ip address
			if self.snmpDevice[self.host]['interfaces'][interface]['ipAdEntAddr']:
				data.append([str(self.snmpDevice[self.host]['interfaces'][interface]['ifIndex']), \
						minimize(str(self.snmpDevice[self.host]['interfaces'][interface]['ifDescr'])), \
						str(self.snmpDevice[self.host]['interfaces'][interface]['ifAlias']), \
						str(self.snmpDevice[self.host]['interfaces'][interface]['ipAdEntAddr']), \
						str(self.snmpDevice[self.host]['interfaces'][interface]['ifAdminStatus']), \
						str(self.snmpDevice[self.host]['interfaces'][interface]['ifOperStatus']), \
						])
			else:
				data.append([str(self.snmpDevice[self.host]['interfaces'][interface]['ifIndex']), \
						minimize(str(self.snmpDevice[self.host]['interfaces'][interface]['ifDescr'])), \
						str(self.snmpDevice[self.host]['interfaces'][interface]['ifAlias']), \
						"--", \
						str(self.snmpDevice[self.host]['interfaces'][interface]['ifAdminStatus']), \
						str(self.snmpDevice[self.host]['interfaces'][interface]['ifOperStatus']), \
						])

		table.add_rows(data, header=True)

		return table.draw()
Example #15
def format_info(value, format, cols_width=None, dumper=None):
    if format in(INFO_FORMAT.DICT, INFO_FORMAT.JSON, INFO_FORMAT.YAML):
        value['component_details'] = json_loads(value['component_details'])

    if format == INFO_FORMAT.JSON:
        return json_dumps(value)

    elif format == INFO_FORMAT.YAML:
        buff = StringIO()
        yaml.dump_all([value], default_flow_style=False, indent=4, Dumper=dumper, stream=buff)
        value = buff.getvalue()
        buff.close()

        return value

    elif format == INFO_FORMAT.TEXT:
        cols_width = (elem.strip() for elem in cols_width.split(','))
        cols_width = [int(elem) for elem in cols_width]

        table = Texttable()
        table.set_cols_width(cols_width)

        # Use text ('t') instead of auto so that boolean values don't get converted into ints
        table.set_cols_dtype(['t', 't'])

        rows = [['Key', 'Value']]
        rows.extend(sorted(value.items()))

        table.add_rows(rows)

        return table.draw()

    else:
        return value
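
An illustrative call of the TEXT branch of format_info() above (INFO_FORMAT comes from the surrounding module; the value dict here is made up):

value = {'name': 'server1', 'is_active': True, 'component_details': '{}'}
print(format_info(value, INFO_FORMAT.TEXT, cols_width='20, 60'))
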
def _create_app(args):
    api = _login(args)
    response = api.create_app(args.name, args.type, args.autostart, args.extra_info)
    print('App has been created:')
    table = Texttable(max_width=140)
    table.add_rows([['Param', 'Value']] + [[key, value] for key, value in response.items()])
    print(table.draw())
Example #17
 def do_list_changesets(self, arg, opts=None):
     """Show changesets needing review."""
     changesets = requests.get(
         "http://%s/api/v1/changeset/" % self.site, params={"review_status": "needs"}, auth=self.api_auth
     )
     objects = changesets.json().get("objects")
     table = Texttable()
     table.set_deco(Texttable.HEADER)
     table.set_cols_align(["c", "c", "c", "c", "c"])
     table.set_cols_width([5, 20, 15, 15, 10])
     rows = [["ID", "Type", "Classification", "Version Control URL", "Submitted By"]]
     for cs in objects:
         user = requests.get("http://%s%s" % (self.site, cs.get("submitted_by")), auth=self.api_auth)
         user_detail = user.json()
         rows.append(
             [
                 cs.get("id"),
                 cs.get("type"),
                 cs.get("classification"),
                 cs.get("version_control_url"),
                 user_detail.get("name"),
             ]
         )
     table.add_rows(rows)
     print "Changesets That Need To Be Reviewed:"
     print table.draw()
Example #18
def containers_to_ascii_table(containers):
    """Just a method that formats the images to ascii table.
    Expects dictionary {host: [images]}
    and prints multiple tables
    """
    with closing(StringIO()) as out:
        for host, values in containers.iteritems():
            out.write("[" + str(host) + "] \n")
            t = TextTable(max_width=400)
            t.set_deco(TextTable.HEADER)
            t.set_cols_dtype(['t'] * 6)
            t.set_cols_align(["l"] * 6)
            t.set_cols_width([12, 25, 25, 15, 20, 15])
            rows = []
            rows.append(
                ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports'])
            for container in values:
                rows.append([
                    container.id[:12],
                    container.image,
                    container.command[:20],
                    time_ago(container.created),
                    container.status,
                    container.ports
                ])
            t.add_rows(rows)
            out.write(t.draw() + "\n\n")
        return out.getvalue()
Example #19
def test_colored():
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([
        [get_color_string(bcolors.GREEN, "Name Of Person"), "Age", "Nickname"],
         ["Mr\nXavier\nHuon", 32, "Xav'"],
         [get_color_string(bcolors.BLUE,"Mr\nBaptiste\nClement"),
          1,
          get_color_string(bcolors.RED,"Baby")] ])
    expected_output = dedent("""
        +----------------+-----+----------+
        | Name Of Person | Age | Nickname |
        +================+=====+==========+
        | Mr             |     |          |
        | Xavier         |  32 |          |
        | Huon           |     |   Xav'   |
        +----------------+-----+----------+
        | Mr             |     |          |
        | Baptiste       |   1 |          |
        | Clement        |     |   Baby   |
        +----------------+-----+----------+
        """).strip('\n')

    assert table.draw() == expected_output
Example #20
def format_table(table, format='csv', outputstream=sys.stdout, **extra_options):
    """table can be a table from dict_to_table() or a dictionary.
    The dictionary can have either a single value as a key (for a
    one-dimensional table) or 2-tuples (for two-dimensional tables).
    format is currently one of csv, tsv, tex, texbitmap, or asciiart.
    Values for texbitmap should be floats between 0 and 1 and the output
    will be the TeX code for a large-pixeled bitmap."""
    if isinstance(table, dict):
        table = dict_to_table(table)

    if format in ('csv', 'tsv'):
        import csv
        dialect = {'csv' : csv.excel, 'tsv' : csv.excel_tab}[format]
        writer = csv.writer(outputstream, dialect=dialect)
        for row in table:
            writer.writerow(row)
    elif format == 'tex':
        import TeXTable
        print >>outputstream, TeXTable.texify(table, has_header=True)
    elif format == 'texbitmap':
        import TeXTable
        extra_options.setdefault('has_header', True)
        print >>outputstream, TeXTable.make_tex_bitmap(table, **extra_options)
    elif format == 'asciiart':
        from texttable import Texttable
        texttable = Texttable(**extra_options)
        texttable.add_rows(table)
        print >>outputstream, texttable.draw()
    else:
        raise ValueError("Unsupported format: %r (supported formats: %s)" % \
            (format, ' '.join(supported_formats)))
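
Per the docstring, a plain dictionary is accepted directly and converted with dict_to_table() before rendering; a minimal call of the asciiart branch might look like this:

format_table({'apples': 3, 'pears': 5}, format='asciiart')
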
Example #21
def print_price_data(data):

    # Current BTC Price
    # --------------------
    print '\n%s' % colorize('CaVirtex Market\n---------------', colors.CYAN)
    

    status_color = colors.GREEN if data['net'] > 0 else colors.RED

    print '\n%s' % colorize('Price', colors.BLUE)
    
    print '\n%s' % colorize('$%.2f CAD/BTC' % data['current_price'], status_color)

    # Latest Trades
    # ----------------
    print '\n%s\n' % colorize('Latest Trades', colors.BLUE)
    
    trades_table = Texttable()
    trades_table.set_deco(Texttable.HEADER)
    trades_table.set_precision(2)
    trades_table.set_cols_dtype(['f', 'f', 'f', 't'])
    trades_table.add_rows(data['latest_trades'])

    print trades_table.draw()
    
    # Investment Returns
    # ---------------------
    print '\n%s' % colorize('Your Investment', colors.BLUE)

    print '\nNet: %s' % colorize('$%.2f CAD' % data['net'], status_color)
    print '\nVOI: %s' % colorize('$%.2f CAD' % data['voi'], status_color)
    print '\nROI: %s' % colorize('%.2f%%' % data['roi'], status_color)
Example #22
 def long_format(self, records):
     """Format records in long format.
     
     Args:
         records: Controlled records to format.
     
     Returns:
         str: Record data in long format.
     """
     title = util.hline(self.title_fmt % {'model_name': records[0].name.capitalize(), 
                                          'storage_path': records[0].storage}, 'cyan')
     retval = [title]
     for record in records:
         rows = [['Attribute', 'Value', 'Command Flag', 'Description']]
         populated = record.populate()
         for key, val in sorted(populated.iteritems()):
             if key != self.model.key_attribute:
                 rows.append(self._format_long_item(key, val))
         table = Texttable(logger.LINE_WIDTH)
         table.set_cols_align(['r', 'c', 'l', 'l'])
         table.set_deco(Texttable.HEADER | Texttable.VLINES)
         table.add_rows(rows)
         retval.append(util.hline(populated[self.model.key_attribute], 'cyan'))
         retval.extend([table.draw(), ''])
     return retval
Example #23
    def print_steps(self, show_result=True):
        def max_len_of_list_of_str(s):
            return max(len(line) for line in str(s).split('\n'))

        def autodetect_width(d):
            widths = [0] * len(d[0])
            for line in d:
                for _i in range(len(line)):
                    widths[_i] = max(widths[_i], max_len_of_list_of_str(line[_i]))
            return widths

        if self.save_history:
            if self.errors:
                self.history = self.history[:-1]
            t = Texttable()
            header = ['№', 'Term', 'Code'] if self.parallel else ['№', 'Term', 'Code', 'Stack']
            data = [header] + [
                [repr(i) for i in item][:-1] if self.parallel else [repr(i) for i in item] for item in self.history]
            t.add_rows(data)
            t.set_cols_align(['l'] + ['r'] * (len(header) - 1))
            t.set_cols_valign(['m'] + ['m'] * (len(header) - 1))
            t.set_cols_width(autodetect_width(data))
            print t.draw()
        else:
            if not self.errors:
                print ' Steps: %10s' % self.iteration
                if show_result:
                    print 'Result: %10s' % repr(self.term)
Example #24
def _dataframe_to_texttable(df, align=None):
    """Convert data frame to texttable. Sets column widths to the
    widest entry in each column."""
    ttab = Texttable()
    ttab.set_precision(1)
    h = [[x for x in df]]
    h.extend([x for x in df.to_records(index=False)])
    if align:
        colWidths = [max(len(x), len(".. class:: {}".format(y))) for x,y in izip(df.columns, align)]
    else:
        colWidths = [len(x) for x in df.columns]
    for row in h:
        for i in range(0, len(row)):
            if type(row[i]) == str:
                colWidths[i] = max([len(str(x)) for x in row[i].split("\n")] + [colWidths[i]])
            colWidths[i] = max(len(str(row[i])), colWidths[i])
    table_data = []
    if align:
        for row in h:
            table_row = []
            i = 0
            for col, aln in izip(row, align):
                table_row.append(".. class:: {}".format(aln) + " " * colWidths[i] + "{}".format(col))
                i = i + 1
            table_data.append(table_row)
    else:
        table_data = h
    ttab.add_rows(table_data)
    ttab.set_cols_width(colWidths)
    # Note: this does not affect the final pdf output
    ttab.set_cols_align(["r"] * len(colWidths))
    return ttab
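
A small usage sketch for _dataframe_to_texttable() (the izip above implies Python 2; the DataFrame and alignment values here are illustrative):

import pandas as pd

df = pd.DataFrame({'sample': ['a', 'b'], 'coverage': [10.1, 12.3]})
ttab = _dataframe_to_texttable(df, align=['left', 'right'])
print(ttab.draw())
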
Example #25
    def dashboard_format(self, records):
        """Format modeled records in dashboard format.

        Args:
            records: Modeled records to format.
 
        Returns:
            str: Record data in dashboard format.
        """
        title = util.hline(self.title_fmt % {'model_name': records[0].name.capitalize(), 
                                             'storage_path': records[0].storage}, 'cyan')
        header_row = [col['header'] for col in self.dashboard_columns]
        rows = [header_row]
        for record in records:
            populated = record.populate()
            row = []
            for col in self.dashboard_columns:
                if 'value' in col:
                    try:
                        cell = populated[col['value']]
                    except KeyError:
                        cell = 'N/A'
                elif 'yesno' in col:
                    cell = 'Yes' if populated.get(col['yesno'], False) else 'No'
                elif 'function' in col:
                    cell = col['function'](populated)
                else:
                    raise InternalError("Invalid column definition: %s" % col)
                row.append(cell)
            rows.append(row)
        table = Texttable(logger.LINE_WIDTH)
        table.set_cols_align([col.get('align', 'c') for col in self.dashboard_columns])
        table.add_rows(rows)
        return [title, table.draw(), '']
Example #26
def run(host, port):

    port = int(port)

    from . import interop_tests
    test_names = [x for x in dir(interop_tests) if x.startswith("test_")]

    tests = [getattr(interop_tests, test_name) for test_name in test_names]

    results = []
    with click.progressbar(tests, label="Running interop tests...") as _tests:
        for test in _tests:
            results.append(test(host, port))

    fmt_results = []
    for r in results:
        fmt_results.append((r.name,
                            "True" if r.success else "False", r.reason if r.reason else "", r.transcript))

    t = Texttable()
    t.set_cols_width([20, 10, 80, 60])
    rows = [["Name", "Successful", "Reason", "Client Transcript"]]
    rows.extend(fmt_results)
    t.add_rows(rows)
    print(t.draw(), file=sys.__stdout__)

    failures = []
    for x in results:
        if not x.success:
            failures.append(False)

    if failures:
        sys.exit(len(failures))
    sys.exit(0)
    def per_class_metrics(self, labels, predictions):

        _, counts = np.unique(labels, return_counts=True)
        precision, recall, _, _ = score(labels, predictions)
        C = confusion_matrix(labels, predictions)
        avg_acc_per_class = np.average(recall)

        t = Texttable()
        t.add_rows([
            ['Metric', 'CAR', 'BUS', 'TRUCK', 'OTHER'],
            ['Count labels'] + counts.tolist(),
            ['Precision'] + precision.tolist(),
            ['Recall'] + recall.tolist()
        ])

        t2 = Texttable()
        t2.add_rows([
            ['-', 'CAR', 'BUS', 'TRUCK', 'OTHER'],
            ['CAR'] + C[0].tolist(),
            ['BUS'] + C[1].tolist(),
            ['TRUCK'] + C[2].tolist(),
            ['OTHER'] + C[3].tolist()
        ])

        return t, t2, avg_acc_per_class
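
The names used in per_class_metrics() suggest, but do not confirm, imports along these lines:

import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support as score
from texttable import Texttable
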
Example #28
File: data.py Project: tkf/neorg
    def print_diff_as_table(self, include=None, exclude=None,
                            deco_border=False, deco_header=False,
                            deco_hlines=False, deco_vlines=False):
        diffdict = self.diff(include, exclude)
        if not diffdict:
            return

        from texttable import Texttable
        table = Texttable()
        deco = 0
        if deco_border:
            deco |= Texttable.BORDER
        if deco_header:
            deco |= Texttable.HEADER
        if deco_hlines:
            deco |= Texttable.HLINES
        if deco_vlines:
            deco |= Texttable.VLINES
        table.set_deco(deco)

        sortedkey = sorted(diffdict)
        table.add_rows(
            [[''] + self._name] +
            [[keystr] + [self._getrepr(diffdict[keystr], name)
                         for name in self._name]
             for keystr in sortedkey]
            )
        print table.draw()
Example #29
def show_pretty_versions():
    result_list = list()
    header_list = ["IP", "Role", "Version", "Name", "Streamcast version", ]
    result_list.append(header_list)
    print("Versions installed:")
    ips = get_ips()
    for ip in ips:
        line = retrieve(sshcmd + ip + " cat /var/raumfeld-1.0/device-role.json")
        if "true" in line:
            moreinfo = "host"
        else:
            moreinfo = "slave"
        renderer_name = RfCmd.get_device_name_by_ip(ip)
        line = retrieve(sshcmd + ip + " cat /etc/raumfeld-version")
        line_streamcast = retrieve(sshcmd + ip + " streamcastd --version")
        single_result = list()
        single_result.append(ip)
        single_result.append(moreinfo)
        single_result.append(line.rstrip())
        single_result.append(renderer_name)
        single_result.append(line_streamcast.rstrip())
        result_list.append(single_result)
    t = Texttable(250)
    t.add_rows(result_list)
    print(t.draw())
Example #30
def main(args):
    """
    process each argument
    """
    table = Texttable()
    table.set_cols_align(["r", "r", "r", "r", "r"])
    rows = [["Number", "File Name", "File Size", "Video Duration (H:MM:SS)", "Conversion Time"]]
    total_time = 0.0
    total_file_size = 0

    for index, arg in enumerate(args, start=1):
        timer = utils.Timer()
        with timer:
            result = resize(arg, (index, len(args)))
        #
        result.elapsed_time = timer.elapsed_time()
        rows.append([index,
                     result.file_name,
                     utils.sizeof_fmt(result.file_size),
                     utils.sec_to_hh_mm_ss(utils.get_video_length(result.file_name)) if result.file_name else "--",
                     "{0:.1f} sec.".format(result.elapsed_time) if result.status else FAILED])
        #
        if rows[-1][-1] != FAILED:
            total_time += result.elapsed_time
        total_file_size += result.file_size

    table.add_rows(rows)
    print table.draw()
    print 'Total file size:', utils.sizeof_fmt(total_file_size)
    print 'Total time: {0} (H:MM:SS)'.format(utils.sec_to_hh_mm_ss(total_time))
    print utils.get_unix_date()
Example #31
 def loss_printer(self):
     """
     Printing the losses in tabular format.
     """
     t = Texttable()
     t.add_rows([["Losses"]])
     print(t.draw())
     t = Texttable()
     t.add_rows([["Iteration", "Loss B1", "Loss B2", "Loss U", "Loss V"]])
     t.add_rows(self.losses)
     print(t.draw())
Example #32
def draw_details():
    os.system("clear")

    print("\n\t\t Health Status")

    TABLE_ = [['SERVICE Name', 'IP', 'PORT', 'STATUS']]
    for key in health_status.keys():
        if (health_status[key][0] == "Registry-Backup"):
            health_status[key][1] = '172.17.0.4'
        TABLE_.append(health_status[key])
    t = Texttable()
    t.add_rows(TABLE_)
    print(t.draw())
    try:
        res = requests.get(
            'http://172.17.0.2:3001/give_load_details_ai_machines')
        content = res.json()

        load = content["load"]
        machines = content["machines"]

        details = [[
            'Machine Name', 'Username', 'IP', 'SSH-PORT', 'STATUS', 'LOAD'
        ]]
        for i in range(len(load)):
            list_ = ["AI NODE " + str(i)]
            list_.append(machines[i]["username"])

            list_.append(machines[i]["ip"])
            list_.append(machines[i]["port"])
            if (load[i] == 0):
                list_.append("IDLE")
            else:
                list_.append("RUNNING")

            list_.append(str(load[i]) + "/3")
            details.append(list_)

        print("\n\t\t AI NODES")
        t = Texttable()
        t.add_rows(details)
        print(t.draw())
    except:
        details = [['Machine Name', 'IP', 'SSH-PORT', 'STATUS', 'LOAD']]

        print("\n\t\t AI NODES")
        t = Texttable()
        t.add_rows(details)
        print(t.draw())
Example #33
def tab_printer(log):
    """
    Function to print the logs in a nice tabular format.
    """
    t = Texttable()
    t.add_rows([['Epoch', log["losses"][-1][0]]])
    print t.draw()

    t = Texttable()
    t.add_rows([['Loss', round(log["losses"][-1][1], 3)]])
    print t.draw()

    t = Texttable()
    t.add_rows([['Modularity', round(log["cluster_quality"][-1][1], 3)]])
    print t.draw()
def printstats(variables,pvals,group1,group2,g1str='group1',g2str='group2',foot='',verbose=True,fname=None):
    """
    Pretty-print previously computed statistical results 

    Parameters
    ----------
    variables : list or NumPy 1darray
        Python list/NumPy array of strings representing variables that have been tested
    pvals : Numpy 1darray
        Array of `p`-values (floats) of the same size as `variables`
    group1 : NumPy 2darray
        An #samples-by-#variables array holding the data of the first group sample used in the previously
        performed statistical comparison
    group2 : NumPy 2darray
        An #samples-by-#variables array holding the data of the second group sample used in the previously
        performed statistical comparison
    g1str : string
        Name of the first group that will be used in the generated table
    g2str : string
        Name of the second group that will be used in the generated table
    fname : string
        Name of a csv-file (with or without extension '.csv') used to save the table 
        (WARNING: existing files will be overwritten!). Can also be a path + file-name 
        (e.g., `fname='path/to/file.csv'`). By default output is not saved. 

    Returns
    -------
    Nothing : None

    Notes
    -----
    Uses the `texttable` module to print results

    See also
    --------
    texttable : a module for creating simple ASCII tables (currently available at the 
                `Python Package Index <https://pypi.python.org/pypi/texttable/0.8.1>`_)
    printdata : a function that pretty-prints/-saves data given in an array (part of ``nws_tools.py``)
    """

    # Make sure that the groups, p-values and tested variables have appropriate dimensions
    if not isinstance(variables,(list,np.ndarray)):
        raise TypeError('Input variables must be a Python list or NumPy 1d array of strings, not '+\
                        type(variables).__name__+'!')
    m = len(variables)
    for var in variables:
        if not isinstance(var,(str,unicode)):
            raise TypeError('All variables must be strings!')

    if not isinstance(pvals,(list,np.ndarray)):
        raise TypeError('The p-values must be provided as NumPy 1d array, not '+type(variables).__name__+'!')
    pvals = np.array(pvals)
    if not np.issubdtype(pvals.dtype, np.number): # Don't check for NaNs and Infs - some tests might return that...
        raise ValueError('Provided p-values must be real-valued!')
    M = pvals.size
    if M != m:
        raise ValueError('No. of variables (=labels) and p-values do not match up!')

    # Don't check for NaNs and Infs - just make sure we can compute mean and std
    try:
        N,M = group1.shape
    except: 
        raise TypeError('Data-set 1 must be a NumPy 2d array, not '+type(group1).__name__+'!')
    if M != m:
        raise ValueError('No. of variables (=labels) and dimension of group1 do not match up!')
    if not np.issubdtype(group1.dtype, np.number):       
        raise ValueError('Data-set 1 must be real-valued!')
    try:
        N,M = group2.shape
    except: 
        raise TypeError('Data-set 2 must be a NumPy 2d array, not '+type(group2).__name__+'!')
    if M != m:
        raise ValueError('No. of variables (=labels) and dimension of group2 do not match up!')
    if not np.issubdtype(group2.dtype, np.number):       
        raise ValueError('Data-set 2 must be real-valued!')

    # If column labels were provided, make sure they are printable strings
    if not isinstance(g1str,(str,unicode)):
        raise TypeError('The optional column label `g1str` has to be a string!')
    if not isinstance(g2str,(str,unicode)):
        raise TypeError('The optional column label `g2str` has to be a string!')

    # If a footer was provided, make sure it is a printable string
    if not isinstance(foot,(str,unicode)):
        raise TypeError('The optional footer `foot` has to be a string!')

    # See if we're supposed to print stuff to the terminal or just save everything to a csv file
    if not isinstance(verbose,bool):
        raise TypeError("The switch `verbose` has to be Boolean!")

    # If a file-name was provided make sure it's a string and check if the path exists
    if fname != None:
        if not isinstance(fname,(str,unicode)):
            raise TypeError('Input fname has to be a string specifying an output file-name, not '\
                            +type(fname).__name__+'!')
        fname = str(fname)
        if fname.find("~") == 0:
            fname = os.path.expanduser('~') + fname[1:]
        slash = fname.rfind(os.sep)
        if slash >= 0 and not os.path.isdir(fname[:fname.rfind(os.sep)]):
            raise ValueError('Invalid path for output file: '+fname+'!')
        if fname[-4::] != '.csv':
            fname = fname + '.csv'
        save = True
    else:
        save = False

    # Construct table head
    head = [" ","p","mean("+g1str+")"," ","std("+g1str+")","</>",\
            "mean("+g2str+")"," ","std("+g2str+")"]

    # Compute mean/std of input data
    g1mean = group1.mean(axis=0)
    g1std  = group1.std(axis=0)
    g2mean = group2.mean(axis=0)
    g2std  = group2.std(axis=0)

    # Put "<" if mean(base) < mean(test) and vice versa
    gtlt = np.array(['<']*g1mean.size)
    gtlt[np.where(g1mean > g2mean)] = '>'

    # Prettify table
    pmstr = ["+/-"]*g1mean.size

    # Assemble data array
    Data = np.column_stack((variables,\
                            pvals.astype('str'),\
                            g1mean.astype('str'),\
                            pmstr,\
                            g1std.astype('str'),\
                            gtlt,\
                            g2mean.astype('str'),\
                            pmstr,\
                            g2std.astype('str')))

    # Construct texttable object
    table = Texttable()
    table.set_cols_align(["l","l","r","c","l","c","r","c","l"])
    table.set_cols_valign(["c"]*9)
    table.set_cols_dtype(["t"]*9)
    table.set_cols_width([12,18,18,3,18,3,18,3,18])
    table.add_rows([head],header=True)
    table.add_rows(Data.tolist(),header=False)
    table.set_deco(Texttable.HEADER)

    # Pump out table if wanted
    if verbose:
        print "Summary of statistics:\n"
        print table.draw() + "\n"
        print foot + "\n"

    # If wanted, save stuff in a csv file
    if save:
        head = str(head)
        head = head.replace("[","")
        head = head.replace("]","")
        head = head.replace("'","")
        np.savetxt(fname,Data,delimiter=",",fmt="%s",header=head,footer=foot,comments="")
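
A minimal usage sketch for printstats() with synthetic data (names and numbers are made up; the function targets Python 2, given the unicode checks and print statements):

import numpy as np

variables = ['degree', 'clustering']
pvals = np.array([0.012, 0.34])
group1 = np.random.rand(20, 2)   # 20 samples x 2 variables
group2 = np.random.rand(25, 2)
printstats(variables, pvals, group1, group2, g1str='patients', g2str='controls')
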
Example #35
def printDataTable():
    global hv_Data_Avg
    while True:
        try:
            ####################################### Table Viszualization #########################################
            tableData = [[
                'Channels', 'B0_Voltage [V]', 'B0_Currents [A]',
                'B1_Voltage [V]', 'B1_Currents [A]'
            ]]

            # The sixteen per-channel rows were identical apart from the
            # channel key, so build them in a loop.
            for ch in range(16):
                channel = "Ch{}".format(ch)
                tableData.append([
                    channel,
                    str(round(hv_Data_Avg["VoltageB0"][channel], 2)) + " V",
                    str(round(hv_Data_Avg["CurrentB0"][channel], 11)) + " A",
                    str(round(hv_Data_Avg["VoltageB1"][channel], 2)) + " V",
                    str(round(hv_Data_Avg["CurrentB1"][channel], 11)) + " A"
                ])

            Table = Texttable()
            Table.add_rows(tableData)
            TableText = Table.draw()
            print(TableText)

            CurrentTime = datetime.now()
            print(
                "Time-> {} => Module-> {} => Data point-> {} => DB-> {} => Log-> {}"
                .format(CurrentTime, ModuleName, ReadNumber, databaseStatus,
                        logstatus),
                end='\r')

            time.sleep(0.5)
            os.system('clear')
            tableData = []
        except KeyError:
            pass
Example #36
 def __createTable(self, world):
     table = Texttable(30)
     table.set_cols_align(["c", "c", "c", "c", "c"])
     table.set_cols_width([3, 3, 3, 3, 3])
     table.add_rows(world, header=False)
     print(table.draw())
Example #37
 def __str__(self):
     t = Texttable()
     t.add_rows(self.board, [])
     return t.draw()
Example #38
 def __print_table(self):
     board = self.__controller.getTable()
     table = Texttable()
     table.add_rows(board)
     print(table.draw())
Example #39
def updatedataToUser(data: list):
    flag_outfordelivery = False
    #######Print tabulated##########
    if len(data) > 1:
        print("################ URL load time----> ",
              data[0].loaded_time_of_url_human_readable, " ################")

    tabulatedarray = []
    tabulatedarray.append([
        "Progress load time", "Order name", "Progress" + " & " + "Load status",
        "status"
    ])

    for order in data:
        if order.is_out_for_deliver:
            flag_outfordelivery = True
        """
        self.order_id = order_id
        self.order_name = order_product_name  ### we are not using this
        self.tracking_url = order_url
        self.loaded_time_of_url = loaded_time_of_url
        ######################
        self.loaded_time_of_url_human_readable = self.get_mili_to_date(self.loaded_time_of_url)
        #####################
        self.where_to_deliver = where_to_deliver

        self.track_progress = []
        self.has_error_loading = True
        self.loaded_time = 0
        self.loaded_time_human_readable = 0
        """
        order: Order = order

        ######pruductname######
        names = ""
        length = 15
        flagcounter = 0
        for prductname in order.order_name:
            flagcounter += 1
            trunicated_name = (
                prductname[:length] +
                '......') if len(prductname) > length else prductname
            names = f"{names} {flagcounter}){trunicated_name}   \n"
        #####productptractprogress######
        progressstr = ""
        for attribute, value in order.track_progress.items():
            if value:
                progressstr = f"{progressstr}{attribute} [X]---->"
            else:
                progressstr = f"{progressstr}{attribute} [ ]---->"
        order_last_location = "\n".join(order.last_tract_location)
        if len(progressstr) > 4:
            progressstr = progressstr[:-5]

        orderDetail = [
            order.loaded_time_human_readable, names,
            progressstr + "\n" + order_last_location,
            str(not order.has_error_loading)
        ]
        tabulatedarray.append(orderDetail)
    t = Texttable()
    t.add_rows(tabulatedarray)
    t.set_cols_width([19, 27, 90, 6])
    t.set_cols_align(["c", "c", "c", "c"])
    t.set_cols_valign(["m", "m", "m", "m"])
    print(t.draw())
Example #40
 def display_table(column_count, max_width, matrix):
     table = Texttable(max_width=max_width)
     # make all the column types be text
     table.set_cols_dtype(['t']*column_count)
     table.add_rows(matrix)
     return table.draw()
def print_flights_table(flights_list, header):
    table_for_suitable_flights = Texttable(max_width=100)
    table_for_suitable_flights.header(header)
    table_for_suitable_flights.add_rows(flights_list, header=False)
    print(table_for_suitable_flights.draw())
    def handle(self, *args, **options):
        try:
            d = date.today()
            if options['start_date']:
                try:
                    d = datetime.strptime(options['start_date'],
                                          '%Y-%m-%d').date()
                except ValueError:
                    raise CommandError(
                        'Invalid date value: {} (use format YYYY-MM-DD)'.
                        format(options['start_date']))

            emails = None
            if options['emails']:
                try:
                    emails = options['emails'].split(',')
                except ValueError:
                    raise CommandError(
                        'Invalid emails value: {} (use comma-separated string)'
                        .format(options['emails']))

            start_date = datetime.combine(d, datetime.min.time()).astimezone(
                timezone(settings.TIME_ZONE))
            end_date = start_date + timedelta(days=options['days'])

            if 'all_rfcs' in options and options['all_rfcs']:
                rfcs = ChangeRequest.objects.filter(
                    planned_start__range=[start_date, end_date]).order_by(
                        'planned_start')
            elif 'scheduled' in options and options['scheduled']:
                rfcs = ChangeRequest.objects.filter(
                    planned_start__range=[start_date, end_date],
                    status=2).order_by('planned_start')
            elif 'ready' in options and options['ready']:
                rfcs = ChangeRequest.objects.filter(
                    planned_start__range=[start_date, end_date],
                    status=3).order_by('planned_start')
            else:
                rfcs = ChangeRequest.objects.filter(
                    planned_start__range=[start_date, end_date],
                    status__in=[2, 3]).order_by('planned_start')

            if Site.objects.filter(name='Change Requests').exists():
                domain = Site.objects.get(name='Change Requests').domain
            else:
                domain = Site.objects.get_current().domain
            if domain.startswith('http://'):
                domain = domain.replace('http', 'https')
            if not domain.startswith('https://'):
                domain = 'https://' + domain

            # Construct the HTML and plaintext email content to send.
            context = {
                'start': start_date,
                'end': end_date,
                'object_list': rfcs,
                'domain': domain,
            }
            html_content = render_to_string(
                'registers/email_cab_rfc_calendar.html', context)

            table = Texttable(max_width=0)
            table.set_cols_dtype(['i', 't', 't', 't', 't', 't', 't', 't'])
            rows = [[
                'Change ref', 'Title', 'Change type', 'Status', 'Requester',
                'Endorser', 'Implementer', 'Planned start & end'
            ]]
            for rfc in rfcs:
                # Planned end date field might be blank.
                planned_end = rfc.planned_end.strftime(
                    '%A, %d-%b-%Y %H:%M') if rfc.planned_end else ''
                rows.append([
                    rfc.pk, rfc.title,
                    rfc.get_change_type_display(),
                    rfc.get_status_display(),
                    rfc.requester.get_full_name() if rfc.requester else '',
                    rfc.endorser.get_full_name() if rfc.endorser else '',
                    rfc.implementer.get_full_name() if rfc.implementer else '',
                    '{}\n{}'.format(
                        rfc.planned_start.strftime('%A, %d-%b-%Y %H:%M'),
                        planned_end)
                ])
            table.add_rows(rows, header=True)
            text_content = table.draw()

            subject = 'Change calendar starting {}'.format(
                start_date.strftime('%A, %d %b %Y'))
            recipients = []

            # Email the CAB members group.
            if options['cab_members']:
                if not Group.objects.filter(name='CAB members').exists():
                    raise CommandError('"CAB members" group does not exist.')
                cab = Group.objects.get(name='CAB members')
                recipients = recipients + list(
                    User.objects.filter(groups__in=[cab],
                                        is_active=True).values_list('email',
                                                                    flat=True))

            # Additional email recipients.
            if emails:
                recipients = recipients + emails

            recipients = set(recipients)

            msg = EmailMultiAlternatives(
                subject=subject,
                body=text_content,
                from_email=settings.NOREPLY_EMAIL,
                to=recipients,
            )
            msg.attach_alternative(html_content, 'text/html')
            msg.send()
        except Exception as ex:
            error = 'IT Assets email RFC calendar raised an exception at {}'.format(
                datetime.now().astimezone(timezone(
                    settings.TIME_ZONE)).isoformat())
            text_content = 'Exception:\n\n{}'.format(ex)
            if not settings.DEBUG:
                # Send an email to ADMINS.
                msg = EmailMultiAlternatives(
                    subject=error,
                    body=text_content,
                    from_email=settings.NOREPLY_EMAIL,
                    to=settings.ADMINS,
                )
                msg.send()
            raise CommandError(error)
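# A minimal, stand-alone sketch (not part of the command above) of the Texttable
# settings it relies on: max_width=0 disables wrapping, the 'i' dtype keeps the
# change reference numeric, and add_rows(..., header=True) treats the first row
# as the header. The row values here are made up for illustration.
from texttable import Texttable

demo = Texttable(max_width=0)
demo.set_cols_dtype(['i', 't', 't'])
demo.add_rows([
    ['Change ref', 'Title', 'Planned start & end'],
    [1234, 'Patch application servers', 'Monday, 01-Jan-2024 18:00\nMonday, 01-Jan-2024 20:00'],
], header=True)
print(demo.draw())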
Example #43
0
    def _compute_and_display_flops_binarization_rate(self):
        net = self._model
        weight_list = {}
        state_dict = net.state_dict()
        for n, v in state_dict.items():
            weight_list[n] = v.clone()

        ops_dict = OrderedDict()

        def get_hook(name):
            def compute_flops_hook(self, input_, output):
                name_type = str(type(self).__name__)
                if isinstance(self, (nn.Conv2d, nn.ConvTranspose2d)):
                    ks = self.weight.data.shape
                    ops_count = ks[0] * ks[1] * ks[2] * ks[3] * output.shape[
                        3] * output.shape[2]
                elif isinstance(self, nn.Linear):
                    ops_count = input_[0].shape[1] * output.shape[1]
                else:
                    return
                ops_dict[name] = (name_type, ops_count,
                                  isinstance(self, NNCFConv2d))

            return compute_flops_hook

        hook_list = [
            m.register_forward_hook(get_hook(n))
            for n, m in net.named_modules()
        ]

        net.do_dummy_forward(force_eval=True)

        for h in hook_list:
            h.remove()

        # restore all parameters that could be corrupted during the forward pass
        for n, v in state_dict.items():
            state_dict[n].data.copy_(weight_list[n].data)

        ops_bin = 0
        ops_total = 0

        for layer_name, (layer_type, ops, is_binarized) in ops_dict.items():
            ops_total += ops
            if is_binarized:
                ops_bin += ops

        table = Texttable()
        header = [
            "Layer name", "Layer type", "Binarized", "MAC count", "MAC share"
        ]
        table_data = [header]

        for layer_name, (layer_type, ops, is_binarized) in ops_dict.items():
            drow = {h: 0 for h in header}
            drow["Layer name"] = layer_name
            drow["Layer type"] = layer_type
            drow["Binarized"] = 'Y' if is_binarized else 'N'
            drow["MAC count"] = "{:.3f}G".format(ops * 1e-9)
            drow["MAC share"] = "{:2.1f}%".format(ops / ops_total * 100)
            row = [drow[h] for h in header]
            table_data.append(row)

        table.add_rows(table_data)
        nncf_logger.info(table.draw())
        nncf_logger.info("Total binarized MAC share: {:.1f}%".format(
            ops_bin / ops_total * 100))
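# Stand-alone sketch of the hook-based MAC counting idea used above, on a single
# Conv2d layer (NNCF specifics omitted): register a forward hook, run one dummy
# forward pass, then remove the hook. The count follows the same formula as the
# method above: out_ch * in_ch * kH * kW * out_H * out_W.
import torch
import torch.nn as nn

counts = {}

def make_hook(name):
    def hook(module, inputs, output):
        k = module.weight.shape  # (out_ch, in_ch, kH, kW)
        counts[name] = k[0] * k[1] * k[2] * k[3] * output.shape[2] * output.shape[3]
    return hook

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
handle = conv.register_forward_hook(make_hook('conv'))
conv(torch.randn(1, 3, 16, 16))
handle.remove()
print(counts)  # {'conv': 8 * 3 * 3 * 3 * 16 * 16}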
Example #44
0
def print_store(store):
    table = Texttable()
    table.header(store.columns)
    table.add_rows(store.rows, header=False)
    print(table.draw() + "\n")
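# Hypothetical usage sketch: print_store() only assumes an object exposing
# 'columns' and 'rows', so a namedtuple works as a stand-in store.
from collections import namedtuple

Store = namedtuple('Store', ['columns', 'rows'])
print_store(Store(columns=['key', 'value'], rows=[['alpha', 1], ['beta', 2]]))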
        gm = np.average(arr_gm)
        precision = np.average(arr_p)
        recall = np.average(arr_r)

        skor = gaus.score(X_test, y_test)
        scores = cross_val_score(clf, X_test, y_test, cv=3,
                                 scoring='accuracy')  # 3-fold cross validation
        f1_macro = cross_val_score(
            clf, X_test, y_test, cv=3,
            scoring='f1_macro')  # 3-fold cross validation
        data_table.append([
            'ADASYN',
            str(X_adasyn.shape),
            str(X_train.shape),
            str(X_test.shape),
            "%0.4f" % (akurasi * 100),
            "%0.4f" % (fm * 100),
            "%0.4f" % (precision * 100),
            "%0.4f" % (recall * 100),
            "%0.4f" % (gm * 100)
        ])

        #DRAW TABLE-----------------------------------------------------------------------------------------------------------------------
        table = Texttable()
        # table.set_deco(Texttable.HEADER)
        table.set_cols_dtype(['t', 't', 't', 't', 't', 't', 't', 't',
                              't'])  # automatic
        table.set_cols_align(["l", "r", "r", "r", "r", 'r', "r", 'r', 'r'])
        table.add_rows(data_table)
        print table.draw()
def subproject3(
    inverted_index, sorted_document_tokens, document_list
):  # subproject3() method to build the lossy dictionary compression techniques of Table 5.1

    #------------------------------------------------------------------------------------------------
    # REMOVING NUMBERS
    #------------------------------------------------------------------------------------------------

    total_token_count = len(
        inverted_index
    )  # getting the total no of tokens generated i.e. unfiltered tokens

    non_positional_positing = sum(len(v) for v in inverted_index.values(
    ))  # getting the value of the non positional postings

    remove_numbers = []  # list to store the tokens after removing the no.
    for j in sorted_document_tokens:
        if not (j[0].isnumeric()):  # if the token is not numeric
            token_tple = (j[0], j[1])
            remove_numbers.append(token_tple)  # append the tokens to the list

    inverted_index_without_no = {}  #building the inverted index
    for rm in remove_numbers:

        if rm[0] in inverted_index_without_no:  # if the term exists in the index
            temp_posting_list = inverted_index_without_no[rm[0]]
            temp_posting_list.append(
                rm[1])  # append the id to the postings list
        else:
            posting_list = list()  # if the term is encountered for the first time
            posting_list.append(rm[1])  # create a list and id
            inverted_index_without_no[
                rm[0]] = posting_list  # create the key and assign the doc id

    total_token_without_no = len(
        inverted_index_without_no)  # calculating the distinct tokens
    non_positional_positing_without_no = sum(
        len(v) for v in inverted_index_without_no.values()
    )  # calculating the non positional index value

    # ------------------------------------------------------------------------------------------------
    # CASE FOLDING
    # ------------------------------------------------------------------------------------------------

    document_dicts = []  # creating the document list

    # iterating over the input
    for i in document_list:
        # setting the html.parse
        soup = BeautifulSoup(i, 'html.parser')
        # finding the text tag
        text_tag = soup.find('text')

        # checking the type of text to get only meaningful documents
        if (text_tag.get('type') != 'BRIEF') and (text_tag.get('type') !=
                                                  'UNPROC'):
            reuters = soup.find('reuters')
            # getting the id of the news document
            ids = reuters.get('newid')
            # parsing the id into int
            id = int(ids)
            # getting the date of the document
            date = soup.find('date').string
            # getting the title of the document
            title = soup.find('title').string
            # getting the body of the document
            body = soup.find('body')
            # building a string of useful data
            temp_text = date + ' ' + title + ' ' + body.get_text()

            # print(temp_text)
            punctuations = '''!()[]{};:'"\,<>./?#$%^|&*_+~\u0005\u0005\u0005'''

            for char in temp_text:  # removing the punctuation from the text
                if char in punctuations:
                    temp_text = temp_text.replace(char, " ")

            text = nltk.word_tokenize(temp_text)  # case folding the terms
            text = [i.lower() for i in text]

            for j in text:
                if j != '\x03' and j != '\u0005\u0005\u0005' and j != '' and not (
                        j.isdigit()):  # removing the no. and unicode
                    token_tuple = (j, id)
                    document_dicts.append(token_tuple)

    sorted_document_tokens_case_folding = list(
        set(document_dicts))  # getting the list of unique tokens

    sorted_document_tokens_case_folding = sorted(
        set(sorted_document_tokens_case_folding)
    )  # getting the sorted list of tokens

    inverted_index_cf = {}  # building the indexer
    for sorted_tokens_cf in sorted_document_tokens_case_folding:  # iterating the tokens

        if sorted_tokens_cf[0] in inverted_index_cf:  # if the key (term) exists
            temp_posting_list = inverted_index_cf[sorted_tokens_cf[0]]
            temp_posting_list.append(
                sorted_tokens_cf[1]
            )  # assign the doc id to the existing posting list
        else:
            posting_list = list()  # if the term doesn't exist
            posting_list.append(sorted_tokens_cf[1]
                                )  # creating a list and assigning the doc id
            inverted_index_cf[sorted_tokens_cf[
                0]] = posting_list  # creating the dictionary key and assigning the list

    total_token_case_folding = len(
        inverted_index_cf)  # calculating the distinct tokens
    non_positional_positing_case_folding = sum(
        len(v) for v in inverted_index_cf.values()
    )  # calculating the non positional index values

    # ------------------------------------------------------------------------------------------------
    # REMOVAL OF 30 STOP WORDS
    # ------------------------------------------------------------------------------------------------

    with open("first30.txt"
              ) as f:  # reading the 30 stop words and storing in the list
        global content_first30
        content_first30 = f.read().splitlines()

    stop_words30 = [
    ]  # list to store the tokens after removing the 30 stop words
    for i in sorted_document_tokens_case_folding:
        if i[0] not in content_first30:  # if the term is not a stop word, generate the token
            token_stop_words_30_tuple = (i[0], i[1]
                                         )  # generating the token tuple
            stop_words30.append(token_stop_words_30_tuple)
    inverted_index_stop_words_30 = {}  # dictionary to build the index
    for sw30 in stop_words30:
        if sw30[0] in inverted_index_stop_words_30:  # if the term key exist in the posting list
            temp_posting_list = inverted_index_stop_words_30[sw30[0]]
            temp_posting_list.append(
                sw30[1])  # append the doc id to the posting list
        else:
            posting_list = list(
            )  # if the key-term is encountered for the first time, create the list
            posting_list.append(sw30[1])  # append the doc id to the new list
            inverted_index_stop_words_30[sw30[
                0]] = posting_list  # create the key term and assign the posting list

    total_token_stop_words_30 = len(
        inverted_index_stop_words_30
    )  # calculating the count of distinct tokens
    non_positional_positing_stop_words_30 = sum(
        len(v) for v in inverted_index_stop_words_30.values()
    )  # calculating the count of non positional index values

    # ------------------------------------------------------------------------------------------------
    # REMOVAL OF 150 STOP WORDS
    # ------------------------------------------------------------------------------------------------

    with open("first150.txt") as f:  # reading the list of 150 stop words
        global content_first150
        content_first150 = f.read().splitlines(
        )  # storing the list of 150 stop words into the list

    stop_words150 = []
    for i in sorted_document_tokens_case_folding:
        if i[0] not in content_first150:  # removing the 150 stop words from the list
            token_stop_words_150_tuple = (i[0], i[1])  # generating the tuples
            stop_words150.append(token_stop_words_150_tuple)
    inverted_index_stop_words_150 = {}  # building the index
    for sw150 in stop_words150:
        if sw150[
                0] in inverted_index_stop_words_150:  # check if the term exists in the index
            temp_posting_list = inverted_index_stop_words_150[sw150[0]]
            temp_posting_list.append(
                sw150[1])  # append the doc id to the posting list
        else:
            posting_list = list(
            )  # if the key doesn't exist, create an empty list
            posting_list.append(sw150[1])
            inverted_index_stop_words_150[sw150[
                0]] = posting_list  # assign the key-term to the dictionary and docid

    #print(len(inverted_index_stop_words_150))
    total_token_stop_words_150 = len(
        inverted_index_stop_words_150)  # calculating the distinct tokens
    non_positional_positing_stop_words_150 = sum(
        len(v) for v in inverted_index_stop_words_150.values()
    )  # calculating the non positional index values
    #print(non_positional_positing_stop_words_150)

    # ------------------------------------------------------------------------------------------------
    # PORTER STEMMER
    # ------------------------------------------------------------------------------------------------

    document_dicts_ps = []  # list to store the tokens

    # iterating over the input
    for i in document_list:
        # setting the html.parse
        soup = BeautifulSoup(i, 'html.parser')
        # finding the text tag
        text_tag = soup.find('text')

        # checking the type of text to get only meaningful documents
        if (text_tag.get('type') != 'BRIEF') and (text_tag.get('type') !=
                                                  'UNPROC'):
            reuters = soup.find('reuters')
            # getting the id of the news document
            ids = reuters.get('newid')
            # parsing the id into int
            id = int(ids)
            # getting the date of the document
            date = soup.find('date').string
            # getting the title of the document
            title = soup.find('title').string
            # getting the body of the document
            body = soup.find('body')
            # building a string of useful data
            temp_text = date + ' ' + title + ' ' + body.get_text()

            # print(temp_text)
            punctuations = '''!()[]{};:'"\,<>./?#$%^|&*_+~\u0005\u0005\u0005'''

            for char in temp_text:  # removing the punctuation from the text
                if char in punctuations:
                    temp_text = temp_text.replace(char, " ")

            text = nltk.word_tokenize(temp_text)
            text = [i.lower() for i in text]  # case folding of the text

            ps = PorterStemmer()  # Porter Stemmer to stem the tokens

            for j in text:
                if j != '\x03' and j != '\u0005\u0005\u0005' and j != '' and not (
                        j.isdigit()
                ) and j not in content_first30 and j not in content_first150:
                    token_tuple = (ps.stem(j), id
                                   )  # stemming; stop words and numbers removed
                    document_dicts_ps.append(token_tuple)

    sorted_document_tokens_ps = list(
        set(document_dicts_ps))  # generating the unique tokens

    sorted_document_tokens_ps = sorted(
        set(sorted_document_tokens_ps))  # sorting the unique tokens

    inverted_index_ps = {}  # building the indexer
    for sorted_tokens_ps in sorted_document_tokens_ps:

        if sorted_tokens_ps[
                0] in inverted_index_ps:  # if the token exists in the indexer
            temp_posting_list = inverted_index_ps[
                sorted_tokens_ps[0]]  # get the postings list
            temp_posting_list.append(
                sorted_tokens_ps[1])  # assign the doc id to the posting list
        else:
            posting_list = list(
            )  # if the key doesn't exist in the indexer, create new list
            posting_list.append(sorted_tokens_ps[1])  # append the doc id
            inverted_index_ps[sorted_tokens_ps[
                0]] = posting_list  # create a key in the dictionary and assign the newly created posting list

    total_token_porter_stemmer = len(
        inverted_index_ps)  # calculate the distinct tokens
    non_positional_positing_porter_stemmer = sum(
        len(v) for v in inverted_index_ps.values()
    )  # calculating the non positional indexer values

    t = Texttable()  # building the Table 5.1
    main_row = [[
        '', 'Distinct terms', '', '', '', 'Non positional postings', ''
    ]]

    row = ['', 'number', '\u0394%', 'T%', 'number', '\u0394%', 'T%']
    main_row.append(row)

    row = [
        'unfiltered', total_token_count, '', '', non_positional_positing, '',
        ''
    ]

    main_row.append(row)
    cumulative_cost = 0
    cumulative_cost_np = 0
    temp = 100 * (total_token_count -
                  total_token_without_no) / total_token_count
    #print(temp, "%")
    cumulative_cost += temp
    np = 100 * (non_positional_positing -
                non_positional_positing_without_no) / non_positional_positing
    cumulative_cost_np += np
    row = [
        'no numbers', total_token_without_no, -temp, -cumulative_cost,
        non_positional_positing_without_no, -np, -cumulative_cost_np
    ]
    main_row.append(row)
    #print("Cumulative", cumulative_cost)
    temp = 100 * (total_token_without_no -
                  total_token_case_folding) / total_token_without_no
    #print(temp, "%")
    cumulative_cost += temp
    np = 100 * (non_positional_positing_without_no -
                non_positional_positing_case_folding
                ) / non_positional_positing_without_no
    cumulative_cost_np += np
    row = [
        'case folding', total_token_case_folding, -temp, -cumulative_cost,
        non_positional_positing_case_folding, -np, -cumulative_cost_np
    ]
    main_row.append(row)
    #print("Cumulative", cumulative_cost)
    temp = 100 * (total_token_case_folding -
                  total_token_stop_words_30) / total_token_case_folding
    #print(temp, "%")
    cumulative_cost += temp
    np = 100 * (non_positional_positing_case_folding -
                non_positional_positing_stop_words_30
                ) / non_positional_positing_case_folding
    cumulative_cost_np += np
    row = [
        '30 stop words', total_token_stop_words_30, -temp, -cumulative_cost,
        non_positional_positing_stop_words_30, -np, -cumulative_cost_np
    ]
    main_row.append(row)
    #print("Cumulative", cumulative_cost)
    temp = 100 * (total_token_stop_words_30 -
                  total_token_stop_words_150) / total_token_stop_words_30
    #print(temp, "%")
    cumulative_cost += temp
    np = 100 * (non_positional_positing_stop_words_30 -
                non_positional_positing_stop_words_150
                ) / non_positional_positing_stop_words_30
    cumulative_cost_np += np
    row = [
        '150 stop words', total_token_stop_words_150, -temp, -cumulative_cost,
        non_positional_positing_stop_words_150, -np, -cumulative_cost_np
    ]
    main_row.append(row)
    #print("Cumulative", cumulative_cost)
    temp = 100 * (total_token_stop_words_150 -
                  total_token_porter_stemmer) / total_token_stop_words_150
    #print(temp, "%")
    cumulative_cost += temp
    np = 100 * (non_positional_positing_stop_words_150 -
                non_positional_positing_porter_stemmer
                ) / non_positional_positing_stop_words_150
    cumulative_cost_np += np
    row = [
        'stemming', total_token_porter_stemmer, -temp, -cumulative_cost,
        non_positional_positing_porter_stemmer, -np, -cumulative_cost_np
    ]
    main_row.append(row)
    #print("Cumulative", cumulative_cost)
    t.add_rows(main_row)
    print(t.draw())  # printing the table
    f = open("Table.txt", "w")  # storing the output of the table
    f.write(t.draw())
    f.close()
    json.dump(
        inverted_index_ps,
        open("invertedIndex_subproject3.json", "w", encoding="utf-8")
    )  # storing the newly created index into invertedIndex_subproject3.json
    return inverted_index_ps
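# Worked sketch (illustrative numbers, not taken from the collection) of the
# delta/cumulative bookkeeping used to fill Table 5.1 above: each step reports
# its reduction relative to the previous step, and T% accumulates those deltas.
def reduction(before, after):
    return 100 * (before - after) / before

unfiltered, without_numbers = 100000, 92000
delta = reduction(unfiltered, without_numbers)  # 8.0, shown as -8 in the table
cumulative = delta                              # T% after the first step
print(-delta, -cumulative)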
Example #47
0
from itertools import combinations
from texttable import Texttable

l = []
a = int(input("Enter N:"))
b = int(input("Enter K:"))
for j in range(1, a + 1):
    l.append(j)
lsi = []
for i in combinations(l, b):
    lsi.append(list(i))
    # print(i)
t = Texttable()
t.add_rows(lsi)
print(t.draw())
print("Combination lists number ", len(lsi))

print("Thank you for using\nMuhammadali Hakimov!")
Example #48
0
def show_results(keras_lists, kerasmin_lists, model_name):
    """
    :param keras_lists: is a tuple of (first_predictions_list, second_predictions_list, load_model_keras, output_keras)
    :param kerasmin_lists: is a tuple of (first_predictions_list, second_predictions_list, load_model_kerasmin, output_kerasmin)
    :return: printing the table of results and showing the plots
    """
    # keras
    first_predictions_list_keras = keras_lists[0]
    second_predictions_list_keras = keras_lists[1]
    load_model_keras = keras_lists[2]
    output_keras = keras_lists[3]

    # kerasmin
    first_predictions_list_kerasmin = kerasmin_lists[0]
    second_predictions_list_kerasmin = kerasmin_lists[1]
    load_model_kerasmin = kerasmin_lists[2]
    output_kerasmin = kerasmin_lists[3]

    # print table of results
    t = Texttable()
    t.add_rows([[
        'Name of model: ' + model_name, 'first_predictions_time_average',
        'second_predictions_time_average', 'load_model_time_average',
        'prediction_output'
    ],
                [
                    'Keras',
                    mean(first_predictions_list_keras),
                    mean(second_predictions_list_keras),
                    mean(load_model_keras), output_keras[0]
                ],
                [
                    'Kerasmin',
                    mean(first_predictions_list_kerasmin),
                    mean(second_predictions_list_kerasmin),
                    mean(load_model_kerasmin), output_kerasmin[0]
                ]])
    print(t.draw())

    f = open(
        Path(__file__).parent.absolute() / "tables_results" /
        (model_name + ".txt"), "w")
    f.write(t.draw())
    f.close()

    # show plots
    dictio_first_deployment = {
        "Numpy": first_predictions_list_kerasmin,
        "keras": first_predictions_list_keras
    }
    dictio_second_deployment = {
        "Numpy": second_predictions_list_kerasmin,
        "keras": second_predictions_list_keras
    }
    dictio_loading_model = {
        "Numpy": load_model_kerasmin,
        "keras": load_model_keras
    }

    # first_deployment
    dist_plot(dictio_first_deployment, model_name, "first_deployment")
    line_plot(dictio_first_deployment, model_name, "first_deployment")

    # second_deployment
    dist_plot(dictio_second_deployment, model_name, "second_deployment")
    line_plot(dictio_second_deployment, model_name, "second_deployment")

    # loading_model
    dist_plot(dictio_loading_model, model_name, "loading_model")
    line_plot(dictio_loading_model, model_name, "loading_model")
from Programming.Analysis import pca_svm_results_analysis
from texttable import Texttable

all = pca_svm_results_analysis.get_results()

parm = (False, False, True, True)
values = all[parm]
values = sorted(values)

i = 0
for v in values:
    v.append(i)
    i += 1

table = Texttable()
cols = ['Val Err', 'Compon.', 'C', 'gamma', 'kernel', 'Time', 'Index', 'Or. Ind']
table.add_rows([cols] + values)
print(table.draw())
indices = [0, 14, 22, 29, 38]
indices = sorted(indices)
values_selected = []
for x in indices:
    values_selected += [values[x]]

values_selected = values[21:]

print('\\begin{center}')
print('\\begin{table}')
print('\\begin{tabular}{ | l | l | l | l | l | l | l |}')
print('\\hline')
print('Index & Components & C & Gamma & Kernel f. & Time & Val. error \\\\ ')
for tuple in values_selected:
Example #50
0
    def print_output(self, final_output):
        t = Texttable()
        t.add_rows(final_output)
        print t.draw()
Example #51
0
               'iligent,', 'from', 'hi', 's', 'earl', 'iest', 'gramm', 'ar', 'c', 'lasse', 's', 'he', '\x19s', '',
               'kept', 'a', 'lit']
    fruits2 = ['Introduction', 'from', ':', 'Distinction', ':', 'A', 'Social', 'Critique', 'of', 'the', 'Judgement',
               'of', 'Taste', 'by', 'Pierre', 'B', 'our', 'dieu', '1984', 'Introduction', 'You', 'said', 'it', ',',
               'my', 'good', 'knight', 'There', 'ought', 'to', 'be', 'laws', 'to', 'protect', 'the', 'body', 'of',
               'acquired', 'knowledge', '.', 'Take', 'one', 'of', 'our', 'good', 'pupils', ',', 'for', 'example', ':',
               'modest', 'and', 'diligent', ',', 'from', 'his', 'earliest', 'grammar', 'classes', 'he', 's', 'kept',
               'a', 'lit']
    alignment = list(zip(*needle(fruits1, fruits2)))
    print(alignment_table(alignment, fruits1, fruits2))
    print(f"len 1 {len(fruits1)}  len 2 {len(fruits2)}")

    fruits1 = ["con", "sump", "t", "ion", "orange", "pear", "apple", "x,y,z", "pear", "orange", "consumption"]
    fruits2 = ["consumption", "pear", "apple", "x,", "y,", "z", "con", "sump", "t", "ion"]
    alignment = list(zip(*needle(fruits1, fruits2)))
    print()
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_align(["c", "l", "r", "l", "r"])
    table.add_rows([['i', 'w1', 'w2', 'i1', 'i2']] + [[i, t, w, x, y] for i, (t, w, x, y)
                                                      in enumerate(alignment)])
    print(table.draw())
    print(f"len 1 {len(fruits1)}  len 2 {len(fruits2)}")

if __name__ == "__main__":
    import doctest

    doctest.NORMALIZE_WHITESPACE = True
    doctest.testmod()
Example #52
0
class TextTableWrapper(SimpleObject):
    LEFT: ClassVar[str] = 'l'
    RIGHT: ClassVar[str] = 'r'
    CENTER: ClassVar[str] = 'c'

    TOP: ClassVar[str] = 't'
    BOTTOM: ClassVar[str] = 'b'
    MIDDLE: ClassVar[str] = 'm'

    STR: ClassVar[str] = 't'
    FLOAT: ClassVar[str] = 'f'
    EXP: ClassVar[str] = 'e'
    INT: ClassVar[str] = 'i'
    AUTO: ClassVar[str] = 'a'

    data: Iterable
    col_align: Iterable[str] = None
    col_valign: Iterable[str] = None
    col_dtype: Iterable[str] = None
    deco: int = None

    max_width: int = 80

    def __post_init__(self):
        self.data = arr(self.data)
        self._table = Texttable(max_width=self.max_width)
        self._table.add_rows(self.data)
        if self.col_align:
            self._table.set_cols_align(self.col_align)
        if self.col_valign:
            self._table.set_cols_valign(self.col_valign)
        if self.col_dtype:
            self._table.set_cols_dtype(self.col_dtype)
        if self.deco:
            self._table.set_deco(self.deco)

    def str(self):
        self._table.set_max_width(self.max_width)
        return self._table.draw()

    @staticmethod
    def example1():
        return TextTableWrapper(
            data=[["Name", "Age", "Nickname"], ["Mr. Xavier Huon", 32, "Xav'"],
                  ["Mr. Baptiste Clement", 1, "Baby"],
                  ["Mme. Louise Bourgeau", 28, "Lou, Loue"]],
            col_align=[
                TextTableWrapper.LEFT, TextTableWrapper.RIGHT,
                TextTableWrapper.CENTER
            ],
            col_valign=[
                TextTableWrapper.TOP, TextTableWrapper.MIDDLE,
                TextTableWrapper.BOTTOM
            ])

    @staticmethod
    def example2():
        return TextTableWrapper(data=[
            ["text", "float", "exp", "int", "auto"],
            ["abcd", "67", 654, 89, 128.001],
            ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
            ["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
            ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]
        ],
                                col_align=[
                                    TextTableWrapper.LEFT,
                                    TextTableWrapper.RIGHT,
                                    TextTableWrapper.RIGHT,
                                    TextTableWrapper.RIGHT,
                                    TextTableWrapper.LEFT
                                ],
                                col_dtype=[
                                    TextTableWrapper.STR,
                                    TextTableWrapper.FLOAT,
                                    TextTableWrapper.EXP, TextTableWrapper.INT,
                                    TextTableWrapper.AUTO
                                ],
                                deco=Texttable.HEADER)
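# Usage sketch for the wrapper above, assuming SimpleObject provides
# dataclass-style construction as the field declarations suggest: build the
# bundled examples and print their rendered tables.
print(TextTableWrapper.example1().str())
print(TextTableWrapper.example2().str())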
Example #53
0
def query():
    user_id = input("Please type in the ID of the user in question: ")

    # Keep asking for a user until the program is given a valid ID.
    while (user_id not in USER_DICT):
        user_id = input(
            "That user ID is not in the training set. Please pick a new one: ")

    # Keep track of the user given as well as the year given.
    user = USER_DICT[user_id]
    year = input("Please type in a year: ")

    a = [
    ]  # This will hold all movies and ratings found for the given year and user.

    for movie_id, movie in MOVIE_DICT.items():  # Iterate through each movie.
        # Only pull movies that are of the proper year and that the user has not yet watched.
        if movie.movie_year == year and movie_id not in user.ratings:
            # Calculate a rating for this movie.
            calc_rating = user.average_rating
            for userkey, comp_user in USER_DICT.items(
            ):  # Iterate through each user in USER_DICT
                if movie_id in comp_user.ratings:
                    # We found a match, but we need the weight between the two users.
                    weight = 0
                    for m, rating in comp_user.ratings.items():
                        if m != movie_id and m in user.ratings:
                            weight += (user.ratings[m] / user.norm_avg) * (
                                rating / comp_user.norm_avg)

                    calc_rating += weight * (comp_user.ratings[movie_id] -
                                             comp_user.average_rating)

            a.append((movie.movie_name, calc_rating))  # Append the movie to a.

    a.sort(key=lambda tup:
           (-tup[1], tup[0]))  # Sort them by rating and then by name.

    # Make a table for the data so it looks nice.
    t = Texttable()
    table_rows = [["Movie Name", "Expected Rating"]]

    for movie in a:
        # If the rating given is less than 1, clamp it to 1.
        if (movie[1] < 1):
            table_rows.append([movie[0], 1.0])
        # If the rating given is greater than 5, clamp it to 5.
        elif movie[1] > 5:
            table_rows.append([movie[0], 5.0])
        # Otherwise, just round.
        else:
            table_rows.append([movie[0], round(movie[1])])

    # Build and print the table.
    t.add_rows(table_rows)
    print(t.draw())

    # See if the user wants to query again.
    nextSelection = input("\nWould you like to query again? (y/n): ")

    # Make sure their choice is valid.
    while nextSelection != 'y' and nextSelection != 'n':
        print("That was not a valid response. Please try again.")
        nextSelection = input("\nWould you like to query again? (y/n): ")

    # Now either query again or return to the options menu.
    if nextSelection == 'y':
        query()
    else:
        classify()
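# Compact form of the clamp-and-round used when building table_rows above:
# predicted ratings are forced into the 1..5 range before display.
def clamp_rating(r):
    return float(min(5, max(1, round(r))))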
    def run(self, **kwargs) -> None:
        """
        Run a grid search.
        :param kwargs: Parameters to search over.
        :return: None.
        """
        # Overall results of grid search
        overall_accs = []
        overall_losses = []
        overall_params = []
        overall_filenames = []

        # Configure logging so output is saved to log file.
        sys.stdout = DualLogger(self.log_path)

        # Create all configs to search over
        all_configs = self._create_all_configs(kwargs)

        # Run grid search
        for i, config in enumerate(all_configs):
            print("")
            print("-- Configuration " + str(i + 1) + "/" +
                  str(len(all_configs)) + " --")
            self._print_config(config)

            accs = []
            losses = []
            trained_models = []
            for r in range(self.repeats):
                print("Repeat " + str(r + 1) + "/" + str(self.repeats))
                acc, loss, model = self._train_model(config, **kwargs)
                accs.append(acc)
                losses.append(loss)
                trained_models.append(model)
                time.sleep(0.1)

            best_model_idx = int(np.argmin(losses))
            best_acc = accs[best_model_idx]
            best_loss = losses[best_model_idx]
            best_model = trained_models[best_model_idx]

            overall_accs.append(best_acc)
            overall_losses.append(best_loss)
            overall_params.append(config)

            print("Best Loss:", best_loss)
            print(" Best Acc:", best_acc)
            print(" Avg Loss:", np.mean(losses))
            print("  Avg Acc:", np.mean(accs))

            file_name = self._save_model(best_model, self.save_dir + "/all")
            overall_filenames.append(file_name)

        # Sort best models
        print("")
        print("--- Final Results ---")
        results = zip(
            overall_losses,
            overall_accs,
            overall_params,
            overall_filenames,
        )
        sorted_results = sorted(results, key=lambda x: x[0])

        # Create results table
        table = Texttable(max_width=0)
        table.set_cols_align(["c", "c", "c", "c", "c"])
        rows = [["Pos", "Loss", "Acc", "Params", "Filename"]]
        for i, r in enumerate(sorted_results):
            rows.append([i + 1, *r[:4]])
        table.add_rows(rows)
        table_output = table.draw()
        print(table_output)

        # Save results
        results_file = os.path.join(self.save_dir, "results.txt")
        with open(results_file, "w") as file:
            file.write(table_output)

        # Save
        src_path = os.path.join(self.save_dir, "all", sorted_results[0][3])
        dst_path = os.path.join(self.save_dir, "best.pth")
        copyfile(src_path, dst_path)

        # Upload
        ModelManager().upload_model(dst_path)
Example #55
0
def _banner(message):
    table = Texttable()
    table.set_deco(Texttable.BORDER)
    table.add_rows([[message]])
    return table.draw()
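# Usage sketch: with Texttable.BORDER only the outer frame is drawn, so the
# message is rendered inside a plain box.
print(_banner('deployment finished'))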
Example #56
0
        table.set_cols_valign(["t", "t", "t", "t"])
        table.add_rows(
            [["MEASURE", "BAND-I", "BAND-II", "BAND-III"],
             [
                 "UIQI", list_of_all_output[i - 2][0],
                 list_of_all_output[i - 1][0], list_of_all_output[i][0]
             ],
             [
                 "COVARIANCE", list_of_all_output[i - 2][1],
                 list_of_all_output[i - 1][1], list_of_all_output[i][1]
             ],
             [
                 "CORR_COEFF", list_of_all_output[i - 2][2],
                 list_of_all_output[i - 1][2], list_of_all_output[i][2]
             ],
             [
                 "ENTROPY", list_of_all_output[i - 2][3],
                 list_of_all_output[i - 1][3], list_of_all_output[i][3]
             ],
             [
                 "RMSE", list_of_all_output[i - 2][4],
                 list_of_all_output[i - 1][4], list_of_all_output[i][4]
             ],
             [
                 "RMEAN", list_of_all_output[i - 2][5],
                 list_of_all_output[i - 1][5], list_of_all_output[i][5]
             ],
             [
                 "PSNR", list_of_all_output[i - 2][6],
                 list_of_all_output[i - 1][6], list_of_all_output[i][6]
             ]])
def logged(station_id, station_name):
    while (True):
        cam = cv2.VideoCapture(0)
        cam.set(3, 640)  # set video width
        cam.set(4, 480)  # set video height

        face_detector = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')

        # For each person, enter one numeric face id
        while True:
            face_name = input('\n Enter Name : ')
            if face_name.isalpha():
                break
            print("\n[INFO] Please enter valid name")
        print("\n")
        if station_id != 12:
            print("To North :\n")
            mycursor = mydb.cursor()
            sql3 = "SELECT * FROM station where id>=%s"
            station = (station_id, )
            mycursor.execute(sql3, station)
            myresult = mycursor.fetchall()
            #for i in range(a,12):
            for x in myresult:
                #list.append("->")
                print("(" + str(x[0]) + ")" + x[1], end=" -> ")
                #break
            print("**Finished**", end=" ")
            print("\n")
        if station_id != 1:
            print("To South :\n")
            mycursor = mydb.cursor()
            sql3 = "SELECT * FROM station where id<=%s"
            station = (station_id, )
            mycursor.execute(sql3, station)
            myresult = mycursor.fetchall()
            #for i in range(a,12):
            for x in reversed(myresult):
                #list.append("->")
                print("(" + str(x[0]) + ")" + x[1], end=" -> ")
                #break
            print("**Finished**", end=" ")
        while (True):
            to_station = input('\n\nEnter To Station : ')
            if to_station.isdigit() and int(to_station) <= 12:
                if int(to_station) == int(station_id):
                    print(
                        "\n[INFO] Both Source And Destination Cannot Be Same")
                else:
                    break
            else:
                print("\n[INFO] Please enter valid station id")

        mycursor = mydb.cursor()
        dest = "SELECT name from station where id=%s"
        de = (to_station, )
        mycursor.execute(dest, de)
        myresult = mycursor.fetchall()
        for xy in myresult:
            to_station_name = xy[0]
        mycursor = mydb.cursor()
        sql = "INSERT INTO customer(name, fromstation,tostation) VALUES (%s, %s, %s)"
        val = (face_name, station_id, to_station)
        mycursor.execute(sql, val)
        mydb.commit()
        #print(mycursor.rowcount, "record inserted.")
        face_id = mycursor.lastrowid
        #a=int(station_id)
        #b=int(to_station)
        no = abs(int(station_id) - int(to_station))
        fare = str(no * 10)
        date1 = str(datetime.date.today())
        t = Texttable()
        t.add_rows(
            [['WELCOME TO KOCHI METRO \n\n ' + station_name + ' Station \t'],
             ['Id: Metro00' + str(face_id) + '\t\tDate : ' + date1],
             [
                 '\nName : ' + face_name.capitalize() + '  \n\nTo Station : ' +
                 str(to_station_name) + '\n'
             ], ['Total Fare   \t:  ' + fare + ' Rs']])
        print(t.draw())

        print(
            "\n [INFO] Initializing face capture. Look the camera and wait ..."
        )
        # Initialize individual sampling face count
        count = 0

        while (True):

            ret, img = cam.read()
            img = cv2.flip(img, 1)  # flip video image horizontally
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_detector.detectMultiScale(gray, 1.3, 5)

            #while faces:
            #print ("hai")
            #else:
            #  print ("not")

            for (x, y, w, h) in faces:

                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                count += 1

                # Save the captured image into the datasets folder
                cv2.imwrite(
                    "dataset/User." + str(face_id) + '.' + str(count) + ".jpg",
                    gray[y:y + h, x:x + w])

                cv2.imshow('image', img)

            k = cv2.waitKey(100) & 0xff  # Press 'ESC' to exit the video
            if k == 27:
                break
            elif count >= 30:  # Take 30 face samples and stop video
                break

        # Do a bit of cleanup
        print("\n [INFO] Image Captured Successfully")
        cam.release()
        cv2.destroyAllWindows()
        exit_key = input("Press Enter to continue or q to log out...")
        if exit_key == 'q':
            print("You have been successfully logged out successfully!")
            break
class Grid:

    def __init__(self):
        self.data = [[" ", " ", " ", " ", " ", " ", " ", " "] for i in range(8)]

    def __str__(self):
        self.table = Texttable()
        self.table.add_rows(self.data, header=False)
        return self.table.draw()

    def change(self):
        """
        Input: none
        Output: no return per se, but the data of the board is changed according to the stated rules;
        the neighbours are checked and, based on their values, the new board is created
        """
        data = deepcopy(self.data)
        for i in range(8):
            for j in range(8):
                #checking the neighbours
                contorlive = 0
                contordead = 0
                if i - 1 >= 0 and j - 1 >= 0:
                    if data[i - 1][j - 1] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                if i - 1 >= 0:
                    if data[i - 1][j] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                if j - 1 >= 0:
                    if data[i][j - 1] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                if i + 1 < 8 and j + 1 < 8:
                    if data[i + 1][j + 1] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                if i + 1 < 8:
                    if data[i + 1][j] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                if j + 1 < 8:
                    if data[i][j + 1] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                if i - 1 >= 0 and j + 1 < 8:
                    if data[i - 1][j + 1] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                if i + 1 < 8 and j - 1 >= 0:
                    if data[i + 1][j - 1] == "X":
                        contorlive += 1
                    else:
                        contordead += 1
                #based on the results, the current space is modified or not
                if data[i][j] == "X":
                    if contorlive > 3 or contorlive < 2:
                        self.data[i][j] = " "
                if data[i][j] == " ":
                    if contorlive == 3:
                        self.data[i][j] = "X"

    def readfromfile(self, fname):
        with open(fname, 'r') as f:
            lines = f.readlines()
            i = 0
            for line in lines:
                for j in range(8):
                    if line[j] == "+":
                        self.data[i][j] = " "
                    elif line[j] == "X":
                        self.data[i][j] = "X"
                i += 1

    def writetofile(self, fname):
        with open(fname, "w") as f:
            for line in self.data:
                for elem in line:
                    if elem == " ":
                        f.write("+")
                    else:
                        f.write(elem)
                f.write("\n")

    def readpattern(self, fname):
        """
        param fname: filename - string
        return: a list containing the patterns and their data
        The function reads the patterns from the given file and creates a list with
        the name of each pattern and its data
        """
        patterns = []
        with open(fname, "r") as f:
            lines = f.readlines()
            newlines = []
            for line in lines:
                newline = line.strip()
                newlines.append(newline)
            lines = newlines
            for i in range(len(lines)):
            #we identify the kind of pattern read
                if lines[i] == "block":
                    blockdata = []
                    line = [lines[i+1][0], lines[i+1][1]]
                    blockdata.append(line)
                    line = [lines[i + 2][0], lines[i + 2][1]]
                    blockdata.append(line)
                    patterns.append("block")
                    patterns.append(blockdata)
                if lines[i] == "tub":
                    tubdata = []
                    line = [lines[i + 1][0], lines[i + 1][1], lines[i + 1][2]]
                    tubdata.append(line)
                    line = [lines[i + 2][0], lines[i + 2][1], lines[i + 2][2]]
                    tubdata.append(line)
                    line = [lines[i + 3][0], lines[i + 3][1], lines[i + 3][2]]
                    tubdata.append(line)
                    patterns.append("tub")
                    patterns.append(tubdata)
                if lines[i] == "blinker":
                    blinkerdata = []
                    line = [lines[i + 1][0], lines[i + 1][1], lines[i + 1][2]]
                    blinkerdata.append(line)
                    line = [lines[i + 2][0], lines[i + 2][1], lines[i + 2][2]]
                    blinkerdata.append(line)
                    line = [lines[i + 3][0], lines[i + 3][1], lines[i + 3][2]]
                    blinkerdata.append(line)
                    patterns.append("blinker")
                    patterns.append(blinkerdata)
                if lines[i] == "beacon":
                    beacondata = []
                    line = [lines[i + 1][0], lines[i + 1][1], lines[i + 1][2],lines[i + 1][3]]
                    beacondata.append(line)
                    line = [lines[i + 2][0], lines[i + 2][1], lines[i + 2][2], lines[i + 2][3]]
                    beacondata.append(line)
                    line = [lines[i + 3][0], lines[i + 3][1], lines[i + 3][2], lines[i + 3][3]]
                    beacondata.append(line)
                    line = [lines[i + 4][0], lines[i + 4][1], lines[i + 4][2], lines[i + 4][3]]
                    beacondata.append(line)
                    patterns.append("beacon")
                    patterns.append(beacondata)
                if lines[i] == "spaceship":
                    spaceshipdata = []
                    line = [lines[i + 1][0], lines[i + 1][1], lines[i + 1][2], lines[i + 1][3]]
                    spaceshipdata.append(line)
                    line = [lines[i + 2][0], lines[i + 2][1], lines[i + 2][2], lines[i + 2][3]]
                    spaceshipdata.append(line)
                    line = [lines[i + 3][0], lines[i + 3][1], lines[i + 3][2], lines[i + 3][3]]
                    spaceshipdata.append(line)
                    line = [lines[i + 4][0], lines[i + 4][1], lines[i + 4][2], lines[i + 4][3]]
                    spaceshipdata.append(line)
                    patterns.append("spaceship")
                    patterns.append(spaceshipdata)
        return patterns
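# Minimal driver sketch for the Grid class above: seed a blinker by hand,
# advance one generation, and print both states (file I/O not exercised here).
g = Grid()
g.data[3][2] = g.data[3][3] = g.data[3][4] = "X"  # horizontal blinker
print(g)
g.change()
print(g)  # the blinker is now vertical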
Example #59
0
def query(
    date, edate, ctype, itype
):  # itype I/C; ctype=0 query sum; ctype=1 query dict; ctype=2 query detail only; ctype=3 query detail include dict
    if itype.upper() == 'I':
        stritype = "and qtype='I' "
    elif itype.upper() == 'C':
        stritype = "and qtype='C' "
    else:
        stritype = ''
    if ctype == 0:
        a = ['l']
        d = ['f']
        qsql = (
            "select sum(specific_amount) as Available_balance from financial where cdate<= '{0}' {1}"
        ).format(date, stritype)

    if ctype == 1:
        a = ['l', 'c', 'c', 'c', 'c']
        d = ['i', 't', 't', 'f', 'i']
        if edate:  # an end date was given: query a date range
            qsql = (
                "select id, cdate, qtype,specific_amount, cycle from financial_dict where cdate between '{0}' and '{1}' {2}"
            ).format(date, edate, stritype)

        else:  # no end date: query a single date
            qsql = (
                "select id, cdate, qtype,specific_amount, cycle from financial_dict where cdate='{0}' {1}"
            ).format(date, stritype)

    if ctype == 2:
        a = ['l', 'c', 'c', 'c']
        d = ['i', 't', 't', 'f']
        if (edate is None):
            qsql = (
                "select id,cdate, qtype,specific_amount from financial_detail where cdate='{0}'{1}"
            ).format(date, stritype)
        elif edate is not None:
            qsql = (
                "select id,cdate,qtype,specific_amount from financial_detail where cdate between '{0}' and '{1}'{2}"
            ).format(date, edate, stritype)

    if ctype == 3:
        a = ['l', 'l', 'l', 'c', 'c', 'c']
        d = ['i', 't', 't', 't', 't', 'f']
        if (edate is None):
            qsql = (
                "select id,dictid,detailid,cdate, qtype,specific_amount from financial where cdate='{0}'{1}"
            ).format(date, stritype)
        elif edate is not None:
            qsql = (
                "select id,dictid,detailid,cdate,qtype,specific_amount from financial where cdate between '{0}' and '{1}'{2}"
            ).format(date, edate, stritype)
    #print(qsql)

    col, data = sqlite(account, qsql)
    #print(col)
    #print(data)
    df = pandas.DataFrame(data=data, columns=col)
    tb = Texttable()
    tb.set_cols_align(a)
    #tb.set_cols_valign(['m','m','m','m','m'])
    tb.set_cols_dtype(d)
    tb.header(df.columns.tolist())
    tb.add_rows(df.values, header=False)
    print(tb.draw() + '\n=======*********************************=========')
    return (col, data)
Example #60
0
    x = float(wl[0])
    y = float(wl[1])
    target = int(wl[2])
    X1.append(x)
    X2.append(y)
    adder.append([x, y, target])
    buff.append([1, x, y])
    yt.append([target])

    if target == -1:
        target += 1

    result.append(target)
#    f.write(str(x)+'\t'+str(y)+'\t'+str(target))
#    f.write('\n')
t.add_rows(adder)
#f.close()
print(t.draw())


def calculateweights(x1, x2, w):
    return w[0] + w[1] * x1 + w[2] * x2


def train():
    converged = False
    weights = [0.0, 0.0, 0.0]
    iterations = 0
    print(X1, X2, weights)
    while (not converged):
        #iteration begin