def main():
    infile = raw_input('Input file name: ')
    if os.path.exists(infile):
        print '\n[!] Loading PCAP file. Please wait, it might take a while...'
        ips = sorted(set(p[IP].src for p in PcapReader(infile) if IP in p))

        total = len(ips)
        print '[!] Total number of IP addresses: %d\n' % total

        bar = Bar('Processing', max=total)
        for ip in ips:
            get_data(ip)
            bar.next()
        bar.finish()

        headers = ['IP', 'OWNER','COUNTRY', 'ORGANIZATION','SERVER','DESCRIPTION']
        print '\n\n'
        print tabulate(table,headers,tablefmt='grid')
        if exceptions:
            print '\nExceptions:'
            for e in exceptions:
                print '*\t%s' % e
            print '\n\n[!] Done.\n\n'
    else:
        print '[!] Cannot find file "%s"\n\tExiting...' % infile
        sys.exit()
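For reference, a minimal, self-contained sketch of the grid table the example above builds; the rows here are hypothetical stand-ins for what get_data() would collect:

from tabulate import tabulate

headers = ['IP', 'OWNER', 'COUNTRY', 'ORGANIZATION', 'SERVER', 'DESCRIPTION']
# Hypothetical rows; in the example above they are filled in by get_data()
table = [
    ['198.51.100.7', 'EXAMPLE-NET', 'US', 'Example Org', 'nginx', 'documentation range'],
    ['203.0.113.4', 'EXAMPLE-NET', 'US', 'Example Org', 'apache', 'documentation range'],
]
print(tabulate(table, headers, tablefmt='grid'))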
 def print_domain_templates(self,templates):
     '''
     Print template details
     Accepts a list of templates or a single template object
     '''
     
     
     #Create table and table headers
     table = []
     headers = ["Name","Description","TemplateId"]
     
     
     if type(templates) is list:
         #Print domain template parameters
         for template in templates:
             entry = []
             entry.append(template.name)
             entry.append(template.description)
             entry.append(template.id)
             table.append(entry)
     else:
         entry = []
         entry.append(templates.name)
         entry.append(templates.description)
         entry.append(templates.id)
         table.append(entry)
         
     print tabulate(table,headers)
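A small usage sketch of the same idea, assuming template objects that expose .name, .description and .id (SimpleNamespace is used here as a stand-in); the tabulate call is reproduced inline so the snippet runs on its own:

from types import SimpleNamespace
from tabulate import tabulate

# Hypothetical template objects; only .name, .description and .id are read
templates = [
    SimpleNamespace(name='small', description='2 vCPU / 4 GB', id='tmpl-001'),
    SimpleNamespace(name='large', description='8 vCPU / 32 GB', id='tmpl-002'),
]
table = [[t.name, t.description, t.id] for t in templates]
print(tabulate(table, headers=['Name', 'Description', 'TemplateId']))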
Example #3
    def __str__(self):
        try:
            self.check()
        except ValueError as e:
            return e.message
        poolitems = sorted(self.pools.items(),
                           key=lambda pitem: pitem[1].hashrate, reverse=True)
        totalhashrate = self.calc_totalhashrate()
        headers = ["Name", "HR (Thps)", "Prop", "MBS", "MFR",
                   "AKN", "BKN", "MFR.mean", "MFR.std", "MFR.bias"]
        table = [[
            name,
            pool.hashrate*1e-12,
            pool.hashrate/totalhashrate,
            pool.maxblocksize,
            pool.minfeerate,
            pool.mfrstats['abovekn'],
            pool.mfrstats['belowkn'],
            pool.mfrstats['mean'],
            pool.mfrstats['std'],
            pool.mfrstats['bias']]
            for name, pool in poolitems]
        poolstats = tabulate(table, headers=headers)
        meanblocksize = sum([row[2]*row[3] for row in table])
        maxcap = meanblocksize*self.blockrate

        table = [
            ("Block interval (s)", 1 / self.blockrate),
            ("Total hashrate (Thps)", totalhashrate*1e-12),
            ("Max capacity (bytes/s)", maxcap)
        ]
        miscstats = tabulate(table)
        return poolstats + '\n' + miscstats
  def dumpProfile(self):
    """
    Print region profiling information in a nice format.
    """
    print "Profiling information for {}".format(type(self).__name__)
    totalTime = 0.000001
    for region in self.network.regions.values():
      timer = region.computeTimer
      totalTime += timer.getElapsed()

    count = 1
    profileInfo = []
    for region in self.network.regions.values():
      timer = region.computeTimer
      count = max(timer.getStartCount(), count)
      profileInfo.append([region.name,
                          timer.getStartCount(),
                          timer.getElapsed(),
                          100.0*timer.getElapsed()/totalTime,
                          timer.getElapsed()/max(timer.getStartCount(),1)])

    profileInfo.append(["Total time", "", totalTime, "100.0", totalTime/count])
    print tabulate(profileInfo, headers=["Region", "Count",
                   "Elapsed", "Pct of total", "Secs/iteration"],
                   tablefmt = "grid", floatfmt="6.3f")

    if self.tmRegion is not None:
      if self.tmRegion.getSpec().commands.contains("prettyPrintTraces"):
        self.tmRegion.executeCommand(["prettyPrintTraces"])
Example #5
def displayGraph(ingredientMap):
    # Display graph
    # list of foods
    foodNodes = ingredientMap.m.values()
    # table for display
    numNodeComb = 0
    numNodeSep = 0
    displayTable = [ [row,"nameFood",[]] for row in range(len(foodNodes)) ]
    for eachFood in foodNodes:
        displayTable[eachFood.getIndex()][1] = eachFood.getName()
    for i,each in enumerate(ingredientMap.adjList):
        stringChild = [str(eachChild) for eachChild in each ]
        if (len(stringChild) > 1):
            numNodeSep +=1
        elif (len(stringChild) == 1):
            numNodeComb += 1
        stringChild = ",".join(stringChild)
        displayTable[i][2] = stringChild
    # global countComb, countSep
    # countComb += numNodeComb
    # countSep += numNodeSep
        
    print tabulate(displayTable, headers=["node-id","node-form", "child-id"])
    
#     originalTextsWithEdge = []
#     for each in originalTexts:
#         if each in dictTextEdge.keys():
#             originalTextsWithEdge.append("\nEDGE:"+each +'\n'+ str(dictTextEdge[each])+"\n")
#         else:
#             originalTextsWithEdge.append(each)
    
     
    
Example #6
def output(matrix):
	tmp = [[''] * 4 for _ in range(4)]
	for i in range(4):
		for j in range(4):
			if matrix[i][j] != 0:
				tmp[i][j] = matrix[i][j]
	print tabulate.tabulate(tmp, tablefmt = 'grid')
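The same idea as output() above in a self-contained form: zero cells are blanked out before tabulating so empty board positions render as empty grid cells (the board values are made up):

from tabulate import tabulate

# Hypothetical 4x4 board; zeros become blank cells in the grid
board = [[0, 2, 0, 0],
         [0, 0, 4, 0],
         [2, 0, 0, 8],
         [0, 0, 0, 16]]
display = [[cell if cell else '' for cell in row] for row in board]
print(tabulate(display, tablefmt='grid'))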
Example #7
def printTableRowsAvg(sumTuples, countTuples, columns):
    rows = []
    if len(sumTuples) == 1 and countTuples[0][0] == 0:
        if len(columns) == 2:
            rows.append(["NULL", "NULL"])
        else:
            rows.append(["NULL"])
        print(tabulate(rows, headers = columns))
        return
    else:
        if len(columns) == 2:
            counts = dict(countTuples)
            for row in sumTuples:
                newRow = list(row)
                encryptedSalaryHex = newRow[0]
                encryptedSalary = newRow[0][:-1].decode('hex_codec')
                encryptLength = len(encryptedSalary)
                ffiUC_encryptedSalary = ffi.new("unsigned char[%i]" % encryptLength, encryptedSalary)
                newRow[0] = float(int(cryptoLib.decrypt_num(cryptoLib.toData(ffiUC_encryptedSalary), keys.public, keys.private, encryptLength))) / counts[newRow[1]]
                rows.append(newRow)
            print(tabulate(rows, headers = columns))
            return
        else:
            counts = countTuples[0][0]
            for row in sumTuples:
                newRow = list(row)
                encryptedSalaryHex = newRow[0]
                encryptedSalary = newRow[0][:-1].decode('hex_codec')
                encryptLength = len(encryptedSalary)
                ffiUC_encryptedSalary = ffi.new("unsigned char[%i]" % encryptLength, encryptedSalary)
                newRow[0] = float(int(cryptoLib.decrypt_num(cryptoLib.toData(ffiUC_encryptedSalary), keys.public, keys.private, encryptLength))) / counts
                rows.append(newRow)
            print(tabulate(rows, headers = columns))
            return
def construct_SCL_table(pv_methods):
    table = []
    i = 0
    #collect domain pairs
    for method in pv_methods:
        if i == 0:
            m_list = collect_accuracy("../work/batchSCL.%s.csv"%method)
            pairs = [x[0] for x in m_list]
        i+=1

    # use pairs to collect accuracies
    for pair in sorted(pairs):
        tmp = []
        tmp.append(pair)
        for method in pv_methods:
            if "landmark" in method:
                m_list = construct_best_landmark(collect_params('SCL',method))
                tmp.append([x[2] for x in m_list if x[0]==pair][0])
                m_list = construct_best_landmark(collect_params_on_server('SCL',method))
                tmp.append([x[2] for x in m_list if x[0]==pair][0])
            else:
                m_list = collect_accuracy("../work/batch%s.%s.csv"% ('SCL', method))
                tmp.append([x[2] for x in m_list if x[0]==pair][0])
                m_list = collect_accuracy("../work/temp/batch%s.%s.csv"% ('SCL', method))
                tmp.append([x[2] for x in m_list if x[0]==pair][0])
        best = max(tmp[1:])
        best_idx = [i for i, j in enumerate(tmp) if j == best]
        new_tmp = ["\\textbf{%.2f}"%x if x == best else x for x in tmp]
        # print pair,[convert(pv_methods[i-1]) for i in best_idx],best
        table.append(new_tmp)

    headers = ['S-T']+sum([[convert(x)+'(S,T)',convert(x)+'(T)'] for x in pv_methods],[])
    print tabulate(table,headers,floatfmt=".2f")
    pass
Example #9
def highest_dividend():
    nshares = defaultdict(lambda: 1)
    for order in orders():
        nshares[order[1]] = nshares[order[1]] + order[3]
    sortsec = sorted(securities(), key=(lambda x: x[1] * x[2] / nshares[x[0]]), reverse=True)
    table = map(lambda sec: [sec[0], sec[1] * sec[2] / nshares[sec[0]], sec[1] * sec[2], nshares[sec[0]]], sortsec)
    print tabulate(table, headers=["Ticker", "Dividend per Share", "Total Dividend", "Shares being traded"])
Example #10
def main():
    parser = argparse.ArgumentParser(description="List available subtitles for a show")
    parser.add_argument("show", help="Show you want to list the subtitles for")
    parser.add_argument("-s", "--season", help="Season you want to list", type=int, default=1)
    parser.add_argument("-l", "--language", help="Language you want to list subtitles in", default='en')
    args = parser.parse_args()

    os = OpenSubtitles()
    os.login(username=None, password=None)

    show_name = args.show
    sub_language = args.language
    season = str(args.season)

    results = os.search_subtitles([{'sublanguageid': sub_language, 'query': show_name, 'season': season}])
    sorted_results = sorted(results, key=lambda x: int(x['SeriesEpisode']))

    table = []
    for result in sorted_results:
        name = result['MovieName'].strip()
        match_exact_show_name_regex = r'\"{}\"'.format(show_name)
        if re.search(match_exact_show_name_regex, name):
            username_raw = result['UserNickName']
            username = username_raw if username_raw else 'nobody'
            size = result['SubSize']
            date_raw = result['SubAddDate']
            date = datetime.strptime(date_raw, '%Y-%m-%d %H:%M:%S')
            filename = result['SubFileName']
            table.append(['-r--r--r--', '1', username, username, size,
                          date.strftime('%b %d %H:%M'), filename])

    print "total {}".format(len(table))
    print tabulate(table, tablefmt="plain")
Example #11
def load_lots_of_items():
    char = poe_lib.Character()
    js = open("items.json").read().strip().decode('ascii', 'ignore')
    items = json.loads(js)

    weapon_types = {}
    words = {}
    for _, item, _ in items:
        if isinstance(item, dict):
            typ = item['typeLine'].lower()
            for word in typ.split(" "):
                words.setdefault(word, 0)
                words[word] += 1

            checks = ['sword', 'mace', 'stave', 'flask', 'shield', 'greaves', 'boots']
            for check in checks:
                if check in typ:
                    weapon_types.setdefault(check, 0)
                    weapon_types[check] += 1
                    break
            else:
                weapon_types.setdefault("unk", 0)
                weapon_types["unk"] += 1

    pprint.pprint(sorted(words.items(), key=lambda a: a[1]))
    pprint.pprint(sorted(weapon_types.items(), key=lambda a: a[1]))
    data = [eff.tabulate() for eff in char.effects]
    print tabulate.tabulate(data, headers=["Effect Magnitude", "Requirement to Effect", "Effects", "Source", "Original Text"])
Example #12
def sensorInfos(args):
    requireSensorID(args)
    params = extractParams(args)

    if 'tail' not in params:
        params['tail'] = 1

    obj = lib.Sensor(args.directory, args.sensorid, args.type)
    infos = obj.SensorInfos(**params)

    if not infos:
        print "Not enought datas for %s" % args.sensorid
        sys.exit(1)

    showresult = [
        ['Sensorid', args.sensorid],
        #['Sensor Type', obj.configs['type']],
        ['NB lines', str(infos['nblines'])],
        ['Min date', format_datetime(infos['mindate'])],
        ['Max date', format_datetime(infos['maxdate'])],
        ['Min value', '%s (%s)' % (str(infos['minvalue']), format_datetime(infos['minvaluedate']))],
        ['Max value', '%s (%s)' % (str(infos['maxvalue']), format_datetime(infos['maxvaluedate']))],
        #        ['Avg size', str(infos['avgsize'])],
        ['Avg value', str(infos['avgvalue'])],
        ['Avg delta (round ratio)', str(infos['avgdelta'])],
        # ['Total size', '%s Mo' % str(infos['avgsize'] * infos['nblines'] / 1024 / 1024.0)],
    ]

    header = ['Title', 'Value']
    print tabulate(showresult, headers=header)
    def bridgemem_details(self):
        """
        :return: list of vlans or bridge names in the various STP states
        """
        if not self.iface.is_bridgemem():
            return None
        # check if port is in STP
        _str = ''
        _stpstate = self.iface.stp.state
        # get the list of states by grabbing all the keys
        if self.iface.vlan_filtering:
            _vlanlist = self.iface.vlan_list
            _header = [_("all vlans on l2 port")]
            _table = [[', '.join(linux_common.create_range('', _vlanlist))]]
            _str += tabulate(_table, _header, numalign='left') + self.new_line()
            _header = [_("untagged vlans")]
            _table = [[', '.join(self.iface.native_vlan)]]
            _str += tabulate(_table, _header, numalign='left') + self.new_line()
        for _state, _bridgelist in _stpstate.items():
            if _bridgelist:
                _header = [_("vlans in %s state") %
                           (inflection.titleize(_state))]
                # if vlan aware and bridgelist is not empty, then assume
                # all vlans have that stp state
                if self.iface.vlan_filtering:
                    _table = [[', '.join(linux_common.create_range(
                        '', _vlanlist))]]
                else:
                    _table = [self._pretty_vlanlist(_bridgelist)]

                _str += tabulate(_table, _header, numalign='left') + self.new_line()

        return _str
Example #14
def print_events(conn, stack_name, follow, lines=100, from_dt=datetime.fromtimestamp(0, tz=pytz.UTC)):
    """Prints tabulated list of events"""
    events_display = []
    seen_ids = set()
    next_token = None

    while True:
        events, next_token = get_events(conn, stack_name, next_token)
        status = get_stack_status(conn, stack_name)
        normalize_events_timestamps(events)
        if follow:
            events_display = [(ev.timestamp.astimezone(tzlocal.get_localzone()), ev.resource_status, ev.resource_type,
                               ev.logical_resource_id, ev.resource_status_reason) for ev in events
                              if ev.event_id not in seen_ids and ev.timestamp >= from_dt]
            if len(events_display) > 0:
                print(tabulate(events_display, tablefmt='plain'), flush=True)
                seen_ids |= set([event.event_id for event in events])
            if status not in IN_PROGRESS_STACK_STATES and next_token is None:
                break
            if next_token is None:
                time.sleep(5)
        else:
            events_display.extend([(event.timestamp.astimezone(tzlocal.get_localzone()), event.resource_status,
                                    event.resource_type, event.logical_resource_id, event.resource_status_reason)
                                   for event in events])
            if len(events_display) >= lines or next_token is None:
                break

    if not follow:
        print(tabulate(events_display[:lines], tablefmt='plain'), flush=True)

    return status
Example #15
 def show(self, header=True):
   print
   if header: print self.table_header + ":"
   print
   table = copy.deepcopy(self.cell_values)
   print tabulate.tabulate(table, headers=self.col_header, numalign="left", stralign="left")
   print
Example #16
def handle_party_list(data):
    wanted = "name series_id acc atk def eva matk mdef mnd series_acc series_atk series_def series_eva series_matk series_mdef series_mnd"
    topn = OrderedDict()
    topn["atk"] = 5
    topn["matk"] = 2
    topn["mnd"] = 2
    topn["def"] = 5
    find_series = [101001, 102001, 104001, 105001, 106001, 107001, 108001, 110001]
    equips = defaultdict(list)
    for item in data["equipments"]:
        kind = item.get("equipment_type", 1)
        heapq.heappush(equips[kind], Equipment(slicedict(item, wanted)))

    for series in find_series:
        print "Best equipment for FF{0}:".format((series - 100001) / 1000)

        # Need to use lists for column ordering
        tbl = ["stat n weapon stat n armor stat n accessory".split()]
        tbldata = [[],[],[],[]]
        for itemtype in range(1, 4): ## 1, 2, 3
            for stat, count in topn.iteritems():
                for equip in best_equipment(series, equips[itemtype], stat, count):
                    name = equip["name"].replace(u"\uff0b", "+")
                    tbldata[itemtype].append([stat, equip[stat], name])

        # Transpose data
        for idx in range(0, len(tbldata[1])):
            tbl.append(tbldata[1][idx] + tbldata[2][idx] + tbldata[3][idx])
        print tabulate(tbl, headers="firstrow")
        print ""
Example #17
def show_summary_all(db_server, db_port, db_name, db_collection):
    pattern = {}

    print "-" * 60
    print "Summary Data: "
    print "-" * 60

    data = pns_mongo.pns_search_results_from_mongod(db_server,
                                                    db_port,
                                                    db_name,
                                                    db_collection,
                                                    pattern)
    for record in data:
        print_record_header(record)
        for flow in record['flows']:
            print_flow_header(flow)

            # Display the results for each flow.
            cols = ['throughput_kbps', 'protocol', 'tool',
                    'rtt_ms', 'loss_rate', 'pkt_size',
                    'rtt_avg_ms']
            result_list = get_results_info(flow['results'], cols)
            print tabulate.tabulate(result_list,
                                    headers="keys", tablefmt="grid")

    print "\n"
Example #18
def main():

    print '''
                                                                        Welcome to La-Z-Boy
                                                                    For the love of good content
    '''
    print("If you want to check movies movies on a channel select 1")
    print("To get movies of a specific Genre select 2")
    choice=raw_input("Enter choice: ")

    if(str(choice)=='1'):
        #channel = raw_input("Enter name of the TV Channel: ")
        channel2 = raw_input("Enter name of the TV Channel: ")
        #channel = "-".join([item.strip() for item in channel.split(" ")])
        if(len(channel2.split())>1):
            channel2 = "-".join([item.strip() for item in channel2.split(" ")])
            channel2 = channel2.title()
        else:
            channel2 = channel2.strip()
            channel2 = channel2.upper()
        movie_rating = search_channel(channel2)
    else:
        genre = raw_input("Enter Genre: (like  comedy, action ....) ")
        no_of_channel = raw_input("Enter No of channels to check (e.g., 1-44)")
        genre_recommend(genre, no_of_channel)
        print '\nNumber of movies of genre ' + genre.upper()+' found : ' + str(len(movies_of_my_genre))
        get_ratings(movies_of_my_genre)
        sorted_list = sorted(movies_of_my_genre, key=lambda movie: movie.movie_rating, reverse=True)

        headers = ['Movies','Channel','Time', 'Rating']
        data_movies2 = []

        for movie in islice(sorted_list, 5):
            data_movies2.append([movie.movie_name.replace('\t', ''), movie.movie_channel.replace('\t', ''), movie.movie_start+"-"+movie.movie_end, movie.movie_rating])
        print tabulate(data_movies2, headers=headers)
Example #19
    def tabulate(cls, data, fields, headers={}):
        """ Prints a tabulate version of data

            Args:
                data: something to display

        """
        if isinstance(data, str):
            print(tabulate([[data]], tablefmt=Printer.TABULATE_FORMAT))

        elif isinstance(data, dict):
            print(tabulate([data], headers=headers, tablefmt=Printer.TABULATE_FORMAT))

        elif isinstance(data, list):
            results = []

            for obj in data:
                if isinstance(obj, NURESTObject):
                    results.append(cls._object_to_dict(obj, fields))
                else:
                    results.append([obj])

            print(tabulate(results, headers=headers, tablefmt=Printer.TABULATE_FORMAT))

        else:
            dictionary = cls._object_to_dict(data, fields)
            result = [(key, value) for key, value in dictionary.items()]
            print(tabulate(result, headers=headers, tablefmt=Printer.TABULATE_FORMAT))
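A rough illustration of the input shapes the tabulate() helper above accepts, sketched with plain tabulate calls and 'psql' standing in for the unknown Printer.TABULATE_FORMAT:

from tabulate import tabulate

# A bare string becomes a one-cell table
print(tabulate([['just a message']], tablefmt='psql'))

# A dict prints as a single row with its keys as headers
print(tabulate([{'name': 'alpha', 'value': 1}], headers='keys', tablefmt='psql'))

# A list of rows prints as a normal table
rows = [['alpha', 1], ['beta', 2]]
print(tabulate(rows, headers=['name', 'value'], tablefmt='psql'))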
Example #20
 def after(self):
     def fmt_date(dt):
         if not dt:
             return ''
         return dt.strftime('%Y-%m-%d %H:%M')
     def fmt_dur(d):
         if not d:
             return ''
         return '%0.1f' % (d / 60.0)
     s=Storage(self.config['storage_root'])
     stats=s.get_status()
     tab=[]
     for vm in self.result:
         stat=stats.get(vm)
         row=self.result[vm]
         if stat:
             row.append(fmt_date(stat['last_backup']))
             row.append(fmt_dur(stat['duration']))
         else:
             row.extend(['', ''])
         tab.append(row)
     for vm in stats:
         if not self.result.has_key(vm):
             tab.append(['', vm, '', '', '', fmt_date(stats[vm]['last_backup']), fmt_dur(stats[vm]['duration'])])
     print tabulate(tab, ['Host', 'VM', 'State', 'AutoBck', 'AutoBck Batch', 'Last Backup', 'Dur. (m)'])
    def parse(self, response):
        htmlresponseinit = HtmlXPathSelector(text=response.body).select('//div[@id="browseJobs"]')
        i_range = range(13)
        for sel in htmlresponseinit:
            items = []
            for i in i_range:
                item = FreelancerscraperItem()
                id_a_link = "jobCat" + str(i) + "_heading"
                id_ul_link = "jobCat" + str(i)
                parent_categ_name = sel.select('p/strong/a[@id="' + id_a_link + '"]/text()').extract()
                item['parent_categ_name'] = parent_categ_name
                categ_and_count_string = (sel.select('ul[@id="' + id_ul_link + '"]/li/a/text()').extract())

                result = {}
                for categ_and_count in categ_and_count_string:
                    #   Electronics (1234)
                    categ = categ_and_count.split("(")[0].strip()
                    count = re.findall(r'\d+', categ_and_count)[0]
                    result[categ] = int(count)
                sorted_result = sorted(result.items(), key=operator.itemgetter(1), reverse=True)
                item['categ_name_count'] = sorted_result
                items.append(item)
            for item in items:
                print "--------------*******------------------------------*******----------------"
                print tabulate(item['categ_name_count'], ["CATEGORY: " + "".join(item['parent_categ_name']), "Count"], tablefmt="grid")

        yield None
Example #22
  def tail(self, rows=10, cols=200, **kwargs):
    """
    Analogous to R's `tail` call on a data.frame. Display a digestible chunk of the H2OFrame starting from the end.

    :param rows: Number of rows to display.
    :param cols: Number of columns to display.
    :param kwargs: Extra arguments passed from other methods.
    :return: None
    """
    if self._vecs is None or self._vecs == []:
      raise ValueError("Frame Removed")
    nrows = min(self.nrow(), rows)
    ncols = min(self.ncol(), cols)
    colnames = self.names()[0:ncols]

    exprs = [self[c][(self.nrow()-nrows):(self.nrow())] for c in range(ncols)]
    print "Last", str(nrows), "rows and first", str(ncols), "columns: "
    if nrows != 1:
      fr = H2OFrame.py_tmp_key()
      cbind = "(= !" + fr + " (cbind %"
      cbind += " %".join([expr.eager() for expr in exprs]) + "))"
      res = h2o.rapids(cbind)
      h2o.remove(fr)
      tail_rows = [range(self.nrow()-nrows+1, self.nrow() + 1, 1)]
      tail_rows += [rows[0:nrows] for rows in res["head"][0:ncols]]
      tail = zip(*tail_rows)
      print tabulate.tabulate(tail, headers=["Row ID"] + colnames)
    else:
      print tabulate.tabulate([[self.nrow()] + [expr.eager() for expr in exprs]], headers=["Row ID"] + colnames)
    print
Example #23
def main():
    parser = argparse.ArgumentParser(
        description='Finds spare change using CSV exported from Mint!')
    parser.add_argument('filename', type=str,
        help='Filename to read for csv.')
    parser.add_argument('-y', '--years', type=int, default=5,
        help='The number of previous years (including current) to print.')

    args = parser.parse_args()

    # Ensure that the file exists that user provides.
    if not os.path.isfile(args.filename):
        print "ERROR: {0} does not exist! Please specify a valid file!".format(
            args.filename)
        sys.exit(1)

    # Determine the start date for grabbing the values.
    TODAY = datetime.datetime.now()
    start_date = datetime.datetime(
        TODAY.year - args.years + 1,
        1,
        1)

    spare_change = collections.OrderedDict(
        {"Month" : calendar.month_abbr[1:13] + ["Total"]})

    # Open the CSV file and parse each row.
    with open(args.filename, 'rb') as csvfile:
        dictreader = csv.DictReader(csvfile)
        for row in dictreader:
            date = datetime.datetime.strptime(row['Date'], '%m/%d/%Y')

            # If the date is greater than the start date, accumulate values.
            if date > start_date:

                # See if the year exist in the dictionary yet and create
                # the list if not. We use None here instead of 0 so the table
                # does not print values that are zero.
                if date.year not in spare_change:
                    spare_change[date.year] = [None] * (MONTHS_IN_YEAR + 1)

                # Calculate the change and then add the amount to the list
                # in the dictionary. Index is the month offset by 1 since
                # the list starts with 0.
                dollars = float(row['Amount'])
                change = dollars - math.floor(dollars)

                if spare_change[date.year][date.month - 1] is None:
                    spare_change[date.year][date.month - 1] = change
                else:
                    spare_change[date.year][date.month - 1] += change

                if spare_change[date.year][12] is None:
                    spare_change[date.year][12] = change
                else:
                    spare_change[date.year][12] += change


    # Print the results.
    print tabulate(spare_change, headers="keys", floatfmt=".2f")
Example #24
def processDatabase(dbfile,free_params):

    con = sqlite3.connect(dbfile)
    cur = con.cursor()

    cur.execute('SELECT ParamName,Value,GaussianPrior,Scale,Max,Min,Fixed from SystematicParams ORDER BY ParamName')
    data = cur.fetchall()

    try:
         print tabulate(data,headers=["Name","Value","GaussianPrior","Scale","Max","Min","Fixed"],tablefmt="grid")
    except:
        print "PARAMETER TABLE: \n"
        col_names = [col[0] for col in cur.description]
        print "  %s %s %s %s %s %s %s" % tuple(col_names)
        for row in data: print "  %s %s %s %s %s %s %s" % row

    # Convert all row's angles from degrees to rad:
    for irow,row in enumerate(data):
        if 'theta' in row[0]:
            row = list(row)
            row[1] = np.deg2rad(row[1])
            if row[2] is not None:
                row[2] = np.deg2rad(row[2])
            row[4] = np.deg2rad(row[4])
            row[5] = np.deg2rad(row[5])
            data[irow] = row
            
            
    params = {}
    for row in data:
        prior_dict = {'kind': 'uniform'} if row[2] is None else {'fiducial': row[1], 
                                                                 'kind': 'gaussian', 
                                                                 'sigma': row[2]}
            
        params[row[0]] = {'value': row[1], 'range': [row[5],row[4]],
                          'fixed': bool(row[6]),'scale': row[3],
                          'prior': prior_dict}                    
    
    # now make fixed/free:
    if free_params is not None:
        # modify the free params to include the '_ih'/'_nh' tags:
        mod_free_params = []
        for p in free_params:
            if ('theta23' in p) or ('deltam31' in p):
                mod_free_params.append(p+'_ih')
                mod_free_params.append(p+'_nh')
            else:
                mod_free_params.append(p)

        print "\nmod free params: ",mod_free_params
        #Loop over the free params and set to fixed/free
        for key in params.keys():
            if key in mod_free_params: params[key]['fixed'] = False
            else: params[key]['fixed'] = True

            if not params[key]['fixed']:
                print "  Leaving parameter free: ",key
        print "  ...all others fixed!"

    return params
Example #25
def __mk_volume_table(table, ty, headers=(), **kwargs):
    if ty == 'global':
        return tabulate(table, headers=headers, tablefmt=global_tablefmt, **kwargs)
    elif ty == 'byweek':
        return tabulate(table, headers=headers, tablefmt=byweek_tablefmt, **kwargs)
    elif ty == 'rank':
        return tabulate(table, headers=headers, tablefmt=rank_tablefmt, **kwargs)
Example #26
File: c.py Project: hiveeyes/kotori
    def pprint(cls, struct, data=None, format='pprint'):
        # TODO: maybe refactor to struct._pprint_
        name = struct._name_()
        payload = struct._dump_()
        payload_hex = hexlify(payload)
        if data:
            payload_data = list(data.items())
        else:
            payload_data = list(cls.to_dict(struct).items())

        if format == 'pprint':
            print 'name:', name
            print 'hex: ', payload_hex
            pprint(payload_data, indent=4, width=42)

        elif format == 'tabulate-plain':
            separator = ('----', '')
            output = [
                separator,
                ('name', name),
                separator,
            ]
            output += StructAdapter.binary_reprs(payload)
            output += [separator]
            output += payload_data
            print tabulate(output, tablefmt='plain')

            #print tabulate(list(meta.items()), tablefmt='plain')
            #print tabulate(payload_data, missingval='n/a', tablefmt='simple')

        else:
            raise ValueError('Unknown format "{}" for pretty printer'.format(format))
Example #27
def print_tabulate(res, noheader=False, short=False):
    config = read_config()
    c = conn(config)
    tbl = []
    for i in res:
        if not i.__dict__.has_key('passwordenabled'):
            i.__setattr__('passwordenabled', 0)
        if not i.__dict__.has_key('created'):
            i.__setattr__('created', '')
        if i.passwordenabled == 1:
            passw = "Yes"
        else:
            passw = "No"
        if short:
            tbl.append(["%s/%s" % (i.account, i.name)])
        else:
            tbl.append([
                "%s/%s" % (i.account, i.name),
                i.zonename,
                i.ostypename,
                i.created,
                passw,
                ])

    tbl = sorted(tbl, key=operator.itemgetter(0))
    if (noheader or short):
        print tabulate(tbl, tablefmt="plain")
    else:
        tbl.insert(0, ['name', 'zone', 'ostype', 'created', 'passwordenabled'])
        print tabulate(tbl, headers="firstrow")
Example #28
  def head(self, rows=10, cols=200, **kwargs):
    """
    Analogous to R's `head` call on a data.frame. Display a digestible chunk of the H2OFrame starting from the beginning.

    :param rows: Number of rows to display.
    :param cols: Number of columns to display.
    :param kwargs: Extra arguments passed from other methods.
    :return: None
    """
    if self._vecs is None or self._vecs == []:
      raise ValueError("Frame Removed")
    nrows = min(self.nrow(), rows)
    ncols = min(self.ncol(), cols)
    colnames = self.names()[0:ncols]

    fr = H2OFrame.py_tmp_key()
    cbind = "(= !" + fr + " (cbind %"
    cbind += " %".join([vec._expr.eager() for vec in self]) + "))"
    res = h2o.rapids(cbind)
    h2o.remove(fr)
    head_rows = [range(1, nrows + 1, 1)]
    head_rows += [rows[0:nrows] for rows in res["head"][0:ncols]]
    head = zip(*head_rows)
    print "First", str(nrows), "rows and first", str(ncols), "columns: "
    print tabulate.tabulate(head, headers=["Row ID"] + colnames)
    print
Example #29
def showPfcAsym(interface):
    """
    PFC handler to display asymmetric PFC information.
    """

    i = {}
    table = []
    key = []

    header = ('Interface', 'Asymmetric')

    configdb = swsssdk.ConfigDBConnector()
    configdb.connect()

    if interface:
        db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface))
    else:
        db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*')

    for i in db_keys or [None]:
        if i:
            key = i.split('|')[-1]

        if key and key.startswith('Ethernet'):
            entry = configdb.get_entry('PORT', key)
            table.append([key, entry.get('pfc_asym', 'N/A')])

    sorted_table = natsorted(table)

    print '\n'
    print tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")
    print '\n'
Example #30
  def describe(self):
    """
    Generate an in-depth description of this H2OFrame.

    The description is a tabular print of the type, min, max, sigma, number of zeros,
    and number of missing elements for each H2OVec in this H2OFrame.

    :return: None (print to stdout)
    """
    if self._vecs is None or self._vecs == []:
      raise ValueError("Frame Removed")
    print "Rows:", len(self._vecs[0]), "Cols:", len(self)
    headers = [vec._name for vec in self._vecs]
    table = [
      self._row('type', None),
      self._row('mins', 0),
      self._row('mean', None),
      self._row('maxs', 0),
      self._row('sigma', None),
      self._row('zero_count', None),
      self._row('missing_count', None)
    ]

    chunk_summary_tmp_key = H2OFrame.send_frame(self)

    chunk_summary = h2o.frame(chunk_summary_tmp_key)["frames"][0]["chunk_summary"]

    h2o.remove(chunk_summary_tmp_key)

    print tabulate.tabulate(table, headers)
    print
    print chunk_summary
    print
Example #31
    async def statusset_list(self, ctx: commands.Context,
                             service: Optional[ServiceConverter]):
        """
        List the available services and which ones are used in this server.

        Optionally add a service at the end of the command to view detailed settings for that
        service.
        """
        # this needs refactoring
        # i basically copied and pasted in rewrite
        # maybe stick the two sections in .utils

        if TYPE_CHECKING:
            guild = Guild()
        else:
            guild = ctx.guild

        unused_feeds = list(FEEDS.keys())

        if service:
            data = []
            for channel in guild.channels:
                feeds = await self.config.channel(channel).feeds()
                restrictions = await self.config.guild(
                    guild).service_restrictions()
                for name, settings in feeds.items():
                    if name != service.name:
                        continue
                    mode = settings["mode"]
                    webhook = settings["webhook"]
                    if channel.id in restrictions.get(service, []):
                        restrict = True
                    else:
                        restrict = False
                    data.append([f"#{channel.name}", mode, webhook, restrict])

            table = box(
                tabulate(data,
                         headers=[
                             "Channel", "Send mode", "Use webhooks", "Restrict"
                         ]))
            await ctx.send(
                f"**Settings for {service.name}**: {table}\n`Restrict` is whether or not to "
                f"restrict access for {service.name} server-wide in the `status` command. Members "
                "are redirected to an appropriate channel.")

        else:
            guild_feeds: Dict[str, List[str]] = {}
            for channel in guild.channels:
                feeds = await self.config.channel(channel).feeds()
                for feed in feeds.keys():
                    try:
                        guild_feeds[feed].append(f"#{channel.name}")
                    except KeyError:
                        guild_feeds[feed] = [f"#{channel.name}"]

            if not guild_feeds:
                msg = "There are no status updates set up in this server.\n"
            else:
                msg = ""
                data = []
                for name, settings in guild_feeds.items():
                    if not settings:
                        continue
                    data.append([name, humanize_list(settings)])
                    try:
                        unused_feeds.remove(name)
                    except Exception:
                        pass
                if data:
                    msg += "**Services used in this server:**"
                    msg += box(tabulate(data, tablefmt="plain"),
                               lang="arduino")  # cspell:disable-line
            if unused_feeds:
                msg += "**Other available services:** "
                msg += inline_hum_list(unused_feeds)
            msg += (
                f"\nTo see settings for a specific service, run `{ctx.clean_prefix}statusset "
                "list <service>`")
            await ctx.send(msg)
    def analyze_all_continuous(self):
        list_tables = Database.get_table_names(Database.database_name)

        iter = 0
        func_names = {}
        for table in list_tables:
            if (table.split('_')[0] != 'Comp'):
                func_name = table.split('_')[3]
                if (func_names.get(func_name, -1) == -1):
                    func_names[str(func_name)] = iter
                    iter += 1

        acc_matrix = [['' for x in range(7)] for x in range(iter)]
        rec_matrix = [['' for x in range(7)] for x in range(iter)]
        prec_matrix = [['' for x in range(7)] for x in range(iter)]

        iter = -1
        x = 0
        for table in list_tables:
            print(table)
            if (table.split('_')[0] != 'Comp'):
                func_name = table.split('_')[3]

                result = ClassifyMusic.run(table, 5, [1, 2, 3, 4, 5, 6], False)

                string = acc_matrix[func_names.get(func_name)]
                array = [func_name, result[0][2], result[1][2], result[2][2], result[3][2], result[4][2], result[5][2]]
                if (string[0] == ''):
                    acc_matrix[func_names.get(func_name)] = array
                else:
                    arr = [a + '-' + b for a, b in zip(string, array)]
                    acc_matrix[func_names.get(func_name)] = arr

                array = [func_name, result[0][3], result[1][3], result[2][3], result[3][3], result[4][3], result[5][3]]
                if (string[0] == ''):
                    prec_matrix[func_names.get(func_name)] = array
                else:
                    arr = [a + '-' + b for a, b in zip(string, array)]
                    prec_matrix[func_names.get(func_name)] = arr

                array = [func_name, result[0][4], result[1][4], result[2][4], result[3][4], result[4][4], result[5][4]]
                if (string[0] == ''):
                    rec_matrix[func_names.get(func_name)] = array
                else:
                    arr = [a + '-' + b for a, b in zip(string, array)]
                    rec_matrix[func_names.get(func_name)] = arr

        avg_acc_matrix = cont_create_tables(acc_matrix)
        avg_rec_matrix = cont_create_tables(rec_matrix)
        avg_prec_matrix = cont_create_tables(prec_matrix)

        print('-------------------------------------------------------------------')
        print('------------------------------------acc matrix---------------------')

        print(tabulate(avg_acc_matrix, tablefmt="latex_raw",
                       headers=['Type', 'Random Forest', 'KNN', 'SVC Poly', 'GaussianNB', 'SVC Linear', 'SVC RBF']))

        print(
            '----------------------------------------------------------------------------')
        print(
            '-----------------------------------acc matrix--------------------------------')

        print(
            '-------------------------------------------------------------------------------')
        print(
            '----------------------------------------rec matrix-----------------------------')


        print(tabulate(avg_rec_matrix, tablefmt="latex_raw",
                       headers=['Type', 'Random Forest', 'KNN', 'SVC Poly', 'GaussianNB', 'SVC Linear', 'SVC RBF']))

        print(
            '-----------------------------------------------------------------------------------')
        print(
            '-----------------------------rec matrix--------------------------------------------')

        print(
            '----------------------------------------------------------------')
        print(
            '-------------------------prec matrix-----------------------------')

        print(tabulate(avg_prec_matrix, tablefmt="latex_raw",
                       headers=['Type', 'Random Forest', 'KNN', 'SVC Poly', 'GaussianNB', 'SVC Linear', 'SVC RBF']))

        print(
            '-------------------------------------------------------------------------------')
        print(
            '-----------------------------------------prec matrix---------------------------')
Example #33
def print_matrix(input_matrix):
    cprint(tabulate(input_matrix,
                    tablefmt="fancy_grid", floatfmt="2.5f"), 'cyan')
Example #34
def print_ip_table(avlist, navlist):
	res = {'Reachable': avlist, 'Unreachable': navlist}
	return print(tabulate(res, headers="keys"))
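print_ip_table above relies on tabulate's dict-of-columns input: each key names a column and maps to that column's values, which headers="keys" turns into the header row. A self-contained sketch with made-up addresses:

from tabulate import tabulate

# Hypothetical scan result; columns of unequal length are padded with blanks
res = {'Reachable': ['10.0.0.1', '10.0.0.2'], 'Unreachable': ['10.0.0.9']}
print(tabulate(res, headers="keys"))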
Example #35
        return 0
    elif re.search('Self', x):
        return 1
    elif re.search('gov', x):
        return 2
    else:
        return 3


df['WorfClass_cat'] = df.workclass.apply(lambda x: x.strip()).apply(
    lambda x: workclas(x))
df['WorfClass_cat'] = df.workclass.apply(lambda x: x.strip()).apply(
    lambda x: workclas(x))
df['WorfClass_cat'].value_counts()

print(tabulate(df.head(15), tablefmt='psql', headers='keys'))

#......................................test  data........................................................

data_test = pd.read_csv(
    r"C:\Users\nikita\Desktop\data_analytics_material\AIT590\python presentation\ML_test_data_without_output.csv"
)

df_test = pd.DataFrame(data_test)
# print(df)

df_test.replace(' ?', numpy.nan, regex=False, inplace=True)
print(df_test.columns[df_test.isna().any()])

#print(tabulate(df, tablefmt='psql'))
Example #36
 cmd = input()
 ctrlc_cnt = 0
 if cmd[0:2] == 'cd':
     try:
         os.chdir(os.path.join(os.getcwd(), cmd.split(' ')[1]))
     except:
         print("Cannot cd.")
 elif cmd == 'ls':
     print(subprocess.getoutput("ls"))
 elif len(cmd) > 0 and cmd[0] == '!':
     cmds = cmd.split(" ")
     if cmd == '!ls':
         print(
             tabulate([(k, *jobs[k].shortsummary()) for k in jobs],
                      tablefmt="grid",
                      headers=[
                          "ID", "status", "pwd", "command", "create", "run",
                          "finished"
                      ]))
     elif cmds[0] == '!k':
         if len(cmds) == 2 and cmds[1].isdigit():
             jobs[int(cmds[1])].kill()
     elif cmds[0] == '!s':
         if len(cmds) == 2 and cmds[1].isdigit():
             jobs[int(cmds[1])].showstdout()
     elif cmds[0] == '!j':
         if len(cmds) == 2 and cmds[1].isdigit():
             threadpoolmax = int(cmds[1])
         else:
             print("Thread pool size:", threadpoolmax)
     # elif cmds[0]=='!showstderr':
     #     if len(cmds)==2 and cmds[1].isdigit():
Example #37
    def run_all(self):
        self.pulse_logger.info('Checking instance pulse')
        self.pulse_logger.info('Checking Connections')
        result = self.check_connections()
        print(result)
        self.pulse_logger.info('Complete: Checking Connections')

        self.pulse_logger.info('Analyzing Query Stats')
        r1, r2, r3 = self.check_query_stats()
        print(r1)
        print(r2)
        print(r3)
        self.pulse_logger.info('Complete: Analyzing Query Stats')

        # check scheduled plans
        self.pulse_logger.info('Analyzing Query Stats')
        with trange(1,
                    desc='(3/5) Analyzing Scheduled Plans',
                    bar_format=self.bar,
                    postfix=self.postfix_default,
                    ncols=100,
                    miniters=0) as t:
            for i in t:
                result = self.check_scheduled_plans()
                fail_flag = 0
                if type(result) == list and len(result) > 0:
                    if result[0]['failure'] > 0:
                        fail_flag = 1
                    result = tabulate(result,
                                      headers="keys",
                                      tablefmt='psql',
                                      numalign='center')
                t.postfix[0]["value"] = 'DONE'
                t.update()
        print(result)
        if fail_flag == 1:
            print('Navigate to /admin/scheduled_jobs on your instance for '
                  'more details')
        self.pulse_logger.info('Complete: Analyzing Scheduled Plans')

        # check enabled legacy features
        self.pulse_logger.info('Checking Legacy Features')
        with trange(1,
                    desc='(4/5) Legacy Features',
                    bar_format=self.bar,
                    postfix=self.postfix_default,
                    ncols=100,
                    miniters=0) as t:
            for i in t:
                result = self.check_legacy_features()
                t.postfix[0]["value"] = 'DONE'
                t.update()
        print(result)
        self.pulse_logger.info('Complete: Checking Legacy Features')

        # check looker version
        self.pulse_logger.info('Checking Version')
        t = trange(1,
                   desc='(5/5) Version',
                   bar_format=self.bar,
                   postfix=self.postfix_default,
                   ncols=100)
        for i in t:
            result = self.check_version()
            t.postfix[0]["value"] = "DONE"
            t.update()
        print(result)
        self.pulse_logger.info('Complete: Checking Version')
        self.pulse_logger.info('Complete: Checking instance pulse')

        return
Example #38
def print_latex(T: list, num_table: int) -> None:
    """
    Print iteration data as LaTeX tabular.

    Parameters
    ----------
    T : list
        List of iteration data.
    num_table : int
        Split iteration data into columns to display side by side.
    """

    if num_table != 1:
        if num_table <= 0:
            logging.warning(
                f'Non-positive number of side by side tables {num_table}, now print normally'
            )
        elif num_table > len(T):
            logging.warning(
                f'Number of side by side tables {num_table} exceeds number of data rows {len(T)}, now print normally'
            )
        else:
            num_row = len(T)
            num_col = len(T[0])
            row_per_side_table = int(ceil(num_row / num_table))
            table = T

            T = (
                (
                    table[i + j * row_per_side_table][k]
                    for i in [i]  # I lol'd so hardddddd
                    for j in range(num_table) for k in range(num_col)
                    if i + j * row_per_side_table < num_row)
                for i in range(row_per_side_table))

    latex = str(
        tabulate(T, tablefmt='latex',
                 floatfmt=f".{config['decimal_places']}f"))

    # Filter content line only (avoid tabular,...)
    latex = '\n'.join(
        filter(lambda line: line.endswith('\\'), latex.splitlines()))

    # Remove trailing zeros
    trailing_zero_matcher = re.compile(
        r'''
            (?<=         # Positive (=) look behind (<), i.e. must be (positive) preceded by (look behind)
                \.\d*    # Decimal dot, followed by zero or more digit
            )
            0            # Match a SINGLE trailing zero
            (?=          # Positive (=) look ahead (no <), i.e. must be (positive) succeeded by (look ahead)
                0*\b     # Zero or more 0, followed by word boundary \b
            )
        ''',
        re.VERBOSE  # Allow free-spacing and comment
    )
    latex = re.sub(trailing_zero_matcher, ' ', latex)

    # Remove trailing decimal dot
    latex = re.sub(r'\.(?=\s)', ' ', latex)

    # Remove redundant minus sign
    latex = re.sub(r'-(?=0\s)', ' ', latex)

    # Add spacing between columns and newline
    latex = re.sub('&', ' & ', latex)
    latex = re.sub(r'\\\\', r' \\\\', latex)

    # Remove excessive whitespaces
    # e.g. from
    # 8     \\
    # 9.2   \\
    # to
    # 8    \\
    # 9.2  \\

    # Split row into cells
    split_cells: Callable[[str], List[str]] = lambda row: \
        row[: len(row) - 2].split('&')

    # Merge cells into row
    merge_cells: Callable[[Iterable[str]], str] = lambda cells: \
        '&'.join(cells) + r'\\'

    # Split data into columns
    split_columns: Callable[[str], Iterable[str]] = lambda latex: \
        zip(*map(split_cells, latex.splitlines()))

    # Merge rows into string, reversing split_columns
    merge_columns: Callable[[Iterable[str]], str] = lambda columns: \
        '\n'.join(map(merge_cells, zip(*columns)))

    # Find the number of trailing spaces
    num_of_trailing_spaces: Callable[[str], int] = lambda cell: \
        len(cell) - len(cell.rstrip())

    # Find the number of excess spaces in each row
    find_excess_spaces: Callable[[Iterable[str]], int] = lambda column: \
        min((num_of_trailing_spaces(cell) for cell in column)) - 2    # Mandatory 2 trailing spaces

    def strip_excess_spaces(column):
        num_of_excess_spaces = find_excess_spaces(column)
        return map(lambda cell: cell[:len(cell) - num_of_excess_spaces],
                   column)

    latex = merge_columns(map(strip_excess_spaces, split_columns(latex)))

    print(latex)
def main():
    results = Manager().list()
    uniqueSymbols = Manager().list()
    filesProcessed = Manager().Value('i', 0)
    symbolsProcessed = Manager().Value('i', 0)

    out('''
    :: findSymbols.py - Finds PE Import/Exports based on supplied filters.
    
    Mariusz Banach / mgeeky, '21
    <mb [at] binary-offensive.com> 
''')

    args, regexes = opts(sys.argv)

    is_wow64 = (platform.architecture()[0] == '32bit'
                and 'ProgramFiles(x86)' in os.environ)

    start_time = datetime.now()
    try:
        if '\\system32\\' in args.path.lower() and is_wow64:
            verbose(
                args,
                'Redirecting input path from System32 to SysNative as we run from 32bit Python.'
            )
            args.path = args.path.lower().replace('\\system32\\',
                                                  '\\SysNative\\')

        if os.path.isdir(args.path):
            processDir(args, regexes, args.path, results, uniqueSymbols,
                       filesProcessed, symbolsProcessed)

        else:
            if not os.path.isfile(args.path):
                out(f'[!] Input file does not exist! Path: {args.path}')
                sys.exit(1)

            processFile(args, regexes, args.path, results, uniqueSymbols,
                        filesProcessed, symbolsProcessed)

    except KeyboardInterrupt:
        out(f'[-] User interrupted the scan.')

    time_elapsed = datetime.now() - start_time

    if args.format == 'json':
        resultsList = list(results)
        dumped = str(json.dumps(resultsList, indent=4))

        if args.output:
            with open(args.output, 'w') as f:
                f.write(dumped)
        else:
            print('\n' + dumped)
    else:
        resultsList = list(results)
        if len(resultsList) > 0:

            idx = headers.index(args.column)

            resultsList.sort(key=lambda x: x[idx], reverse=args.descending)
            headers[idx] = '▼ ' + headers[
                idx] if args.descending else '▲ ' + headers[idx]

            if args.first > 0:
                for i in range(len(resultsList) - args.first):
                    resultsList.pop()

            table = tabulate.tabulate(resultsList,
                                      headers=[
                                          '#',
                                      ] + headers,
                                      showindex='always',
                                      tablefmt='pretty')

            if args.output:
                with open(args.output, 'w', encoding='utf-8') as f:
                    f.write(str(table))
            else:
                print('\n' + table)

            if args.first > 0:
                out(f'\n[+] Found {Logger.colored(args, len(resultsList), "green")} symbols meeting all the criteria (but shown only first {Logger.colored(args, args.first, "magenta")} ones).\n'
                    )
            else:
                out(f'\n[+] Found {Logger.colored(args, len(resultsList), "green")} symbols meeting all the criteria.\n'
                    )

        else:
            out(f'[-] Did not find symbols meeting specified criteria.')

        out(f'[.] Processed {Logger.colored(args, filesProcessed.value, "green")} files and {Logger.colored(args, symbolsProcessed.value, "green")} symbols.'
            )
        out('[.] Time elapsed: {}'.format(
            Logger.colored(args, time_elapsed, "magenta")))
Example #40
            column_marker = 0
            columns = row.find_all('td')
            for column in columns:
                df.iat[row_marker, column_marker] = column.get_text()
                column_marker += 1
            if len(columns) > 0:
                row_marker += 1

        for col in df:
            try:
                df[col] = df[col].astype(float)
            except ValueError:
                pass

        return df


time = datetime.datetime.now(
    datetime.timezone.utc).strftime("%Y-%m-%d %H:%M%Z")
time1 = datetime.datetime.now(datetime.timezone.utc).strftime("%m-%d %H")
print("\n" + "Date/Time >: " + time)
print("Counters are reset at 23:59UTC" + "\n")

hp = HTMLTableParser()
table = hp.parse_url(url)[0][1]
print(tabulate(table, headers=["#","Country","Cases","+","Deaths","+",\
"Recovered","Active","Critical","CPM", "Deaths PM"], tablefmt='psql'))
table.to_csv("/root/coronavirus/kungflu.csv")

exit()
Example #41
def print_table(dataframe):
    print(tabulate(dataframe, headers="keys", tablefmt="psql"))
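A minimal driver for the same pattern, assuming pandas is installed; with headers="keys" the DataFrame's index shows up as the leftmost column:

import pandas as pd
from tabulate import tabulate

df = pd.DataFrame({'ticker': ['AAA', 'BBB'], 'price': [10.50, 7.25]})
print(tabulate(df, headers="keys", tablefmt="psql"))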
Example #42
import pandas as pd
import matplotlib.pyplot as plt
from pydataset import data
df = data('mtcars')

df.head()
df.columns
df.dtypes
df.shape

pd.options.display.max_columns = None
pd.options.display.width = 1000
#%%%
df
from tabulate import tabulate
print(tabulate(df, headers='firstrow'))

#%%%
from tabulate import tabulate
pdtabulate = lambda df: tabulate(df, headers='keys')
print(pdtabulate(df))
data.head()
print(pdtabulate(df.head()))
#%%%
from tabulate import tabulate
pdtabulate = lambda df: tabulate(df, headers='keys', tablefmt='psql')
print(pdtabulate(df))

#%%%html
from tabulate import tabulate
pdtabulate = lambda df: tabulate(df, headers='keys', tablefmt='html')
Example #43
#!/usr/bin/python3
"""
Prints out a prompted IP address in decimal, binary, and hex formatted in a table.
"""

from __future__ import print_function, unicode_literals
from tabulate import tabulate

ip_address = input("Please Enter Your IP Address Separated With a Decimal(.):")
#Prompts a user to input an IP address

decimal_address=ip_address.split(".")
#Breaks the IP address into octets

results = []
for number in decimal_address:
    results.append(number)
    results.append(bin(int(number)))
    results.append(hex(int(number)))
#Puts the IP address into decimal, binary and hexadecimal

table = [["Octet1",results[0],results[1],results[2]],
["Octet2",results[3],results[4],results[5]],
["Octet3",results[6],results[7],results[8]],
["Octet4",results[9],results[10],results[11]]]
#Inputs the values into a table

print(tabulate(table, headers=["Octet","Decimal", "Binary", "Hexadecimal"], tablefmt="pretty"))
Example #44
def tabulate_results(results):
    """
    Formats the results of a validation run into a table
    :param results: dictionary containing the results of a validation run. Key is the annotated filename, which maps
            to an inner dictionary where the key is the model name, and the value is the validation metric score
            of that model on the annotated filename

            Will take the form {"filename": {"model_name": metric_score, "model_name2": metric_score}, "filename2": ...}
    :return: the table (also prints to stdout)
    """
    # takes the form [<filename>, model1score, model2score,...]
    rows = []
    # list of model names
    headers = ["test filename"]
    flag = 0

    # dict that for each model, tracks the scores in a list
    per_model = defaultdict(list)

    # iterate over each file
    for fname, file_dict in results.items():

        model_scores = []
        # creates a list of model scores in alphabetical order based on model name
        for model_name in sorted(file_dict):
            model_scores.append(file_dict[model_name])
            per_model[model_name].append(file_dict[model_name])
            if not flag:
                headers.append(model_name)

        ## find the model name with the highest score for that file
        max_model_name = sorted(file_dict)[model_scores.index(
            max(model_scores))]
        model_scores.insert(0, fname)
        model_scores.append(max_model_name)
        rows.append(model_scores)
        flag = 1

    ## create the mean, median, stdev rows that tally the columns
    means = ["mean"]
    medians = ["median"]
    stdevs = ["stdev"]
    for model_name in sorted(per_model):
        means.append(mean(per_model[model_name]))
        medians.append(median(per_model[model_name]))
        stdevs.append(stdev(per_model[model_name]))

    ## get the model with the highest mean and median
    model_means = means[1:]  # doesn't include the "mean" header
    model_medians = medians[1:]
    max_mean_model_name = sorted(file_dict)[model_means.index(
        max(model_means))]
    max_median_model_name = sorted(file_dict)[model_medians.index(
        max(model_medians))]
    means.append(max_mean_model_name)
    medians.append(max_median_model_name)

    # spacing between data table and summary statistics
    rows.append(["- " for i in range(len(means))])  # spacing
    rows.append(means)
    rows.append(medians)
    rows.append(stdevs)
    headers.append("file's best model")
    table = tabulate(rows, headers)
    print(table)
    return table
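# A minimal usage sketch (not part of the original): the results dict maps each
# annotated filename to {model_name: metric_score}, as described in the docstring.
# The filenames, model names, and scores below are made up for illustration, and
# the module's own imports (tabulate, statistics mean/median/stdev,
# collections.defaultdict) are assumed to be in place.
if __name__ == '__main__':
    example_results = {
        "file1.ann": {"model_a": 0.81, "model_b": 0.74},
        "file2.ann": {"model_a": 0.68, "model_b": 0.77},
        "file3.ann": {"model_a": 0.90, "model_b": 0.85},
    }
    tabulate_results(example_results)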
Example #45
def compare_results():
    print('Comparing results')
    import pandas as pd
    from tabulate import tabulate

    # Read in output of demo script
    measure_fpath = 'measurements_haul83.csv'
    py_df = pd.read_csv(measure_fpath, index_col=None)
    # Convert python length output from mm into cm for consistency
    py_df['fishlen'] = py_df['fishlen'] / 10
    py_df['current_frame'] = py_df['current_frame'].astype(int)

    # janky CSV parsing
    py_df['box_pts1'] = py_df['box_pts1'].map(
        lambda p: eval(p.replace(';', ','), np.__dict__))
    py_df['box_pts2'] = py_df['box_pts2'].map(
        lambda p: eval(p.replace(';', ','), np.__dict__))

    py_df['obox1'] = [
        ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(int)))
        for pts in py_df['box_pts1']
    ]
    py_df['obox2'] = [
        ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(int)))
        for pts in py_df['box_pts2']
    ]
    py_df.drop(['box_pts1', 'box_pts2'], axis=1, inplace=True)

    # Remap to matlab names
    py_df = py_df.rename(columns={
        'error': 'Err',
        'fishlen': 'fishLength',
        'range': 'fishRange',
    })

    # Load matlab results
    mat_df = _read_kresimir_results()

    FORCE_COMPARABLE_RANGE = True
    # FORCE_COMPARABLE_RANGE = False
    if FORCE_COMPARABLE_RANGE:
        # Be absolutely certain we are in comparable regions (may slightly bias
        # results, against python and in favor of matlab)
        min_frame = max(mat_df.current_frame.min(), py_df.current_frame.min())
        max_frame = min(mat_df.current_frame.max(), py_df.current_frame.max())
        print('min_frame = {!r}'.format(min_frame))
        print('max_frame = {!r}'.format(max_frame))

        mat_df = mat_df[(mat_df.current_frame >= min_frame)
                        & (mat_df.current_frame <= max_frame)]
        py_df = py_df[(py_df.current_frame >= min_frame)
                      & (py_df.current_frame <= max_frame)]

    intersect_frames = np.intersect1d(mat_df.current_frame,
                                      py_df.current_frame)
    print('intersecting frames = {} / {} (matlab)'.format(
        len(intersect_frames), len(set(mat_df.current_frame))))
    print('intersecting frames = {} / {} (python)'.format(
        len(intersect_frames), len(set(py_df.current_frame))))

    #  Reuse the hungarian algorithm implementation from ctalgo
    min_assign = ctalgo.FishStereoMeasurments.minimum_weight_assignment

    correspond = []
    for f in intersect_frames:
        pidxs = np.where(py_df.current_frame == f)[0]
        midxs = np.where(mat_df.current_frame == f)[0]

        pdf = py_df.iloc[pidxs]
        mdf = mat_df.iloc[midxs]

        ppts1 = np.array([o.center for o in pdf['obox1']])
        mpts1 = np.array([o.center for o in mdf['obox1']])

        ppts2 = np.array([o.center for o in pdf['obox2']])
        mpts2 = np.array([o.center for o in mdf['obox2']])

        dists1 = sklearn.metrics.pairwise.pairwise_distances(ppts1, mpts1)
        dists2 = sklearn.metrics.pairwise.pairwise_distances(ppts2, mpts2)

        # arbitrarily chosen threshold
        thresh = 100
        for i, j in min_assign(dists1):
            d1 = dists1[i, j]
            d2 = dists2[i, j]
            if d1 < thresh and d2 < thresh and abs(d1 - d2) < thresh / 4:
                correspond.append((pidxs[i], midxs[j]))
    correspond = np.array(correspond)

    # pflags = np.array(ub.boolmask(correspond.T[0], len(py_df)))
    mflags = np.array(ub.boolmask(correspond.T[1], len(mat_df)))
    # print('there are {} detections that seem to be in common'.format(len(correspond)))
    # print('The QC flags of the common detections are:       {}'.format(
    #     ub.dict_hist(mat_df[mflags]['QC'].values)))
    # print('The QC flags of the other matlab detections are: {}'.format(
    #     ub.dict_hist(mat_df[~mflags]['QC'].values)))

    print('\n\n----\n## All stats\n')
    print(
        ub.codeblock('''
        Overall, the matlab script made {nmat} length measurements and the
        python script made {npy} length measurements.  Here is a table
        summarizing the average lengths / ranges / errors of each script:
        ''').format(npy=len(py_df), nmat=len(mat_df)))
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(
            py_df[key].mean(), py_df[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(
            mat_df[key].mean(), mat_df[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))

    print('\n\n----\n## Only COMMON detections\n')
    py_df_c = py_df.iloc[correspond.T[0]]
    mat_df_c = mat_df.iloc[correspond.T[1]]
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(
            py_df_c[key].mean(), py_df_c[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(
            mat_df_c[key].mean(), mat_df_c[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df_c))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df_c))

    print(
        ub.codeblock('''
        Now, we investigate how many detections matlab and python made in common.
        (Note: choosing which detections in one version correspond to which in
         another is done using a heuristic based on distances between bbox
         centers and a thresholded minimum assignment problem).

        Python made {npy_c}/{nmat} = {percent:.2f}% of the detections matlab made

        ''').format(npy_c=len(py_df_c),
                    nmat=len(mat_df),
                    percent=100 * len(py_df_c) / len(mat_df)))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))

    print('\n\n----\n## Evaluation using the QC code\n')
    hist_hit = ub.dict_hist(mat_df[mflags]['QC'].values)
    hist_miss = ub.dict_hist(mat_df[~mflags]['QC'].values)
    print(
        ub.codeblock('''
        However, not all of those matlab detections were good. Because we have
        detections in correspondence with each other, we can assign the python
        detections QC codes.

        Here is a histogram of the QC codes for these python detections:
        {}
        (Note: read histogram as <QC-code>: <frequency>)

        Here is a histogram of the other matlab detections that python did not
        find:
        {}

        To summarize:
            python correctly rejected {:.2f}% of the matlab QC=0 detections
            python correctly accepted {:.2f}% of the matlab QC=1 detections
            python correctly accepted {:.2f}% of the matlab QC=2 detections

            Note that because python made detections that matlab did not make,
            the remaining {} detections may be right or wrong, but there is
            no way to tell from this analysis.

        Lastly, here are the statistics for the common detections that had a
        non-zero QC code.
        ''').format(ub.repr2(hist_hit, nl=1), ub.repr2(hist_miss, nl=1),
                    100 * hist_miss[0] / (hist_hit[0] + hist_miss[0]),
                    100 * hist_hit[1] / (hist_hit[1] + hist_miss[1]),
                    100 * hist_hit[2] / (hist_hit[2] + hist_miss[2]),
                    len(py_df) - len(py_df_c)))

    is_qc = (mat_df_c['QC'] > 0).values
    mat_df_c = mat_df_c[is_qc]
    py_df_c = py_df_c[is_qc]
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(
            py_df_c[key].mean(), py_df_c[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(
            mat_df_c[key].mean(), mat_df_c[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df_c))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df_c))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))
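# Standalone sketch (not part of the original) of the correspondence heuristic
# described above: match detections from the two scripts by bbox-center distance
# with a minimum-weight assignment and keep only pairs under a threshold. It
# swaps ctalgo's minimum_weight_assignment for scipy's linear_sum_assignment;
# the centers and threshold below are made up for illustration.
def _match_by_center_distance():
    import numpy as np
    from scipy.optimize import linear_sum_assignment
    from sklearn.metrics.pairwise import pairwise_distances

    py_centers = np.array([[10.0, 12.0], [200.0, 40.0], [55.0, 300.0]])
    mat_centers = np.array([[12.0, 11.0], [60.0, 295.0]])

    dists = pairwise_distances(py_centers, mat_centers)
    rows, cols = linear_sum_assignment(dists)

    thresh = 100
    pairs = [(int(i), int(j)) for i, j in zip(rows, cols) if dists[i, j] < thresh]
    print(pairs)  # expected: [(0, 0), (2, 1)]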
Example #46
        print j.nodeid,

for i in all_nodes:
    fname = "chain" + str(i.nodeid)
    fcreate = open(fname, "w+")
    for txt in i.my_chain.flist:
        fcreate.write(txt + "\n")
    fcreate.close()

print "Total trans happened: ", len(all_nodes[0].ledger)
'''for i in all_nodes:
    temp = i.my_chain.find_longest_chain()
    print i.nodeid, ": ", i.my_chain.print_blockchain(temp)'''

#printing entire blockchain of the node
for i in all_nodes:
    print "My blockchain", i.nodeid, ": ", i.my_chain.print_longest()

check_list = verify_btc()
nodeids = []
for j in all_nodes:
    nodeids.append([
        j.nodeid, j.nature, j.num_peers, j.btc, check_list[j.nodeid - 1001],
        j.my_chain.print_blockchain(j.my_chain.find_longest_chain())
    ])
print tabulate(nodeids,
               headers=[
                   'Node ID', 'Nature', 'No. of peers', "BTC", "Cal BTC",
                   "Chain length"
               ])
desk_filter([1],[3])
#print(df2)
index = 0

coloringtester = tk.Tk()
#for index, row in df2.iterrows():
    #print(index)
    #print(row)
    #tk.Label(coloringtester, text=row,font=("Helvetica", 16)).grid(row=0, column=0)
#for i in range(len(df2)):
   # print(df2.iloc[i])
    #emplist = []
    #emplist.append()
    #tk.Label(coloringtester, text=df2.iloc[i],font=("Helvetica", 16)).pack()
layout = tabulate(df2)
#print(layout)
#alist = [[1,2,3],[4],[5,6]]
#for i in alist:
#    print(i)
#    tk.Label(coloringtester, text=i,font=("Helvetica", 16)).pack()
#coloringtester.mainloop()
#print(df2.to_csv(header=None, index=False))
headers = []
for col in df2.columns:
    #print(col)
    headers.append(col)
headers[0] = 'Desk#'
headerindex = 0
for i in headers:
    tk.Label(coloringtester, text=i,font=("Helvetica", 16),bg = 'cyan',anchor= 'e',relief = 'solid').grid(row = 0, column = headerindex, sticky = 'w'+'e')
Example #48
 def _tabulate(self, table, **args):
     if tabulate:
         tabular = tabulate(table, **args)
         self._print(tabular)
     else:
         self._print("\n".join("\t|".join(line) for line in table))
 def create_numerical_table(self):
     """
     Prints a summary table for t-test comparisons of numerical values at the patient level
     """
     numerical_table_data = [self._calc_t_test(column_label) for column_label in self._numerical_columns]
     print(tabulate(numerical_table_data, headers=['Feature', 'Mean (SD)', 'Mean (SD)', "Student's T", 'P-Value']))
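# Hypothetical sketch (not from the original class) of what one _calc_t_test row
# could look like so that it lines up with the headers above: [feature,
# "mean (sd)" for group A, "mean (sd)" for group B, Student's t, p-value],
# here computed with scipy.stats.ttest_ind on two made-up samples.
def calc_t_test_row(feature, group_a, group_b):
    from statistics import mean, stdev
    from scipy import stats

    t_stat, p_value = stats.ttest_ind(group_a, group_b)
    return [feature,
            '{:.2f} ({:.2f})'.format(mean(group_a), stdev(group_a)),
            '{:.2f} ({:.2f})'.format(mean(group_b), stdev(group_b)),
            round(t_stat, 3),
            round(p_value, 3)]

print(tabulate([calc_t_test_row('age', [54, 61, 48, 67], [59, 72, 66, 70])],
               headers=['Feature', 'Mean (SD)', 'Mean (SD)', "Student's T", 'P-Value']))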
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold

from sklearn.metrics import classification_report, roc_auc_score, \
precision_recall_curve, auc, roc_curve

titanic = sns.load_dataset("titanic")

# This is what the data looks like; display the first 5 rows.

headers = [c for c in titanic.columns]
headers.insert(0,"ID")
print(tabulate(titanic[0:5], headers, tablefmt="pipe"))
# Check the result
print(titanic.head())

# Convert categorical variables to dummy variables
def convert_dummies(df, key):
    dum = pd.get_dummies(df[key])
    ks = dum.keys()
    print("Removing {} from {}...".format(ks[0], key))
    dum.columns = [key + "_" + str(k) for k in ks]  
    df = pd.concat((df, dum.iloc[:,1:]), axis=1)
    df = df.drop(key, axis=1)
    return df

titanic = convert_dummies(titanic, "who")
titanic = convert_dummies(titanic, "class")
                new_column_order.extend(current_columns)
                output_data_frame = output_data_frame[new_column_order]
                output_data_frame.drop(columns=old_col_name, inplace=True)
            elif (is_target
                  and old_col_suffix != '(t)') or (not is_target and re.search(
                      't-1', old_col_suffix) is not None):
                output_data_frame[new_col_name] = output_data_frame[
                    old_col_name]
                output_data_frame.drop(columns=old_col_name, inplace=True)

            else:
                output_data_frame.drop(columns=old_col_name, inplace=True)

ts_data_frame = output_data_frame.copy()
ts_data_frame.set_index('Date', inplace=True)
print(tabulate(ts_data_frame.tail(), ts_data_frame.columns.values))

history = pd.DataFrame(index=ts_data_frame.index.values)
history['Interval_Date'] = history.index.values
history['Interval_Date'] = history['Interval_Date'].shift(+INTERVAL_IN_DAYS)
history = pd.merge(history,
                   mv_time_series,
                   how='left',
                   left_on='Interval_Date',
                   right_on='Date')
history.index = ts_data_frame.index.values
history = history[[TARGET_COL]]

groups = [3, 4, 5]
# if 1 in graphs:
#     i = 1
Example #52
            if r.status_code != 200:
                logger.critical("Failed to contact {}: {} | time={}s size={}B".format(mpd.get_url(), r.status_code,
                                                                                      round(t.elapsed_secs, 2),
                                                                                      r.headers['content-length']))
            else:
                logger.info("MPD OK | time={}s size={}B".format(t.elapsed_secs, r.headers['content-length']))

        else:

            logger.info("MPD status.")
            table = [["Descriptions", "Values"], ["http code", r.status_code],
                     ["time spend (sec)", round(t.elapsed_secs, 2)], ["content-length", r.headers['content-length']],
                     ["url", str(r.url)], ["content-type", r.headers['content-type']]]

            # Output table
            print tabulate(table, headers="firstrow")

            # Get Lexer base on content-type
            if r.headers['content-type'].startswith('application/xml'):
                lexer = lexers.XmlLexer()
            elif r.headers['content-type'].startswith('application/json'):
                lexer = lexers.JsonLexer()
            else:
                lexer = lexers.HtmlLexer()

            # Try to print JSON content
            try:
                # Format Json with indentation
                formatted_http_response = json.dumps(json.loads(r.text), sort_keys=True, indent=2)
            except Exception:
                formatted_http_response = r.text
Example #53
    elif angka == 12:
        return "C"
    elif angka == 13:
        return "D"
    elif angka == 14:
        return "E"
    elif angka == 15:
        return "F"
    elif angka == 16:
        return "G"


print("Program Sederhana untuk Konversi Bilangan Desimal-Biner-Octal-Hexa")
angka = int(
    input("Masukkan bilangan untuk dikonversi dalam bentuk Desimal ? "))
print("==============================================================")

biner, hasilBiner = konversi(angka, 2)
octal, hasilOctal = konversi(angka, 8)
hexa, hasilHexa = konversi(angka, 16)

combine = list(it.zip_longest(biner, octal, hexa))

header = ("Binner", "Octal", "Hexadecimal")

print(tabulate(combine, header, tablefmt="plain"))
print("")
print("==============================================================")
print("Binner   =", hasilBiner, end=" ")
print("Octal    =", hasilOctal, end=" ")
print("Hexa     =", hasilHexa, end=" ")
Example #54
# In this way you will determine how sex and cabin class are related to survival.

import csv
import pandas as pd
import pprint as pp
from tabulate import tabulate


def get_data_from_csv(filename):

    f = open(filename, "r")
    list_dicts = [row for row in csv.DictReader(f, delimiter=',')]
    list_inner_dicts = [dict(row) for row in list_dicts]
    f.close()

    return list_inner_dicts


data_lst = get_data_from_csv(r'C:\Users\lisam\Downloads\titanic.csv')
dt = pd.DataFrame.from_dict(data_lst)
dt['Qty'] = 1

wanted_keys = ['Sex', 'Pclass', 'Survived', 'Qty']
dt_new = dt[wanted_keys]
wanted_keys[:] = ['Sex', 'Pclass', 'Survived']
#dt_new['Qty'] = 1
grouped = dt_new.groupby(wanted_keys).sum()
# print(grouped)
pp.pprint(grouped)
print(tabulate(grouped, headers='keys', tablefmt='psql'))
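# A possible follow-up (not in the original): since the question is how sex and
# cabin class relate to survival, a pivot of the survival rate by Sex and Pclass
# reads more directly than the raw counts above.
rate = dt.assign(Survived=dt['Survived'].astype(int)).pivot_table(
    values='Survived', index='Sex', columns='Pclass', aggfunc='mean')
print(tabulate(rate, headers='keys', tablefmt='psql'))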
Example #55
def showData(tab):
    cursorA.execute("SELECT * FROM {name}".format(name=tab))
    data_tmp = cursorA.fetchall()
    print(tabulate(data_tmp, headers=cursorA.column_names), '\n')
            img.append(img_train[rn])
            label.append(label_train[rn])
        n.train(img, label)
    t2 = time.time()
    u = n.losses(img_test, label_test)
    losses[i] = u
    t3 = time.time()
    print(
        f"Batch : {i + 1}/{BATCH} en {round(t2 - t1, 4)} s avec {round(t3 - t2, 4)} en loss ({EPOCH} {BATCH_SIZE})"
    )

ftime = time.time()
## NN representation
evaluation, accuracy, precision, recall, f1_score = n.confusion(
    img_test, label_test)
peval = tabulate(evaluation, tablefmt="fancy_grid")
pscore = tabulate([["accuracy", accuracy], ["precision", precision],
                   ["recall", recall], ["f1_score", f1_score]],
                  tablefmt="fancy_grid")
str_result = f"Evaluation :\n{peval}\nScore :\n{pscore}\nTime to train : {round(ftime - stime, 4)}"
print(str_result)

plt.figure()
plt.plot(losses.get())
plt.xlabel("BATCH")
plt.ylabel("Loss")
plt.title(f"Covid Loss with {BATCH} batchs of {BATCH_SIZE} retropopagation")

filename = "naif_s90_b300_e1_bs10"
plt.savefig(f'plot/{filename}.png')
with open(f"{filename}.txt", "w") as f:
Example #57
 def print_merged_table_per_algorithm(self, results):
     for det in self.detectors:
         table = tabulate(results[results['algorithm'] == det.name], headers='keys', tablefmt='psql')
         self.logger.info(f'Detector: {det.name}\n{table}')
Example #58
def ValueCounts(f, data, target, columns):
    # To view summary aggregates 
    f.writelines("\n\nVALUE COUNTS:\n\n")
    dataf= pd.DataFrame(list(zip(data[target].value_counts().index,data[target].value_counts())), columns=['Column','counts'])
    f.write(tabulate(dataf, tablefmt="grid", headers="keys", showindex=False))
    f.writelines("\n---------------------------------------------------------------------------------------------------------------------------------------")
Example #59
 def print_merged_table_per_dataset(self, results):
     for ds in self.datasets:
         table = tabulate(results[results['dataset'] == ds.name], headers='keys', tablefmt='psql')
         self.logger.info(f'Dataset: {ds.name}\n{table}')
Example #60
def print_list(datasets):
    """Print dataset list in a mono-replica context."""
    li = [[d['status'], d['dataset_functional_id']] for d in datasets]
    print tabulate(li, tablefmt="plain")