Example 1
def covid(countries=None):
    if countries is None:
        countries = [
            'italy', 'uk', 'france', 'spain', 'switzerland', 'usa', 'greece',
            'brazil'
        ]
    try:
        res = requests.get(f'{covid_api}/countries').json()
    except (requests.exceptions.RequestException,
            json.decoder.JSONDecodeError) as e:
        return e
    rows = []
    for i in res:
        if i['country'].lower() in countries:
            i['country'] = isoify(i['country'])
            if i['country'] == 'Switzerland':
                i['country'] = 'Suisse'
            for k in i.keys():
                if isinstance(i[k], int) and i[k] > 10000:
                    i[k] = human_format(i[k])
            row = [
                i.get('country'),
                i.get('cases'),
                i.get('todayCases'),
                i.get('deaths'),
                i.get('todayDeaths')
            ]
            rows.append(row)
    table = tabulate.tabulate(rows,
                              headers=['', 'Case', 'New', 'D', 'ND'],
                              tablefmt='simple',
                              numalign='right',
                              stralign='right')
    return f'<pre>\nCOVID-19 situation:\n{table}\n</pre>\nhttps://www.worldometers.info/coronavirus/#countries'
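
All of the examples in this listing rely on a human_format helper whose implementation is not shown. As a point of reference, here is a minimal sketch of the conventional thousands-suffix approach (an assumption, not the repository's actual code); note Example 1 only formats values above 10000, so small counts stay exact:

def human_format(num):
    # Scale down by powers of 1000 and attach a suffix:
    # 12345 -> '12.3K', 4200000 -> '4.2M' (assumed convention).
    num = float(num)
    for suffix in ['', 'K', 'M', 'B', 'T']:
        if abs(num) < 1000:
            return f'{num:.1f}'.rstrip('0').rstrip('.') + suffix
        num /= 1000
    return f'{num:.1f}P'
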
Example 2
def create_container_with_specific_mems_cpus(name, mems, cpus):
    need_fake_numa()
    os.mkdir(full_path(name))
    utils.write_one_line(cpuset_attr(name, 'mem_hardwall'), '1')
    utils.write_one_line(mems_path(name), ','.join(map(str, mems)))
    utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
    logging.debug('container %s has %d cpus and %d nodes totalling %s bytes',
                  name, len(cpus), len(get_mem_nodes(name)),
                  utils.human_format(container_bytes(name)))
Example 3
def create_container_via_memcg(name, parent, bytes, cpus):
    # create container via direct memcg cgroup writes
    os.mkdir(full_path(name))
    nodes = utils.read_one_line(mems_path(parent))
    utils.write_one_line(mems_path(name), nodes)  # inherit parent's nodes
    utils.write_one_line(memory_path(name)+'.limit_in_bytes', str(bytes))
    utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
    logging.debug('Created container %s directly via memcg,'
                  ' has %d cpus and %s bytes',
                  name, len(cpus), utils.human_format(container_bytes(name)))
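
Examples 2 and 3 also depend on utils.write_one_line and utils.read_one_line, which are not reproduced here. Assuming they are thin wrappers around open(), which is what their names and call sites suggest, they would look roughly like:

def write_one_line(path, line):
    # Write a single line, newline-terminated, to a cgroup control file.
    with open(path, 'w') as f:
        f.write(line.rstrip('\n') + '\n')

def read_one_line(path):
    # Read the first line of a control file, stripped of its newline.
    with open(path) as f:
        return f.readline().rstrip('\n')
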
Example 4
    def compute_memory(self):
        ssize = 1
        print ("***"*200)
        print ("saliency_size", self.saliency_size)
        for s in self.saliency_size:
            ssize *= s
        saliency_memory = 4 * self.args.experiment.memory_budget * ssize

        ncha, size, _ = self.args.inputsize
        image_size = ncha * size * size

        samples_memory = 4 * self.args.experiment.memory_budget * image_size
        count = sum(p.numel() for p in self.model.parameters() if p.requires_grad)

        print('Num parameters in the entire model    = %s ' % (utils.human_format(count)))
        architecture_memory = 4 * count

        print("-------------------------->  Saliency memory size: (%sB)" % utils.human_format(saliency_memory))
        print("-------------------------->  Episodic memory size: (%sB)" % utils.human_format(samples_memory))
        print("------------------------------------------------------------------------------")
        print("                             TOTAL:  %sB" % utils.human_format(
            architecture_memory+samples_memory+saliency_memory))
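
All three terms count float32 storage at 4 bytes per element. With hypothetical values memory_budget = 1000 and inputsize = (3, 32, 32), the episodic term works out as:

# Hypothetical numbers, only to make the units concrete.
memory_budget = 1000                    # stored exemplars
ncha, size = 3, 32                      # channels, spatial size
image_size = ncha * size * size         # 3072 elements per image
samples_memory = 4 * memory_budget * image_size
print(samples_memory)                   # 12288000 bytes, roughly 12.3 MB
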
Example 5
    def render(self):

        self.__clear_frame()

        location = self.__tracker.getCurrentLocation()
        presents = human_format(location.get("presentsDelivered"))
        stopover = location.get("stopover")
        curr_location = None

        # Printing local time
        local_time = datetime.fromtimestamp(
            int(self.__tracker.get_adj_time() / 1000)).strftime("%H:%M")
        self.__frame.text((6, 48),
                          "LT: {} | D: {}".format(local_time, presents),
                          font=self.__sm_font,
                          fill=1)

        if stopover:
            departure = datetime.fromtimestamp(int(stopover.departure /
                                                   1000)).strftime("%H:%M")
            self.__frame.text((6, 4), "Landed", font=self.__sm_font, fill=1)
            self.__frame.text((76, 4),
                              "ETD: {}".format(departure),
                              font=self.__sm_font,
                              fill=1)
            curr_location = stopover

        else:
            eta = datetime.fromtimestamp(
                int(location.get("next").arrival / 1000)).strftime("%H:%M")
            self.__frame.text((6, 4), "Next stop", font=self.__sm_font, fill=1)
            self.__frame.text((76, 4),
                              "ETA: {}".format(eta),
                              font=self.__sm_font,
                              fill=1)
            curr_location = location.get("next")

        # Checking if the location has changed
        if (self.__last_location is None
                or curr_location.id != self.__last_location.id):
            self.__last_location = curr_location
            self.__location_str = ScrollingText(text="{}, {}".format(
                self.__last_location.city, self.__last_location.region))

        self.__frame.text((6, 22),
                          self.__location_str.render(),
                          font=self.__lg_font,
                          fill=1)

        return self.__image
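
ScrollingText is not defined in this listing. From its use above (constructed with a text, render() called once per frame) it is presumably a fixed-width window that advances over the string; a rough sketch under that assumption:

class ScrollingText:
    def __init__(self, text, width=16):
        self.text = text + '   '        # gap before the text wraps around
        self.width = width
        self.offset = 0

    def render(self):
        # Return the current window and advance one character per call.
        doubled = self.text + self.text
        view = doubled[self.offset:self.offset + self.width]
        self.offset = (self.offset + 1) % len(self.text)
        return view
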
Example 6
def get_player_contract_table(html, url):
    # html = utils.get_page(url)
    # with open('spotrac.txt', 'r') as w:
    #     html = w.read()
    bs = BeautifulSoup(html, 'html.parser')
    blurb = bs.find("p", {"class":"currentinfo"})
    output = ""
    if blurb is not None:
        output = blurb.get_text() + "\n\n"
    table = bs.find("table", {"class":"salaryTable salaryInfo current visible-xs"})
    rows = table.find_all("tr")
    contract_table = []
    for row in rows:
        cells = row.find_all('td')
        r = {'0':cells[0].get_text(), '1':cells[1].get_text()}
        contract_table.append(r)
    output = output + "```python\n%s```" % utils.format_table(['0','1'], contract_table, showlabels=False)

    table = bs.find("table", {"class":"salaryTable current"})
    rows = table.find_all("tr")
    salary_table = []
    salary_th = []
    for header in rows[0].find_all('th'):
        salary_th.append(header.get_text())
    print(salary_th)
    for row in rows[1:]:
        year_row = {}
        cells = row.find_all('td')
        # print(cells)
        for i in range(len(salary_th)):
            try:
                if 'noborder' not in cells[i]['class'] or 'fayear' in cells[i]['class']:
                    data = cells[i].get_text().lstrip()
                    if data.startswith('$'):
                        if '(' in data:
                            data = data[:data.find('(')]
                        data = utils.human_format(data)
                    year_row[salary_th[i]] = data
            except (IndexError, KeyError):
                # row has fewer cells than headers, or a cell lacks a class
                pass
        if len(year_row.keys()) > 0:
            salary_table.append(year_row)
    print(salary_table)
    labs = ['Year', 'Age', 'Base Salary','Luxury Tax Salary','Payroll  Salary', 'Adjusted Salary', 'Yearly Cash']
    repl = {"Base Salary":"Base", "Luxury Tax Salary":"Luxury Tax", "Payroll  Salary":"Payroll", "Adjusted Salary":"Adjusted", "Yearly Cash":"Take home"}
    output = output + "\n```python\n%s```" % utils.format_table(labs, salary_table, repl_map=repl)
    output = output + "\n<%s>" % url
    return output
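
Unlike the other examples, this one passes human_format a string such as '$12,345,678', so the helper in this repository evidently normalizes currency strings first. A hedged sketch of such an adapter (assumed behavior, not the repository's code):

def human_format(value):
    # Accept a number or a currency string like '$12,345,678' (assumption).
    if isinstance(value, str):
        value = float(value.replace('$', '').replace(',', '').strip())
    for suffix in ['', 'K', 'M', 'B']:
        if abs(value) < 1000:
            return f'${value:.1f}'.rstrip('0').rstrip('.') + suffix
        value /= 1000
    return f'${value:.1f}T'
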
Example 7
def corona(countries=None):
    if countries is None:
        countries = [
            'italy', 'uk', 'france', 'spain', 'switzerland', 'usa', 'greece',
            'brazil'
        ]
    countries = countries + ['total:']  # avoid mutating the caller's list
    url = 'https://www.worldometers.info/coronavirus/'
    try:
        r = requests.get(url)
    except requests.exceptions.RequestException as e:
        return e
    soup = BeautifulSoup(r.text, features='html.parser')
    table = soup.table
    table_rows = table.find_all('tr')
    rows = []
    for tr in table_rows:
        td = tr.find_all('td')
        row = [
            i.text.strip(' ').replace(',', '').replace('+', '')
            for i in td[0:4]
        ]
        if row and ((row[0].lower() in countries) or ('all' in countries)):
            row[1] = int(row[1])
            if not row[2]:
                row[2] = 0
            row[2] = int(row[2])
            if not row[3]:
                row[3] = 0
            row[3] = int(row[3])
            if row[0] == 'Switzerland':
                row[0] = 'Suisse'
            if row[0] == 'Total:':
                row[0] = 'Global'
            # append a list, not a generator; rename the loop variable so it
            # does not shadow the response object r
            rows.append([
                human_format(v) if isinstance(v, int) and v > 10000 else v
                for v in row
            ])
    fancy_table = tabulate.tabulate(
        rows,
        headers=['Pais', 'Cases', 'New', 'Deaths'],
        tablefmt='simple',
        numalign='right')
    text = f'<pre>\n{fancy_table}\n</pre>'
    return text
Example 8
def discover_container_style():
    """Fetch information about containers and cache in global state."""
    global super_root_path, cpuset_prefix
    global mem_isolation_on, fake_numa_containers
    global node_mbytes, root_container_bytes

    if super_root_path != '':
        return  # already looked up

    if os.path.exists('/dev/cgroup/tasks') or \
       os.path.exists('/dev/cgroup/cpuset/tasks'):
        # running on 2.6.26 or later kernel with containers on:
        super_root_path = '/dev/cgroup'
        cpuset_prefix = 'cpuset.'
        if get_boot_numa():
            mem_isolation_on = fake_numa_containers = True
        else:  # memcg containers IFF compiled-in & mounted & non-fakenuma boot
            fake_numa_containers = False
            # TODO(teravest): Fix this to detect correct mounting.
            mem_isolation_on = os.path.exists(
                    '/dev/cgroup/memory.limit_in_bytes')
            # TODO: handle possibility of where memcg is mounted as its own
            #       cgroup hierarchy, separate from cpuset?

    else:
        # neither cpuset nor cgroup filesystem active:
        super_root_path = None
        cpuset_prefix = 'no_cpusets_or_cgroups_exist'
        mem_isolation_on = fake_numa_containers = False

    logging.debug('mem_isolation: %s', mem_isolation_on)
    logging.debug('fake_numa_containers: %s', fake_numa_containers)
    if fake_numa_containers:
        node_mbytes = int(mbytes_per_mem_node())

    elif mem_isolation_on:  # memcg-style containers
        # For now, limit total of all containers to using just 98% of system's
        # visible total ram, to avoid oom events at system level, and avoid
        # page reclaim overhead from going above kswapd highwater mark.
        system_visible_pages = utils.memtotal() >> 2
        usable_pages = int(system_visible_pages * 0.98)
        root_container_bytes = usable_pages << 12
        logging.debug('root_container_bytes: %s',
                      utils.human_format(root_container_bytes))
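
The bit-shifts assume utils.memtotal() reports kilobytes: >> 2 converts KB to 4 KiB pages, and << 12 converts pages back to bytes. For an assumed 8 GB machine:

memtotal_kb = 8 * 1024 * 1024               # assume memtotal() returns KB
system_visible_pages = memtotal_kb >> 2     # 2097152 pages of 4 KiB
usable_pages = int(system_visible_pages * 0.98)
root_container_bytes = usable_pages << 12   # ~7.84 GiB left for containers
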
Example 9
    def views(self, formatted: bool = True):
        """View counts are a key signal of how popular a song is: they make it
        possible to build leaderboards, statistics and top rankings. Views can
        only be gained through official channels such as the official website,
        the Telegram bot or the Discord bot.

        Args:
            formatted (bool, optional): Whether to format the number in a
                human-readable way. Defaults to True.

        Returns:
            int or str:
                The number of views of the video, formatted as a string when
                formatted is True.
        """
        self.cursor.execute("SELECT views FROM songs WHERE id = ?",
                            (self.id, ))
        views = self.cursor.fetchone()[0]
        self.cursor.close()
        self.conn.close()
        return human_format(views) if formatted else views
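
Note that the method closes its cursor and connection, so each instance supports a single views() call. A usage sketch (the Song constructor shown here is an assumption):

print(Song(42).views())                  # e.g. '1.2M', human-readable
print(Song(42).views(formatted=False))   # e.g. 1234567, raw count
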
Example 10
def update_cards(n, time_range, exclude_rt):
    conn = sqlite3.connect(DATABASE_PATH)
    time_range = time_range if time_range in (15, 60, 1440) else 15
    filter_rt = exclude_rt == [1]
    query = f"""
    select
        target as target,
        count(*) as responses,
        avg(case when sentiment < 0.5 then 0 else 1 end) * 100 as sentiment
    from tweets
    where
        datetime(tweet_timestamp) >= datetime('now', '-{time_range} minutes')
        {"and IS_RT = 0" if filter_rt else ""}
        group by target;
    """
    df = pd.read_sql_query(query, conn)
    conn.close()  # release the sqlite handle once the query has run
    cards = []
    for target in TARGETS_DF.itertuples():
        try:
            responses = df.loc[df.target == target.id, "responses"].item()
            sentiment_score = df.loc[df.target == target.id,
                                     "sentiment"].item()
            cards.append(card(target, responses, sentiment_score))
        except Exception as e:
            logging.debug(e)
    total_responses_num = df.responses.sum()
    total_responses = human_format(total_responses_num)
    total_approval_num = 0
    try:
        total_approval_num = np.nanmean(df.sentiment)
    except Exception as e:
        logging.debug(e)
    total_approval = f"{total_approval_num:.0f}%"
    approval_style = {"color": get_color_from_score(total_approval_num)}
    return cards, total_responses, total_approval, approval_style
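
The f-string interpolation above is only safe because time_range is clamped to a whitelist first. The same query can also be written with bound parameters; a defensive alternative sketch:

query = """
select
    target as target,
    count(*) as responses,
    avg(case when sentiment < 0.5 then 0 else 1 end) * 100 as sentiment
from tweets
where
    datetime(tweet_timestamp) >= datetime('now', '-' || ? || ' minutes')
    and (? = 0 or IS_RT = 0)
group by target;
"""
df = pd.read_sql_query(query, conn, params=(time_range, int(filter_rt)))
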
Example 11
    def __init__(self, model, vocab):

        assert isinstance(model, (dict, str))
        assert isinstance(vocab, (tuple, str))

        # dataset
        logger.info('-' * 100)
        logger.info('Loading test dataset')
        self.dataset = data.CodePtrDataset(mode='test')
        self.dataset_size = len(self.dataset)
        logger.info('Size of test dataset: {}'.format(self.dataset_size))

        logger.info('The dataset was successfully loaded')

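        # note: the collate_fn lambda closes over vocab attributes that are
        # only assigned below; it is evaluated lazily, when the loader is
        # iterated, by which point the vocabularies exist.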
        self.dataloader = DataLoader(dataset=self.dataset,
                                     batch_size=config.test_batch_size,
                                     collate_fn=lambda *args: utils.collate_fn(args,
                                                                               source_vocab=self.source_vocab,
                                                                               code_vocab=self.code_vocab,
                                                                               ast_vocab=self.ast_vocab,
                                                                               nl_vocab=self.nl_vocab,
                                                                               raw_nl=True))

        # vocab
        logger.info('-' * 100)
        if isinstance(vocab, tuple):
            logger.info('Vocabularies are passed from parameters')
            assert len(vocab) == 4
            self.source_vocab, self.code_vocab, self.ast_vocab, self.nl_vocab = vocab
        else:
            logger.info('Vocabularies are read from dir: {}'.format(vocab))
            self.source_vocab = utils.load_vocab(vocab, 'source')
            self.code_vocab = utils.load_vocab(vocab, 'code')
            self.ast_vocab = utils.load_vocab(vocab, 'ast')
            self.nl_vocab = utils.load_vocab(vocab, 'nl')

        # vocabulary
        self.source_vocab_size = len(self.source_vocab)
        self.code_vocab_size = len(self.code_vocab)
        self.ast_vocab_size = len(self.ast_vocab)
        self.nl_vocab_size = len(self.nl_vocab)

        logger.info('Size of source vocabulary: {} -> {}'.format(self.source_vocab.origin_size, self.source_vocab_size))
        logger.info('Size of code vocabulary: {} -> {}'.format(self.code_vocab.origin_size, self.code_vocab_size))
        logger.info('Size of ast vocabulary: {}'.format(self.ast_vocab_size))
        logger.info('Size of nl vocabulary: {} -> {}'.format(self.nl_vocab.origin_size, self.nl_vocab_size))

        logger.info('Vocabularies are successfully built')

        # model
        logger.info('-' * 100)
        logger.info('Building model')
        self.model = models.Model(source_vocab_size=self.source_vocab_size,
                                  code_vocab_size=self.code_vocab_size,
                                  ast_vocab_size=self.ast_vocab_size,
                                  nl_vocab_size=self.nl_vocab_size,
                                  is_eval=True,
                                  model=model)
        # model device
        logger.info('Model device: {}'.format(next(self.model.parameters()).device))
        # log model statistic
        logger.info('Trainable parameters: {}'.format(utils.human_format(utils.count_params(self.model))))
Example 12
# degrees_in = g.degree(mode="in")
# max_deg_in = max(degrees_in)
# print(max_deg_in)
# df_degree_in = [g.vs[idx].attributes() for idx, eb in enumerate(degrees_in) if eb == max_deg_in]
# print(df_degree_in)
# print(" twitter id  having max degree IN: ", df_degree_in)
# delta1 = get_delta()
# print('Time required to get  graph degree in : %(delta1)s' % locals())
#
#
# start_time()
# degrees_out = g.degree(mode="out")
# max_deg_out = max(degrees_out)
# print(max_deg_out)
# df_degree_out = [g.vs[idx].attributes() for idx, eb in enumerate(degrees_out) if eb == max_deg_out]
# print(df_degree_out)
# print(" twitter id  having max degree OUT: ", df_degree_out)
# delta1 = get_delta()
# print('Time required to get  graph degree out : %(delta1)s' % locals())

file_name: str = '../files/TEST_twitter_' + u.human_format(
    n_rows) + '_pickle' + ('z' if compressed else '')
print("now saving graph to ", file_name)
u.start_time()
if compressed:
    g.write_picklez(file_name)
else:
    g.write_pickle(file_name)
u.print_delta("save the graph")
print("====== End of program =======")
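
u.start_time() and u.print_delta() are not shown in this listing; assuming they are a simple module-level stopwatch, which is what the call sites suggest, they would look roughly like:

import time

_t0 = None

def start_time():
    # Remember when the measured section began.
    global _t0
    _t0 = time.time()

def print_delta(label):
    # Report the wall-clock time elapsed since start_time().
    print('Time required to %s: %.2f s' % (label, time.time() - _t0))
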
Example 13
    def __init__(self):

        # dataset
        logger.info('-' * 100)
        logger.info('Loading training and validation dataset')
        self.dataset = data.CodePtrDataset(mode='train')
        self.dataset_size = len(self.dataset)
        logger.info('Size of training dataset: {}'.format(self.dataset_size))
        self.dataloader = DataLoader(dataset=self.dataset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     collate_fn=lambda *args: utils.collate_fn(
                                         args,
                                         source_vocab=self.source_vocab,
                                         code_vocab=self.code_vocab,
                                         ast_vocab=self.ast_vocab,
                                         nl_vocab=self.nl_vocab))

        # valid dataset
        self.valid_dataset = data.CodePtrDataset(mode='valid')
        self.valid_dataset_size = len(self.valid_dataset)
        self.valid_dataloader = DataLoader(
            dataset=self.valid_dataset,
            batch_size=config.valid_batch_size,
            collate_fn=lambda *args: utils.collate_fn(
                args,
                source_vocab=self.source_vocab,
                code_vocab=self.code_vocab,
                ast_vocab=self.ast_vocab,
                nl_vocab=self.nl_vocab))
        logger.info('Size of validation dataset: {}'.format(
            self.valid_dataset_size))
        logger.info('The datasets were successfully loaded')

        # vocab
        logger.info('-' * 100)
        logger.info('Building vocabularies')

        sources, codes, asts, nls = self.dataset.get_dataset()

        self.source_vocab = utils.build_word_vocab(
            dataset=sources,
            vocab_name='source',
            ignore_case=True,
            max_vocab_size=config.source_vocab_size,
            save_dir=config.vocab_root)
        self.source_vocab_size = len(self.source_vocab)
        logger.info('Size of source vocab: {} -> {}'.format(
            self.source_vocab.origin_size, self.source_vocab_size))

        self.code_vocab = utils.build_word_vocab(
            dataset=codes,
            vocab_name='code',
            ignore_case=True,
            max_vocab_size=config.code_vocab_size,
            save_dir=config.vocab_root)
        self.code_vocab_size = len(self.code_vocab)
        logger.info('Size of code vocab: {} -> {}'.format(
            self.code_vocab.origin_size, self.code_vocab_size))

        self.ast_vocab = utils.build_word_vocab(dataset=asts,
                                                vocab_name='ast',
                                                ignore_case=True,
                                                save_dir=config.vocab_root)
        self.ast_vocab_size = len(self.ast_vocab)
        logger.info('Size of ast vocab: {}'.format(self.ast_vocab_size))

        self.nl_vocab = utils.build_word_vocab(
            dataset=nls,
            vocab_name='nl',
            ignore_case=True,
            max_vocab_size=config.nl_vocab_size,
            save_dir=config.vocab_root)
        self.nl_vocab_size = len(self.nl_vocab)
        logger.info('Size of nl vocab: {} -> {}'.format(
            self.nl_vocab.origin_size, self.nl_vocab_size))

        logger.info('Vocabularies are successfully built')

        # model
        logger.info('-' * 100)
        logger.info('Building the model')
        self.model = models.Model(source_vocab_size=self.source_vocab_size,
                                  code_vocab_size=self.code_vocab_size,
                                  ast_vocab_size=self.ast_vocab_size,
                                  nl_vocab_size=self.nl_vocab_size)
        # model device
        logger.info('Model device: {}'.format(
            next(self.model.parameters()).device))
        # log model statistic
        logger.info('Trainable parameters: {}'.format(
            utils.human_format(utils.count_params(self.model))))

        # optimizer
        self.optimizer = Adam([
            {
                'params': self.model.parameters(),
                'lr': config.learning_rate
            },
        ])

        self.criterion = nn.CrossEntropyLoss(
            ignore_index=self.nl_vocab.get_pad_index())

        if config.use_lr_decay:
            self.lr_scheduler = lr_scheduler.StepLR(self.optimizer,
                                                    step_size=1,
                                                    gamma=config.lr_decay_rate)

        # early stopping
        self.early_stopping = None
        if config.use_early_stopping:
            self.early_stopping = utils.EarlyStopping(
                patience=config.early_stopping_patience, high_record=False)
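
utils.count_params, used in the parameter-count logging of Examples 11 and 13, is presumably the same trainable-parameter sum computed inline in Example 4; a sketch under that assumption:

def count_params(model):
    # Sum the element counts of all trainable tensors (cf. Example 4).
    return sum(p.numel() for p in model.parameters() if p.requires_grad)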