Example #1
    def cmd_bestprice(self, args, msg):
        """Returns the best price for an item out of the current known market hub systems"""
        item = ' '.join(args)
        res = self._item_picker(item)
        if isinstance(res, basestring):
            return res
        type_id, type_name = res

        min_sell = 0
        max_buy = 0
        sell_sys = None
        buy_sys = None

        for name in self.market_systems:
            sys_id = self.map.get_system_id(name)
            if not sys_id:
                continue
            sell, buy = self._get_evecentral_price(type_id, sys_id)
            if (sell < min_sell or min_sell == 0) and sell > 0:
                min_sell = sell
                sell_sys = name
            if buy > max_buy:
                max_buy = buy
                buy_sys = name
        return '{}\nBest Sell: {} @ {} ISK\nBest Buy: {} @ {} ISK'.format(
            type_name,
            sell_sys, intcomma(min_sell),
            buy_sys, intcomma(max_buy)
        )
Example #2
def show_stats():
  threads = threading.activeCount()
  
  print 'conf.opts: %s ' % list(conf.opts)
   
  print 'Probes: %s' % pp.intcomma(conf.probes)
  print 'Packets: %s' % pp.intcomma(conf.packets)
  print 'Dropped: %s' % pp.intcomma(conf.dropped)
  print 'Signal Threshold: %s' % conf.signal_thresh
  print 'Clients seen: %s' % pp.intcomma(conf.clientcount)
  print 'SSIDs seen: %s' % pp.intcomma(conf.ssidcount)
  print 'Vendors seen: %s' % conf.vendorcount
  print 'Favorites list: %s' % list(conf.fav.items())
  print 'Interface list: %s' % list(conf.interfaces)
  print 'Start time: %s' % conf.uptime
  print 'Threads: %d' % threads
  
  print '\n\nPress q to exit, or choose an option:\n'
  print '[c]lients [a]ccess Points [S]ort [Q]uit [A]dd interface [D]ebug [T]hreads\n'
  #print '[ g ]  [ s ]  [ h ]  [ c ]  [ a ]  [ v ]  [ D ]  [ d ]  [ q ]'
  #print '                    Clients  APs  Vendors               quit'
  #print 'Graphs Statistics Help                    Debug  daemonize'
  print '\nsigmon %s' % conf.version
  
  waitkey()
Example #3
    def summary(self):
        """
        Return a formatted string summary of the run information.

        :return: human-readable summary
        :rtype: str
        """
        s = "s3sfe v%s run report\n" % VERSION
        x = "%s (%s on %s)\n" % (
            self._end_dt.strftime('%A, %B %d, %Y %H:%M:%S'), getuser(), node())
        s += x
        s += ('-' * (len(x) - 1)) + "\n"
        if self._dry_run:
            s += "-- DRY RUN - NO FILES ACTUALLY UPLOADED --\n"
        s += "Total Run Time: %s\n" % self.time_total
        s += "Time Listing Files: %s\n" % self.time_listing
        s += "Time Getting Metadata: %s\n" % self.time_meta
        s += "Time Querying S3: %s\n" % self.time_s3_query
        s += "Time Calculating Uploads: %s\n" % self.time_calc
        s += "Time Uploading Files: %s\n" % self.time_upload
        s += "\n"
        s += "Backed-up files on disk: %s files; %s\n" % (intcomma(
            self.total_files), naturalsize(self.total_bytes))
        s += "Uploaded %s files; %s\n" % (intcomma(
            self.files_uploaded), naturalsize(self.bytes_uploaded))
        if len(self.error_files) < 1:
            s += "\nAll files uploaded successfully.\n"
        else:
            s += "\n%d files failed uploading:\n" % len(self.error_files)
            for f in sorted(self.error_files):
                s += "%s\n" % f
        if self._dry_run:
            s += "-- DRY RUN - NO FILES ACTUALLY UPLOADED --\n"
        return s
Example #4
    def _start_learn_state(self, dataset, optimizer, validation_dataset,
                           max_epochs):
        self._logger.info('Model: \n\n{}\n'.format(repr(self)))
        if isinstance(dataset, (AbsDataHolder, SequentialAbsDataHolder,
                                DataSet, AsyncDataLoader)):
            self._logger.info('Training data: \n\n{}\n'.format(repr(dataset)))

        num_params = utils.number_parameters(self)
        self._logger.info('Number of trainable parameters: ' +
                          humanize.intcomma(num_params[0]))
        self._logger.info('Number of non-trainable parameters: ' +
                          humanize.intcomma(num_params[1]))
        self._logger.info('Total number of parameters: ' +
                          humanize.intcomma(num_params[0] + num_params[1]))
        self._logger.info('Starting the training ...')

        self.state = {
            'max_epochs': max_epochs,
            'num_batches': len(dataset),
            'model': self,
            'optimizer': optimizer,
            'current_batch': 0,
            'current_epoch': 0,
            'sample': None,
            'output': None,
            'loss': None,
            'loss_mean': None,
            'validation_dataset': validation_dataset
        }

        if self._requires_register_default:
            self._register_default_plugins()
Example #5
 def status_embed(ping):
     process = psutil.Process(getpid())
     last_restart_timedelta = datetime.now() - datetime.fromtimestamp(process.create_time())
     last_restart = humanize.naturaltime(last_restart_timedelta)
     servers = humanize.intcomma(message.client.total_servers)
     members = humanize.intcomma(message.client.total_members)
     messages = len(message.client.messaging.ledger)
     memory = psutil.virtual_memory()
     memory_total = humanize.naturalsize(memory.total)
     memory_used = humanize.naturalsize(memory.used)
     memory_percent = memory.percent
     cpu_count = psutil.cpu_count()
     cpu_percent = psutil.cpu_percent()
     disk_total = humanize.naturalsize(psutil.disk_usage("/").total)
     disk_used = humanize.naturalsize(psutil.disk_usage("/").used)
     disk_percent = psutil.disk_usage("/").percent
     uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
     embed = Embed(title="Glyph Status", timestamp=datetime.utcfromtimestamp(process.create_time()))
     embed.add_field(name="Discord Info",
                     value="**Ping** {} ms\n**Servers** {}\n**Members** {}\n"
                           "**Messages** {}".format(ping, servers, members, messages))
     embed.add_field(name="Stack Info",
                     value="**Memory** {}/{} ({}%)\n**CPU** {}-cores at {}% utilization\n"
                           "**Disk** {}/{} ({}%)\n**Uptime** {} days".format(
                         memory_used, memory_total, memory_percent, cpu_count, cpu_percent,
                         disk_used, disk_total, disk_percent, uptime.days))
     embed.set_footer(text="Last restarted {}".format(last_restart))
     return embed
Example #6
def info():
    date_of_search = datetime.now().strftime("%B %-d, %Y")
    ticker_symbol = request.args.get("ticker_symbol").upper()
    # TODO cache ticker_info
    yf_ticker = yf.Ticker(ticker_symbol)
    ticker_info = yf_ticker.info
    details = {
        "name": ticker_info["longName"],
        "symbol": ticker_info["symbol"],
        "industry": ticker_info["industry"],
        "logo_url": ticker_info["logo_url"],
        "web_url": ticker_info["website"],
        "description": ticker_info["longBusinessSummary"],
        "market_cap": humanize.intword(ticker_info["marketCap"])
        if ticker_info["marketCap"] > 999999
        else humanize.intcomma(ticker_info["marketCap"]),
        "open_price": humanize.intcomma(ticker_info["open"]),
    }

    return render_template(
        "dashboard/info.html",
        ticker_symbol=ticker_symbol,
        date_of_search=date_of_search,
        details=details,
        period_options=PERIOD_OPTIONS,
        interval_options=INTERVAL_OPTIONS,
    )
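
The market_cap entry above picks humanize.intword for values past a million and humanize.intcomma below it. A minimal sketch of that branch in isolation (the helper name and sample figures are illustrative, not from the project):

import humanize

def display_number(n):
    # Word form past the threshold, comma-grouping otherwise.
    return humanize.intword(n) if n > 999999 else humanize.intcomma(n)

print(display_number(2_400_000_000))  # 2.4 billion
print(display_number(415_000))        # 415,000
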
Example #7
    def renderPDF(self, data):
        buff = StringIO.StringIO()
        doc = SimpleDocTemplate(buff, pagesize=letter,
                                rightMargin=72, leftMargin=72,
                                topMargin=72, bottomMargin=18)
        finished = data['finished']
        running = data['running']
        ready = data['ready']

        Report = []
        styles = getSampleStyleSheet()
        style = getSampleStyleSheet()['Normal']
        style.leading = 24

        Report.append(Paragraph('Report on Campaign: ' + "MC16a", styles["Heading1"]))
        Report.append(Paragraph('Built on ' + time.ctime() + " by BigPanDA", styles["Bullet"]))
        Report.append(Paragraph('Progress and loads', styles["Heading2"]))
        Report.append(Paragraph('Done events: ' + humanize.intcomma(int(finished/1000000)) +' M', styles["Normal"]))
        Report.append(Paragraph('Running events: ' + humanize.intcomma(int(running/1000000)) +' M', styles["Normal"]))
        Report.append(Paragraph('Ready for processing events: ' + humanize.intcomma(int(ready/1000000)) +' M', styles["Normal"]))

        doc.build(Report)
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename="report.pdf"'
        response.write(buff.getvalue())
        buff.close()
        return response
Example #8
def _print_cyclic_core(x, y, xcore, ycore, essential, t0, prm, fol):
    """Print results of cyclic core computation.

    Assert support and covering properties.
    """
    if log.getEffectiveLevel() > logging.INFO:
        return
    # assert
    if essential != fol.false:
        assert support_issubset(essential, prm.p_vars, fol)
    if xcore != fol.false:
        assert support_issubset(xcore, prm.p_vars, fol)
    if ycore != fol.false:
        assert support_issubset(ycore, prm.p_vars, fol)
    # print
    m = fol.count(x)
    n = fol.count(y)
    log.info(('(x={m}, y={n}) implicants of '
              'covering problem').format(m=humanize.intcomma(m),
                                         n=humanize.intcomma(n)))
    m = fol.count(xcore)
    n = fol.count(ycore)
    log.info(('(x={m}, y={n}) implicants after '
              'removing essential elements').format(m=humanize.intcomma(m),
                                                    n=humanize.intcomma(n)))
    n = fol.count(essential)
    log.info('{n} primes are essential'.format(n=humanize.intcomma(n)))
    t1 = time.time()
    dt = t1 - t0
    log.info('cyclic core took {dt}'.format(dt=humanize.naturaldelta(dt)))
Example #9
def mesh_extract_single(information: dict, slices: str = None, interval: str = None, **kwargs) -> bool:
    """
    Given a dictionary containing the relevant parameters for the extraction, extract the material
    properties for a single process.

    Args:
        information (dict): The dictionary containing the metadata defining the extraction.
        slices (str): Optional slice specification, forwarded to InternalMesh.do_slices.
        interval (str): Optional interval specification, forwarded to InternalMesh.do_interval.

    Returns:
        True, when successful. It will raise an error if the extraction is not successful.
    """
    internal_mesh = InternalMesh(information)

    if slices is not None:
        internal_mesh.do_slices(slices)
    elif interval is not None:
        internal_mesh.do_interval(interval)

    sd_array = UCVM.create_max_seismicdata_array(min(internal_mesh.total_size, 250000), 1)

    print("\nThere are a total of " + humanize.intcomma(internal_mesh.total_size) + " grid points "
          "to extract.\nWe can extract " + humanize.intcomma(len(sd_array)) + " points at once.\n"
          "\nStarting extraction...\n")

    information["minimums"]["vp"] = float(information["minimums"]["vp"])
    information["minimums"]["vs"] = float(information["minimums"]["vs"])

    if internal_mesh.format == "awp":
        _mesh_extract_single_awp(sd_array, information, internal_mesh, slices, interval)
    elif internal_mesh.format == "rwg":
        _mesh_extract_single_rwg(sd_array, information, internal_mesh)

    print("\nExtraction done.")

    return True
Example #10
    def _system_price(self, args, msg, system, system_id):
        item = ' '.join(args)
        res = self._item_picker(item)
        if isinstance(res, basestring):
            return res
        type_id, type_name = res

        try:
            resp = requests.get(
                'http://api.eve-central.com/api/marketstat?typeid={}&usesystem={}'
                .format(type_id, system_id))
            root = ElementTree.fromstring(resp.content)
        except Exception:
            return "An error occurred trying to get the price for {}".format(
                type_name)

        return "{} @ {} | Sell: {} | Buy: {}".format(
            type_name,
            system,
            intcomma(
                float(
                    root.findall("./marketstat/type[@id='{}']/sell/min".format(
                        type_id))[0].text)),
            intcomma(
                float(
                    root.findall("./marketstat/type[@id='{}']/buy/max".format(
                        type_id))[0].text)),
        )
Example #11
    def cmd_bestprice(self, args, msg):
        """Returns the best price for an item out of the current known market hub systems"""
        item = ' '.join(args)
        res = self._item_picker(item)
        if isinstance(res, basestring):
            return res
        type_id, type_name = res

        min_sell = 0
        max_buy = 0
        sell_sys = None
        buy_sys = None

        for name in self.market_systems:
            sys_id = self.map.get_system_id(name)
            if not sys_id:
                continue
            sell, buy = self._get_evecentral_price(type_id, sys_id)
            if (sell < min_sell or min_sell == 0) and sell > 0:
                min_sell = sell
                sell_sys = name
            if buy > max_buy:
                max_buy = buy
                buy_sys = name
        return '{}\nBest Sell: {} @ {} ISK\nBest Buy: {} @ {} ISK'.format(
            type_name, sell_sys, intcomma(min_sell), buy_sys,
            intcomma(max_buy))
Example #12
def api_statistics(request):
    if request.method == "POST":
        params = request.POST
    else:
        params = request.GET

    result = {
        "draw": params["draw"],
    }

    data = []

    count_lines = Hashfile.objects.aggregate(
        Sum('line_count'))["line_count__sum"]
    count_cracked = Hashfile.objects.aggregate(
        Sum('cracked_count'))["cracked_count__sum"]
    data.append(["<b>Lines</b>", humanize.intcomma(count_lines)])
    data.append([
        "<b>Cracked</b>",
        "%s (%.2f%%)" %
        (humanize.intcomma(count_cracked), count_cracked / count_lines * 100.0)
    ])
    data.append(["<b>Hashfiles</b>", Hashfile.objects.count()])
    data.append(["<b>Nodes</b>", Node.objects.count()])

    result["data"] = data

    return HttpResponse(json.dumps(result), content_type="application/json")
Example #13
def ImportFromLanguage(language: scrape_repos_pb2.LanguageToClone,
                       pool: multiprocessing.Pool) -> None:
    """Import contentfiles from a language specification.

  Args:
    language: The language to import.
    pool: A multiprocessing pool.

  Raises:
    ValueError: If importer field not set.
  """
    if not language.importer:
        raise ValueError('LanguageToClone.importer field not set')

    logging.info('Enumerating all repos ...')
    all_repos = [
        github_repo.GitHubRepo(pathlib.Path(language.destination_directory /
                                            f))
        for f in pathlib.Path(language.destination_directory).iterdir()
        if f.name.endswith('.pbtxt')
    ]
    logging.info('Pruning indexed repos ...')
    num_repos = len(all_repos)
    repos_to_import = [repo for repo in all_repos if not repo.IsIndexed()]
    num_todo = len(repos_to_import)
    num_pruned = num_repos - num_todo
    random.shuffle(repos_to_import)
    logging.info('Importing %s of %s %s repos ...',
                 humanize.intcomma(num_todo), humanize.intcomma(num_repos),
                 language.language.capitalize())
    for i, repo in enumerate(repos_to_import):
        repo.Index(list(language.importer), pool,
                   github_repo.IndexProgress(num_pruned + i, num_repos))
Example #14
    def getArtistsInfo(self, text=None):

        _url = f'http://ws.audioscrobbler.com/2.0/?method=artist.getinfo&artist={text}&api_key={self.__api_key}&format=json'

        response = requests.post(_url).json()

        listeners = humanize.intword(response['artist']['stats']['listeners'])
        pl_count = humanize.intword(response['artist']['stats']['playcount'])

        if str(listeners).isdigit():
            listeners = humanize.intcomma(response['artist']['stats']['listeners'])

        if str(pl_count).isdigit():
            pl_count = humanize.intcomma(response['artist']['stats']['playcount'])

        data = {
            'listeners':response['artist']['stats']['listeners'],
            'listeners_display':listeners,
            'playcount':response['artist']['stats']['playcount'],
            'playcount_display':pl_count,
            'bio':str(response['artist']['bio']['content']).split('<a href="')[0]
        }

        return data
Example #15
    def Create(self) -> None:
        """Create the corpus files.

    Raises:
      EmptyCorpusException: If there are no content files, or no successfully
        pre-processed files.
    """
        self._created = True
        logging.info('Content ID: %s', self.content_id)
        preprocessed_lock_path = pathlib.Path(
            self.preprocessed.url[len('sqlite:///'):]).parent / 'LOCK'
        with lockfile.LockFile(preprocessed_lock_path).acquire(
                replace_stale=True, block=True):
            self.preprocessed.Create(self.config)
        if not self.preprocessed.size:
            raise errors.EmptyCorpusException(
                f"Pre-processed corpus contains no files: '{self.preprocessed.url}'"
            )
        encoded_lock_path = pathlib.Path(
            self.encoded.url[len('sqlite:///'):]).parent / 'LOCK'
        with lockfile.LockFile(encoded_lock_path).acquire(replace_stale=True,
                                                          block=True):
            start_time = time.time()
            atomizer = self.atomizer
            logging.info(
                '%s: %s tokens in %s ms',
                type(atomizer).__name__,
                humanize.intcomma(atomizer.vocab_size),
                humanize.intcomma(int((time.time() - start_time) * 1000)))
            self.encoded.Create(self.preprocessed, atomizer,
                                self.config.contentfile_separator)
Example #16
 def Import(self, session: sqlutil.Database.session_t,
            config: corpus_pb2.Corpus) -> None:
     with self.GetContentFileRoot(config) as contentfile_root:
         relpaths = set(self.GetImportRelpaths(contentfile_root))
         done = set([
             x[0]
             for x in session.query(PreprocessedContentFile.input_relpath)
         ])
         todo = relpaths - done
         logging.info('Preprocessing %s of %s content files',
                      humanize.intcomma(len(todo)),
                      humanize.intcomma(len(relpaths)))
         jobs = [
             internal_pb2.PreprocessorWorker(
                 contentfile_root=str(contentfile_root),
                 relpath=t,
                 preprocessors=config.preprocessor) for t in todo
         ]
         pool = multiprocessing.Pool()
         bar = progressbar.ProgressBar(max_value=len(jobs))
         last_commit = time.time()
         wall_time_start = time.time()
         for preprocessed_cf in bar(
                 pool.imap_unordered(PreprocessorWorker, jobs)):
             wall_time_end = time.time()
             preprocessed_cf.wall_time_ms = (int(
                 (wall_time_end - wall_time_start) * 1000))
             wall_time_start = wall_time_end
             session.add(preprocessed_cf)
             if wall_time_end - last_commit > 10:
                 session.commit()
                 last_commit = wall_time_end
Example #17
def _describe_programs(lang: Language, file=sys.stdout):
    for generator in lang.generators:
        num = humanize.intcomma(generator.num_programs())
        sloc = humanize.intcomma(generator.sloc_total())
        print(
            f"You have {Colors.BOLD}{num} {generator}{Colors.END} "
            f"programs, total {Colors.BOLD}{sloc}{Colors.END} SLOC.",
            file=file)
Example #18
def iva(var, num, tipo):
  import humanize
  if tipo == 1:
    return humanize.intcomma('%.2f' % (var / 1.16))
  if tipo == 2:
    return humanize.intcomma('%.2f' % (var * num))
  if tipo == 3:
    return humanize.intcomma('%.2f' % (var + var * num))
  return var
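
A note on the pattern above: intcomma receives a pre-formatted '%.2f' string. Older humanize releases apply the comma regex to that string as-is, so both decimals survive; releases from the 3.x line onward first cast string input to float, which can drop the trailing zero. On those versions the optional ndigits argument is the sturdier route. A minimal sketch, assuming a humanize version that supports ndigits:

import humanize

# Mirrors the tipo == 1 branch; ndigits keeps both decimal places after grouping.
print(humanize.intcomma(1160.0 / 1.16, ndigits=2))  # 1,000.00
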
Example #19
def home():
    """Serve the home page."""
    return render_template(
        "home.html",
        name_count=intcomma(backend.count_names()),
        alts_count=intcomma(backend.count_alts()),
        prefix_count=intcomma(backend.count_prefixes()),
        definition_count=intcomma(backend.count_definitions()),
    )
Example #20
    async def buy(self, ctx, ticker: str = 'MSFT', amount='1'):

        wallet, bank = await self.get_stats(self, ctx.author.id)
        ticker = ticker.upper()
        async with self.bot.session.get(
                f'https://ws-api.iextrading.com/1.0/tops/last?symbols={ticker}'
        ) as resp:
            data: list = await resp.json()

        if not data:
            return await ctx.send('Yeah so thats not a valid stock lmao')

        stock: dict = data[0]
        price: int = math.floor(stock["price"])
        humanized_price: str = humanize.intcomma(price)

        match = re.search(r'^[0-9]*$', str(amount))
        if match:
            amount = int(match[0])
        else:
            match = re.search(r'^[a-zA-Z]*$', amount)
            if match and match[0] == 'max':
                amount = math.floor(wallet / price)
                if amount == 0:
                    return await ctx.send(
                        'You don\'t have enough money to buy a share.')
            else:
                amount = 1

        total: int = amount * price
        humanized_total: str = humanize.intcomma(total)

        share: str = plural("share(s)", amount)
        answer, message = await ctx.confirm(
            f'Confirm to buy **{amount}** {share} of **{ticker}** at **${humanized_price}**'
            f' per share for a total of **${humanized_total}**.')

        if answer:
            if total > wallet:
                return await message.edit(
                    content=
                    f'You need **${total - wallet}** more in order to purchase this stock.'
                )
            values = (ctx.author.id, ticker, amount)
            await ctx.bot.db.execute(
                "INSERT INTO stocks(user_id, ticker, amount) VALUES($1, $2, $3) ON CONFLICT (user_id, ticker) DO UPDATE SET amount = stocks.amount + $3",
                *values)
            await self.bot.db.execute(
                "UPDATE economy SET wallet = $1 WHERE userid = $2",
                wallet - total, ctx.author.id)
            await message.edit(
                content=
                f'Purchased **{amount}** {share} of **{ticker}** for **${humanized_total}**.'
            )

        if not answer:
            await message.edit(content='Cancelled the transaction.')
Example #21
    def getDEFTEventsSummaryChain(self, condition):
        sqlRequest = '''
          SELECT * FROM (
          SELECT TASKID, PARENT_TID, 
          CASE WHEN TASKNAME LIKE '%.merge.%' AND substr(substr(TASKNAME,instr(TASKNAME,'.',-1) + 1),instr(substr(TASKNAME,instr(TASKNAME,'.',-1) + 1),'_',-1) + 1) like 'r%' THEN 'merge'
          WHEN TASKNAME LIKE '%.merge.%' AND not substr(substr(TASKNAME,instr(TASKNAME,'.',-1) + 1),instr(substr(TASKNAME,instr(TASKNAME,'.',-1) + 1),'_',-1) + 1) like 'r%' THEN 'mergeHits'
          WHEN TASKNAME LIKE '%.recon.%' THEN 'recon'
          WHEN TASKNAME LIKE '%.simul.%' THEN 'simul'
          WHEN TASKNAME LIKE '%.evgen.%' THEN 'evgen'
          END AS STEP,
          /*s.INPUT_EVENTS,*/ t.TOTAL_REQ_EVENTS, t.TOTAL_EVENTS, t.STATUS
          FROM ATLAS_DEFT.T_PRODUCTION_TASK t, ATLAS_DEFT.T_PRODUCTION_STEP s WHERE s.STEP_ID=t.STEP_ID and  not TASKNAME LIKE '%valid%' and TASKNAME LIKE 'mc16_%' {0}
        )t1 where STEP IS NOT NULL
        '''

        #TASKID, PARENT_TID, STEP, INPUT_EVENTS, TOTAL_EVENTS, STATUS

        sqlRequestFull = sqlRequest.format(condition)
        cur = connection.cursor()
        cur.execute(sqlRequestFull)
        tasks = cur.fetchall()
        parentIDHash = {}
        processedHash = {}
        summaryInput = {} #Step, INPUT_EVENTS
        summaryProcessed = {} #Step, TOTAL_EVENTS

        for task in tasks:
            if not task[5] in ['failed','aborted','broken']:
                if task[0] != task[1]:
                    parentIDHash[task[0]] = task[1]
                processedHash[task[0]] = task[4]

        for task in tasks:
            if not task[5] in ['failed','aborted','broken']:
                step = task[2]
                if step not in summaryInput:
                    summaryInput[step] = 0
                if step not in summaryProcessed:
                    summaryProcessed[step] = 0

                taskid = task[0]
                inputEvent = task[3]
                outputEvent = task[4]
                #if taskid in parentIDHash and parentIDHash[taskid] in processedHash:
                #    inputEvent = processedHash[parentIDHash[taskid]]

                summaryInput[step] += inputEvent
                summaryProcessed[step] += outputEvent

        fullSummary = {}
        fullSummary['total'] = {}
        fullSummary['total']['evgen'] = '%s/%s' % (  humanize.intcomma(summaryInput['evgen']) if 'evgen' in summaryInput else '-', humanize.intcomma(summaryProcessed['evgen']) if 'evgen' in summaryInput else '-')
        fullSummary['total']['simul'] = '%s/%s' % (  humanize.intcomma(summaryInput['simul']) if 'simul' in summaryInput else '-', humanize.intcomma(summaryProcessed['simul']) if 'simul' in summaryInput else '-')
        fullSummary['total']['recon'] = '%s/%s' % (  humanize.intcomma(summaryInput['recon']) if 'recon' in summaryInput else '-', humanize.intcomma(summaryProcessed['recon']) if 'recon' in summaryInput else '-')
        fullSummary['total']['merge'] = '%s/%s' % (  humanize.intcomma(summaryInput['merge']) if 'merge' in summaryInput else '-', humanize.intcomma(summaryProcessed['merge']) if 'merge' in summaryInput else '-')
        return fullSummary
Example #22
    def GetTrainingCorpus(
        self, corpus: "corpuses.Corpus",
        training_opts: model_pb2.TrainingOptions
    ) -> typing.Tuple[np.ndarray, np.ndarray, int]:
        """Get the corpus to train over.

    Args:
      corpus: A Corpus instance.
      training_opts: A TrainingOptions proto.

    Returns:
      An X, y pair of data for an epoch, and the number of steps in the epoch.

    Raises:
      ValueError: If batch_size and sequence_length are too large for the corpus,
        yielding no batches.
    """
        start_time = time.time()
        encoded_corpus = np.concatenate(
            corpus.GetTrainingData(shuffle=training_opts.
                                   shuffle_corpus_contentfiles_between_epochs))
        corpus_length = len(encoded_corpus)
        steps_per_epoch = (corpus_length - 1) // (
            training_opts.batch_size * training_opts.sequence_length)
        if not steps_per_epoch:
            raise ValueError(
                f"Requested batch size ({training_opts.batch_size}) and "
                f"sequence length ({training_opts.sequence_length}) are too large for "
                f"corpus of size {corpus_length}.")

        clipped_corpus_length = (steps_per_epoch * training_opts.batch_size *
                                 training_opts.sequence_length)

        x = np.reshape(
            encoded_corpus[:clipped_corpus_length],
            [
                training_opts.batch_size,
                steps_per_epoch * training_opts.sequence_length
            ],
        )
        y = np.reshape(
            encoded_corpus[1:clipped_corpus_length + 1],
            [
                training_opts.batch_size,
                steps_per_epoch * training_opts.sequence_length
            ],
        )

        l.logger().info(
            "Encoded corpus of {} tokens (clipped last {} tokens) in {} ms.".
            format(
                humanize.intcomma(clipped_corpus_length),
                humanize.intcomma(corpus_length - clipped_corpus_length),
                humanize.intcomma(int((time.time() - start_time) * 1000)),
            ))
        return x, y, steps_per_epoch
Example #23
    async def sell(self, ctx, ticker: str = 'MSFT', amount='1'):
        wallet, bank = await self.get_stats(self, ctx.author.id)
        ticker = ticker.upper()

        async with self.bot.session.get(
                f'https://ws-api.iextrading.com/1.0/tops/last?symbols={ticker}'
        ) as resp:
            data: list = await resp.json()

        if not data:
            return await ctx.send('Yeah so thats not a valid stock lmao')

        stock: dict = data[0]
        price: int = math.floor(stock["price"])
        humanized_price: str = humanize.intcomma(price)

        match = re.search(r'^[0-9]*$', str(amount))
        if match:
            amount = int(match[0])
        else:
            match = re.search(r'^[a-zA-Z]*$', amount)
            if match and match[0] == 'max':
                amount = await ctx.bot.db.fetchval(
                    "SELECT amount FROM stocks WHERE user_id = $1 AND ticker = $2",
                    ctx.author.id, ticker)
            else:
                amount = 1

        total: int = amount * price
        humanized_total: str = humanize.intcomma(total)

        share: str = plural("share(s)", amount)
        answer, message = await ctx.confirm(
            f'Confirm to sell **{amount}** {share} of **{ticker}** at **${humanized_price}**'
            f' per share for a total of **${humanized_total}**.')

        if answer:
            try:
                query = await ctx.bot.db.execute(
                    "UPDATE stocks SET amount = stocks.amount - $3 WHERE user_id = $1 AND ticker = $2",
                    ctx.author.id, ticker, amount)
                if query == 'UPDATE 0':
                    return await message.edit(content="You don't any stock.")
                await ctx.bot.db.execute('DELETE FROM stocks WHERE amount = 0')
                await self.bot.db.execute(
                    "UPDATE economy SET wallet = $1 WHERE userid = $2",
                    wallet + total, ctx.author.id)
                return await message.edit(
                    content=
                    f'Sold **{amount}** {share} of **{ticker}** for **${humanized_total}**.'
                )
            except CheckViolationError:
                return await message.edit("You don't have that much stock")
        else:
            await message.edit(content='Cancelled the transaction.')
Example #24
def main():

    day = GetDate()
    hour = GetTime()

    dollar_price = DollarPrice()
    coins = CoinsPrice()

    bitcoin_price_rial = int(coins[0] * dollar_price)
    bitcoin_sell_price_rial = humanize.intcomma(int((bitcoin_price_rial +
                                                     ((3 * bitcoin_price_rial) / 100)) / 10))
    ethereum_price_rial = int(coins[1] * dollar_price)
    ethereum_sell_price_rial = humanize.intcomma(int((ethereum_price_rial +
                                                      ((3 * ethereum_price_rial) / 100)) / 10))
    tether_price_rial = int(coins[2] * dollar_price)
    tether_sell_price_rial = humanize.intcomma(int((tether_price_rial +
                                                    ((3 * tether_price_rial) / 100)) / 10))
    xrp_price_rial = int(coins[3] * dollar_price)
    xrp_sell_price_rial = humanize.intcomma(int((xrp_price_rial +
                                                 ((3 * xrp_price_rial) / 100)) / 10))
    bitecoincash_price_rial = int(coins[4] * dollar_price)
    bitecoincash_sell_price_rial = humanize.intcomma(int((bitecoincash_price_rial +
                                                          ((3*bitecoincash_price_rial) / 100)) / 10))
    litecoin_price_rial = int(coins[5] * dollar_price)
    litecoin_sell_price_rial = humanize.intcomma(int((litecoin_price_rial +
                                                      ((3*litecoin_price_rial) / 100)) / 10))
    cardano_price_rial = int(coins[6] * dollar_price)
    cardano_sell_price_rial = humanize.intcomma(int((cardano_price_rial +
                                                     ((3*cardano_price_rial) / 100)) / 10))
    tron_price_rial = int(coins[7] * dollar_price)
    tron_sell_price_rial = humanize.intcomma(int((tron_price_rial +
                                                  ((3*tron_price_rial) / 100)) / 10))
    monero_price_rial = int(coins[8] * dollar_price)
    monero_sell_price_rial = humanize.intcomma(int((monero_price_rial +
                                                    ((3*monero_price_rial) / 100)) / 10))

    # update_id = last_update(bot_url)['message']["chat"]["id"]
    allowed_chat_ids = [965851315, -1001331723254]
    while True:

        message_text = '\U0001F514' + "اعلام نرخ لحظه ارزهای دیجیتال" + \
            '\n' + '\U0001F5D3' + " تاریخ: " + day + '\n' + '\U0000231A' + " ساعت: " + \
            hour + "\n" + '--------------------------------------------------' + '---------------------------------------------------' + \
            "\n\n" + '\U00000031' '\U0000FE0F' '\U000020E3' + " بیت کوین - Bitcoin" + "\n" + "       قیمت به دلار: " + \
            str(coins[0]) + "$" + "\n" + "       قیمت فروش: " + \
            str(bitcoin_sell_price_rial) + " تومان" + "\n\n" + '\U00000032' '\U0000FE0F' '\U000020E3' + " اتریوم - Ethereum" + "\n" + \
            "       قیمت به دلار: " + \
            str(coins[1]) + "$" + "\n" + "       قیمت فروش: " + \
            str(ethereum_sell_price_rial) + " تومان" + "\n\n" + '\U00000033' '\U0000FE0F' '\U000020E3' + " تتر - Tether" + "\n" + \
            "       قیمت به دلار: " + \
            str(coins[2]) + "$" + "\n" + "       قیمت فروش: " + \
            str(tether_sell_price_rial) + " تومان" + "\n\n" + '\U00000034' '\U0000FE0F' '\U000020E3' + " ریپل - XRP" + "\n" + \
            "       قیمت به دلار: " + \
            str(coins[3]) + "$" + "\n" + "       قیمت فروش: " + \
            str(xrp_sell_price_rial) + " تومان" + "\n\n" + '\U00000035' '\U0000FE0F' '\U000020E3' + " ببیت کوین کش - Bitcoin Cash" + "\n" + "       قیمت به دلار: " + str(coins[4]) + "$" + "\n" + "       قیمت فروش: " + str(bitecoincash_sell_price_rial) + " تومان" + "\n\n" + '\U00000036' '\U0000FE0F' '\U000020E3' + " لایت کوین - Litecoin" + "\n" + "       قیمت به دلار: " + str(coins[5]) + "$" + "\n" + "       قیمت فروش: " + str(litecoin_sell_price_rial) + " تومان" + "\n\n" + '\U00000037' '\U0000FE0F' '\U000020E3' + "کاردانو - Cardano" + "\n" + "       قیمت به دلار: " + str(coins[6]) + "$" + "\n" + "       قیمت فروش: " + str(cardano_sell_price_rial) + " تومان" + "\n\n" + '\U00000038' '\U0000FE0F' '\U000020E3' + " ترون - TRON" + "\n" + "       قیمت به دلار: " + str(coins[7]) + "$" + "\n" + "       قیمت فروش: " + str(
                tron_sell_price_rial) + " تومان" + "\n\n" + '\U00000039' '\U0000FE0F' '\U000020E3' + " مونرو - Monero" + "\n" + "       قیمت به دلار: " + str(coins[8]) + "$" + "\n" + "       قیمت فروش: " + str(monero_sell_price_rial) + " تومان" + "\n" + '--------------------------------------------------' + '---------------------------------------------------' + "\n\n" + "       برای مشاهده لیست کامل رمز ارزها لطفا از سایت ما دیدن فرمائید." + "\n\n" + '--------------------------------------------------' + '---------------------------------------------------' + "\n\n" + "کانال اعلام قیمت لحظه ای و سایر اخبار ارزهای دیجیتال " + '\U0001F447' + "\n\n" + "@coinbitiran_exchange" + "\n\n" + '\U0001F310' + " لینک سایت صرافی کوین بیت:" + "\n\n" + "https://coinbit-exchange.com/" + "\n\n" + "آسایش و اطمینان شما، هدف ماست!"
        for i in allowed_chat_ids:
            send_message(i, message_text)
        time.sleep(300.0)
        # Note: recursing into main() refreshes the prices, but Python does no
        # tail-call elimination, so the stack grows on every cycle; looping over
        # the fetch logic instead would avoid eventually hitting the recursion limit.
        main()
Example #25
def scan(paths, recursive, size, min_size, max_size, hash_function):
    """Scan files in directories and report duplication."""
    if min_size is None:
        min_size = size // 4
    if max_size is None:
        max_size = size * 8

    bytes_total = 0
    bytes_dupe = 0
    fingerprints = set()
    supported = supported_hashes()
    if hash_function not in supported:
        msg = "'{}' is not a supported hash.\nTry one of these:\n{}".format(
            hash_function, ", ".join(supported))
        raise click.BadOptionUsage("hf", msg)

    hf = getattr(hashlib, hash_function)
    files = []
    for path in paths:
        files += list(iter_files(path, recursive))
    t = Timer("scan", logger=None)
    t.start()
    with click.progressbar(files) as pgbar:
        for entry in pgbar:
            try:
                chunker = fastcdc.fastcdc(entry.path,
                                          min_size,
                                          size,
                                          max_size,
                                          hf=hf)
            except Exception as e:
                click.echo("\n for {}".format(entry.path))
                click.echo(repr(e))
                continue
            for chunk in chunker:
                bytes_total += chunk.length
                if chunk.hash in fingerprints:
                    bytes_dupe += chunk.length
                fingerprints.add(chunk.hash)
    t.stop()
    if bytes_total:
        data_per_s = bytes_total / Timer.timers.mean("scan")
        dd_ratio = bytes_dupe / bytes_total * 100
        click.echo("Files:          {}".format(intcomma(len(files))))
        click.echo("Chunk Sizes:    min {} - avg {} - max {}".format(
            min_size, size, max_size))
        click.echo("Unique Chunks:  {}".format(intcomma(len(fingerprints))))
        click.echo("Total Data:     {}".format(naturalsize(bytes_total)))
        click.echo("Dupe Data:      {}".format(naturalsize(bytes_dupe)))
        click.echo("DeDupe Ratio:   {:.2f} %".format(dd_ratio))
        click.echo("Throughput:     {}/s".format(naturalsize(data_per_s)))
    else:
        click.echo("No data.")
Example #26
def test_intcomma():
    number = 10_000_000

    assert humanize.intcomma(number) == "10,000,000"

    try:
        humanize.i18n.activate("fr_FR")
        assert humanize.intcomma(number) == "10 000 000"

    finally:
        humanize.i18n.deactivate()
        assert humanize.intcomma(number) == "10,000,000"
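
The test above shows that intcomma's grouping separator is locale-aware once a translation is activated. The same activate/deactivate pairing works outside tests; a small sketch, assuming the fr_FR locale data ships with the installed humanize:

import humanize

humanize.i18n.activate("fr_FR")
try:
    print(humanize.intcomma(10_000_000))  # 10 000 000
finally:
    humanize.i18n.deactivate()
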
Example #27
def load_filings(collection, committees, recent_filings, alert=False):
    """
    Given a collection of filings, a list of committees, and a list of filings, will insert
    new filings into the collection.
    Returns a list of messages if alert has been set to True.
    """

    if campfinbot.EXCLUDED_COMMITTEE_PATH:
        with open(campfinbot.EXCLUDED_COMMITTEE_PATH, 'r') as f:
            excluded_comms = [c.strip() for c in f]
            logging.info("excluding comms: {}".format(', '.join(excluded_comms)))
    else:
        excluded_comms = []
        logging.info("No comms to exclude")

    committees = [c for c in committees] #otherwise it uses up the cursor
    messages = []
    for filing in recent_filings:
        committee = [c for c in committees if unicode(c['committee_id']) == unicode(filing['fec_id'])]
        if len(committee) > 0:
            if not collection.find_one({'filing_id': filing['filing_id']}):
                form_type = filing['form_type'].rstrip('HSPAX')
                if form_type in campfinbot.ACCEPTABLE_FORMS:
                    collection.insert(filing)
                    if form_type in campfinbot.ALERT_FORMS and alert and filing['filed_date'] > campfinbot.EARLIEST_ALERT and filing['fec_id'] not in excluded_comms:
                        message = "*{comm}* has filed a {form_type}".format(comm=filing['committee_name'],
                                                                                 form_type=filing['form_type'])
                        if filing['is_amendment']:
                            message += " AMENDMENT"
                        
                        message += " on {date}.\n{url}".format(date=filing['filed_date'], url=filing['source_url'])
                        if filing['has_cycle_totals']:
                            try:
                                message += "\n\tReceipts: $%s" % humanize.intcomma(round(float(filing['period_total_receipts']), 2))
                            except Exception:
                                message += "\n\tReceipts: %s" % filing['period_total_receipts']

                            try:
                                message += "\n\tCash on hand: $%s" % humanize.intcomma(round(float(filing['coh_end']), 2))
                            except Exception:
                                message += "\n\tCash on hand: %s" % filing['coh_end']

                            try:
                                message += "\n\tDisbursements: $%s" % humanize.intcomma(round(float(filing['period_total_disbursements']), 2))
                            except Exception:
                                message += "\n\tDisbursements: %s" % filing['period_total_disbursements']
                        candidate_names = len(committee[0]['candidate_names'])
                        if candidate_names > 0:
                            message += "\n\tThis committee supports {}".format(" and ".join(committee[0]['candidate_names']))

                        messages.append(message)

    return messages
Example #28
def hashfile(request, hashfile_id, error_msg=''):
    context = {}
    context["Section"] = "Hashfile"

    hashfile = get_object_or_404(Hashfile, id=hashfile_id)

    context['hashfile'] = hashfile
    context['lines'] = humanize.intcomma(hashfile.line_count)
    context['recovered'] = "%s (%.2f%%)" % (humanize.intcomma(hashfile.cracked_count), hashfile.cracked_count/hashfile.line_count*100) if hashfile.line_count != 0 else "0"
    context['hash_type'] = "Plaintext" if hashfile.hash_type == -1 else Hashcat.get_hash_types()[hashfile.hash_type]["name"]

    template = loader.get_template('Hashcat/hashfile.html')
    return HttpResponse(template.render(context, request))
Example #29
def GetTrainingCorpus(
    corpus: 'corpuses.Corpus',
    training_opts: model_pb2.TrainingOptions) -> typing.Tuple[
  np.ndarray, np.ndarray, int]:
  """Get the corpus to train over.

  Args:
    corpus: A Corpus instance.
    training_opts: A TrainingOptions proto.

  Returns:
    An X, y pair of data for an epoch, and the number of steps in the epoch.

  Raises:
    UserError: If batch_size and sequence_length are too large for the corpus,
      yielding no batches.
  """
  start_time = time.time()
  encoded_corpus = corpus.GetTrainingData(
      shuffle=training_opts.shuffle_corpus_contentfiles_between_epochs)
  corpus_length = len(encoded_corpus)
  steps_per_epoch = (corpus_length - 1) // (
      training_opts.batch_size * training_opts.sequence_length)
  if not steps_per_epoch:
    raise errors.UserError(
        f'Requested batch size ({training_opts.batch_size}) and '
        f'sequence length ({training_opts.sequence_length}) are too large for '
        f'corpus of size {corpus_length}.')

  clipped_corpus_length = (
      steps_per_epoch * training_opts.batch_size *
      training_opts.sequence_length)

  x = np.reshape(
      encoded_corpus[:clipped_corpus_length],
      [training_opts.batch_size,
       steps_per_epoch * training_opts.sequence_length])
  y = np.reshape(
      encoded_corpus[1:clipped_corpus_length + 1],
      [training_opts.batch_size,
       steps_per_epoch * training_opts.sequence_length])

  logging.info(
      'Encoded corpus of %s tokens (clipped last %s tokens) in %s ms.',
      humanize.intcomma(clipped_corpus_length),
      humanize.intcomma(corpus_length - clipped_corpus_length),
      humanize.intcomma(int((time.time() - start_time) * 1000)))
  return x, y, steps_per_epoch
Example #30
def LoadPositiveNegativeProtos(path: pathlib.Path) -> PositiveNegativeDataset:
  """Load positive and negative training protos from a directory."""
  positive_protos = [
    pbutil.FromFile(p, fish_pb2.CompilerCrashDiscriminatorTrainingExample())
    for p in path.iterdir() if p.name.startswith('positive-')
  ]
  logging.info(
      'Loaded %s positive protos', humanize.intcomma(len(positive_protos)))
  negative_protos = [
    pbutil.FromFile(p, fish_pb2.CompilerCrashDiscriminatorTrainingExample())
    for p in path.iterdir() if p.name.startswith('negative-')
  ]
  logging.info(
      'Loaded %s negative protos', humanize.intcomma(len(negative_protos)))
  return PositiveNegativeDataset(positive_protos, negative_protos)
Example #31
    def create_value_embed(embed, api_response):
        """
        Embed with data about profile value
        :param embed: base embed from create_user_base_embed()
        :param api_response: response from steamladder api
        :return: Embed
        """

        values = api_response['steamladder']['value']

        total_value = 0
        total_value += values['level'] if values['level'] else 0
        total_value += values['games_current'] if values['games_current'] else 0
        total_value += values['donator_value'] if values['donator_value'] else 0

        if total_value > 0:
            embed.add_field(name='Total value',
                            value='**${}**'.format(
                                humanize.intcomma(round(total_value))),
                            inline=False)

            if values['level']:
                embed.add_field(name='Level value',
                                value='${}'.format(
                                    humanize.intcomma(round(values['level']))),
                                inline=True)

            if values['games_current']:
                embed.add_field(name='Games value',
                                value='${}'.format(
                                    humanize.intcomma(
                                        round(values['games_current']))),
                                inline=True)

            donator_value = values['donator_value'] if values[
                'donator_value'] else 0
            embed.add_field(name='Donator value',
                            value='${}'.format(
                                humanize.intcomma(round(donator_value))),
                            inline=True)
        else:
            embed.add_field(name='Info',
                            value='No value known, try updating your profile.',
                            inline=False)

        embed.set_footer(
            text='Game pricing data based on current Steam store prices.')
        return embed
Example #32
def load_filings(collection, committees, recent_filings, alert=False):
    """
    Given a collection of filings, a list of committees, and a list of filings, will insert
    new filings into the collection.
    Returns a list of messages if alert has been set to True.
    """
    messages = []
    for filing in recent_filings:
        if filing['fec_id'] in committees:
            if not collection.find_one({'filing_id': filing['filing_id']}):
                form_type = filing['form_type'].rstrip('HSPAX')
                if form_type in campfinbot.ACCEPTABLE_FORMS:
                    collection.insert(filing)
                    if form_type in campfinbot.ALERT_FORMS and alert and filing['filed_date'] > campfinbot.EARLIEST_ALERT:
                        message = "*{comm}* has filed a {form_type}".format(comm=filing['committee_name'],
                                                                                 form_type=filing['form_type'])
                        if filing['is_amendment']:
                            message += " AMENDMENT"
                        
                        message += " on {date}.\n{url}".format(date=filing['filed_date'], url=filing['source_url'])
                        if filing['has_cycle_totals']:
                            try:
                                message += "\n\tReceipts: $%s" % humanize.intcomma(round(float(filing['period_total_receipts']), 2))
                            except Exception:
                                message += "\n\tReceipts: %s" % filing['period_total_receipts']

                            try:
                                message += "\n\tCash on hand: $%s" % humanize.intcomma(round(float(filing['coh_end']), 2))
                            except Exception:
                                message += "\n\tCash on hand: %s" % filing['coh_end']

                            try:
                                message += "\n\tDisbursements: $%s" % humanize.intcomma(round(float(filing['period_total_disbursements']), 2))
                            except Exception:
                                message += "\n\tDisbursements: %s" % filing['period_total_disbursements']
                        
                        #Kitty wants to know about bernie filings immediately
                        if filing['fec_id'] in ['C00577130']:
                            try:
                                kitty_user = os.environ['KITTY']
                            except KeyError:
                                message += "\nHEADS UP kitty"
                            else:
                                message += "\n HEADS UP {}".format(kitty_user)

                        messages.append(message)

    return messages
Example #33
 async def fortnite(
         self,
         ctx: discord.ApplicationContext,
         platform: Option(
             str,
             "Wybierz platformę na której grasz",
             choices=[
                 OptionChoice("Epic Games", "epic"),
                 OptionChoice("Playstation Network", "psn"),
                 OptionChoice("Xbox Live", "xbl"),
             ],
         ),
         nick: Option(str, "Nick gracza"),
 ):
     await ctx.defer()
     r = requests.get(
         url="https://fortnite-api.com/v2/stats/br/v2",
         params={
             "name": nick,
             "accountType": platform
         },
         headers={"Authorization": config["fortnite"]},
     )
     if r.status_code == 200:
         json = r.json()
         data = json["data"]["stats"]["all"]["overall"]
         embed = discord.Embed()
         embed.title = "Statystyki w grze Fortnite"
         embed.description = "🧑 Gracz: **{}**".format(nick)
         embed.add_field(name="⭐️ Punkty",
                         value=humanize.intcomma(data["score"]))
         embed.add_field(name="🏆 Wygrane",
                         value=humanize.intcomma(data["wins"]))
         embed.add_field(name="⚔ Zabójstwa",
                         value=humanize.intcomma(data["kills"]))
         embed.add_field(name="☠ Śmierci",
                         value=humanize.intcomma(data["deaths"]))
         embed.add_field(name="🕹 Rozegranych meczy",
                         value=humanize.intcomma(data["matches"]))
         await ctx.send_followup(embed=embed)
     elif r.status_code == 403:
         raise commands.BadArgument(
             f"Statystyki gracza __{nick}__ są **prywatne**!")
     elif r.status_code == 404:
         raise commands.BadArgument(
             "Podany gracz nie istnieje lub nigdy nie grał w Fortnite!")
     else:
         raise commands.CommandError(r.text)  # requests' Response.text is a plain attribute, not awaitable
Example #34
def _describe_testcases(lang: Language, generator: Generator, file=sys.stdout):
    for harness in generator.harnesses:
        num = humanize.intcomma(generator.num_testcases())
        print(
            f"There are {Colors.BOLD}{num} {generator}:{harness} "
            "testcases.",
            file=file)
Example #35
    def getJEDIEventsSummaryRequested(self, condition):
        sqlRequest = '''
          SELECT * FROM (
            SELECT SUM(NEVENTS), SUM(NEVENTSUSED), STEP FROM (
            SELECT t2.nevents, t2.neventsused,
            
                      CASE WHEN t1.TASKNAME LIKE '%.merge.%' AND substr(substr(t1.TASKNAME,instr(t1.TASKNAME,'.',-1) + 1),instr(substr(t1.TASKNAME,instr(t1.TASKNAME,'.',-1) + 1),'_',-1) + 1) like 'r%' THEN 'merge'
                      WHEN t1.TASKNAME LIKE '%.merge.%' AND not substr(substr(t1.TASKNAME,instr(t1.TASKNAME,'.',-1) + 1),instr(substr(t1.TASKNAME,instr(t1.TASKNAME,'.',-1) + 1),'_',-1) + 1) like 'r%' THEN 'mergeHits'
                      WHEN t1.TASKNAME LIKE '%.recon.%' THEN 'recon'
                      WHEN t1.TASKNAME LIKE '%.simul.%' THEN 'simul'
                      WHEN t1.TASKNAME LIKE '%.evgen.%' THEN 'evgen'
                      END AS STEP
            
            FROM ATLAS_PANDA.JEDI_TASKS t1, ATLAS_PANDA.JEDI_DATASETS t2 WHERE  t1.status not in ('failed','aborted','broken') and  t1.JEDITASKID=t2.JEDITASKID and t2.MASTERID IS NULL and t2.TYPE IN ('input', 'pseudo_input') {0} 
            )group by STEP
        )t1 where STEP IS NOT NULL
        '''
        # INPUT_EVENTS, TOTAL_EVENTS, STEP

        sqlRequestFull = sqlRequest.format(condition)
        cur = connection.cursor()
        cur.execute(sqlRequestFull)
        tasks = cur.fetchall()
        summaryInput = {} #Step, INPUT_EVENTS
        summaryProcessed = {} #Step, TOTAL_EVENTS

        for task in tasks:
            step = task[2]
            if step not in summaryInput:
                summaryInput[step] = 0
            if step not in summaryProcessed:
                summaryProcessed[step] = 0

            inputEvent = task[0]
            outputEvent = task[1]
            summaryInput[step] += inputEvent
            summaryProcessed[step] += outputEvent

        fullSummary = {}
        fullSummary['input/processed'] = {}
        fullSummary['input/processed']['evgen'] = '%s/%s' % (  humanize.intcomma(summaryInput['evgen']) if 'evgen' in summaryInput else '-', humanize.intcomma(summaryProcessed['evgen']) if 'evgen' in summaryInput else '-')
        fullSummary['input/processed']['simul'] = '%s/%s' % (  humanize.intcomma(summaryInput['simul']) if 'simul' in summaryInput else '-', humanize.intcomma(summaryProcessed['simul']) if 'simul' in summaryInput else '-')
        fullSummary['input/processed']['recon'] = '%s/%s' % (  humanize.intcomma(summaryInput['recon']) if 'recon' in summaryInput else '-', humanize.intcomma(summaryProcessed['recon']) if 'recon' in summaryInput else '-')
        fullSummary['input/processed']['merge'] = '%s/%s' % (  humanize.intcomma(summaryInput['merge']) if 'merge' in summaryInput else '-', humanize.intcomma(summaryProcessed['merge']) if 'merge' in summaryInput else '-')
        return fullSummary
Example #36
File: reddit.py Project: w4/belle
def subreddit(bot, args, sender, source):
    """Ran whenever a subreddit is mentioned"""
    if dave.config.redis.exists("reddit:subreddit:mentioned:{}:{}".format(args[0], source)):
        # if this subreddit was mentioned in the last x seconds (see the setex below),
        # don't spam info about it
        return

    if not dave.config.redis.exists("reddit:subreddit:{}".format(args[0])):
        req = get("https://reddit.com/r/{}/about.json".format(args[0]),
                  headers={'user-agent': 'irc bot (https://github.com/w4)'})

        if req.status_code != 200:
            return

        if "/search.json" in req.url:
            # 404'd, reddit redirected us to the search page because they couldn't find
            # the subreddit.
            return

        req = req.json()

        dave.config.redis.setex("reddit:subreddit:{}".format(args[0]), 600,
                                pickle.dumps(req))
    else:
        req = pickle.loads(dave.config.redis.get("reddit:subreddit:{}".format(args[0])))

    resp = req["data"]

    # don't give info about this subreddit again in this channel for 300 seconds
    dave.config.redis.setex("reddit:subreddit:mentioned:{}:{}".format(args[0], source),
                            300, 1)

    bot.msg(source, assembleFormattedText(
        A.normal[
            A.bold[A.fg.lightRed["[NSFW] "]] if resp["over18"] else "",
            A.normal[resp["title"]],
            " ({}), a community for {}. {} subscribers, {} browsing right now.".format(
                resp["display_name_prefixed"],
                naturaldelta(datetime.utcnow().timestamp() - resp["created"]),
                intcomma(resp["subscribers"]),
                intcomma(resp["accounts_active"])
            )
        ]
    ))
Example #37
def intcomma_filter(number, cast=True):
    """
    Formats a number with `humanize.intcomma` - if cast is True, will ensure
    that all strings are converted to ints, which could raise an exception.
    """

    if isinstance(number, basestring) and cast:
        number = int(number)

    return humanize.intcomma(number)
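
For illustration, a quick sketch of the filter's behavior (hypothetical calls; a Python 2 environment is assumed, since the filter relies on `basestring`):

print(intcomma_filter(1234567))    # '1,234,567'
print(intcomma_filter('1234567'))  # cast to int first, then '1,234,567'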
Example #38
0
    def _system_price(self, args, msg, system, system_id):
        item = ' '.join(args)
        res = self._item_picker(item)
        if isinstance(res, basestring):
            return res
        type_id, type_name = res

        try:
            resp = requests.get('http://api.eve-central.com/api/marketstat?typeid={}&usesystem={}'.format(type_id, system_id))
            root = ElementTree.fromstring(resp.content)
        except Exception:
            return "An error occurred trying to get the price for {}".format(type_name)

        return "{} @ {} | Sell: {} | Buy: {}".format(
            type_name,
            system,
            intcomma(float(root.findall("./marketstat/type[@id='{}']/sell/min".format(type_id))[0].text)),
            intcomma(float(root.findall("./marketstat/type[@id='{}']/buy/max".format(type_id))[0].text)),
        )
Example #39
0
    def cmd_price(self, args, msg):
        """Returns the price of an item in a particular system"""
        if len(args) < 2:
            return '!price <system name> <item>'
        item = ' '.join(args[1:])
        system_id = self._system_picker(args[0])
        if isinstance(system_id, basestring):
            return system_id
        item = self._item_picker(item)
        if isinstance(item, basestring):
            return item
        type_id, type_name = item
        sell, buy = self._get_evecentral_price(type_id, system_id)
        return '{} @ {} | Sell: {} | Buy: {}'.format(
            type_name,
            self.map.get_system_name(system_id),
            intcomma(sell),
            intcomma(buy)
        )
Example #40
0
def format_longint(data, style='wiki', **kwargs):
    if style == 'wiki':
        return humanize.intcomma(data)
    elif style == 'json':
        if data:
            return int(data)
        else:
            return data
    else:
        return default_formatter(data)
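
A brief usage sketch (assuming `default_formatter` simply echoes its input back):

print(format_longint(1234567))                  # wiki style: '1,234,567'
print(format_longint(1234567.0, style='json'))  # json style: 1234567
print(format_longint(None, style='json'))       # falsy values pass through: None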
Example #41
0
def format_volume(value):
    try:
        if value == 0:
            return "0m<sup>3</sup>"
        if value < 0.01:
            return "%.4fm<sup>3</sup>" % value
        if value < 1:
            return "%.2fm<sup>3</sup>" % value
        return "%sm<sup>3</sup>" % humanize.intcomma(int(value))
    except (TypeError, ValueError):
        return "unknown m<sup>3</sup>"
Example #42
0
File: youtube.py Project: w4/belle
def youtubevideo(bot, args, sender, source):
    """Ran whenever a YouTube video is sent"""
    if not dave.config.redis.exists("youtube:{}".format(args[0])):
        req = get("{}&id={}".format(BASE_URL, args[0]),
                  headers={'user-agent': 'irc bot (https://github.com/w4)'})

        if req.status_code != 200:
            print("{} from YouTube API: {}", req.status_code, req.json())
            return

        req = req.json()

        if not req["pageInfo"]["totalResults"]:
            bot.msg(source, "That video doesn't exist.")
            return

        dave.config.redis.setex("youtube:{}".format(args[0]), 400,
                                pickle.dumps(req))
    else:
        req = pickle.loads(dave.config.redis.get("youtube:{}".format(args[0])))

    resp = req["items"][0]

    bot.msg(source, assembleFormattedText(
        A.normal[
            A.bold[resp["snippet"]["title"]],
            " ({}) by {} uploaded {}. {} views, +{}/-{}.".format(
                str(isodate.parse_duration(resp["contentDetails"]["duration"])),
                resp["snippet"]["channelTitle"],
                naturaltime(
                    datetime.now(timezone.utc)
                        - isodate.parse_datetime(resp["snippet"]["publishedAt"])
                ),
                intcomma(resp["statistics"]["viewCount"]),
                intcomma(resp["statistics"]["likeCount"]),
                intcomma(resp["statistics"]["dislikeCount"])
            )
        ]
    ))
Example #43
0
File: reddit.py Project: w4/belle
def post(bot, args, sender, source):
    """Ran whenever a reddit post is sent"""
    if dave.config.redis.exists("reddit:post:mentioned:{}:{}".format(args[0], source)):
        # if this post was mentioned in the last x seconds (see the setex below),
        # don't spam info about it
        return

    if not dave.config.redis.exists("reddit:post:{}".format(args[0])):
        req = get("https://reddit.com/{}.json?limit=1".format(args[0]),
                  headers={'user-agent': 'irc bot (https://github.com/w4)'})

        if req.status_code != 200:
            return

        req = req.json()

        dave.config.redis.setex("reddit:post:{}".format(args[0]), 200,
                                pickle.dumps(req))
    else:
        req = pickle.loads(dave.config.redis.get("reddit:post:{}".format(args[0])))

    resp = req[0]["data"]["children"][0]["data"]

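    # don't give info about this post again in this channel for 300 seconds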
    dave.config.redis.setex("reddit:post:mentioned:{}:{}".format(args[0], source), 300, 1)

    bot.msg(source, assembleFormattedText(
        A.normal[
            A.bold[A.fg.lightRed["[NSFW] "]] if resp["over_18"] else "",
            A.normal[resp["title"][:75] + (resp["title"][75:] and '...')],
            " by ", A.normal[resp["author"]],
            " (/r/{}) {} comments, {} points, posted {}".format(
                resp["subreddit"],
                intcomma(resp["num_comments"]),
                intcomma(resp["score"]),
                naturaltime(datetime.utcnow().timestamp() - resp["created_utc"])
            ),
        ]
    ))
Example #44
0
def _print_cyclic_core(
        x, y, xcore, ycore, essential,
        t0, prm, fol):
    """Print results of cyclic core computation.

    Assert support and covering properties.
    """
    if log.getEffectiveLevel() > logging.INFO:
        return
    # assert
    if essential != fol.false:
        assert support_issubset(essential, prm.p_vars, fol)
    if xcore != fol.false:
        assert support_issubset(xcore, prm.p_vars, fol)
    if ycore != fol.false:
        assert support_issubset(ycore, prm.p_vars, fol)
    # print
    m = fol.count(x)
    n = fol.count(y)
    log.info((
        '(x={m}, y={n}) implicants of '
        'covering problem').format(
            m=humanize.intcomma(m),
            n=humanize.intcomma(n)))
    m = fol.count(xcore)
    n = fol.count(ycore)
    log.info((
        '(x={m}, y={n}) implicants after '
        'removing essential elements').format(
            m=humanize.intcomma(m),
            n=humanize.intcomma(n)))
    n = fol.count(essential)
    log.info('{n} primes are essential'.format(
        n=humanize.intcomma(n)))
    t1 = time.time()
    dt = t1 - t0
    log.info('cyclic core took {dt}'.format(
        dt=humanize.naturaldelta(dt)))
Example #45
0
    def cmd_id(self, args, msg):
        """Provides an overview of a character's activity in-game"""
        if len(args) == 0:
            return '!id <character name>'
        char_name = ' '.join(args)

        result = self.get_eveapi().eve.CharacterID(names=char_name.strip())
        char_name = result.characters[0].name
        char_id = result.characters[0].characterID

        if char_id == 0:
            return 'Unknown character {}'.format(char_name)

        headers, res = ZKillboard().characterID(char_id).kills().pastSeconds(60 * 60 * 24 * 7).get()

        from collections import defaultdict, Counter

        kill_types = defaultdict(int)
        ship_types = defaultdict(int)
        alli_assoc = defaultdict(int)
        sum_value = 0.0
        for kill in res:
            kill_type_id = int(kill['victim']['shipTypeID'])
            if kill_type_id > 0:
                kill_types[self.types[unicode(kill_type_id)]] += 1
            sum_value += float(kill['zkb']['totalValue'])
            for attk in kill['attackers']:
                if attk['allianceName'] is not None and attk['allianceName'].strip() != '':
                    alli_assoc[attk['allianceName']] += 1
                if int(attk['characterID']) == char_id:
                    ship_type_id = int(attk['shipTypeID'])
                    if ship_type_id > 0:
                        ship_types[self.types[unicode(ship_type_id)]] += 1
                    break
        if len(res) == 0:
            return '{} has had no kills in the last week'.format(char_name)

        kill_types = Counter(kill_types).most_common(5)
        ship_types = Counter(ship_types).most_common(5)
        alli_assoc = Counter(alli_assoc).most_common(5)

        return '{}, {} kill(s) ({} ISK) in the last week\nActive Systems: {}\nTop 5 Killed Types: {}\nTop 5 Ship: {}\nTop 5 Associates: {}'.format(
            char_name,
            len(res),
            intcomma(sum_value),
            ', '.join(set([self.map.node[int(x['solarSystemID'])]['name'] for x in res])),
            ', '.join(['{} ({})'.format(x, y) for x, y in kill_types]),
            ', '.join(['{} ({})'.format(x, y) for x, y in ship_types]),
            ', '.join([x for x, y in alli_assoc])
        )
Example #46
0
File: reddit.py Project: w4/belle
def user(bot, args, sender, source):
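    """Run whenever a reddit user is mentioned"""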
    if dave.config.redis.exists("reddit:user:mentioned:{}:{}".format(args[0], source)):
        # if this user was mentioned in the last x seconds (see the setex below), don't
        # spam info about them
        return

    if not dave.config.redis.exists("reddit:user:{}".format(args[0])):
        req = get("https://reddit.com/u/{}/about.json".format(args[0]),
                  headers={'user-agent': 'irc bot (https://github.com/w4)'})

        if req.status_code != 200:
            return

        req = req.json()

        dave.config.redis.setex("reddit:user:{}".format(args[0]), 600,
                                pickle.dumps(req))
    else:
        req = pickle.loads(dave.config.redis.get("reddit:user:{}".format(args[0])))

    resp = req["data"]

    # don't give info about this user again in this channel for 300 seconds
    dave.config.redis.setex("reddit:user:mentioned:{}:{}".format(args[0], source), 300, 1)

    bot.msg(source, assembleFormattedText(
        A.normal[
            A.normal[resp["name"]],
            ", a redditor for {}. {} link karma, {} comment karma.".format(
                naturaldelta(datetime.utcnow().timestamp() - resp["created"]),
                intcomma(resp["link_karma"]),
                intcomma(resp["comment_karma"])
            ),
            " Verified user." if resp["verified"] else "",
            " Reddit employee." if resp["is_employee"] else ""
        ]
    ))
Example #47
0
def get_contracts(timeout=0):
    """Load the recent character contracts."""
    contracts, _, _ = char.contracts()
    contracts = contracts.values()

    export_fd = None
    if FLAGS.export_contracts:
        export_fd = open(FLAGS.export_contracts, 'a')

    if timeout:
        now = time.mktime(datetime.datetime.utcnow().timetuple())
        contracts = [c for c in contracts
                     if c['status'] != 'Completed'
                     or c['expired'] > now - timeout]

    if contracts:
        char_ids = set([c['issuer'] for c in contracts])
        char_ids.update(set([c['assignee'] for c in contracts]))
        names, _, _ = evelink.eve.EVE().character_names_from_ids(char_ids)

        if FLAGS.print_report:
            print '%26s | %20s | %20s | %14s | %s' % (
                'When', 'From', 'To', 'Amount', 'Status'
            )
            print '-'*105

        for contract in sorted(contracts,
                               key=lambda x: (x['status'], x['issued'])):
            if FLAGS.print_report:
                print '%20s (%2sd) | %20s | %20s | %14s | %s' % (
                    datetime.datetime.fromtimestamp(contract['issued']),
                    (datetime.datetime.fromtimestamp(contract['expired']) -
                    datetime.datetime.now()).days,
                    names[contract['issuer']],
                    names[contract['assignee']],
                    humanize.intcomma(int(contract['price'])),
                    contract['status'])

            if export_fd:
                export_fd.write('\t'.join([
                    str(datetime.datetime.fromtimestamp(contract['issued'])),
                    str(datetime.datetime.fromtimestamp(contract['expired'])),
                    names[contract['issuer']],
                    names[contract['assignee']],
                    str(contract['price']),
                    contract['status'],
                ]))
                export_fd.write('\n')

    if export_fd:
        export_fd.close()
Example #48
0
    if FLAGS.print_report:
        print '\n\n==== %s ====' % char_name

    categories, tx_details = get_journal_tx()

    add_market_tx(categories)

    if FLAGS.print_report:
        if categories:
            print '\n%10s  %12s  %12s  %12s  %12s' % (
                'Summary', 'Bounties', 'Duty', 'Sales', 'Purchases')
            for date in sorted(categories.keys()):
                cats = categories[date]
                print '%10s: %12s  %12s  %12s  %12s' % (
                    humanize.naturalday(date),
                    humanize.intcomma(int(cats['bounties'])),
                    humanize.intcomma(int(cats['duty'])),
                    humanize.intcomma(int(cats['sales'])),
                    humanize.intcomma(int(cats['purchases'])),
                )

        info, _, _ = char.wallet_info()
        print '\nBalance: %s' % humanize.intcomma(info['balance'])

    get_contracts(2*24*60*60)

    events = []
    if keywords:
        events.extend(search_calendar(keywords))

    if events and FLAGS.print_report:
Example #49
0
def intkilo(num):
    i = int(num)
    if i >= 10000:
        # Floor division keeps the result an int under both Python 2 and 3.
        return "%sk+" % humanize.intcomma(i // 1000)

    return humanize.intcomma(i)
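
A quick sketch of the cutoff behavior:

print(intkilo(9999))     # '9,999'
print(intkilo(10000))    # '10k+'
print(intkilo(1234567))  # '1,234k+'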
Example #50
0
    firstrow = fp.parse_form_line(f1.get_first_row(), filing_dict['version'])

    line_sequence = 0

    while True:
        line_sequence += 1
        row = f1.get_body_row()

        if not row:
            break

        try:
            linedict = fp.parse_form_line(row, filing_dict['version'])
            filing_dict['transactions'].append(linedict)

        except form.ParserMissingError:
            msg = 'process_filing_body: Unknown line type in filing %s line %s: type=%s. Skipping.' % (filingnum, line_sequence, row[0])
            print msg
            continue

    payload.append(filing_dict)

    print Style.BRIGHT + Fore.MAGENTA + "Processed records: " + Style.BRIGHT + Fore.YELLOW + "%s" % humanize.intcomma(len(filing_dict['transactions']))
    print Style.BRIGHT + Fore.YELLOW + filing_dict['formtype']
    print Style.BRIGHT + Fore.YELLOW + filing_dict['version']
    print Style.BRIGHT + Fore.YELLOW + filing_dict['filer_id']

end = datetime.datetime.now()
print Style.BRIGHT + Fore.MAGENTA + "Time to complete: " + Style.BRIGHT + Fore.YELLOW + "%s" % (end - start)
Example #51
0
def plusshumanize(total, due):
    """Return the humanized sum of the two amounts."""
    return humanize.intcomma(total + due)
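
For example:

print(plusshumanize(1000000, 234567))  # '1,234,567'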
Example #52
0
    def initialize(self, request, response):
        super(BaseRequestHandler, self).initialize(request, response)
        self.run_handler = True

        if abuse.is_abuse(self.request):
            self.run_handler = False
            self.response.out.write(
                'You are destroying our server with your request rate. Please implement rate-limiting, respect robots.txt, and/or email [email protected]'
            )
            return

        url = urlparse.urlsplit(self.request.url)

        # Always turn https on!
        # This only 'takes effect' when it is returned on an https domain,
        # so we still need to make sure to add an https redirect.
        https_redirect_duration = 60 * 60 * 24 * 365
        if 'dev.dancedeets.com' not in url.netloc:
            self.response.headers.add_header('Strict-Transport-Security', 'max-age=%s' % https_redirect_duration)
        # This is how we detect if the incoming url is on https in GAE Flex (we cannot trust request.url)
        http_only_host = 'dev.dancedeets.com' in url.netloc or 'localhost' in url.netloc
        if request.method == 'GET' and request.headers.get('x-forwarded-proto', 'http') == 'http' and not http_only_host:
            new_url = urlparse.urlunsplit([
                'https',
                url.netloc,
                url.path,
                url.query,
                url.fragment,
            ])
            self.run_handler = False
            self.redirect(new_url, permanent=True, abort=True)

        login_url = self.get_login_url()
        redirect_url = self.handle_alternate_login(request)
        if redirect_url:
            self.run_handler = False
            # We need to run with abort=False here, or otherwise our set_cookie calls don't work. :(
            # Reported in https://github.com/GoogleCloudPlatform/webapp2/issues/111
            self.redirect(redirect_url, abort=False)
            return

        self.setup_login_state(request)

        self.display['attempt_autologin'] = 1
        # If they've expired, and not already on the login page, then be sure we redirect them to there...
        redirect_for_new_oauth_token = (self.user and self.user.expired_oauth_token)
        if redirect_for_new_oauth_token:
            logging.error("We have a logged in user, but an expired access token. How?!?!")
        # TODO(lambert): delete redirect_for_new_oauth_token codepaths
        # TODO(lambert): delete codepaths that handle user-id but no self.user. assume this entire thing relates to no-user.
        if redirect_for_new_oauth_token or (self.requires_login() and (not self.fb_uid or not self.user)):
            # If we're getting a referer id and not signed up, save off a cookie until they sign up
            if not self.fb_uid:
                logging.info("No facebook cookie.")
            if not self.user:
                logging.info("No database user object.")
            if self.user and self.user.expired_oauth_token:
                logging.info("User's OAuth token expired")
                #self.set_cookie('fbsr_' + FACEBOOK_CONFIG['app_id'], '', 'Thu, 01 Jan 1970 00:00:01 GMT')
                #logging.info("clearing cookie %s", 'fbsr_' + FACEBOOK_CONFIG['app_id'])
                self.set_cookie('User-Message', "You changed your facebook password, so will need to click login again.")
            if self.request.get('referer'):
                self.set_cookie('User-Referer', self.request.get('referer'))
            if not self.is_login_page():
                logging.info("Login required, redirecting to login page: %s", login_url)
                self.run_handler = False
                return self.redirect(login_url)
            else:
                self.display['attempt_autologin'] = 0  # do not attempt auto-login. wait for them to re-login
                self.fb_uid = None
                self.access_token = None
                self.user = None
        # If they have a fb_uid, let's do lookups on that behalf (does not require a user)
        if self.fb_uid:
            self.setup_fbl()
            # Always look up the user's information for every page view...?
            self.fbl.request(fb_api.LookupUser, self.fb_uid)
        else:
            self.fbl = fb_api.FBLookup(None, None)
        self.fbl.debug = 'fbl' in self.debug_list
        if self.user:
            self.jinja_env.filters['date_only_human_format'] = self.user.date_only_human_format
            self.jinja_env.filters['date_human_format'] = self.user.date_human_format
            self.jinja_env.filters['time_human_format'] = self.user.time_human_format
            self.jinja_env.globals['duration_human_format'] = self.user.duration_human_format
            self.display['messages'] = self.user.get_and_purge_messages()
        else:
            self.jinja_env.filters['date_only_human_format'] = dates.date_only_human_format
            self.jinja_env.filters['date_human_format'] = dates.date_human_format
            self.jinja_env.filters['time_human_format'] = dates.time_human_format
            self.jinja_env.globals['duration_human_format'] = dates.duration_human_format
            self.display['login_url'] = login_url
        self.jinja_env.filters['datetime_format'] = dates.datetime_format

        self.jinja_env.globals['dd_event_url'] = urls.dd_event_url
        self.jinja_env.globals['raw_fb_event_url'] = urls.raw_fb_event_url
        self.jinja_env.globals['dd_admin_event_url'] = urls.dd_admin_event_url
        self.jinja_env.globals['dd_admin_source_url'] = urls.dd_admin_source_url
        self.jinja_env.globals['event_image_url'] = urls.event_image_url

        locales = self.request.headers.get('Accept-Language', '').split(',')
        self.locales = [x.split(';')[0] for x in locales]
        if self.request.get('hl'):
            self.locales = self.request.get('hl').split(',')
        logging.info('Accept-Language is %s, final locales are %s', self.request.headers.get('Accept-Language', ''), self.locales)
        self.display['request'] = request
        self.display['app_id'] = facebook.FACEBOOK_CONFIG['app_id']
        self.display['prod_mode'] = self.request.app.prod_mode

        self.display['base_hostname'] = 'www.dancedeets.com' if self.request.app.prod_mode else 'dev.dancedeets.com'
        self.display['full_hostname'] = self._get_full_hostname()

        self.display['email_suffix'] = ''

        self.display['keyword_tokens'] = [{'value': x.public_name} for x in event_types.STYLES]
        fb_permissions = 'rsvp_event,email,user_events'
        if self.request.get('all_access'):
            fb_permissions += ',read_friendlists,manage_pages'
        self.display['fb_permissions'] = fb_permissions

        already_used_mobile = self.user and (
            'react-android' in self.user.clients or 'react-ios' in self.user.clients or 'android' in self.user.clients or
            'ios' in self.user.clients or False
        )
        mobile_platform = mobile.get_mobile_platform(self.request.user_agent)
        show_mobile_promo = not mobile_platform and not already_used_mobile
        self.display['show_mobile_promo'] = show_mobile_promo
        self.display['mobile_platform'] = mobile_platform
        if mobile_platform == mobile.MOBILE_ANDROID:
            self.display['mobile_app_url'] = mobile.ANDROID_URL
        elif mobile_platform == mobile.MOBILE_IOS:
            self.display['mobile_app_url'] = mobile.IOS_URL
        self.display['mobile'] = mobile
        self.display['mobile_show_smartbanner'] = True

        start = time.time()
        self.display['ip_location'] = self.get_location_from_headers()
        timelog.log_time_since('Getting City from IP', start)

        self.display['styles'] = event_types.STYLES
        self.display['cities'] = [(
            'North America', [
                'Albuquerque',
                'Austin',
                'Baltimore',
                'Boston',
                'Chicago',
                'Detroit',
                'Houston',
                'Las Vegas',
                'Los Angeles',
                'Miami',
                'New York City',
                'Orlando',
                'Philadelphia',
                'Portland',
                'San Francisco',
                'San Jose',
                'San Diego',
                'Seattle',
                'Washington DC',
                '',
                'Calgary',
                'Edmonton',
                'Montreal',
                'Ottawa',
                'Toronto',
                'Vancouver',
                '',
                'Mexico: Mexico City',
            ]
        ), (
            'Latin/South America', [
                'Argentina: Buenos Aires',
                'Argentina: Neuquen',
                'Brazil: Belo Horizonte',
                'Brazil: Brasilia',
                'Brazil: Curitiba',
                'Brazil: Porto Alegre',
                'Brazil: Rio de Janeiro',
                'Brazil: Sao Paulo',
                'Colombia',
                'Chile: Santiago',
                'Peru: Lima',
            ]
        ), (
            'Europe', [
                'Austria: Vienna',
                'Belgium: Brussels',
                'Czech Republic: Prague',
                'Denmark: Copenhagen',
                'Estonia: Tallinn',
                'Finland: Helsinki',
                'France: Nantes',
                'France: Paris',
                'France: Perpignan',
                'Germany: Berlin',
                'Germany: Hamburg',
                u'Germany: Köln/Cologne',
                'Germany: Leipzig',
                u'Germany: München/Munich',
                'Italy: Milan',
                'Italy: Rome',
                'Netherlands: Amsterdam',
                'Norway: Oslo',
                'Poland: Warsaw',
                'Poland: Wroclaw',
                'Russia: Moscow',
                'Slovakia: Bratislava',
                'Spain: Barcelona',
                'Sweden: Malmoe',
                'Sweden: Stockholm',
                'Switzerland: Basel',
                'Switzerland: Geneve',
                'Switzerland: Zurich',
                'United Kingdom: Leeds',
                'United Kingdom: London',
            ]
        ), (
            'Asia', [
                'Hong Kong',
                'India',
                u'Japan: Tokyo (日本東京)',
                u'Japan: Osaka (日本大阪)',
                'Korea',
                u'Taiwan: Kaohsiung (台灣高雄市)',
                u'Taiwan: Taipei (台灣台北市)',
                u'Taiwan: Taichung (台灣臺中市)',
                'Philippines',
                'Singapore',
                'Australia: Melbourne',
                'Australia: Perth',
                'Australia: Sydney',
            ]
        )]

        self.display['deb'] = self.request.get('deb')
        self.display['debug_list'] = self.debug_list
        self.display['user'] = self.user

        webview = bool(request.get('webview'))
        self.display['webview'] = webview
        if webview:
            self.display['class_base_template'] = '_new_base_webview.html'
        else:
            self.display['class_base_template'] = '_new_base.html'

        totals = rankings.retrieve_summary()
        totals['total_events'] = humanize.intcomma(totals['total_events'])
        totals['total_users'] = humanize.intcomma(totals['total_users'])
        self.display.update(totals)

        self.setup_inlined_css()
Example #53
0
def _mesh_extract_single_awp(sd_array: List[SeismicData], information: dict, im: InternalMesh, slices: str=None,
                             interval: str=None) -> bool:
    """
    Takes an InternalMesh object, the mesh information file, and the iterator, and generates, using
    one core only, the mesh in AWP-ODC format.

    Args:
        sd_array (List[SeismicData]): The buffer of SeismicData points used for each query batch.
        information (dict): The mesh information dictionary (from the XML config file).
        im (InternalMesh): The internal representation of the AWP mesh.
        slices (str): Optional slice range (e.g. "3-5") to extract.
        interval (str): Optional percentage range (e.g. "0-10") to extract.

    Returns:
        True, if successful.
    """
    file_out = information["mesh_name"] + ".awp"

    start_point = 0
    end_point = im.total_size

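    # Worked example (assuming im.slice_size is the number of grid points per horizontal
    # slice): slices="3-5" selects grid points [2 * slice_size, 5 * slice_size), i.e.
    # slices 3 through 5 inclusive, while interval="0-10" selects the first 10% of the mesh.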
    if slices is not None:
        if "-" in slices:
            parts = slices.split("-")
            start_point = (int(parts[0]) - 1) * im.slice_size
            end_point = (int(parts[1])) * im.slice_size
        else:
            start_point = (int(slices) - 1) * im.slice_size
            end_point = int(slices) * im.slice_size
    elif interval is not None:
        if "-" in interval:
            parts = interval.split("-")
            start_point = int((int(parts[0]) / 100) * im.full_size)
            end_point = int((int(parts[1]) / 100) * im.full_size)
        else:
            raise ValueError("Interval must be a range (e.g. 0-10 which means generate the first 10% of the mesh).")

    im_iter = AWPInternalMeshIterator(im, start_point, end_point, len(sd_array), sd_array)
    progress = start_point

    sqrt2 = math.sqrt(2)

    with open(os.path.join(information["out_dir"], file_out), "ab+") as fd:
        while progress < im_iter.end_point:
            count = next(im_iter)

            UCVM.query(sd_array[0:count], information["cvm_list"], ["velocity"])

            fl_array = []
            for s in sd_array[0:count]:
                if s.velocity_properties is not None and s.velocity_properties.vs is not None and \
                   s.velocity_properties.vs < information["minimums"]["vs"]:
                    s.set_velocity_data(
                        VelocityProperties(
                            information["minimums"]["vp"], information["minimums"]["vs"],
                            s.velocity_properties.density, s.velocity_properties.qp,
                            s.velocity_properties.qs, s.velocity_properties.vp_source,
                            s.velocity_properties.vs_source, s.velocity_properties.density_source,
                            s.velocity_properties.qp_source, s.velocity_properties.qs_source
                        )
                    )

                # Warn about missing or suspicious material properties before packing them.
                if s.velocity_properties is None or s.velocity_properties.vp is None or \
                   s.velocity_properties.vs is None or s.velocity_properties.density is None:
                    print("Attention! %.3f, %.3f, %.3f has no material properties." % (
                        s.original_point.x_value, s.original_point.y_value, s.original_point.z_value
                    ))
                elif s.velocity_properties.vp / s.velocity_properties.vs < sqrt2:
                    print("Warning: %.3f, %.3f, %.3f has a Vp/Vs ratio of less than sqrt(2)." % (
                        s.original_point.x_value, s.original_point.y_value, s.original_point.z_value
                    ))

                fl_array.append(s.velocity_properties.vp)
                fl_array.append(s.velocity_properties.vs)
                fl_array.append(s.velocity_properties.density)
            s = struct.pack('f' * len(fl_array), *fl_array)
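            # Each grid point occupies 12 bytes (three 4-byte floats: vp, vs, density),
            # so the seek keeps slice output at its absolute offset within the full mesh file.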
            fd.seek(progress * 12)
            fd.write(s)

            progress += count

            print("%-4.2f" % ((progress / (im_iter.end_point - start_point)) * 100.0) +
                  "% complete. Wrote " + humanize.intcomma(count) + " more grid points.")

        print("\nExpected file size is " + im.get_grid_file_size()["display"] + ". " +
              "Actual size is " + humanize.naturalsize(os.path.getsize(
              os.path.join(information["out_dir"], file_out)), gnu=False) + ".")

        if im.get_grid_file_size()["real"] == \
           os.path.getsize(os.path.join(information["out_dir"], file_out)):
            print("Generated file size matches the expected file size.")
        else:
            print("ERROR! File sizes DO NOT MATCH!")

    return True
Example #54
0
def format_isk(value):
    try:
        return "%s" % humanize.intcomma(int(float(value)))
    except (TypeError, ValueError):
        return value
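
A couple of sample calls (with the float conversion moved inside the try block, non-numeric input now falls through unchanged instead of raising):

print(format_isk("1234567.89"))  # '1,234,567'
print(format_isk("n/a"))         # returned as-is: 'n/a'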
Example #55
0
def _mesh_extract_single_rwg(sd_array: List[SeismicData], information: dict, im: InternalMesh) -> bool:
    """
    Takes an InternalMesh object, the mesh information file, and the iterator, and generates, using
    one core only, the mesh in RWG format.

    Args:
        sd_array (List[SeismicData]): The buffer of SeismicData points used for each query batch.
        information (dict): The mesh information dictionary (from the XML config file).
        im (InternalMesh): The internal representation of the RWG mesh.

    Returns:
        True, if successful.
    """
    file_out_vp = information["mesh_name"] + ".rwgvp"
    file_out_vs = information["mesh_name"] + ".rwgvs"
    file_out_dn = information["mesh_name"] + ".rwgdn"

    im_iter = RWGInternalMeshIterator(im, 0, im.total_size, len(sd_array), sd_array)

    progress = 0

    with open(os.path.join(information["out_dir"], file_out_vp), "wb") as fd_vp, \
         open(os.path.join(information["out_dir"], file_out_vs), "wb") as fd_vs, \
         open(os.path.join(information["out_dir"], file_out_dn), "wb") as fd_dn:
        while progress < im_iter.end_point:
            count = next(im_iter)
            progress += count

            UCVM.query(sd_array[0:count], information["cvm_list"], ["velocity"], None)

            vp_array = []
            vs_array = []
            dn_array = []
            for s in sd_array[0:count]:
                if s.velocity_properties is not None and s.velocity_properties.vs is not None and \
                   s.velocity_properties.vs < information["minimums"]["vs"]:
                    s.set_velocity_data(
                        VelocityProperties(
                            information["minimums"]["vp"], information["minimums"]["vs"],
                            s.velocity_properties.density, s.velocity_properties.qp,
                            s.velocity_properties.qs, s.velocity_properties.vp_source,
                            s.velocity_properties.vs_source, s.velocity_properties.density_source,
                            s.velocity_properties.qp_source, s.velocity_properties.qs_source
                        )
                    )

                # Warn about missing or suspicious material properties before packing them.
                if s.velocity_properties is None or s.velocity_properties.vp is None or \
                   s.velocity_properties.vs is None or s.velocity_properties.density is None:
                    print("Attention! %.3f, %.3f, %.3f has no material properties." % (
                        s.original_point.x_value, s.original_point.y_value, s.original_point.z_value
                    ))
                elif s.velocity_properties.vp / s.velocity_properties.vs < 1.45:
                    print("Warning: %.3f, %.3f, %.3f has a Vp/Vs ratio of less than 1.45." % (
                        s.original_point.x_value, s.original_point.y_value, s.original_point.z_value
                    ))

                vp_array.append(s.velocity_properties.vp / 1000)
                vs_array.append(s.velocity_properties.vs / 1000)
                dn_array.append(s.velocity_properties.density / 1000)
            s = struct.pack('f' * len(vp_array), *vp_array)
            fd_vp.write(s)
            s = struct.pack('f' * len(vs_array), *vs_array)
            fd_vs.write(s)
            s = struct.pack('f' * len(dn_array), *dn_array)
            fd_dn.write(s)

            print("%-4.2f" % ((progress / im_iter.end_point) * 100.0) +
                  "% complete. Wrote " + humanize.intcomma(count) + " more grid points.")

        print("\nExpected file size is " + im.get_grid_file_size()["display"] + ". " +
              "Actual size is " + humanize.naturalsize(os.path.getsize(
              os.path.join(information["out_dir"], file_out_vp)), gnu=False) + ".")

        if im.get_grid_file_size()["real"] == \
           os.path.getsize(os.path.join(information["out_dir"], file_out_vp)) and \
           im.get_grid_file_size()["real"] == \
           os.path.getsize(os.path.join(information["out_dir"], file_out_vs)) and \
           im.get_grid_file_size()["real"] == \
           os.path.getsize(os.path.join(information["out_dir"], file_out_dn)):
            print("Generated file size matches the expected file size.")
        else:
            print("ERROR! File sizes DO NOT MATCH!")

    return True
Example #56
0
def natural_number(number):
    """Filter used to present integers cleanly"""
    return humanize.intcomma(number)
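
For example:

print(natural_number(1000000))  # '1,000,000'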
Example #57
0
def _mesh_extract_mpi_awp(sd_array: List[SeismicData], information: dict, im: InternalMesh, start_end: tuple) -> bool:
    """
    Extract an AWP mesh using MPI. Internal method.

    Args:
        sd_array (List[SeismicData]): The buffer of SeismicData points used for each query batch.
        information (dict): The mesh information dictionary (from the XML config file).
        im (InternalMesh): The internal representation of the AWP mesh.
        start_end (tuple): The (start, end) grid-point range this rank is responsible for.

    Returns:
        True, if successful. Raises an error if not successful.
    """
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    file_out = os.path.join(information["out_dir"], information["mesh_name"]) + ".awp"

    im_iter = AWPInternalMeshIterator(im, start_end[0], start_end[1], len(sd_array), sd_array)

    progress = start_end[0]
    sqrt2 = math.sqrt(2)

    fh = MPI.File.Open(MPI.COMM_WORLD, file_out, amode=MPI.MODE_WRONLY | MPI.MODE_CREATE)

    while progress < im_iter.end_point:
        count = next(im_iter)

        UCVM.query(sd_array[0:count], information["cvm_list"], ["velocity"])

        fl_array = []
        for s in sd_array[0:count]:
            if s.velocity_properties is not None and s.velocity_properties.vs is not None and \
               s.velocity_properties.vs < information["minimums"]["vs"]:
                s.set_velocity_data(
                    VelocityProperties(
                        information["minimums"]["vp"], information["minimums"]["vs"],
                        s.velocity_properties.density, s.velocity_properties.qp,
                        s.velocity_properties.qs, s.velocity_properties.vp_source,
                        s.velocity_properties.vs_source, s.velocity_properties.density_source,
                        s.velocity_properties.qp_source, s.velocity_properties.qs_source
                    )
                )

            # Warn about missing or suspicious material properties before packing them.
            if s.velocity_properties is None or s.velocity_properties.vp is None or \
               s.velocity_properties.vs is None or s.velocity_properties.density is None:
                print("[Node %d] Attention! %.3f, %.3f, %.3f has no material properties." % (
                    rank, s.original_point.x_value, s.original_point.y_value, s.original_point.z_value
                ), flush=True)
            elif s.velocity_properties.vp / s.velocity_properties.vs < sqrt2:
                print("[Node %d] Warning: %.3f, %.3f, %.3f has a Vp/Vs ratio of less than sqrt(2)." % (
                    rank, s.original_point.x_value, s.original_point.y_value, s.original_point.z_value
                ), flush=True)

            fl_array.append(s.velocity_properties.vp)
            fl_array.append(s.velocity_properties.vs)
            fl_array.append(s.velocity_properties.density)
        s = struct.pack('f' * len(fl_array), *fl_array)
        fh.Write_at(progress * 12, s)
        fh.Sync()

        progress += count

        print("[Node %d] %-4.2f" % (rank, ((progress - start_end[0]) / (start_end[1] - start_end[0])) * 100.0) +
              "% complete. Wrote " + humanize.intcomma(count) + " more grid points.", flush=True)

    comm.Barrier()
    fh.Close()

    if rank == 0:
        print("\n[Node " + str(rank) + "] Extraction job fully complete.")

        # print("\n[Node " + str(rank) + "] Expected file size is " + im.get_grid_file_size()["display"] + ". " +
        #       "Actual size is " + humanize.naturalsize(os.path.getsize(os.path.join(file_out)), gnu=False) + ".",
        #       flush=True)

        # if im.get_grid_file_size()["real"] == os.path.getsize(file_out):
        #     print("Generated file size matches the expected file size.")
        # else:
        #     print("ERROR! File sizes DO NOT MATCH!")

    return True