Example #1
def restart(timestamp):
    """
    Dumps data for the reachable nodes into a JSON file.
    Loads all reachable nodes from Redis into the crawl set.
    Removes keys for all nodes from current crawl.
    Updates excluded networks with current list of bogons.
    Updates number of reachable nodes and most common height in Redis.
    """
    redis_pipe = REDIS_CONN.pipeline()

    nodes = REDIS_CONN.smembers('up')  # Reachable nodes
    redis_pipe.delete('up')

    for node in nodes:
        (address, port, services) = node[5:].split("-", 2)
        redis_pipe.sadd('pending', (address, int(port), int(services)))

    for key in get_keys(REDIS_CONN, 'node:*'):
        redis_pipe.delete(key)

    for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
        redis_pipe.delete(key)

    redis_pipe.execute()

    update_excluded_networks()

    reachable_nodes = len(nodes)
    logging.info("Reachable nodes: %d", reachable_nodes)
    REDIS_CONN.lpush('nodes', (timestamp, reachable_nodes))

    height = dump(timestamp, nodes)
    REDIS_CONN.set('height', height)
    logging.info("Height: %d", height)
Example #2
def main(argv):
    test_conn()

    if len(argv) < 3 or not os.path.exists(argv[1]):
        print("Usage: crawl.py [config] [master|slave]")
        return 1

    # Initialize global conf
    init_conf(argv)

    # Initialize logger
    loglevel = logging.INFO
    if CONF['debug']:
        loglevel = logging.DEBUG

    logformat = ("[%(process)d] %(asctime)s,%(msecs)05.1f %(levelname)s "
                 "(%(funcName)s) %(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='a')
    print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))

    global REDIS_CONN
    REDIS_CONN = new_redis_conn(db=CONF['db'])

    if CONF['master']:
        REDIS_CONN.set('crawl:master:state', "starting")
        REDIS_CONN.set('crawl:master:blockchain', CONF['BLOCKCHAIN'])
        logging.info("Removing all keys")
        redis_pipe = REDIS_CONN.pipeline()
        for b in all_chains:
            redis_pipe.delete('up-{}'.format(b))
        redis_pipe.delete('up')
        for key in get_keys(REDIS_CONN, 'node:*'):
            redis_pipe.delete(key)
        for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
            redis_pipe.delete(key)
        redis_pipe.delete('pending')
        redis_pipe.execute()
        set_pending()
        update_excluded_networks()
        REDIS_CONN.set('crawl:master:state', "running")

    # Spawn workers (greenlets) including one worker reserved for cron tasks
    workers = []
    if CONF['master']:
        workers.append(gevent.spawn(cron))
    for _ in xrange(CONF['workers'] - len(workers)):
        workers.append(gevent.spawn(task))
    logging.info("Workers: %d", len(workers))
    gevent.joinall(workers)

    return 0
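Since main() reads its own argv and returns an exit status (per the usage check at the top), it is presumably driven by a conventional entry point along these lines; this wrapper is a sketch and is not part of the excerpt above:

if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))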
Example #3
def main(argv):
    if len(argv) < 3 or not os.path.exists(argv[1]):
        print("Usage: crawl.py [config] [master|slave]")
        return 1

    # Initialize global conf
    init_conf(argv)

    # Initialize logger
    loglevel = logging.INFO
    if CONF['debug']:
        loglevel = logging.DEBUG

    logformat = ("[%(process)d] %(asctime)s,%(msecs)05.1f %(levelname)s "
                 "(%(funcName)s) %(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='a')
    print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))

    global REDIS_CONN
    REDIS_CONN = new_redis_conn(db=CONF['db'])

    if CONF['master']:
        REDIS_CONN.set('crawl:master:state', "starting")
        logging.info("Removing all keys")
        redis_pipe = REDIS_CONN.pipeline()
        redis_pipe.delete('up')
        for key in get_keys(REDIS_CONN, 'node:*'):
            redis_pipe.delete(key)
        for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
            redis_pipe.delete(key)
        redis_pipe.delete('pending')
        redis_pipe.execute()
        set_pending()
        update_excluded_networks()
        REDIS_CONN.set('crawl:master:state', "running")

    # Spawn workers (greenlets) including one worker reserved for cron tasks
    workers = []
    if CONF['master']:
        workers.append(gevent.spawn(cron))
    for _ in xrange(CONF['workers'] - len(workers)):
        workers.append(gevent.spawn(task))
    logging.info("Workers: %d", len(workers))
    gevent.joinall(workers)

    return 0
Example #4
def recovery_with_one_node_option(testparams, session, nodes, dropped_groups, indexes):
    """Test case for recovery with `--one-node` option."""
    result = copy.deepcopy(recovery_skeleton)

    available_nodes = [node for node in nodes
                       if node.group not in dropped_groups]
    node = random.choice(available_nodes)

    keys = utils.get_keys(testparams, session, dropped_groups, indexes)
    # Split inconsistent keys into recovered and still-inconsistent keys,
    # depending on whether the key belongs to the chosen node.
    node_address = socket.gethostbyname(node.host)
    node_recovered_keys = {}
    node_inconsistent_keys = {}
    for key, key_indexes in keys["inconsistent"].items():
        key_id = session.transform(key)
        key_node = session.lookup_address(key_id, node.group)
        if node_address == key_node.host and \
           node.port == key_node.port:
            node_recovered_keys[key] = key_indexes
        else:
            node_inconsistent_keys[key] = key_indexes

    result["keys"]["consistent"] = keys["consistent"]
    result["keys"]["recovered"] = node_recovered_keys
    result["keys"]["inconsistent"] = node_inconsistent_keys

    result["recovery_indexes"] = False

    result["cmd"] = ["dnet_recovery",
                     "--one-node", "{}:{}:2".format(node.host, node.port),
                     "--groups", ','.join([str(g) for g in session.groups]),
                     "dc"]

    return result
Example #5
 def __init__(self, args):
     self.args = args
     AWS_ACCESS_KEY, AWS_SECRET_KEY = get_keys(args.file_key)
     s3client = boto3.client('s3', 
                     aws_access_key_id=AWS_ACCESS_KEY,
                     aws_secret_access_key=AWS_SECRET_KEY)
     self.s3client = s3client
Example #6
def name_isexit():
    while True:
        keys = get_keys()
        for key in keys:
            dict1 = get_values(key)
            write_log(str(dict1))
            try:
                copywriting = dict1[b'copywriting'].decode('utf-8')
                rooms = [dict1[b'room'].decode('utf-8')]
            except:
                pass
            try:
                photo_path = dict1[b'photo_path'].decode('utf-8')
                if photo_path:
                    send_photo(copywriting, photo_path, rooms)
                    del_key(key)
            except:
                pass
            try:
                text = dict1[b'text'].decode('utf-8')
                if text:
                    send_text(copywriting, text, rooms)
                    del_key(key)
            except:
                pass
            time.sleep(60)
Example #7
    def output_reducer(self, label_key, pix_datas):
        """
        fill the data in to a raster image and return the
        names of the generated images
        """
        # download a template raster
        job = os.environ.get('LT_JOB')

        rast_keys = utils.get_keys(s.IN_RASTS % job)
        tmplt_key = [
            k.key for k in rast_keys
            if s.RAST_TRIGGER in k.key
        ][0]
        tmplt_rast = utils.rast_dl(tmplt_key)

        # name raster so it uploads to correct location
        rast_key = s.OUT_RAST_KEYNAME % (job, label_key)
        rast_fn = utils.keyname2filename(rast_key)

        # write data to raster
        utils.data2raster(pix_datas, tmplt_rast, out_fn=rast_fn)

        # upload raster
        rast_key = utils.upload([rast_fn])[0]
        yield label_key, [rast_key.key]
Example #8
    def setup_mapper(self, _, line):
        """
        Reads in a dummy line from the input.txt file, ignores it,
        and sets up the job passed to MRLandTrendrJob by reading from
        the input S3 dir for that job.

        Outputs a list of the S3 keys for each of the input rasters
        """
        job = os.environ.get('LT_JOB')
        print 'Setting up %s' % job
        rast_keys = [k.key for k in utils.get_keys(s.IN_RASTS % job)]
        analysis_rasts = filter(lambda k: s.RAST_SUFFIX in k, rast_keys)
        if not analysis_rasts:
            raise Exception('No analysis rasters specified for job %s' % job)

        # download template rast for grid
        rast_fn = utils.rast_dl(analysis_rasts[0])

        # set up grid
        grid_fn = utils.keyname2filename(s.OUT_GRID % job)
        utils.rast2grid(rast_fn, out_csv=grid_fn)
        utils.upload([grid_fn])

        # note - must yield at end to ensure grid is created
        for i, keyname in enumerate(analysis_rasts):
            yield i, keyname
Example #9
    def setup_mapper(self, _, line):
        """
        Reads in a dummy line from the input.txt file, ignores it,
        and sets up the job passed to MRLandTrendrJob by reading from
        the input S3 dir for that job.

        Outputs a list of the S3 keys for each of the input rasters
        """
        job = os.environ.get('LT_JOB')
        print 'Setting up %s' % job
        analysis_rasts = [
            k.key for k in utils.get_keys(s.IN_RASTS % job)
            if s.RAST_TRIGGER in k.key
        ]
        if not analysis_rasts:
            raise Exception('No analysis rasters specified for job %s' % job)

        # download template rast for grid
        rast_fn = utils.rast_dl(analysis_rasts[0])

        # set up grid
        grid_fn = utils.keyname2filename(s.OUT_GRID % job)
        utils.rast2grid(rast_fn, out_csv=grid_fn)
        utils.upload([grid_fn])

        # note - must yield at end to ensure grid is created
        for i, keyname in enumerate(analysis_rasts):
            yield i, keyname
Example #10
def recovery_with_dump_file_option(testparams, session, nodes, dropped_groups, indexes):
    """Test case for recovery with `--dump-file` option."""
    result = copy.deepcopy(recovery_skeleton)

    keys = utils.get_keys(testparams, session, dropped_groups, indexes)
    # Split inconsistent keys into those that will be recovered and those that will not
    inconsistent_keys_number = int(len(keys["inconsistent"]) * testparams["inconsistent_files_percentage"])
    inconsistent_keys = dict(keys["inconsistent"].items()[:inconsistent_keys_number])
    recovered_keys = dict(keys["inconsistent"].items()[inconsistent_keys_number:])

    result["keys"]["consistent"] = keys["consistent"]
    result["keys"]["recovered"] = recovered_keys
    result["keys"]["inconsistent"] = inconsistent_keys

    result["recovery_indexes"] = False

    dump_file_path = "./dump_file"
    utils.dump_keys_to_file(session, result["keys"]["recovered"], dump_file_path)
    node = random.choice(nodes)
    result["cmd"] = ["dnet_recovery",
                     "--remote", "{}:{}:2".format(node.host, node.port),
                     "--groups", ','.join([str(g) for g in session.groups]),
                     "--dump-file", dump_file_path,
                     "dc"]
    return result
Example #11
 async def change_access_token(cls):
     if cls.last_updated:
         if datetime.now() - cls.last_updated < timedelta(0, 60, 0):
             return True
     query = "?grant_type=client_credentials" \
             + "&client_id={}".format(utils.get_keys()["blizzard"]["id"]) \
             + "&client_secret={}".format(utils.get_keys()["blizzard"]["secret"])
     url = encode(cls.OAUTH_BASE, query)
     async with aiohttp.ClientSession() as session:
         async with session.get(url) as response:
             if response.status == 200:
                 token = await response.json()
                 cls.ACCESS_TOKEN = token["access_token"]
                 cls.last_updated = datetime.now()
                 return True
             else:
                 return False
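Judging by the lookups in this example (and the warcraftlogs examples below), `utils.get_keys()` with no arguments is assumed to return a nested mapping of per-service credentials. A hypothetical shape, with placeholder values only:

KEYS = {
    "blizzard": {"id": "<client-id>", "secret": "<client-secret>"},  # placeholders, not real values
    "warcraftlogs": {"token": "<api-key>"},
}

def get_keys():
    # Minimal stand-in returning the credentials mapping accessed above.
    return KEYS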
Example #12
def main(argv):
    if len(argv) < 3 or not os.path.exists(argv[1]):
        print("Usage: crawl.py [config] [master|slave]")
        return 1

    # Initialize global settings
    init_settings(argv)

    # Initialize logger
    loglevel = logging.INFO
    if SETTINGS['debug']:
        loglevel = logging.DEBUG

    logformat = ("[%(process)d] %(asctime)s,%(msecs)05.1f %(levelname)s "
                 "(%(funcName)s) %(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=SETTINGS['logfile'],
                        filemode='a')
    print("Writing output to {}, press CTRL+C to terminate..".format(
        SETTINGS['logfile']))

    if SETTINGS['master']:
        REDIS_CONN.set('crawl:master:state', "starting")
        logging.info("Removing all keys")
        redis_pipe = REDIS_CONN.pipeline()
        for key in get_keys(REDIS_CONN, 'node:*'):
            redis_pipe.delete(key)
        for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
            redis_pipe.delete(key)
        redis_pipe.delete('pending')
        redis_pipe.execute()
        set_pending()
        update_excluded_networks()

    # Spawn workers (greenlets) including one worker reserved for cron tasks
    workers = []
    if SETTINGS['master']:
        workers.append(gevent.spawn(cron))
    for _ in xrange(SETTINGS['workers'] - len(workers)):
        workers.append(gevent.spawn(task))
    logging.info("Workers: %d", len(workers))
    gevent.joinall(workers)

    return 0
Example #13
def restart(timestamp):
    """
    Dumps data for the reachable nodes into a JSON file.
    Loads all reachable nodes from Redis into the crawl set.
    Removes keys for all nodes from current crawl.
    Updates excluded networks with current list of bogons.
    Updates number of reachable nodes and most common height in Redis.
    """
    redis_pipe = REDIS_CONN.pipeline()

    nodes = REDIS_CONN.smembers('up')  # Reachable nodes
    redis_pipe.delete('up')

    for node in nodes:
        (address, port, services) = node[5:].split("-", 2)
        redis_pipe.sadd('pending', (address, int(port), int(services)))

    for key in get_keys(REDIS_CONN, 'node:*'):
        redis_pipe.delete(key)

    for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
        redis_pipe.delete(key)

    if CONF['include_checked']:
        checked_nodes = REDIS_CONN.zrangebyscore(
            'check', timestamp - CONF['max_age'], timestamp)
        for node in checked_nodes:
            (address, port, services) = eval(node)
            if is_excluded(address):
                logging.debug("Exclude: %s", address)
                continue
            redis_pipe.sadd('pending', (address, port, services))

    redis_pipe.execute()

    update_excluded_networks()

    reachable_nodes = len(nodes)
    logging.info("Reachable nodes: %d", reachable_nodes)
    REDIS_CONN.lpush('nodes', (timestamp, reachable_nodes))

    height = dump(timestamp, nodes)
    REDIS_CONN.set('height', height)
    logging.info("Height: %d", height)
Example #14
def restart(timestamp):
    """
    Dumps data for the reachable nodes into a JSON file.
    Loads all reachable nodes from Redis into the crawl set.
    Removes keys for all nodes from current crawl.
    Updates excluded networks with current list of bogons.
    Updates number of reachable nodes and most common height in Redis.
    """
    nodes = []  # Reachable nodes

    redis_pipe = REDIS_CONN.pipeline()
    for key in get_keys(REDIS_CONN, 'node:*'):
        state = REDIS_CONN.hget(key, 'state')
        if state == "up":
            nodes.append(key)
            (address, port, services) = key[5:].split("-", 2)
            redis_pipe.sadd('pending', (address, int(port), int(services)))
        redis_pipe.delete(key)
    for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
        redis_pipe.delete(key)

    if CONF['include_checked']:
        checked_nodes = REDIS_CONN.zrangebyscore('check',
                                                 timestamp - CONF['max_age'],
                                                 timestamp)
        for node in checked_nodes:
            (address, port, services) = eval(node)
            if is_excluded(address):
                logging.debug("Exclude: %s", address)
                continue
            redis_pipe.sadd('pending', (address, port, services))

    redis_pipe.execute()

    update_excluded_networks()

    reachable_nodes = len(nodes)
    logging.info("Reachable nodes: %d", reachable_nodes)
    REDIS_CONN.lpush('nodes', (timestamp, reachable_nodes))

    height = dump(timestamp, nodes)
    REDIS_CONN.set('height', height)
    logging.info("Height: %d", height)
Example #15
def get_quote(symbol):
    if symbol.lower() == 'futures':
        return get_index_futures()
    # url = "https://api.iextrading.com/1.0/stock/"+symbol+"/quote?displayPercent=true"
    token = utils.get_keys("iex")['public']
    url = "https://cloud.iexapis.com/stable/stock/%s/quote?displayPercent=true&token=%s" % (
        symbol, token)
    print(url)
    req = Request(url)
    req.headers["User-Agent"] = "windows 10 bot"
    # Load data
    quote = json.loads(urlopen(req).read().decode("utf-8"))
    try:
        change = float(quote['change'])
        quote['ch'] = "%0.2f" % (change)
        quote['chper'] = "%0.2f" % (quote['changePercent'])
        quote['chytd'] = "%0.2f" % (quote['ytdChange'])
    except TypeError:
        change = "n/a"
        quote['ch'] = "n/a"
        quote['chper'] = "n/a"
        quote['chytd'] = "n/a"
    mcap = quote['marketCap']
    if mcap is not None:
        if mcap >= 1e12:
            cap = round(mcap / 1e12, 1)
            cap = str(cap) + "T"
        elif mcap >= 1e9:
            cap = round(mcap / 1e9, 1)
            cap = str(cap) + "B"
        elif mcap >= 1e6:
            cap = round(mcap / 1e6, 1)
            cap = str(cap) + "M"
        elif mcap >= 1e3:
            cap = str(round(mcap / 1e3, 1)) + "k"
        else:
            cap = str(mcap)
    else:
        cap = ""
    if change != "n/a" and change > 0:
        quote['ch'] = "+" + quote['ch']
        quote['chper'] = "+" + quote['chper']
    output = "%s - %s:\n" % (symbol.upper(), quote['companyName'])
    output += "```python\n"
    if quote['latestSource'] != "IEX real time price":
        output += "Realtime price: {iexRealtimePrice}\n".format_map(quote)
    output += "{latestSource} - {latestTime}:\n" \
              "Last price: {latestPrice} ({ch}, {chper}%, {chytd}% YTD)".format_map(quote)
    output = output + " %s mkt cap\n" % cap
    if quote['week52High'] is not None:
        output = output + "52w high: %.02f\t52w low:%.02f" % (
            quote.get('week52High'), quote.get('week52Low'))
    output = output + "```"
    return output
Example #16
 async def get_reports(cls, region, realm, guild_name):
     query = "?api_key={}".format(utils.get_keys()["warcraftlogs"]["token"])
     url = encode(
         "{}/reports/guild/{}/{}/{}".format(cls.BASE, guild_name, realm,
                                            region), query)
     async with aiohttp.ClientSession() as session:
         async with session.get(url) as response:
             if response.status == 200:
                 return await response.json()
             else:
                 return None
Example #17
    async def edit_poll(self, poll_info, message, pollObject):
        poll = self.embed(
            title=poll_info["question"],
            description=utils.to_str(pollObject.perfect_options(utils.get_keys(poll_info["options"]), poll_info)),
            colour=0x0892d0
            # how to improve: make a poll object when poll is called in commands, making the poll obj have an attribute of get_question() and get_option()
        )
        poll.set_footer(text=self.cfg["footer"])


        await message.edit(content=None, embed=poll)
Example #18
def default_recovery(testparams, session, nodes, dropped_groups, indexes):
    """Test case for recovery with no special options."""
    result = copy.deepcopy(recovery_skeleton)

    keys = utils.get_keys(testparams, session, dropped_groups, indexes)

    result["keys"]["consistent"] = keys["consistent"]
    result["keys"]["recovered"] = keys["inconsistent"]

    node = random.choice(nodes)
    result["cmd"] = ["dnet_recovery",
                     "--remote", "{}:{}:2".format(node.host, node.port),
                     "--groups", ','.join([str(g) for g in session.groups]),
                     "dc"]

    return result
Example #19
 async def get_rankings(cls, region, class_name, difficulty, encounter):
     class_id, spec_id = WCL_CLASS_IDS[class_name]
     metric = "hps" if CLASS_SPECS[class_name] in CLASS_HEALS else "dps"
     query = "?api_key={}".format(utils.get_keys()["warcraftlogs"]["token"]) \
             + "&region={}".format(region) \
             + "&class={}".format(class_id) \
             + "&spec={}".format(spec_id) \
             + "&difficulty={}".format(difficulty) \
             + "&metric={}".format(metric)
     url = encode("{}/rankings/encounter/{}".format(cls.BASE, encounter),
                  query)
     async with aiohttp.ClientSession() as session:
         async with session.get(url) as response:
             if response.status == 200:
                 return await response.json()
             else:
                 return None
Example #20
def get_quote(symbol):
    # url = "https://api.iextrading.com/1.0/stock/"+symbol+"/quote?displayPercent=true"
    token = utils.get_keys("iex")['public']
    url = "https://cloud.iexapis.com/stable/stock/%s/quote?displayPercent=true&token=%s" % (
        symbol, token)
    print(url)
    req = Request(url)
    req.headers["User-Agent"] = "windows 10 bot"
    # Load data
    quote = json.loads(urlopen(req).read().decode("utf-8"))
    try:
        change = float(quote['change'])
        ch = "%0.2f" % (change)
        chper = "%0.2f" % (quote['changePercent'])
        chytd = "%0.2f" % (quote['ytdChange'])
    except TypeError:
        change = "n/a"
        ch = "n/a"
        chper = "n/a"
        chytd = "n/a"
    mcap = quote['marketCap']
    if mcap >= 1e12:
        cap = round(mcap / 1e12, 1)
        cap = str(cap) + "T"
    elif mcap >= 1e9:
        cap = round(mcap / 1e9, 1)
        cap = str(cap) + "B"
    elif mcap >= 1e6:
        cap = round(mcap / 1e6, 1)
        cap = str(cap) + "M"
    elif mcap >= 1e3:
        cap = str(round(mcap / 1e3, 1)) + "k"
    else:
        cap = str(mcap)
    if change != "n/a" and change > 0:
        ch = "+" + ch
        chper = "+" + chper
    output = "%s - %s:```python\n Last price: %s (%s, %s%%, %s%% YTD" % \
             (symbol.upper(),quote['companyName'],quote['latestPrice'],ch,chper,chytd)+")"
    output = output + " %s mkt cap\n" % cap
    output = output + " 52w high: %.02f\t52w low:%.02f" % (quote['week52High'],
                                                           quote['week52Low'])
    output = output + "```"
    return output
Example #21
def get_current_weatherbit(text):
    url = "https://api.weatherbit.io/v2.0/current"
    key = utils.get_keys("weatherbit")['key']
    lat,lon,loc = get_lat_lon(text)
    url = url + "?key=%s&units=I&lat=%f&lon=%f" % (key, lat, lon)
    resp = utils.get_json(url)
    if len(resp['data']) > 0:
        data = resp['data'][0]
        obstime = data['ob_time']
        obsdatetime = datetime.strptime(obstime, '%Y-%m-%d %H:%M')
        ret = "```python\n" \
              "Location search result: %s\n" \
              "%s in %s, %s, %s:\n" \
              "  Temp: %.1f (Feels like: %.1f)\n" \
              "  Humidity: %d%%\n" \
              "  Observed: %s at station: %s```" % (loc, data['weather']['description'], data['city_name'], data['state_code'], data['country_code'],
                                     data['temp'], data['app_temp'], data['rh'],
                                     utils.prettydate(obsdatetime, utc=True), data['station'])
        return ret
Example #22
def search_gfys(query, num=0):
    api_url = "https://api.gfycat.com/v1/"
    oauth_url = api_url + "oauth/token"
    search_url = api_url + "me/gfycats/search"
    keys = utils.get_keys('gfycat')
    params = {
        'client_id': keys['client_id'],
        'client_secret': keys['client_secret'],
        'username': keys['username'],
        'password': keys['password'],
        "grant_type": 'password'
    }
    # print(params)
    r = requests.post(oauth_url, data=str(params))
    # print(r.json())
    access_token = r.json()['access_token']
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    url = search_url + "?search_text=%s&count=20" % urllib.parse.quote_plus(
        query)
    s = requests.get(url, headers=headers).json()
    try:
        if len(s['gfycats']) > 0:
            if num == 0:
                return gfy_str(s['gfycats'][0])
            else:
                out = ""
                for i in range(min(num, len(s['gfycats']))):
                    out = "%s%s\n" % (out, gfy_str(s['gfycats'][i],
                                                   embed=False))
                return out
        else:
            print("query: %s\n%s\n\n" % (query, s))
            return "No gfycats found"
    except:
        import traceback
        f = utils.write_to_file(json.dumps(s),
                                "gfycat.json",
                                'errors',
                                prependtimestamp=True)
        print("error: json output written to %s" % f)
        traceback.print_exc()
        return "encountered error"
Example #23
def get_indexable_bills():
    revision = select([bill_revs,bills],bill_revs.c.bill_id==bills.c.id).apply_labels()
    conn = engine.connect()
    result = conn.execute(revision)
    data = result.fetchall()
    for item in data:
        temp = {}
        for key in item.keys()[1:]:
            new_key = utils.get_keys(key)
            
            if new_key == 'url':
                full_path = download(item[key])
                if not full_path:
                    continue

                temp['document'] = pyes.file_to_attachment(full_path)
            else:
                temp[new_key] = item[key]

        yield temp
Example #24
    def remove_vote(self, msg_id, member_name, vote_id):
        #shit this as well rip
        poll = self.load_polls()[msg_id]
        option_keys = utils.get_keys(poll["options"])
        if vote_id == -1 and len(
                poll["options"]
        ) > 11:  #number is the amount of available emojis that connect to one option
            for i in option_keys[10:]:  #the number -1
                if member_name in poll["options"][i]["names"]:
                    poll["options"][i]["votes"] -= 1
                    poll["options"][i]["names"].remove(member_name)
        else:  #i am cringing at how messy this is.. oof
            if member_name in poll["options"][option_keys[vote_id]]["names"]:
                poll["options"][option_keys[vote_id]]["votes"] -= 1
                poll["options"][option_keys[vote_id]]["names"].remove(
                    member_name)

        whole_poll = self.load_polls()
        whole_poll[msg_id] = poll
        self.save_polls(whole_poll)
Example #25
def main(argv):
    if len(argv) < 3 or not os.path.exists(argv[1]):
        print("Usage: ping.py [config] [master|slave]")
        return 1

    # Initialize global conf
    init_conf(argv)

    # Initialize logger
    loglevel = logging.INFO
    if CONF['debug']:
        loglevel = logging.DEBUG

    logformat = ("%(filename)s %(lineno)d  %(levelname)s "
                 "(%(funcName)s) %(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='a')
    print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))

    global REDIS_CONN
    REDIS_CONN = new_redis_conn(db=CONF['db'])

    if CONF['master']:
        redis_pipe = REDIS_CONN.pipeline()
        logging.info("Removing all keys")
        redis_pipe.delete('reachable')
        redis_pipe.delete('open')
        redis_pipe.delete('opendata')
        for key in get_keys(REDIS_CONN, 'ping:cidr:*'):
            logging.info("Deleting %s", key)
            redis_pipe.delete(key)
        redis_pipe.execute()

    # Initialize a pool of workers (greenlets)
    pool = gevent.pool.Pool(CONF['workers'])
    pool.spawn(cron, pool)
    pool.join()

    return 0
Example #26
def main(argv):
    if len(argv) < 3 or not os.path.exists(argv[1]):
        print("Usage: ping.py [config] [master|slave]")
        return 1

    # Initialize global conf
    init_conf(argv)

    # Initialize logger
    loglevel = logging.INFO
    if CONF['debug']:
        loglevel = logging.DEBUG

    logformat = ("[%(process)d] %(asctime)s,%(msecs)05.1f %(levelname)s "
                 "(%(funcName)s) %(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='a')
    print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))

    global REDIS_CONN
    REDIS_CONN = new_redis_conn(db=CONF['db'])

    if CONF['master']:
        redis_pipe = REDIS_CONN.pipeline()
        logging.info("Removing all keys")
        redis_pipe.delete('reachable')
        redis_pipe.delete('open')
        redis_pipe.delete('opendata')
        for key in get_keys(REDIS_CONN, 'ping:cidr:*'):
            redis_pipe.delete(key)
        redis_pipe.execute()

    # Initialize a pool of workers (greenlets)
    pool = gevent.pool.Pool(CONF['workers'])
    pool.spawn(cron, pool)
    pool.join()

    return 0
Example #27
    def add_vote(self, msg_id, member_name, vote_id):
        #oh shit i gotta tidy this dumpsterfire up
        #sorry about the microsoft code below, to whoever is reading this
        poll = self.load_polls()[msg_id]
        option_keys = utils.get_keys(poll["options"])
        if vote_id == -1 and len(
                poll["options"]
        ) > 11:  #number is the amount of available emojis that connect to one option
            for i in option_keys[11:]:  #the number -1
                if member_name not in poll["options"][i]["names"]:
                    poll["options"][i]["votes"] += 1
                    poll["options"][i]["names"].append(member_name)
        else:  #i am cringing at how messy this is.. oof
            if member_name not in poll["options"][
                    option_keys[vote_id]]["names"]:
                poll["options"][option_keys[vote_id]]["votes"] += 1
                poll["options"][option_keys[vote_id]]["names"].append(
                    member_name)

        whole_poll = self.load_polls()
        whole_poll[msg_id] = poll
        self.save_polls(whole_poll)
Example #28
def main(argv):
    if len(argv) < 3 or not os.path.exists(argv[1]):
        print("Usage: ping.py [config] [master|slave]")
        return 1

    # Initialize global settings
    init_settings(argv)

    # Initialize logger
    loglevel = logging.INFO
    if SETTINGS['debug']:
        loglevel = logging.DEBUG

    logformat = ("[%(process)d] %(asctime)s,%(msecs)05.1f %(levelname)s "
                 "(%(funcName)s) %(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=SETTINGS['logfile'],
                        filemode='a')
    print("Writing output to {}, press CTRL+C to terminate..".format(
        SETTINGS['logfile']))

    if SETTINGS['master']:
        redis_pipe = REDIS_CONN.pipeline()
        logging.info("Removing all keys")
        redis_pipe.delete('reachable')
        redis_pipe.delete('open')
        redis_pipe.delete('opendata')
        for key in get_keys(REDIS_CONN, 'ping:cidr:*'):
            redis_pipe.delete(key)
        redis_pipe.execute()

    # Initialize a pool of workers (greenlets)
    pool = gevent.pool.Pool(SETTINGS['workers'])
    pool.spawn(cron, pool)
    pool.join()

    return 0
Example #29
def get_stocks():
    output = "Latest quotes:\n```python\n"
    stocks = []
    token = utils.get_keys("iex")['public']
    for symbol in ["DIA", "VOO", "VTI", "ONEQ", "VXUS"]:
        url = "https://cloud.iexapis.com/stable/stock/%s/quote?displayPercent=true&token=%s" % (
            symbol, token)
        # url = "https://api.iextrading.com/1.0/stock/"+symbol+"/quote?displayPercent=true"
        req = Request(url)
        req.headers["User-Agent"] = "windows 10 bot"
        # Load data
        quote = json.loads(urlopen(req).read().decode("utf-8"))
        stock = dict()
        change = float(quote['change'])
        ch = "%0.2f" % (change)
        chper = "%0.2f" % (quote['changePercent'])
        chytd = "%0.2f" % (quote['ytdChange'])
        if change > 0:
            ch = "+" + ch
            chper = "+" + chper
        stock['symbol'] = symbol.upper()
        stock['price'] = "%.2f" % float(quote['latestPrice'])
        stock['change'] = ch
        stock['%'] = chper
        stock['% YTD'] = chytd
        stock['description'] = quote['companyName']
        stock['high52w'] = quote['week52High']
        stock['low52w'] = quote['week52Low']
        stocks.append(stock)
        # output = output + "%s - %s (%s, %s%%, %s%% YTD) - %s\n" % (symbol.upper(),quote['latestPrice'],ch,chper,chytd,quote['companyName'])


#    output = "%s - %s:```python\n Last price: %s (%s, %s%%, %s%% YTD" % (symbol.upper(),quote['companyName'],quote['latestPrice'],ch,chper,chytd)+")"
    labels = ['symbol', 'price', 'change', '%', '% YTD', 'description']
    left = ['symbol', 'description']
    output = output + utils.format_table(labels, stocks, left_list=left)
    output = output + "```"
    return output
Example #30
    def output_reducer(self, label_key, pix_datas):
        """
        fill the data in to a raster image and return the
        names of the generated images
        """
        # download a template raster
        job = os.environ.get('LT_JOB')

        rast_keys = utils.get_keys(s.IN_RASTS % job)
        tmplt_key = filter(lambda x: s.RAST_SUFFIX in x.key, rast_keys)[0].key
        tmplt_rast = utils.rast_dl(tmplt_key)

        # name raster so it uploads to correct location
        rast_key = s.OUT_RAST_KEYNAME % (job, label_key)
        rast_fn = utils.keyname2filename(rast_key)

        # write data to raster
        utils.data2raster(pix_datas, tmplt_rast, out_fn=rast_fn)
        compressed = utils.compress([rast_fn], '%s.zip' % rast_fn)

        # upload raster
        rast_key = utils.upload([compressed])[0]
        yield label_key, [rast_key.key]
Example #31
    def output_reducer(self, label_key, pix_datas):
        """
        fill the data in to a raster image and return the
        names of the generated images
        """
        # download a template raster
        job = os.environ.get('LT_JOB')

        rast_keys = utils.get_keys(s.IN_RASTS % job)
        tmplt_key = [k.key for k in rast_keys
                     if s.RAST_TRIGGER in k.key][0]
        tmplt_rast = utils.rast_dl(tmplt_key)

        # name raster so it uploads to correct location
        rast_key = s.OUT_RAST_KEYNAME % (job, label_key)
        rast_fn = utils.keyname2filename(rast_key)

        # write data to raster
        utils.data2raster(pix_datas, tmplt_rast, out_fn=rast_fn)

        # upload raster
        rast_key = utils.upload([rast_fn])[0]
        yield label_key, [rast_key.key]
Example #32
def restart(timestamp, start, elapsed):
    """
    Dumps data for the reachable nodes into a JSON file.
    Loads all reachable nodes from Redis into the crawl set.
    Removes keys for all nodes from current crawl.
    Updates excluded networks with current list of bogons.
    Updates number of reachable nodes and most common height in Redis.

    Note: in addition to the JSON dump, data is also exported to Postgres.
    """
    #connection to Postgres Db:
    dbconn = psycopg2.connect(
        user="******",
        password="******",
        host="testpg",
        #host="pg_docker",
        #host='localhost',
        port="5432",
        database="btc_crawl")
    cursor = dbconn.cursor()

    redis_pipe = REDIS_CONN.pipeline()

    #nodes = REDIS_CONN.smembers('up')  # Reachable nodes
    nodes = REDIS_CONN.smembers('up-{}'.format(CONF['BLOCKCHAIN']))
    #redis_pipe.delete('up')
    redis_pipe.delete('up-{}'.format(CONF['BLOCKCHAIN']))

    # Insert all nodes in all_nodes table;
    # Also, count them and make an insertion in master status - reachable

    for node in nodes:
        (address, port, services) = node[5:].split("-", 2)
        redis_pipe.sadd('pending', (address, int(port), int(services)))

        height_key = "height:{}-{}-{}-{}".format(address, port, services,
                                                 CONF['BLOCKCHAIN'])
        height = 0
        try:
            height = int(REDIS_CONN.get(height_key))
        except TypeError:
            logging.warning("%s missing", height_key)
            height = 0

        version = REDIS_CONN.get(node)

        glinks = REDIS_CONN.smembers('glinks-{}-{}'.format(
            address, CONF['BLOCKCHAIN']))
        #JUST A REMINDER:
        #redis_pipe.sadd('glinks-{}-{}'.format(source_ip, CONF['BLOCKCHAIN']), '{}-{}-{}'.format( address, port, now))
        #print("GLINKS LEN for node %s on chain %s: %d" %(address, CONF['BLOCKCHAIN'], len(glinks)))
        for link in glinks:
            l = link.split("-")
            #print(l)
            cursor.execute(
                "INSERT INTO GRAPH_LINKS "
                "(SOURCE_IP, SOURCE_PORT, BLOCKCHAIN, SINK_IP, SINK_PORT, FIRST_SEEN)  "
                "VALUES (%s, %s, %s, %s, %s, %s)  "
                "ON CONFLICT (source_ip, source_port, blockchain, sink_ip, sink_port)  "
                "DO UPDATE SET  "
                "last_seen=EXCLUDED.FIRST_SEEN, "
                "COUNTER_SEEN=COALESCE(graph_links.COUNTER_SEEN,0)+1",
                (str(address), port, CONF['BLOCKCHAIN'], str(l[0]), int(
                    l[1]), int(l[2])))
        dbconn.commit()

        redis_pipe.delete('glinks-{}-{}'.format(address, CONF['BLOCKCHAIN']))
        cursor.execute(
            "INSERT INTO ALL_NODES "
            "(IP_ADDRESS, PORT, SERVICES, HEIGHT, FIRST_SEEN, VERSION, MY_IP, BLOCKCHAIN) "
            "VALUES(%s, %s, %s, %s, %s, %s, %s, %s) "
            "on conflict "
            "(IP_ADDRESS, PORT, services, blockchain) "
            "DO UPDATE set "
            "last_seen=excluded.FIRST_SEEN, "
            "height=excluded.height, "
            "version=excluded.version, "
            "COUNTER_SEEN=COALESCE(ALL_NODES.COUNTER_SEEN,0)+1",
            (str(address), port, str(services), height, timestamp,
             str(version), CONF['MY_IP'], CONF['BLOCKCHAIN']))

    dbconn.commit()

    for key in get_keys(REDIS_CONN, 'node:*'):
        redis_pipe.delete(key)

    for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
        redis_pipe.delete(key)

    if CONF['include_checked']:
        checked_nodes = REDIS_CONN.zrangebyscore('check',
                                                 timestamp - CONF['max_age'],
                                                 timestamp)
        for node in checked_nodes:
            (address, port, services) = eval(node)
            if is_excluded(address):
                logging.debug("Exclude: %s", address)
                continue
            redis_pipe.sadd('pending', (address, port, services))

    redis_pipe.execute()

    update_excluded_networks()

    reachable_nodes = len(nodes)
    logging.info("Reachable nodes: %d", reachable_nodes)
    REDIS_CONN.lpush('nodes', (timestamp, reachable_nodes))

    height = dump(timestamp, nodes)
    REDIS_CONN.set('height-{}'.format(CONF['BLOCKCHAIN']), height)
    logging.info("(%s)_Height: %d" % (CONF['BLOCKCHAIN'], height))

    cursor.execute(
        "INSERT INTO MASTER_STATUS (STATUS, START_TIME, ELAPSED_TIME, HEIGHT, BLOCKCHAIN, REACHABLE_NODES)"
        "VALUES(%s, %s, %s, %s, %s, %s)",
        ('Finished', start, elapsed, height, CONF['BLOCKCHAIN'],
         reachable_nodes))
    dbconn.commit()
    cursor.close()
    dbconn.close()

    CONF['BLOCKCHAIN'] = cyclingBlockchainList.next()
    REDIS_CONN.set('crawl:master:blockchain', CONF['BLOCKCHAIN'])
    logging.info("Change Chain: %s", CONF['BLOCKCHAIN'])
    set_bchain_params()
    set_pending()
Example #33
client = KerberosClient(HDFS_URL)

conn = jdbc.connect("oracle.jdbc.driver.OracleDriver", URL_ORACLE_SERVER,
                    [USER_ORACLE, PASSWD_ORACLE], ORACLE_DRIVER_PATH)
curs = conn.cursor()

df = get_train_data(curs, start_date=START_DATE, end_date=END_DATE)

nb_documents = len(df)
if nb_documents == 0:
    print('No data to train model!')
    sys.exit()
else:
    print('{} documents available to train model.\n'.format(nb_documents))

train_keys = get_keys(df, ID_COLUMN)

print('Preparing data...')
df[TEXT_COLUMN] = df[TEXT_COLUMN].apply(clean_text)

# Labels need to be grouped to be passed to the MultiLabelBinarizer
df = df.groupby(TEXT_COLUMN)\
       .agg(lambda x: set(x))\
       .reset_index()

classes = get_list_of_classes(curs)
mlb = MultiLabelBinarizer(classes)
y = df[LABEL_COLUMN]
y = mlb.fit_transform(y)

NEGATIVE_COLUMN_INDEX = np.where(mlb.classes_ == NEGATIVE_CLASS_VALUE)[0][0]
Example #34
def test_get_keys():
    expected_output = GET_KEYS_RESULT_EXAMPLE
    output = get_keys(EXPANDED_RESULT_EXAMPLE, 'SNCA_DK')
    assert output == expected_output
Example #35
model_dates = sorted(client.list(FORMATTED_HDFS_PATH))
validated_datasets = []
classified_datasets = []

for model_date in model_dates:
    try:
        data_hdfs = get_results_from_hdfs(client,
                                          FORMATTED_HDFS_PATH,
                                          model_date=model_date)
    except BaseException:
        continue
    # Results are stored as a tuple represented as a string
    data_hdfs['MDEC_DK'] = data_hdfs['MDEC_DK'].apply(
        lambda x: ast.literal_eval(x))

    keys = get_keys(data_hdfs, 'SNCA_DK')

    # Only needs the keys that are in the predictions
    data_oracle = get_evaluate_data(curs, keys)

    data_hdfs = expand_results(data_hdfs)

    # TODO: Optimization possible here,
    # gets all keys in query each time before filtering by keys
    sinalid_id_df = get_id_sinalid(curs, keys)
    data_hdfs = pd.merge(data_hdfs, sinalid_id_df, how='left', on='SNCA_DK')

    date = '{}/{}/{}'.format(model_date[6:8], model_date[4:6], model_date[:4])
    data_hdfs['DT_MODELO'] = date
    data_oracle['DT_MODELO'] = date
    data_oracle['IS_VALIDATION'] = True
Example #36
def buildModel(params):
    #assuming we're passed a dictionary of requisite
    #parameters, we can unpack it and construct a model
    #from the inputs.

    #bind the parameters locally for easier reference.
    (
        MTT,  #scalar, Total number of training teams
        ms,  #numerical range of [0..MTT)
        WKS,  #scalar, total number of weeks to schedule
        ws,  #numerical range of [0..WKS)
        types,  #set of unit types
        total_units,  #scalar count of units
        us,  #numerical range [0..total_units)
        unit_type,  #unitid->type
        interval_type,  #unit_type->training-interval
        #all the yearly demands.
        #yearly demands can be trivially translated to weekly
        #demands...
        msn_demand) = utils.get_keys(params, 'MTT', 'ms', 'WKS', 'ws', 'types',
                                     'total_units', 'us', 'unit_type',
                                     'interval_type', 'msn_demand')

    ##    #setting up a minimization.
    sched = LpProblem("Sched", LpMinimize)

    months_weeks = 12.0 / 52

    #scale the weeks to months....
    ws = range(int(WKS * months_weeks))
    #scale the intervals to months....
    interval_type = {
        k: weeks * months_weeks
        for k, weeks in interval_type.items()
    }

    #variables

    print(len(ws))
    ## assign(m,u,w) # binary decision variable determining whether training
    ## team m is assigned to train unit u, during week w of the schedule.

    #This lets us say "we want to create family of variables, across the
    #index m_u_w, with similar properties (in this case they're binary)
    assign = LpVariable.dicts('assign',
                              utils.product(ms, us, ws),
                              lowBound=0.0,
                              upBound=1.0,
                              cat=LpInteger)

    #Objective function: z = assigncost = sum(m,u,w)assign(m,u,w)
    #min z, e.g. minimize the total number of assignments
    #For now, we reflect no higher order notion of cost within the parameters.
    #One could easily envision certain combinations being more desirable,
    #hence the inclusion of a weight parameter
    assigncost = LpVariable('assigncost')
    sched += assigncost == lpSum(assign[(m, u, w)] * 1.0
                                 for (m, u, w) in utils.product(ms, us, ws))

    #helper variables:
    #mtt training events per week
    #events(m,w) = sum(u)assign(m,u,w) forall m in ms,w in ws, u in us
    events = LpVariable.dicts("events", utils.product(ms, ws))
    for (m, w) in events.keys():
        sched += events[m, w] == lpSum((assign[m, u, w] for u in us))

    #only one training event allowed per week
    for (m, w) in events.keys():
        sched += events[m, w] <= 1

    #add a var to track unit training

    trained = LpVariable.dicts("trained", utils.product(us, ws))
    for (u, w) in trained.keys():
        sched += trained[u, w] == lpSum((assign[m, u, w] for m in ms))

    #unit training credits by interval
    #We account for training intervals by
    #recording wait times as a function of
    #max training intervals and previous
    #wait times.  We introduce the linear
    #decision variable, wait(u,w), which
    #indicates how long unit u has been waiting
    #for training at week w.

    #we may want to add in some minimum frequency too, to ensure
    #we don't allow things like month-month training, which is likely
    #infeasible in practice.  Something like, 1/2 the interval is the
    #lower bound.

    wait = LpVariable.dicts("wait", utils.product(us, ws), lowBound=0.0)
    for u in us:
        interval = interval_type[unit_type[u]]
        for w in utils.butlast(ws):
            sched += wait[u, w + 1] == wait[u, w] + 1 - trained[u, w] * (interval + 1)

    #we can train a unit more frequently, but not lapse in training.
    #Wait times cannot exceed mandated training intervals.
    #note: we can add a deviation goal to this later if we want
    #to or need to relax this.  Alternately, include in objective...
    for (u, w) in wait.keys():
        sched += wait[u, w] <= interval_type[unit_type[u]]

    #goal is to minimize the total number of assignments we need.
    #This should induce maximal wait times as well.
    sched.setObjective(assigncost)

    def getsolution():
        res = {}
        for (m, u, w) in assign.keys():
            if value(assign[m, u, w]) > 0.0:
                res[m, u, w] = value(assign[m, u, w])
        return {
            'assigned': res,  #dictvals(assign),
            'trained': dictvals(trained),
            'wait': dictvals(wait),
            'assigncost': value(assigncost)
        }

    return (sched, getsolution)
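The tuple unpacking at the top of buildModel assumes `utils.get_keys(params, ...)` returns the values of the named keys in the order requested (unlike the Redis- and credential-oriented variants above). A minimal sketch of such a helper, under that assumption:

def get_keys(d, *names):
    # Return the values of d for the given key names, in the requested order.
    return tuple(d[name] for name in names)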
Example #37
import logging

from flask import Flask, render_template, redirect, url_for, g, session, request
import settings
import utils
from responses import resposta
from database.objects import Conta, Pagamento, Usuario
from authentication import authenticate, unauthenticate, authenticate_api
from errors import KnownError, MissingParamsError, UserNotLoggedInError

app = Flask(__name__, static_folder=settings.STATIC_FOLDER, template_folder=settings.TEMPLATES_FOLDER)
app.secret_key = utils.get_keys()["APP_SECRET_KEY"]
logger = logging.getLogger(__name__)  # used by handle_server_error below


# import os; print(os.urandom(16))

@app.errorhandler(500)
def handle_server_error(e):
    logger.exception(e)
    return "Um erro ocorreu no servidor"


@app.route("/")
@authenticate
def page_index():
    return redirect(url_for("page_resumo"))


@app.route("/entrar")
@unauthenticate
def page_entrar():
    return render_template("login.html")