Example #1
def labelFaces(jsonInput):
    # VideoIndexer, PersonGroup and _groupId are defined elsewhere in the module.
    faces = jsonInput["summarizedInsights"]["faces"]
    breakdownId = jsonInput["breakdowns"][0]["id"]

    vi = VideoIndexer()
    pg = PersonGroup(_groupId)
    # Throttle identification calls to at most 10 per 60 seconds.
    rate_limiter = ratelimiter.RateLimiter(max_calls=10, period=60)

    for face in faces:
        with rate_limiter:
            results = pg.identifyFace(face["thumbnailFullUrl"])
            if results and results[0]["candidates"]:
                personId = results[0]["candidates"][0]["personId"]
                name = pg.getPerson(personId)["name"]
                if name:
                    vi.labelFace(breakdownId, face["id"], name)
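Example #1 relies on the context-manager form of ratelimiter.RateLimiter: entering the with block consumes one call slot and blocks once max_calls have been made within period seconds. A minimal, self-contained sketch of that pattern, assuming only that documented usage; fetch_page and the URL are placeholders, not part of the example above:

import ratelimiter

# Allow at most 10 calls in any 60-second window, mirroring Example #1.
limiter = ratelimiter.RateLimiter(max_calls=10, period=60)

def fetch_page(url):
    # Placeholder for whatever rate-limited work needs doing.
    print('fetching', url)

for i in range(25):
    with limiter:  # blocks until a call slot is available
        fetch_page('https://example.com/page/{}'.format(i))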
Example #2
import datetime
import sys

from backends.redis import RedisQueue, redis_connect
import ratelimiter


class Request(object):
    def __init__(self, uid):
        self.remote_user = uid


redis = redis_connect()
if not redis:
    print("Read the warning in the documentation before running this script.")
    sys.exit(-1)


limiter = ratelimiter.RateLimiter(backend=RedisQueue(None, redis=redis))

if len(sys.argv) < 2:
    print('Usage: RATELIMITER_REDIS_DB=1 python ./hit.py [uid]')
    print(__doc__)
    sys.exit(-1)

uid = sys.argv[1]
try:
    limiter.hit(Request(uid))
except limiter.Exceeded:
    expiry = limiter.backend.head(uid) + limiter.period
    delta = int((expiry - datetime.datetime.utcnow()).total_seconds())
    print('Enhance your calm. Try again in {} seconds ({} UTC)'.format(delta, expiry))
    sys.exit(-1)
else:
    # Success path: the hit was accepted within the current rate-limit window.
    pass
Example #3
    if args.dataColPath is not None:
        conf.savePath = args.dataColPath
    if args.agentStr is not None:
        conf.agentType = args.agentStr
    if args.serialize is not None:
        conf.serialize = args.serialize
    conf.getAgentConstructor()
    return conf
#
# Conditional decorator.
# https://stackoverflow.com/questions/20850571/decorate-a-function-if-condition-is-true
def maybe_decorate(condition, decorator):
    return decorator if condition else lambda x: x
#
# Rate-limited stepping code, limited to 30 Hz.
@maybe_decorate(args.ratelimit, ratelimiter.RateLimiter(max_calls=30, period=1))
def step(agent, env, vis):
    obs = env.observe()
    agent.giveObservation(obs)
    action = agent.getAction()
    env.runAction(action, obs)
    vis.visualize(obs, action, agent)
#
# Main loop for running the agent.
def loop(conf):
    agent = conf.agentConstructor(conf)
    # vis = visual.Visualizer()
    exitNow = SigHandler.SigHandler()

    with Environment.Environment(conf.envType, conf.getFullSavePath(conf.serialize), conf.serialize) as env:
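Example #3 shows that a RateLimiter instance can also be applied as a decorator (here conditionally, via maybe_decorate, so step() is only throttled when args.ratelimit is set). A minimal sketch of the unconditional decorator form, assuming the decorator support documented for ratelimiter.RateLimiter; tick() is a placeholder, not part of the example above:

import ratelimiter

@ratelimiter.RateLimiter(max_calls=30, period=1)  # at most ~30 calls per second
def tick():
    # Placeholder for the rate-limited work, e.g. one simulation step.
    print('tick')

for _ in range(100):
    tick()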
Example #4
def main(unused):
    twitter = twython.Twython(CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN,
                              OAUTH_TOKEN_SECRET)
    if not twitter:
        logging.fatal('Invalid twitter credentials!')

    if not FLAGS.input_tweet_ids_file:
        logging.fatal('Must specify --input_tweet_ids_file!')

    if not FLAGS.output_tweets_directory:
        logging.fatal('Must specify --output_tweets_directory!')

    if not os.path.isdir(FLAGS.output_tweets_directory):
        os.makedirs(FLAGS.output_tweets_directory)

    # Prevents us from sending too many requests to Twitter too quickly.
    limiter = ratelimiter.RateLimiter(max_calls=FLAGS.rate_limit, period=1.5)

    # Fetches a single Tweet at a time.
    def GetTweet(tweet_id):
        with limiter:
            return twitter.show_status(id=tweet_id)

    # Fetches up to 100 Tweets at a time.
    def GetTweets(ids):
        if len(ids) > 100:
            logging.fatal('Max 100 ids per batch lookup')

        combined_ids = ','.join(ids)

        with limiter:
            return twitter.lookup_status(id=combined_ids)

    # Maps tweet id to the actual data of the tweet (text, timestamp, etc).
    tweet_id_to_tweet_data = dict()

    # Maps tweet id to which file the actual data is in.
    tweet_id_to_tweet_data_filename = dict()

    tweet_mapping_filename = os.path.join(FLAGS.output_tweets_directory,
                                          'tweet_mapping.json')
    if os.path.exists(tweet_mapping_filename):
        with open(tweet_mapping_filename, 'r') as tweet_mapping_file:
            tweet_id_to_tweet_data_filename = json.load(tweet_mapping_file)

    with open(FLAGS.input_tweet_ids_file, 'r') as input_tweet_ids_file:
        tweet_ids_to_fetch = []
        for tweet_id in input_tweet_ids_file:
            tweet_id = tweet_id.strip()

            # Already fetched this Tweet before, don't do it again.
            if tweet_id in tweet_id_to_tweet_data_filename:
                logging.info('Skipping fetch tweet ' + tweet_id)
                continue

            # Add this Tweet to the batch of Tweets to lookup next.
            tweet_ids_to_fetch.append(tweet_id)

            # Look up in batches of 100.
            if len(tweet_ids_to_fetch) < 100:
                continue

            logging.info('Fetching batch of tweets...')

            while True:
                try:
                    tweet_datas = GetTweets(tweet_ids_to_fetch)
                    for tweet_data in tweet_datas:
                        tweet_id = tweet_data['id_str']
                        tweet_id_to_tweet_data[tweet_id] = tweet_data

                    # Record an empty entry for ids that were not returned, so they are not retried.
                    for tweet_id_to_fetch in tweet_ids_to_fetch:
                        if tweet_id_to_fetch not in tweet_id_to_tweet_data:
                            tweet_id_to_tweet_data[tweet_id_to_fetch] = {}

                    tweet_ids_to_fetch = []
                    break
                except twython.TwythonRateLimitError as err:
                    logging.info(str(err) + ' ... trying again')
                    continue
                except twython.TwythonError as err:
                    logging.fatal(err)

            # Dump the Tweets to a file in batches.
            if len(tweet_id_to_tweet_data) >= FLAGS.output_tweets_file_size:
                tweet_data_basename = GetNextBasename(
                    FLAGS.output_tweets_directory)
                tweet_data_filename = os.path.join(
                    FLAGS.output_tweets_directory, tweet_data_basename)
                with open(tweet_data_filename, 'w') as tweet_data_file:
                    json.dump(tweet_id_to_tweet_data, tweet_data_file)

                for tweet_id in tweet_id_to_tweet_data:
                    tweet_id_to_tweet_data_filename[
                        tweet_id] = tweet_data_basename

                with open(tweet_mapping_filename, 'w') as tweet_mapping_file:
                    json.dump(tweet_id_to_tweet_data_filename,
                              tweet_mapping_file)

                tweet_id_to_tweet_data = dict()
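Example #4 simply blocks inside "with limiter:" whenever the per-period budget is spent. For visibility into those pauses, the ratelimiter package also documents a callback hook that is invoked before the limiter sleeps; a short sketch under that assumption (the constants and log message are illustrative only):

import time
import ratelimiter

def on_limit(until):
    # Invoked when the limiter is about to sleep; 'until' is an absolute timestamp.
    print('Rate limited, resuming in {:.1f}s'.format(until - time.time()))

limiter = ratelimiter.RateLimiter(max_calls=5, period=1.5, callback=on_limit)

for i in range(12):
    with limiter:
        print('request', i)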
Example #5
            if team == Team.ALL:
                continue
            jobs.append({
                "projection_type": projection_type,
                "stat_type": StatType.BATTING,
                "team": team,
                "ros": REST_OF_SEASON,
            })
            jobs.append({
                "projection_type": projection_type,
                "stat_type": StatType.PITCHING,
                "team": team,
                "ros": REST_OF_SEASON,
            })

    rate_limiter = ratelimiter.RateLimiter(max_calls=MAX_REQUESTS_PER_SECOND,
                                           period=1)
    bar = progressbar.ProgressBar(max_value=len(jobs)).start()
    for job in jobs:
        with rate_limiter:
            result = fangraphs.get_projections(**job)
            projections[job["stat_type"]][job["projection_type"]].append(
                result)
            bar.update(bar.value + 1)
    bar.finish()

    for stat_type in StatType:
        for projection_type in ProjectionType:
            utils.write_projections(
                projections=pd.concat(
                    projections[stat_type][projection_type]).drop_duplicates(),
                projections_dir=PROJECTIONS_DIR,