Example #1
	def lambda_invoker(payload, shortid):
		try:
			LAMBDA.invoke(
				FunctionName="process_single_replay_upload_stream_handler",
				InvocationType="RequestResponse",  # Triggers synchronous invocation
				Payload=payload,
			)
		finally:
			logger.debug("Lambda completed for %s. Decrementing latch.", shortid)
			countdown_latch.count_down()
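The snippet above assumes a countdown_latch object defined elsewhere; Python's standard library has no count-down latch. Below is a minimal sketch of the kind of helper it presumably relies on, built on threading.Condition (the class and method names are assumptions, not part of the original code):

import threading

class CountDownLatch:
    def __init__(self, count):
        self._count = count
        self._cond = threading.Condition()

    def count_down(self):
        # Decrement the counter and wake waiters once it reaches zero
        with self._cond:
            self._count -= 1
            if self._count <= 0:
                self._cond.notify_all()

    def wait(self):
        # Block until count_down() has been called the initial number of times
        with self._cond:
            while self._count > 0:
                self._cond.wait()

The invoker calls count_down() in a finally block so that a thread waiting on the latch is released even when the invocation raises.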
Example #2
    def schedule_lambda_trigger(self, url, payload):
        from hsreplaynet.utils.aws.clients import LAMBDA

        final_payload = self._serialize_payload(payload)

        LAMBDA.invoke(
            FunctionName="trigger_webhook",
            InvocationType="Event",  # Triggers asynchronous invocation
            Payload=final_payload,
        )
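With InvocationType="Event", Lambda only queues the request and returns immediately; no function result is available. A hedged sketch of verifying that the invocation above was accepted (the error handling is an assumption, not part of the original):

response = LAMBDA.invoke(
    FunctionName="trigger_webhook",
    InvocationType="Event",
    Payload=final_payload,
)
# Asynchronous invocations return HTTP 202 when the event is queued
if response["StatusCode"] != 202:
    raise RuntimeError("Async invocation was not accepted: %r" % response)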
Example #3
    def lambda_invoker(payload, shortid):
        try:
            LAMBDA.invoke(
                FunctionName="process_single_replay_upload_stream_handler",
                InvocationType="RequestResponse",  # Triggers synchronous invocation
                Payload=payload,
            )
        finally:
            logger.info("Lambda completed for %s. Decrementing latch.", shortid)
            countdown_latch.count_down()
Example #4
    def predict_archetype_id(self, deck):
        event = self._to_prediction_event(deck)

        if settings.USE_ARCHETYPE_PREDICTION_LAMBDA or settings.ENV_AWS:
            with influx_timer("callout_to_predict_deck_archetype"):
                response = LAMBDA.invoke(
                    FunctionName="predict_deck_archetype",
                    InvocationType="RequestResponse",  # Synchronous invocation
                    Payload=json.dumps(event),
                )
                payload = response["Payload"].read().decode("utf8")
                if response["StatusCode"] == 200 and "FunctionError" not in response:
                    result = json.loads(payload)
                else:
                    raise RuntimeError(payload)
        else:
            from keras_handler import handler
            result = handler(event, None)

        predicted_class = result["predicted_class"]
        id_encoding = self.one_hot_external_ids(inverse=True)
        predicted_archetype_id = id_encoding[predicted_class]

        if predicted_archetype_id == -1:
            return None
        else:
            return predicted_archetype_id
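Note that for RequestResponse invocations Lambda returns StatusCode 200 even when the function itself raised; failures are signalled through the FunctionError key instead, which is why the snippet checks both. A hedged helper factoring out that pattern (the function name and the choice of RuntimeError are assumptions):

import json

import boto3

LAMBDA = boto3.client("lambda")

def invoke_sync(function_name, event):
    # Invoke a Lambda synchronously and return its decoded JSON result
    response = LAMBDA.invoke(
        FunctionName=function_name,
        InvocationType="RequestResponse",
        Payload=json.dumps(event),
    )
    payload = response["Payload"].read().decode("utf8")
    # A non-200 status or a FunctionError marker both indicate failure
    if response["StatusCode"] != 200 or "FunctionError" in response:
        raise RuntimeError(payload)
    return json.loads(payload)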
Example #5
    def schedule_delivery(self):
        """
		Schedule the webhook for delivery.

		On ENV_AWS, this schedules a Lambda trigger.
		Otherwise, triggers immediately.
		"""
        self.status = WebhookStatus.PENDING
        self.save()

        if settings.WEBHOOKS["USE_LAMBDA"]:
            from hsreplaynet.utils.aws.clients import LAMBDA
            LAMBDA.invoke(
                FunctionName="trigger_webhook",
                InvocationType="Event",
                Payload=json.dumps({"webhook": str(self.pk)}),
            )
        else:
            self.deliver()
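None of the examples show the receiving end of the trigger_webhook invocation. A hypothetical sketch of a handler consuming the payload scheduled above (the handler name and the model import path are assumptions):

def trigger_webhook_handler(event, context):
    from hsreplaynet.webhooks.models import Webhook  # assumed import path
    # The scheduling side sent {"webhook": "<pk>"} as the payload
    webhook = Webhook.objects.get(pk=event["webhook"])
    webhook.deliver()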
Example #6
def execute_query(parameterized_query, run_local=False):
    if run_local:
        _do_execute_query_work(parameterized_query)

    # It's safe to launch multiple attempts to execute the same query,
    # because the dogpile lock will only allow one of them to do the work.
    # But we can save resources by not even launching an attempt if we can
    # see that the lock already exists.
    if not _lock_exists(parameterized_query.cache_key):
        log.info("No lock exists for this query. Will attempt to execute async.")

        if settings.ENV_AWS and settings.PROCESS_REDSHIFT_QUERIES_VIA_LAMBDA:
            # In PROD use Lambdas so the web-servers don't get overloaded
            LAMBDA.invoke(
                FunctionName="execute_redshift_query",
                InvocationType="Event",  # Triggers asynchronous invocation
                Payload=_to_lambda_payload(parameterized_query),
            )
        else:
            _do_execute_query_work(parameterized_query)
    else:
        msg = "An async attempt to run this query is in-flight. Will not launch another."
        log.info(msg)
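The dogpile lock that the comment refers to is implemented outside this function. A hedged sketch of what a Redis-backed _lock_exists check could look like (the key scheme and the Redis connection are assumptions):

import redis

REDIS = redis.StrictRedis()  # assumed connection settings

def _lock_exists(cache_key):
    # An in-flight query holds a lock under a key derived from its cache key
    return REDIS.exists(cache_key + "_lock") > 0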
Example #7
def get_meta_preview(num_items=10):
    from datetime import datetime
    from random import seed, shuffle

    from hsreplaynet.utils.aws.clients import LAMBDA
    from hsreplaynet.utils.aws.redshift import get_redshift_query

    LAMBDA.invoke(FunctionName="do_refresh_meta_preview",
                  InvocationType="Event")

    query = get_redshift_query("archetype_popularity_distribution_stats")

    unique_archetypes = set()
    data = []

    ranks = list(range(0, 21))

    for rank in ranks:
        regions = ["REGION_EU", "REGION_US", "REGION_KR", "REGION_CN"]
        for region in regions:
            parameterized_query = query.build_full_params(
                dict(TimeRange="LAST_1_DAY",
                     GameType="RANKED_STANDARD",
                     RankRange=RANK_MAP[rank],
                     Region=region))

            if not parameterized_query.result_available:
                continue

            response = parameterized_query.response_payload

            archetypes = []
            for class_values in response["series"]["data"].values():
                for value in class_values:
                    if (value["archetype_id"] > 0
                            and value["pct_of_total"] > 0.5
                            and value["total_games"] > 30
                            and value["win_rate"] > 51):
                        archetypes.append(value)
            if not archetypes:
                continue

            archetype = max(archetypes, key=lambda a: a["win_rate"])
            unique_archetypes.add(archetype["archetype_id"])
            data.append({
                "rank": rank,
                "region": region,
                "data": archetype,
                "as_of": response["as_of"]
            })

    results = []
    for archetype_id in unique_archetypes:
        for datum in data:
            if datum["data"]["archetype_id"] == archetype_id:
                results.append(datum)
                # Remove the matched datum so it can't be picked again below
                data.remove(datum)
                break

    hour = datetime.utcnow().replace(minute=0, second=0, microsecond=0)
    # Seed the RNG on the current hour so the preview is stable for an hour
    seed(int(hour.timestamp()))

    if len(results) < num_items:
        shuffle(data)
        # Guard against data running out before we reach num_items
        for i in range(0, min(num_items - len(results), len(data))):
            results.append(data[i])

    shuffle(results)

    return results[:num_items]
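A quick usage sketch; each returned entry carries the rank, region, winning archetype datum, and timestamp assembled above:

preview = get_meta_preview(num_items=5)
for item in preview:
    print(item["rank"], item["region"], item["data"]["archetype_id"])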