Example #1
    def predict_archetype_id(self, deck):
        event = self._to_prediction_event(deck)

        if settings.USE_ARCHETYPE_PREDICTION_LAMBDA or settings.ENV_AWS:
            with influx_timer("callout_to_predict_deck_archetype"):
                response = LAMBDA.invoke(
                    FunctionName="predict_deck_archetype",
                    InvocationType="RequestResponse",  # Synchronous invocation
                    Payload=json.dumps(event),
                )
                if response["StatusCode"] == 200 and "FunctionError" not in response:
                    result = json.loads(response["Payload"].read().decode("utf8"))
                else:
                    raise RuntimeError(response["Payload"].read().decode("utf8"))
        else:
            from keras_handler import handler
            result = handler(event, None)

        predicted_class = result["predicted_class"]
        id_encoding = self.one_hot_external_ids(inverse=True)
        predicted_archetype_id = id_encoding[predicted_class]

        if predicted_archetype_id == -1:
            return None
        else:
            return predicted_archetype_id
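
Every example on this page wraps its slow path in influx_timer, but the helper itself is never shown. Below is a minimal sketch of what such a timing context manager could look like, assuming the influxdb-python client; the client configuration and the measurement/field layout are assumptions, not the original implementation.

import time
from contextlib import contextmanager

from influxdb import InfluxDBClient  # assumed client library

client = InfluxDBClient(host="localhost", port=8086, database="metrics")  # placeholder config

@contextmanager
def influx_timer(measurement, **tags):
    # Time the wrapped block; the duration is reported even if the block raises.
    start = time.perf_counter()
    try:
        yield
    finally:
        duration_ms = (time.perf_counter() - start) * 1000
        client.write_points([{
            "measurement": measurement,
            "tags": tags,
            "fields": {"duration_ms": duration_ms},
        }])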
Example #2
def validate_parser(parser, meta):
	# Validate upload
	if len(parser.games) != 1:
		raise ValidationError("Expected exactly 1 game, got %i" % (len(parser.games)))
	packet_tree = parser.games[0]
	with influx_timer("replay_exporter_duration"):
		exporter = RedshiftPublishingExporter(
			packet_tree,
			stream_prefix=fetch_active_stream_prefix()
		).export()

	game = exporter.game

	if len(game.players) != 2:
		raise ValidationError("Expected 2 players, found %i" % (len(game.players)))

	for player in game.players:
		# Set the player's name
		player.name = parser.games[0].manager.get_player_by_id(player.id).name
		if player.name is None:
			# If it's None, this is an unsupported replay.
			log.error("Cannot find player %i name. Replay not supported.", player.player_id)
			raise GameTooShort("The game was too short to parse correctly")

		heroes = list(player.heroes)
		if not heroes:
			raise UnsupportedReplay("No hero found for player %r" % (player.name))
		player._hero = heroes[0]

		try:
			db_hero = Card.objects.get(id=player._hero.card_id)
		except Card.DoesNotExist:
			raise UnsupportedReplay("Hero %r not found." % (player._hero))
		if db_hero.type != CardType.HERO:
			raise ValidationError("%r is not a valid hero." % (player._hero))

	friendly_player_id = packet_tree.export(cls=FriendlyPlayerExporter)
	if friendly_player_id:
		meta["friendly_player"] = friendly_player_id
	elif "friendly_player" not in meta:
		raise ValidationError("Friendly player ID not present at upload and could not guess it.")

	# We ignore "reconnecting" from the API, we only trust the log.
	# if "reconnecting" not in meta:
	# 	meta["reconnecting"] = False
	# There are two ways of identifying a reconnected game:
	# In reconnected games, the initial CREATE_GAME packet contains a STEP and STATE value.
	# In older versions of HS (pre-13xxx), STATE is RUNNING even in the CREATE_GAME packet.
	# Thankfully, looking at STEP is consistent across all versions, so we use that.
	# It will be Step.INVALID if it's NOT a reconnected game.
	meta["reconnecting"] = not not game.initial_step

	# Add the start/end time to meta dict
	meta["start_time"] = packet_tree.start_time
	meta["end_time"] = packet_tree.end_time

	return game, exporter
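
A hedged usage sketch for the validator above: it expects a parsed Power.log (here via hslog's LogParser, which these projects typically use) plus the upload's metadata dict. The file path and meta contents below are placeholders.

from hslog import LogParser

parser = LogParser()
with open("Power.log") as f:  # placeholder path
    parser.read(f)

meta = {"build": 25770}  # placeholder upload metadata
game, exporter = validate_parser(parser, meta)
# On success, meta now also carries friendly_player, reconnecting,
# start_time and end_time.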
Example #3
def get_game_info(global_game, replay):
    player1 = replay.player(1)
    player2 = replay.player(2)

    with influx_timer("generate_redshift_player_decklists_duration"):
        player1_decklist = player1.deck_list.as_dbf_json()
        player2_decklist = player2.deck_list.as_dbf_json()

    if settings.REDSHIFT_USE_MATCH_START_AS_GAME_DATE and global_game.match_start:
        game_date = global_game.match_start.date()
    else:
        game_date = timezone.now().date()

    game_info = {
        "game_id": int(global_game.id),
        "shortid": replay.shortid,
        "game_type": int(global_game.game_type),
        "scenario_id": global_game.scenario_id,
        "ladder_season": global_game.ladder_season,
        "brawl_season": global_game.brawl_season,
        "game_date": game_date,
        "players": {
            "1": {
                "deck_id": int(player1.deck_list.id),
                "archetype_id": get_archetype_id(player1),
                "deck_list": player1_decklist,
                "rank": 0 if player1.legend_rank else player1.rank if player1.rank else -1,
                "legend_rank": player1.legend_rank,
                "full_deck_known": player1.deck_list.size == 30,
            },
            "2": {
                "deck_id": int(player2.deck_list.id),
                "archetype_id": get_archetype_id(player2),
                "deck_list": player2_decklist,
                "rank": 0 if player2.legend_rank else player2.rank if player2.rank else -1,
                "legend_rank": player2.legend_rank,
                "full_deck_known": player2.deck_list.size == 30,
            },
        },
    }

    return game_info
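
The nested conditional that computes "rank" above encodes three cases: legend players are stored as rank 0, ranked players keep their numeric rank, and a missing rank becomes -1. A hypothetical helper (not part of the original module) that makes the same logic explicit:

def encode_rank(player):
    if player.legend_rank:
        # Legend players have no numeric ladder rank; encode them as 0.
        return 0
    # A falsy rank means the rank is unknown; encode it as -1.
    return player.rank if player.rank else -1

With it, each player entry would simply read "rank": encode_rank(player1).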
Example #4
def process_upload_event(upload_event):
	"""
	Wrapper around do_process_upload_event() to set the event's
	status and error/traceback as needed.
	"""
	upload_event.error = ""
	upload_event.traceback = ""
	if upload_event.status != UploadEventStatus.PROCESSING:
		upload_event.status = UploadEventStatus.PROCESSING
		upload_event.save()

	try:
		replay, do_flush_exporter = do_process_upload_event(upload_event)
	except Exception as e:
		from traceback import format_exc
		upload_event.error = str(e)
		upload_event.traceback = format_exc()
		upload_event.status, reraise = handle_upload_event_exception(e, upload_event)
		metric_fields = {"count": 1}
		if upload_event.game:
			metric_fields["shortid"] = str(upload_event.game.shortid)
		influx_metric(
			"upload_event_exception",
			metric_fields,
			error=upload_event.status.name.lower()
		)
		upload_event.save()
		if reraise:
			raise
		else:
			return
	else:
		upload_event.game = replay
		upload_event.status = UploadEventStatus.SUCCESS
		upload_event.save()

	try:
		with influx_timer("redshift_exporter_flush_duration"):
			do_flush_exporter()
	except Exception as e:
		# Don't fail on this
		error_handler(e)
		influx_metric(
			"flush_redshift_exporter_error",
			{
				"count": 1,
				"error": str(e)
			}
		)

	return replay
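
handle_upload_event_exception is only shown being called; from the call site we know it returns a (status, reraise) pair. A speculative sketch of that contract, assuming status names from the same codebase's UploadEventStatus enum; the exact exception-to-status mapping is an assumption.

def handle_upload_event_exception(e, upload_event):
    # Known-bad uploads get a terminal status and are not retried (assumed mapping).
    if isinstance(e, (ValidationError, UnsupportedReplay, GameTooShort)):
        return UploadEventStatus.UNSUPPORTED, False
    # Anything unexpected is a server error; reraise so the worker can retry.
    return UploadEventStatus.SERVER_ERROR, True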
Example #5
    def do_flush_exporter():
        # Only if we were able to claim the advisory lock do we proceed here.
        if can_attempt_redshift_load:
            log.debug("Redshift lock acquired. Will attempt to flush to redshift")

            if should_load_into_redshift(upload_event, global_game):
                with influx_timer("generate_redshift_game_info_duration"):
                    game_info = get_game_info(global_game, replay)
                exporter.set_game_info(game_info)

                try:
                    with influx_timer("flush_exporter_to_firehose_duration"):
                        flush_failures_report = flush_exporter_to_firehose(
                            exporter, records_to_flush=get_records_to_flush()
                        )
                        for target_table, errors in flush_failures_report.items():
                            for error in errors:
                                influx_metric(
                                    "firehose_flush_failure",
                                    {
                                        "stream_name": error["stream_name"],
                                        "error_code": error["error_code"],
                                        "error_message": error["error_message"],
                                        "count": 1,
                                    },
                                    target_table=target_table,
                                )
                except Exception:
                    raise
                else:
                    global_game.loaded_into_redshift = timezone.now()
                    global_game.save()
                    # Okay to release the advisory lock once loaded_into_redshift is set
                    # It will also be released automatically when the lambda exits.
                    global_game.release_redshift_lock()
        else:
            log.debug("Did not acquire redshift lock. Will not flush to redshift")
Example #6
	def do_flush_exporter():
		# Only if we were able to claim the advisory lock do we proceed here.
		if can_attempt_redshift_load:
			log.debug("Redshift lock acquired. Will attempt to flush to redshift")

			if should_load_into_redshift(upload_event, global_game):
				with influx_timer("generate_redshift_game_info_duration"):
					game_info = get_game_info(global_game, replay)
				exporter.set_game_info(game_info)

				try:
					with influx_timer("flush_exporter_to_firehose_duration"):
						flush_exporter_to_firehose(exporter)
				except Exception:
					raise
				else:
					global_game.loaded_into_redshift = timezone.now()
					global_game.save()
					# Okay to release the advisory lock once loaded_into_redshift is set
					# It will also be released automatically when the lambda exits.
					global_game.release_redshift_lock()
		else:
			log.debug("Did not acquire redshift lock. Will not flush to redshift")