Beispiel #1
0
def load_replay_into_redshift(event, context):
    """Lambda handler that loads a replay from S3 into Redshift.

    Expected ``event`` keys:
        replay_bucket -- S3 bucket holding the gzip-compressed replay XML
        replay_key    -- S3 object key of the replay
        metadata      -- JSON string; must contain a "game_id" key

    On success, stamps ``GlobalGame.loaded_into_redshift``. On failure, logs
    the metadata payload (with traceback) and re-raises so the Lambda retries.
    """
    logger = logging.getLogger("hsreplaynet.lambdas.load_replay_into_redshift")
    replay_bucket = event["replay_bucket"]
    replay_key = event["replay_key"]
    metadata_str = event["metadata"]

    obj = S3.get_object(Bucket=replay_bucket, Key=replay_key)
    body_data = obj["Body"].read()
    # wbits = 15 + 32 tells zlib to auto-detect a gzip or zlib header.
    log_str = decompress(body_data, 15 + 32)
    # Wrap the decompressed XML in a seekable file-like object for the parser.
    out = BytesIO(log_str)

    try:
        replay = HSReplayDocument.from_xml_file(out)
        metadata = json.loads(metadata_str)

        global_game_id = metadata["game_id"]
        from hsreplaynet.games.models import GlobalGame

        global_game = GlobalGame.objects.get(id=global_game_id)

        packet_tree = replay.to_packet_tree()[0]
        exporter = RedshiftPublishingExporter(packet_tree).export()
        exporter.set_game_info(metadata)
        flush_exporter_to_firehose(exporter)
    except Exception:
        # logger.exception records the traceback alongside the metadata,
        # so the failing replay can be identified and replayed later.
        logger.exception(metadata_str)
        raise
    else:
        # NOTE(review): naive datetime; other flush paths in this codebase
        # use timezone.now() — confirm which the column expects.
        global_game.loaded_into_redshift = datetime.now()
        global_game.save()
Beispiel #2
0
	def do_flush_exporter():
		"""Flush the exporter to Firehose, but only while holding the Redshift
		advisory lock; stamp and save the game on success."""
		# Only if we were able to claim the advisory lock do we proceed here.
		if can_attempt_redshift_load:
			log.debug("Redshift lock acquired. Will attempt to flush to redshift")

			if should_load_into_redshift(upload_event, global_game):
				with influx_timer("generate_redshift_game_info_duration"):
					game_info = get_game_info(global_game, replay)
				exporter.set_game_info(game_info)

				try:
					with influx_timer("flush_exporter_to_firehose_duration"):
						flush_exporter_to_firehose(exporter)
				except Exception:
					# Re-raise so the failure stays visible to the caller; the
					# else-branch below only runs after a successful flush.
					raise
				else:
					global_game.loaded_into_redshift = timezone.now()
					global_game.save()
					# Okay to release the advisory lock once loaded_into_redshift is set
					# It will also be released automatically when the lambda exits.
					global_game.release_redshift_lock()
		else:
			log.debug("Did not acquire redshift lock. Will not flush to redshift")
Beispiel #3
0
    def do_flush_exporter():
        """Flush exporter records to Firehose when the Redshift advisory lock
        is held, reporting per-record failures and stamping the game on
        success."""
        # Guard: without the advisory lock we must not touch Redshift.
        if not can_attempt_redshift_load:
            log.debug(
                "Did not acquire redshift lock. Will not flush to redshift")
            return

        log.debug(
            "Redshift lock acquired. Will attempt to flush to redshift")

        if not should_load_into_redshift(upload_event, global_game):
            return

        with influx_timer("generate_redshift_game_info_duration"):
            game_info = get_game_info(global_game, replay)
        exporter.set_game_info(game_info)

        try:
            with influx_timer("flush_exporter_to_firehose_duration"):
                failure_report = flush_exporter_to_firehose(
                    exporter, records_to_flush=get_records_to_flush())
                # Emit one metric per individual flush failure, tagged with
                # the table the record was destined for.
                for table, table_errors in failure_report.items():
                    for err in table_errors:
                        influx_metric(
                            "firehose_flush_failure",
                            {
                                "stream_name": err["stream_name"],
                                "error_code": err["error_code"],
                                "error_message": err["error_message"],
                                "count": 1
                            },
                            target_table=table)
        except Exception:
            raise
        else:
            global_game.loaded_into_redshift = timezone.now()
            global_game.save()
            # Okay to release the advisory lock once loaded_into_redshift is set
            # It will also be released automatically when the lambda exits.
            global_game.release_redshift_lock()