def wrapper(arg1, *args, **kwargs):

	if settings.DEBUG:
		# Feature policies are not enforced in development mode
		return view_func(arg1, *args, **kwargs)

	if issubclass(arg1.__class__, View):
		# For class-based views the first argument is the view instance
		# and the request is the first positional argument
		request = args[0]
	else:
		request = arg1

	try:
		feature = Feature.objects.get(name=feature_name)
		is_enabled = feature.enabled_for_user(request.user)
	except Feature.DoesNotExist as e:
		error_handler(e)
		# Missing features are treated as if they were set to
		# FeatureStatus.STAFF_ONLY. This occurs when new feature code is
		# deployed before the DB has been updated.
		is_enabled = request.user.is_staff

	if is_enabled:
		return view_func(arg1, *args, **kwargs)
	else:
		raise PermissionDenied()
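The wrapper above closes over view_func, feature_name and error_handler from an enclosing decorator factory that is not part of this excerpt. A minimal sketch of how such a factory might assemble and apply it; the factory name and the commented-out usage are assumptions, not taken from this code:

from functools import wraps

def view_requires_feature_access(feature_name):
	# Hypothetical factory name; returns a decorator enforcing the feature policy.
	def decorator(view_func):
		@wraps(view_func)
		def wrapper(arg1, *args, **kwargs):
			...  # body as in the example above
		return wrapper
	return decorator

# @view_requires_feature_access("winrates")
# def winrates_view(request):
#     ...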
def feature(context, feature_name):
	"""
	Expected usage is:

	{% feature "winrates" as winrates %}
	{% if winrates.is_enabled %}
		...
		{% if winrates.read_only %} ... {% endif %}
		...
	{% endif %}
	"""
	feature_context = {
		"is_enabled": True,
		"read_only": False
	}

	if settings.DEBUG:
		# Feature policies are not enforced in development mode
		return feature_context

	user = context["request"].user

	try:
		feature = Feature.objects.get(name=feature_name)
	except Feature.DoesNotExist as e:
		error_handler(e)
		# Missing features are treated as if they are set to FeatureStatus.STAFF_ONLY
		# Occurs when new feature code is deployed before the DB is updated
		feature_context["is_enabled"] = user.is_staff
	else:
		feature_context["is_enabled"] = feature.enabled_for_user(user)
		feature_context["read_only"] = feature.read_only

	return feature_context
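For the {% feature "winrates" as winrates %} usage shown in the docstring to resolve, this function has to be registered in a Django template tag library; that registration is not part of the excerpt. A minimal sketch, assuming a standard simple_tag registration with takes_context=True so that context["request"] is available:

from django import template

register = template.Library()

@register.simple_tag(takes_context=True)
def feature(context, feature_name):
	...  # body as above; requires the request to be present in the template context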
Example #4
    def _log_mailchimp_error(e):
        # Log a warning, an InfluxDB metric, and a Sentry alert.
        log.warning("Failed to contact MailChimp API: %s", e)
        influx_metric("mailchimp_request_failures", {"count": 1})
        error_handler(e)
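A helper like this is meant to be called from the except clause around an outbound MailChimp request, so a failure is recorded in the log, InfluxDB and Sentry without aborting the caller. A minimal usage sketch; the client object and its lists.members.create call are illustrative assumptions, not this project's actual API client:

    def add_user_to_mailing_list(client, list_id, email):
        # Hypothetical caller: report MailChimp failures but never let them
        # break the surrounding signup flow.
        try:
            client.lists.members.create(list_id, {
                "email_address": email,
                "status": "subscribed",
            })
        except Exception as e:
            _log_mailchimp_error(e)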
Example #5
def update_player_class_distribution(replay):
    # Record the opposing player's class (and whether it won) in the live
    # per-game-type class distribution; failures are reported but never fatal.
    try:
        game_type_name = BnetGameType(replay.global_game.game_type).name
        distribution = get_player_class_distribution(game_type_name)
        opponent = replay.opposing_player
        player_class = opponent.hero_class_name
        distribution.increment(player_class, win=opponent.won)
    except Exception as e:
        error_handler(e)
Example #6
def sync_premium_accounts_for_stripe_subscription(event, **kwargs):
    if event.customer and event.customer.subscriber:
        user = event.customer.subscriber
        check_for_referrals(user)
        enable_premium_accounts_for_users_in_redshift([user])

        if event.customer.active_subscriptions.count() > 1:
            try:
                raise RuntimeError(
                    "Customer %r (%r) has multiple subscriptions!" %
                    (user, event.customer.stripe_id))
            except Exception as e:
                error_handler(e)
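The raise-and-immediately-catch pattern above is deliberate: raising inside a try block attaches a traceback to the exception, so error_handler (which appears to forward to an error reporter such as Sentry) receives a full report while the webhook handler keeps running. The PayPal webhook handler further down uses the same idiom. A minimal generic sketch of it:

def report_non_fatal(message):
    # Raise so the exception carries a traceback, catch so the caller continues.
    try:
        raise RuntimeError(message)
    except RuntimeError as e:
        error_handler(e)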
Example #7
def process_upload_event(upload_event):
	"""
	Wrapper around do_process_upload_event() to set the event's
	status and error/traceback as needed.
	"""
	upload_event.error = ""
	upload_event.traceback = ""
	if upload_event.status != UploadEventStatus.PROCESSING:
		upload_event.status = UploadEventStatus.PROCESSING
		upload_event.save()

	try:
		replay, do_flush_exporter = do_process_upload_event(upload_event)
	except Exception as e:
		from traceback import format_exc
		upload_event.error = str(e)
		upload_event.traceback = format_exc()
		upload_event.status, reraise = handle_upload_event_exception(e, upload_event)
		metric_fields = {"count": 1}
		if upload_event.game:
			metric_fields["shortid"] = str(upload_event.game.shortid)
		influx_metric(
			"upload_event_exception",
			metric_fields,
			error=upload_event.status.name.lower()
		)
		upload_event.save()
		if reraise:
			raise
		else:
			return
	else:
		upload_event.game = replay
		upload_event.status = UploadEventStatus.SUCCESS
		upload_event.save()

	try:
		with influx_timer("redshift_exporter_flush_duration"):
			do_flush_exporter()
	except Exception as e:
		# Don't fail on this
		error_handler(e)
		influx_metric(
			"flush_redshift_exporter_error",
			{
				"count": 1,
				"error": str(e)
			}
		)

	return replay
Example #8
def capture_played_card_stats(global_game, played_cards, is_friendly_player):
    try:
        elapsed_minutes = elapsed_seconds_from_match_end(global_game) / 60.0
        if not is_friendly_player and elapsed_minutes <= 5.0:
            game_type_name = BnetGameType(global_game.game_type).name
            redis = get_live_stats_redis()
            pipeline = redis.pipeline(transaction=True)
            dist = get_played_cards_distribution(game_type_name,
                                                 redis_client=pipeline)
            for dbf_id in played_cards:
                dist.increment(dbf_id)
            pipeline.execute()
    except Exception as e:
        error_handler(e)
def queue_upload_event_for_processing(upload_event_id):
    """
	This method is used when UploadEvents are initially created.
	However it can also be used to requeue an UploadEvent to be
	processed again if an error was detected downstream that has now been fixed.
	"""
    if settings.IS_RUNNING_LIVE or settings.IS_RUNNING_AS_LAMBDA:
        if "TRACING_REQUEST_ID" in os.environ:
            token = os.environ["TRACING_REQUEST_ID"]
        else:
            # If this was re-queued manually the tracing ID may not be set yet.
            event = UploadEvent.objects.get(id=upload_event_id)
            token = str(event.token.key)

        message = {
            "id": upload_event_id,
            "token": token,
        }

        success = True
        try:
            logger.info("Submitting %r to SNS", message)
            response = sns_client().publish(
                TopicArn=settings.SNS_PROCESS_UPLOAD_EVENT_TOPIC,
                Message=json.dumps({"default": json.dumps(message)}),
                MessageStructure="json")
            logger.info("SNS Response: %s" % str(response))
        except Exception as e:
            logger.error("Exception raised.")
            error_handler(e)
            success = False
        finally:
            influx_metric("queue_upload_event_for_processing",
                          fields={"value": 1},
                          timestamp=now(),
                          tags={
                              "success": success,
                              "is_running_as_lambda": settings.IS_RUNNING_AS_LAMBDA,
                          })
    else:
        logger.info("Processing UploadEvent %r locally", upload_event_id)
        upload = UploadEvent.objects.get(id=upload_event_id)
        upload.process()
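Because the message is published with MessageStructure="json" and a "default" key, SNS delivers the inner JSON document to subscribers. A subscriber processing it might look like the sketch below; the handler name is an assumption, but the Records[...]["Sns"]["Message"] layout is the standard shape of an SNS-triggered Lambda event:

import json

def process_upload_event_handler(event, context):
    # Hypothetical Lambda entry point subscribed to SNS_PROCESS_UPLOAD_EVENT_TOPIC.
    for record in event["Records"]:
        message = json.loads(record["Sns"]["Message"])
        upload_event = UploadEvent.objects.get(id=message["id"])
        process_upload_event(upload_event)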
Example #10
def get_feature_context(user, feature_name):
	feature_context = {
		"name": feature_name,
		"is_enabled": True,
		"read_only": False,
	}

	try:
		feature = Feature.objects.get(name=feature_name)
	except Feature.DoesNotExist as e:
		error_handler(e)
		# Missing features are treated as if they are set to FeatureStatus.STAFF_ONLY
		# Occurs when new feature code is deployed before the DB is updated
		feature_context["is_enabled"] = user.is_staff
	else:
		feature_context["is_enabled"] = feature.enabled_for_user(user)
		feature_context["read_only"] = feature.read_only

	return feature_context
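get_feature_context returns the same structure as the feature template tag above but takes the user directly, which makes it convenient for JSON endpoints. A minimal sketch of that kind of use; the view and the list of feature names are purely illustrative:

from django.http import JsonResponse

def features_api_view(request):
	# Hypothetical endpoint exposing selected feature flags to the frontend.
	names = ["winrates"]
	return JsonResponse({name: get_feature_context(request.user, name) for name in names})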
Example #11
def queue_upload_event_for_processing(upload_event_id):
	"""
	This method is used when UploadEvents are initially created.
	However, it can also be used to requeue an UploadEvent to be
	processed again if an error was detected downstream that has now been fixed.
	"""
	if settings.ENV_PROD:
		if "TRACING_REQUEST_ID" in os.environ:
			token = os.environ["TRACING_REQUEST_ID"]
		else:
			# If this was re-queued manually the tracing ID may not be set yet.
			event = UploadEvent.objects.get(id=upload_event_id)
			token = str(event.shortid)

		message = {
			"id": upload_event_id,
			"token": token
		}

		success = True
		try:
			logger.info("Submitting %r to SNS", message)
			response = aws.publish_sns_message(settings.SNS_PROCESS_UPLOAD_EVENT_TOPIC, message)
			logger.info("SNS Response: %s" % str(response))
		except Exception as e:
			logger.error("Exception raised.")
			error_handler(e)
			success = False
		finally:
			influx_metric(
				"queue_upload_event_for_processing",
				fields={"value": 1},
				timestamp=now(),
				tags={
					"success": success,
					"is_running_as_lambda": settings.ENV_LAMBDA,
				}
			)
	else:
		logger.info("Processing UploadEvent %r locally", upload_event_id)
		upload = UploadEvent.objects.get(id=upload_event_id)
		upload.process()
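As the docstring notes, the same entry point can be used to reprocess events whose earlier failure has since been fixed downstream. A minimal backfill sketch, assuming that previously failed events still carry the non-empty error field set in process_upload_event above:

# Hypothetical manual backfill, e.g. from a Django shell:
for upload_event in UploadEvent.objects.exclude(error=""):
	queue_upload_event_for_processing(upload_event.id)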
Example #12
def on_paypal_webhook_error(sender, instance, **kwargs):
    if instance.exception:
        try:
            raise Exception("%s - %s" % (instance.exception, instance.id))
        except Exception as e:
            error_handler(e)
Example #13
def update_global_players(global_game, entity_tree, meta, upload_event,
                          exporter):
    # Fill the player metadata and objects
    players = {}
    played_cards = exporter.export_played_cards()

    is_spectated_replay = meta.get("spectator_mode", False)
    is_dungeon_run = meta.get("scenario_id", 0) == 2663

    for player in entity_tree.players:
        is_friendly_player = player.player_id == meta["friendly_player"]
        player_meta = meta.get("player%i" % (player.player_id), {})

        decklist_from_meta = player_meta.get("deck")
        decklist_from_replay = [
            c.initial_card_id for c in player.initial_deck if c.card_id
        ]

        meta_decklist_is_superset = _is_decklist_superset(
            decklist_from_meta, decklist_from_replay)

        # We disregard the meta decklist if it does not match the replay decklist.
        # In dungeon runs we always want to use it though, since the initial deck is garbage.
        disregard_meta = not meta_decklist_is_superset and (
            not is_dungeon_run or not is_friendly_player)

        if not decklist_from_meta or is_spectated_replay or disregard_meta:
            # Spectated replays never know more than what is in the replay data,
            # but may contain stale data from the spectator's client's memory,
            # read from before they entered the spectated game.
            decklist = decklist_from_replay
        else:
            decklist = decklist_from_meta

        name, real_name = get_player_names(player)
        player_hero_id = player._hero.card_id

        try:
            deck, _ = Deck.objects.get_or_create_from_id_list(
                decklist,
                hero_id=player_hero_id,
                game_type=global_game.game_type,
                classify_archetype=True)
            log.debug("Prepared deck %i (created=%r)", deck.id, _)
        except IntegrityError as e:
            # This will happen if cards in the deck are not in the DB
            # For example, during a patch release
            influx_metric(
                "replay_deck_create_failure", {
                    "count": 1,
                    "build": meta["build"],
                    "global_game_id": global_game.id,
                    "server_ip": meta.get("server_ip", ""),
                    "upload_ip": upload_event.upload_ip,
                    "error": str(e),
                })
            log.exception("Could not create deck for player %r", player)
            global_game.tainted_decks = True
            # Replace with an empty deck
            deck, _ = Deck.objects.get_or_create_from_id_list([])

        capture_played_card_stats(
            global_game, [c.dbf_id for c in played_cards[player.player_id]],
            is_friendly_player)

        eligible_formats = [FormatType.FT_STANDARD, FormatType.FT_WILD]
        is_eligible_format = global_game.format in eligible_formats

        deck_prediction_enabled = getattr(settings,
                                          "FULL_DECK_PREDICTION_ENABLED", True)
        if deck_prediction_enabled and is_eligible_format and settings.ENV_AWS:
            try:
                player_class = Deck.objects._convert_hero_id_to_player_class(
                    player_hero_id)
                tree = deck_prediction_tree(player_class, global_game.format)
                played_cards_for_player = played_cards[player.player_id]

                # 5 played cards partitions a 14 day window into buckets of ~ 500 or less
                # We can search through ~ 2,000 decks in 100ms so that gives us plenty of headroom
                min_played_cards = tree.max_depth - 1

                # The minimum number of cards we need to know about in the deck list
                # before we attempt to guess the full deck is controlled via settings
                min_observed_cards = settings.DECK_PREDICTION_MINIMUM_CARDS

                played_card_dbfs = [c.dbf_id for c in played_cards_for_player][:min_played_cards]
                played_card_names = [c.name for c in played_cards_for_player][:min_played_cards]

                if deck.size is not None:
                    deck_size = deck.size
                else:
                    deck_size = sum(i.count for i in deck.includes.all())

                has_enough_observed_cards = deck_size >= min_observed_cards
                has_enough_played_cards = len(played_card_dbfs) >= min_played_cards

                if deck_size == 30:
                    tree.observe(deck.id, deck.dbf_map(), played_card_dbfs)
                    # deck_id == proxy_deck_id for complete decks
                    deck.guessed_full_deck = deck
                    deck.save()
                elif has_enough_observed_cards and has_enough_played_cards:
                    res = tree.lookup(
                        deck.dbf_map(),
                        played_card_dbfs,
                    )
                    predicted_deck_id = res.predicted_deck_id

                    fields = {
                        "actual_deck_id": deck.id,
                        "deck_size": deck_size,
                        "game_id": global_game.id,
                        "sequence": "->".join("[%s]" % c for c in played_card_names),
                        "predicted_deck_id": res.predicted_deck_id,
                        "match_attempts": res.match_attempts,
                        "tie": res.tie
                    }

                    if settings.DETAILED_PREDICTION_METRICS:
                        fields["actual_deck"] = repr(deck)

                        if res.predicted_deck_id:
                            predicted_deck = Deck.objects.get(
                                id=res.predicted_deck_id)
                            fields["predicted_deck"] = repr(predicted_deck)

                    if res.node:
                        fields["depth"] = res.node.depth

                        if settings.DETAILED_PREDICTION_METRICS:
                            node_labels = []
                            for path_dbf_id in res.path():
                                if path_dbf_id == "ROOT":
                                    path_str = path_dbf_id
                                else:
                                    path_card = Card.objects.get(
                                        dbf_id=path_dbf_id)
                                    path_str = path_card.name
                                node_labels.append("[%s]" % path_str)
                            fields["node"] = "->".join(node_labels)

                            popularity = res.popularity_distribution.popularity(
                                res.predicted_deck_id)
                            fields["predicted_deck_popularity"] = popularity

                            deck_count = res.popularity_distribution.size()
                            fields["distribution_deck_count"] = deck_count

                            observation_count = res.popularity_distribution.observations()
                            fields["distribution_observation_count"] = observation_count

                    tree_depth = res.node.depth if res.node else None
                    influx_metric(
                        "deck_prediction",
                        fields,
                        missing_cards=30 - deck_size,
                        player_class=CardClass(int(player_class)).name,
                        format=FormatType(int(global_game.format)).name,
                        tree_depth=tree_depth,
                        made_prediction=predicted_deck_id is not None)

                    if predicted_deck_id:
                        deck.guessed_full_deck = Deck.objects.get(
                            id=predicted_deck_id)
                        deck.save()
            except Exception as e:
                error_handler(e)

        # Create the BlizzardAccount first
        defaults = {
            "region": BnetRegion.from_account_hi(player.account_hi),
            "battletag": name,
        }

        if not is_spectated_replay and not player.is_ai and is_friendly_player:
            user = upload_event.token.user if upload_event.token else None
            if user and not user.is_fake:
                # and user.battletag and user.battletag.startswith(player.name):
                defaults["user"] = user

        blizzard_account, created = BlizzardAccount.objects.get_or_create(
            account_hi=player.account_hi,
            account_lo=player.account_lo,
            defaults=defaults)
        if not created and not blizzard_account.user and "user" in defaults:
            # Set BlizzardAccount.user if it's an available claim for the user
            influx_metric(
                "pegasus_account_claimed", {
                    "count": 1,
                    "account": str(blizzard_account.id),
                    "region": str(blizzard_account.region),
                    "account_lo": str(blizzard_account.account_lo),
                    "game": str(global_game.id)
                })
            blizzard_account.user = defaults["user"]
            blizzard_account.save()

        log.debug("Prepared BlizzardAccount %r", blizzard_account)

        # Now create the GlobalGamePlayer object
        common = {
            "game": global_game,
            "player_id": player.player_id,
        }
        defaults = {
            "is_first": player.tags.get(GameTag.FIRST_PLAYER, False),
            "is_ai": player.is_ai,
            "hero_id": player_hero_id,
            "hero_premium": player._hero.tags.get(GameTag.PREMIUM, False),
            "final_state": player.tags.get(GameTag.PLAYSTATE, 0),
            "extra_turns": player.tags.get(GameTag.EXTRA_TURNS_TAKEN_THIS_GAME,
                                           0),
            "deck_list": deck,
        }

        update = {
            "name": name,
            "real_name": real_name,
            "pegasus_account": blizzard_account,
            "rank": player_meta.get("rank"),
            "legend_rank": player_meta.get("legend_rank"),
            "stars": player_meta.get("stars"),
            "wins": player_meta.get("wins"),
            "losses": player_meta.get("losses"),
            "deck_id": player_meta.get("deck_id") or None,
            "cardback_id": player_meta.get("cardback"),
        }

        defaults.update(update)
        game_player, created = GlobalGamePlayer.objects.get_or_create(
            defaults=defaults, **common)
        log.debug("Prepared player %r (%i) (created=%r)", game_player,
                  game_player.id, created)

        if not created:
            # Go through the update dict and update values on the player
            # This gets us extra data we might not have had when the player was first created
            updated = False
            for k, v in update.items():
                if v and getattr(game_player, k) != v:
                    setattr(game_player, k, v)
                    updated = True

            # Skip updating the deck if we already have a bigger one
            # TODO: We should make deck_list nullable and only create it here
            if (game_player.deck_list.size is None
                    or len(decklist) > game_player.deck_list.size):
                # XXX: Maybe we should also check friendly_player_id for good measure
                game_player.deck_list = deck
                updated = True

            if updated:
                log.debug("Saving updated player to the database.")
                game_player.save()

        players[player.player_id] = game_player

    return players
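The helper _is_decklist_superset used above is not included in this excerpt. Judging from the surrounding comments, it decides whether the decklist reported in the upload metadata accounts for every card actually observed in the replay; a minimal sketch under that assumption:

from collections import Counter

def _is_decklist_superset(meta_decklist, replay_decklist):
    # Assumed behaviour: every card id observed in the replay must appear in the
    # meta decklist at least as many times (a multiset superset check).
    meta_counts = Counter(meta_decklist or [])
    replay_counts = Counter(replay_decklist or [])
    return all(meta_counts[card] >= count for card, count in replay_counts.items())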