def on_startup():
    if Config.data and Config.data.get("subscribed_witnesses", None) is not None:
        raise Exception(
            "Please update your config.yaml to match the new format, subscribed_witnesses is outdated"
        )
    Config.get("subscriptions", "mask_providers")
    try:
        IncidentsNormalizer.use_chain(
            Config.get("bookiesports_chain", default="beatrice"),
            not_found_file=os.path.join(
                Config.get("dump_folder"), "missing_bookiesports_entries.txt"))
    except AttributeError:
        IncidentsNormalizer.DEFAULT_CHAIN = Config.get(
            "bookiesports_chain", default="beatrice")
        IncidentsNormalizer.NOT_FOUND_FILE = os.path.join(
            Config.get("dump_folder"), "missing_bookiesports_entries.txt")
    logging.getLogger(__name__).debug(
        "Incidents normalizer set for chain " + IncidentsNormalizer.DEFAULT_CHAIN +
        ", using " + str(IncidentsNormalizer.NOT_FOUND_FILE) + " for missing entries")

    providers = list(Config.get("providers", default={}).keys())
    for key in providers:
        # Check for and load optional per-provider config files
        _config_file = Config.get("providers", key).get("config_file", None)
        if _config_file is None:
            _config_file = "config-" + key + ".yaml"
        else:
            _config_file = _config_file + ".yaml"
        if os.path.isfile(_config_file):
            Config.load(_config_file, True)
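
# For orientation, a hypothetical minimal config.yaml matching what
# on_startup() reads above. The keys are inferred from the Config.get calls;
# the values and the provider name are assumptions, not taken from this repo:
#
#     bookiesports_chain: beatrice
#     dump_folder: dump
#     subscriptions:
#         mask_providers: []
#     providers:
#         someprovider:
#             config_file: config-someprovider  # optional, ".yaml" is appended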
def _process_event_json(self, source_json):
    incidents = []
    for _event in source_json["results"]:
        try:
            incident_id = {
                "sport": _id_to_sport(_event["sport_id"])["name"],
                "home": _event["home"]["name"],
                "away": _event["away"]["name"],
                "start_time": date_to_string(_event["time"]),
                "event_group_name": _event["league"]["name"],
            }
        except KeyError:
            # Wrong format? Skip this event; otherwise incident_id would be
            # undefined (or stale from the previous iteration) below
            continue
        call = self._mapStatus(_event["time_status"])
        arguments = self._getArgument(call, incident_id, _event["id"])
        incidents.append({
            "id": incident_id,
            "call": call,
            "arguments": arguments,
            "provider_info": self._get_provider_info(_event)
        })
        # guard with "or ''" so a missing "api" key doesn't raise a TypeError
        if call == "create" and "etfa" in (source_json.get("api") or ""):
            # resolve dbmgs
            betfair_event = _get_event("etfa", _event["id"])
            print(betfair_event)
        elif call == "finish":
            call = "result"
            arguments = self._getArgument(call, incident_id, _event["id"])
            incidents.append({
                "id": incident_id,
                "call": call,
                "arguments": arguments,
                "provider_info": self._get_provider_info(_event)
            })
    # It's unfeasible to maintain a list of applicable leagues. We attempt
    # normalization and only forward incidents that can be normalized (the
    # witness will still re-normalize, as dataproxies normally don't do that)
    normalized = []
    normalizer = IncidentsNormalizer(chain=_get("bookiesports_chain"))
    for incident in incidents:
        try:
            normalized.append(normalizer.normalize(incident, True))
        except NotNormalizableException as e:
            logging.getLogger(__name__).debug(
                str(e.__class__.__name__) + ": " + str(incident["id"]))
    return normalized
def ensure_incident_format(raw_dict):
    """ Reformats dates, validates the json and creates the unique_string
        identifier
    """
    reformat_datetimes(raw_dict)
    IncidentValidator().validate_incident(raw_dict)
    # Normalize before creating the unique_string
    formatted_dict = IncidentsNormalizer().normalize(raw_dict)
    formatted_dict["unique_string"] = incident_to_string(formatted_dict)
    return formatted_dict
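
# A minimal usage sketch for ensure_incident_format(). The payload below is
# hypothetical and only mirrors the "id"/"call"/"arguments"/"provider_info"
# structure used by the providers above, so it may not satisfy every rule of
# the bos-incidents schema.
raw = {
    "timestamp": "2018-03-10T00:05:00Z",
    "id": {
        "sport": "Basketball",
        "home": "Atlanta Hawks",
        "away": "Boston Celtics",
        "start_time": "2018-03-10T00:00:00Z",
        "event_group_name": "NBA",
    },
    "call": "create",
    "arguments": {"season": "2017/2018"},
    "provider_info": {"name": "someprovider"},
}
incident = ensure_incident_format(raw)
print(incident["unique_string"])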
def __init__(self, message, lookup_instance, config, **kwargs):
    self.message = message
    self.lookup = lookup_instance
    self.config = config

    # Obtain data for unique key
    # The "id" contains everything we need to identify an individual
    # event, which itself contains at least the sport and the teams

    # Get the id (internally used only)
    self.id = message.get("id")

    # Incident Storage
    if "storage" in kwargs and kwargs["storage"]:
        self.storage = kwargs["storage"]
    else:
        self.storage = factory.get_incident_storage(
            kwargs.get("mongodb", None),
            purge=kwargs.get("purge", False))

    # Normalize incident
    self.normalizer = IncidentsNormalizer(
        chain=lookup_instance._network_name)
    self.normalize(message)

    # Try to obtain the sport
    self.sport = LookupSport(self.id.get("sport"))

    # Given the sport, try to obtain the league (event group)
    self.eventgroup = LookupEventGroup(
        self.sport, self.id.get("event_group_name"))

    self.event = None    # Will be filled in after receiving a trigger

    # Get teams from query
    self.teams = [self.id.get("home"), self.id.get("away")]

    # Get start time from query
    self.start_time = parse(self.id.get("start_time", ""))
network="unittests", sports_folder=os.path.join(os.path.dirname(os.path.realpath(__file__)), "bookiesports"), ) lookup.set_approving_account("init0") lookup.set_proposing_account("init0") # ensure lookup isn't broadcasting either assert lookup.blockchain.nobroadcast # Storage storage = factory.get_incident_storage("mongodbtest", purge=True) # Incident validator validator = IncidentValidator() normalizer = IncidentsNormalizer(chain="unittests") def reset_storage(): return factory.get_incident_storage("mongodbtest", purge=True) def lookup_test_event(id): return LookupEvent( **{ "id": "1.22.2242", "teams": ["Atlanta Hawks", "Boston Celtics"], "eventgroup_identifier": "NBA", "sport_identifier": "Basketball", "season": { "en": "2017-00-00"
def trigger():
    """ This endpoint is used to submit data to the queue so we can process
        it asynchronously to the web requests. Web requests should be
        answered quickly, while the processing might take more time.

        The endpoint opens an API according to the ``--port`` and ``--host``
        settings on launch. This API provides an endpoint on ``/trigger``
        and consumes POST messages with a JSON formatted body. The body is
        validated against the incident schema defined in bos-incidents.

        .. note:: The trigger endpoint already stores the incidents (through
                  bos-incidents) to allow later replaying.
    """
    if request.method == "POST":

        # Don't bother with requests from IPs that are not whitelisted
        if request.remote_addr not in api_whitelist and \
                "0.0.0.0" not in api_whitelist:
            return "Your IP address is not allowed to post here!", 403

        # Obtain message from request body
        incident = request.get_json()

        # Ensure it is valid json that matches the incident schema
        try:
            validator.validate_incident(incident)
        except InvalidIncidentFormatException:
            log.error("Received invalid request: {}".format(str(incident)))
            return "Invalid data format", 400

        # Only accept normalizable incidents
        normalizer = IncidentsNormalizer(chain=config.get("network", "beatrice"))
        try:
            incident = normalizer.normalize(incident, True)
        except NotNormalizableException:
            log.warning(
                "Received not normalizable incident, discarding {}".format(
                    str(incident)))
            return "Not normalized incident", 400

        try:
            # FIXME, remove copy()
            storage.insert_incident(incident.copy())
        except exceptions.DuplicateIncidentException:
            # We merely pass here since we have the incident already;
            # alerting anyone won't do anything
            # traceback.print_exc()
            pass

        # Send incident to redis
        with Connection(redis):
            q = Queue(connection=redis)
            job = q.enqueue(
                work.process,
                args=(incident,),
                kwargs=dict(
                    proposer=app.config.get("BOOKIE_PROPOSER"),
                    approver=app.config.get("BOOKIE_APPROVER"),
                ),
            )
            log.info("Forwarded incident {} to worker via redis".format(
                str(incident)))

            # In case we "proposed" something, we also need to approve;
            # we do that by queuing an approve job
            approve_job = q.enqueue(
                work.approve,
                args=(),
                kwargs=dict(
                    proposer=app.config.get("BOOKIE_PROPOSER"),
                    approver=app.config.get("BOOKIE_APPROVER"),
                ),
            )

        # Return message with id
        return jsonify(dict(
            result="processing",
            message=incident,
            id=str(job.id),
            id_approve=str(approve_job.id),
        ))

    return "", 503
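
# A minimal sketch of how a data proxy might feed this endpoint; the host,
# port, and payload are assumptions, not values taken from this codebase.
import requests

incident = {
    "timestamp": "2018-03-10T00:05:00Z",
    "id": {
        "sport": "Basketball",
        "home": "Atlanta Hawks",
        "away": "Boston Celtics",
        "start_time": "2018-03-10T00:00:00Z",
        "event_group_name": "NBA",
    },
    "call": "create",
    "arguments": {"season": "2017/2018"},
    "provider_info": {"name": "someprovider"},
}
response = requests.post("http://localhost:8010/trigger", json=incident)
print(response.status_code, response.text)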
# node.unlock(config["password"])
ppy = node.get_node()
rpc = ppy.rpc

INCIDENT_CALLS = [
    "create",
    "in_progress",
    "finish",
    "result",
    "canceled",
    "dynamic_bmgs",
]

# normalizer = IncidentsNormalizer(chain="elizabeth")
normalizer = IncidentsNormalizer(chain=chainName)
normalize = normalizer.normalize


def substitution(teams, scheme):
    class Teams:
        # effectively the raw team names (the split/join round-trip is a no-op)
        home = " ".join([x for x in teams[0].split(" ")])
        away = " ".join([x for x in teams[1].split(" ")])

    ret = dict()
    for lang, name in scheme.items():
        ret[lang] = name.format(teams=Teams)
    return ret["en"]
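
# A hypothetical call to substitution(); the scheme dict is an assumption
# that only mirrors the "{teams.home}"/"{teams.away}" placeholders implied
# by name.format(teams=Teams) above.
example_scheme = {
    "en": "{teams.home} @ {teams.away}",
    "de": "{teams.home} gegen {teams.away}",
}
print(substitution(["Atlanta Hawks", "Boston Celtics"], example_scheme))
# -> Atlanta Hawks @ Boston Celtics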
class Trigger:
    """ This class is used to deal with Messages that have been received by
        any means and need processing thru bookied-sync
    """
    def __init__(self, message, lookup_instance, config, clear_caches=True, **kwargs):
        self.message = message
        self.lookup = lookup_instance
        self.config = config

        # Obtain data for unique key
        # The "id" contains everything we need to identify an individual
        # event, which itself contains at least the sport and the teams

        # Get the id (internally used only)
        self.id = message.get("id")

        # Incident Storage
        if "storage" in kwargs and kwargs["storage"]:
            self.storage = kwargs["storage"]
        else:
            self.storage = factory.get_incident_storage(
                kwargs.get("mongodb", None),
                purge=kwargs.get("purge", False))

        # Normalize incident
        self.normalizer = IncidentsNormalizer(
            chain=lookup_instance._network_name)
        self.normalize(message)

        # Let's clear the caches for Proposals and Events.
        # We need to do so because of the internal pypeerplays cache.
        # The cache reduces the API calls to the backend and thus latency.
        # However, to be sure the event hasn't been created since before the
        # cache has expired, we force a refresh from the blockchain.
        if clear_caches:
            Events.clear_cache()
            Proposals.clear_cache()

        # Try to obtain the sport
        self.sport = LookupSport(self.id.get("sport"))

        # Given the sport, try to obtain the league (event group)
        self.eventgroup = LookupEventGroup(
            self.sport, self.id.get("event_group_name"))

        self.event = None    # Will be filled in after receiving a trigger

        # Get teams from query
        self.teams = [self.id.get("home"), self.id.get("away")]

        # Get start time from query
        self.start_time = parse(self.id.get("start_time", ""))

    @property
    def incident(self):
        """ Return the incident message
        """
        return self.message

    @property
    def call(self):
        """ Return the trigger/call name
        """
        return self.message.get("call").lower()

    def getEvent(self):
        """ Get an event from the lookup
        """
        event = LookupEvent.find_event(
            teams=self.teams,
            start_time=self.start_time,
            eventgroup_identifier=self.eventgroup.identifier,
            sport_identifier=self.sport.identifier,
        )

        if event:
            # Store in object
            self.event = event

            eventgroup = event.eventgroup
            if not eventgroup.is_open:
                log.debug("Skipping not-yet-open BMG: {}".format(
                    str(eventgroup.identifier)))
                raise exceptions.EventGroupClosedException
            return event
        else:
            raise exceptions.EventDoesNotExistException

    def trigger(self, *args, **kwargs):
        """ Forward a trigger to the actual trigger implementation
            in the subclass
        """
        tic = time.time()

        # Test if I am supposed to proceed with this
        self.testConditions()

        # Execute the actual trigger
        status = self._trigger(*args, **kwargs)
        log.debug("trigger.py, def trigger, after _trigger done, time: {}".format(
            time.time() - tic))

        # If a proposal is going to be published, let's enable blocking so
        # we can obtain the proposal id.
        # FIXME: This can be optimized for speed by putting this into an
        # independent thread or throwing it into the redis queue so the
        # worker can continue with other incidents
        if self.lookup.proposal_operations():
            self.lookup.set_blocking(True)
        else:
            self.lookup.set_blocking(False)

        # Broadcast that stuff
        transactions = self.broadcast()

        # Obtain data from the blockchain
        proposal_ids = [x.get_proposal_id() for x in transactions]
        actions = [x.action() for x in transactions]

        # unless _trigger raises an exception
        if status == "midway":
            self.set_incident_status(
                status_name="midway",
                status_add=dict(proposals=proposal_ids, actions=actions))
        else:
            self.set_incident_status(
                status_name="done",
                status_add=dict(proposals=proposal_ids, actions=actions))
        return transactions

    def normalize(self, *args, **kwargs):
        try:
            message = self.normalizer.normalize(
                self.message, errorIfNotFound=True)
        except NotNormalizableException as e:
            self.set_incident_status(status_name="not normalizable")
            raise e
        if message != self.message:
            try:
                self.storage.delete_incident(self.message)
            except Exception:
                pass
            self.message = message
            self.store_incident()

    def get_all_incidents(self):
        """ Let's get all the incidents for an event
        """
        return self.storage.get_event_by_id(self.message)

    def set_incident_status(self, **kwargs):
        """ Set the status of an **event** in the incidents storage
        """
        try:
            self.storage.update_event_status_by_id(
                self.id, call=self.call, **kwargs)
        except Exception:
            pass

    def get_incident_status(self):
        """ Get the current status of an **event** in the incidents storage
        """
        event = self.storage.get_event_by_id(self.id, False)
        return event.get(self.call, {}).get("status", None)

    def broadcast(self):
        """ This method broadcasts the updates to the chain
        """
        # try:
        return self.lookup.broadcast()
        # except UnhandledRPCError as e:
        #     if "Proposed operation is already pending for approval" in str(e):
        #         raise exceptions.ProposalAlreadyExistsOrIsPendingException()
        #     else:
        #         raise e

    def store_incident(self):
        """ This call stores the incident in the incident-store
            (bos-incidents)
        """
        self.storage.insert_incident(self.message)

    def get_onchain_bmgs(self):
        """ Returns a list of BettingMarketGroups of the event that
            already exist on the blockchain
        """
        if not self.event:
            self.event = self.getEvent()
        if not self.event:
            return []
        return BettingMarketGroups(self.event.id)

    # Methods that need to be overwritten by the actual trigger

    def testConditions(self, *args, **kwargs):
        """ Test if we can actually call the trigger. This method is called
            from trigger() and is supposed to be overwritten by the actual
            trigger.
        """
        pass

    def _trigger(self, *args, **kwargs):
        """ To be implemented by the sub class
        """
        pass
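
# A minimal sketch of a Trigger subclass; NoopTrigger is hypothetical and
# only illustrates the two hooks the base class expects to be overwritten.
class NoopTrigger(Trigger):
    def testConditions(self, *args, **kwargs):
        # raise e.g. exceptions.EventGroupClosedException here to abort;
        # this no-op version always proceeds
        pass

    def _trigger(self, *args, **kwargs):
        # trigger() inspects the return value: "midway" marks the incident
        # "midway", anything else marks it "done"
        return "done"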
def normalize_for_witness(self, validated_incident):
    return IncidentsNormalizer().normalize(validated_incident)