Example #1
    def get_statistics_string(self, storage=None):
        """ Build a human-readable statistics overview of all stored events """
        if storage is None:
            storage = factory.get_incident_storage()

        events = storage.get_events()
        providers = []
        for idx, event in enumerate(events):
            for incident in storage.get_incidents_from_event(event):
                if incident["provider_info"]["name"] not in providers:
                    providers.append(incident["provider_info"]["name"])
        providers = sorted(providers)

        buffer = ";".join(storage._collection_names.values()) + "\n"

        def list_all(event, call, buffer, providers, one_line=None):
            if one_line is None:
                if event.get(call, None) is not None:
                    buffer = buffer + ("    " + call)
                    if event[call].get("status", None) is not None:
                        buffer = buffer + (
                            "  " + json.dumps(event[call]["status"])) + "\n"
                    if event[call].get("incidents", None) is not None:
                        for incident in event[call]["incidents"]:
                            buffer = buffer + (
                                "        " +
                                incident["provider_info"]["pushed"] + " - " +
                                incident["provider_info"]["name"] + " - " +
                                json.dumps(incident["arguments"])) + "\n"
            else:
                providers_found = []
                for unused in providers:
                    providers_found.append("-")
                if event.get(call, None) is not None:
                    if event[call].get("incidents", None) is not None:
                        for idx, provider in enumerate(providers):
                            for incident in event[call]["incidents"]:
                                if provider in incident["provider_info"][
                                        "name"]:
                                    providers_found[idx] = "x"
                buffer = buffer + one_line + " " + "".join(
                    providers_found) + " "
            return buffer
#         for idx, event in enumerate(events):
#             buffer = buffer + (str(idx) + ": " + event["id_string"]) + "\n"

        buffer = buffer + ("Providers: " + " ".join(providers) + "\n")
        buffer = buffer + (
            "event number; create; in_progress; finish; result; \n")
        for idx, event in enumerate(events):
            buffer = buffer + (event["id_string"] + " ")
            buffer = list_all(event, "create", buffer, providers, ';')
            buffer = list_all(event, "in_progress", buffer, providers, ';')
            buffer = list_all(event, "finish", buffer, providers, ';')
            buffer = list_all(event, "result", buffer, providers, ';')
            buffer = buffer + ("\n")

        return buffer
Example #2
def resend(url, unique_string, provider):
    """ Resend one or more incidents from the store
    """
    from bos_incidents import factory
    storage = factory.get_incident_storage()
    incident = storage.get_incident_by_unique_string_and_provider(
        unique_string, provider)
    pprint(incident)
    incident.update(dict(skip_storage=True))
    resend_incidents(url, incident)
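A hypothetical invocation of the command above; every argument value is a placeholder, not something taken from the original project:

# Hedged usage sketch: the URL, unique string and provider are placeholders.
resend(
    url="http://localhost:8010/trigger",       # assumed bos-auto endpoint
    unique_string="<incident-unique-string>",  # placeholder
    provider="<provider-name>",                # placeholder
)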
Example #3
    def setUp(self):
        super(TestWithSampleIncidents, self).setUp()
        self.storage = factory.get_incident_storage("mongodbtest", purge=True)

        # iterate the sampledata and insert all incidents
        sampledata_path = join("dump", "sampledata", "incidents")
        onlyfiles = [f for f in listdir(sampledata_path) if isfile(join(sampledata_path, f))]

        for _file in onlyfiles:
            with open(join(sampledata_path, _file)) as json_file:
                data = json.load(json_file)
                self.storage.insert_incident(data)
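Once setUp has populated the test store, test methods can query it through the same storage API used in the other examples here. A minimal sketch of such a test, assuming the sample dump contains at least one incident; the method name and assertions are illustrative, not from the original suite:

    def test_sample_incidents_loaded(self):
        # Illustrative sketch only; the expectations below are placeholders.
        events = list(self.storage.get_events(resolve=False))
        self.assertGreater(len(events), 0)

        # every provider found in the dump should have at least one incident
        for provider in self.storage.get_distinct("provider_info.name"):
            count = self.storage.get_incidents_count(
                {"provider_info.name": provider})
            self.assertGreater(count, 0)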
Example #4
    def __init__(self,
                 message,
                 lookup_instance,
                 config,
                 clear_caches=True,
                 **kwargs):
        self.message = message
        self.lookup = lookup_instance
        self.config = config

        # Obtain data for unique key
        # The "id" contains everything we need to identify an individual event
        # which itself contains at least the sport, and the teams

        # Get the id (internally used only)
        self.id = message.get("id")

        # Incident Storage
        if "storage" in kwargs and kwargs["storage"]:
            self.storage = kwargs["storage"]
        else:
            self.storage = factory.get_incident_storage(
                kwargs.get("mongodb", None), purge=kwargs.get("purge", False))

        # Normalize incident
        self.normalizer = IncidentsNormalizer(
            chain=lookup_instance._network_name)
        self.normalize(message)

        # Let's clear the caches for Proposals and events
        # We need to do so because of the internal pypeerplays cache.
        # The cache reduces the API calls to the backend and thus latency.
        # However, to be sure the event hasn't been created since before the
        # cache has expired, we force a refresh from the blockchain.
        if clear_caches:
            Events.clear_cache()
            Proposals.clear_cache()

        # Try to obtain the sport
        self.sport = LookupSport(self.id.get("sport"))

        # Given the sport, try to obtain the league (event group)
        self.eventgroup = LookupEventGroup(self.sport,
                                           self.id.get("event_group_name"))

        self.event = None  # Will be filled in after receiving a trigger

        # Get Teams from query
        self.teams = [self.id.get("home"), self.id.get("away")]

        # Get start time from query
        self.start_time = parse(self.id.get("start_time", ""))
Example #5
def show_incidents_per_id(incident_id=None, call=None, use="mongodb"):
    if use == "bos-auto":
        use = "mongodb"

    store = factory.get_incident_storage(use=use)
    try:
        event = store.get_event_by_id(incident_id, resolve=True)
    except EventNotFoundException:
        return jsonify("Event not found")

    for key in list(event.keys()):
        if not (key == call or key == "id" or key == "id_string"):
            event.pop(key)

    return jsonify(event)
Example #6
def worker(queue):
    """ Start the (redis queue) worker to deal with the received messages
    """
    from .redis_con import get_redis
    from . import work

    work.unlock()
    # Let's drop the redis database and refill it from incident store
    with Connection(get_redis()):

        def retrigger_from_events(events, call):
            for event in events:
                for incidentid in event.get(call, {}).get("incidents", []):
                    incident = storage.resolve_to_incident(incidentid)
                    q.enqueue(work.process,
                              args=(incident, ),
                              kwargs=dict(
                                  proposer=config.get("BOOKIE_PROPOSER"),
                                  approver=config.get("BOOKIE_APPROVER")))
                    # only replay the first incident!
                    break

        q = Queue("default")
        # Empty queue!
        q.empty()
        log.info("Redis Queue cleared")

        log.info("Refilling redis queue from incident store")
        storage = factory.get_incident_storage()
        for call in INCIDENT_CALLS:
            # "postponed", "unhandled exception, retrying soon" and "unknown" handled by scheduler
            for status_name in [
                    "undecided", "connection lost", "related object not found",
                    "event missing in bos_incidents"
            ]:
                events = list(
                    storage.get_events_by_call_status(call=call,
                                                      status_name=status_name))
                if len(events):
                    log.info("Retriggering {} {}:{} incidents".format(
                        len(events), call, status_name))
                retrigger_from_events(events, call)

    # This runs the Worker as a thread
    with Connection(get_redis()):
        w = Worker([queue])
        w.work()
Example #7
def ensure_consistency(filter, test):
    """ Delete references to deleted incidents
    """
    from bos_incidents import factory
    storage = factory.get_incident_storage()
    if filter:
        incidents = storage.get_incidents(
            dict(unique_string={"$regex": ".*" + filter + ".*"}))
    else:
        incidents = storage.get_incidents()
    for incident in incidents:
        if "id" not in incident:
            if test:
                print(" > " + incident["unique_string"] + "-" +
                      incident["provider_info"]["name"])
            else:
                storage.delete_incident(incident)
Example #8
def list(call, status):
    """ List events
    """
    from bos_incidents import factory
    t = PrettyTable(["identifier", "Incidents", "status"], hrules=ALLBORDERS)
    t.align = 'l'
    storage = factory.get_incident_storage()
    if not call:
        events = storage.get_events(resolve=False)
    else:
        events = storage.get_events_by_call_status(call=call, status_name=status)
    for event in events:
        t.add_row([
            event["id_string"],
            format_event_incidents(event),
            format_event_incident_statuses(event)
        ])
    click.echo(str(t))
Example #9
    def list_providers():
        """ List events
        """
        from bos_incidents import factory
        t = PrettyTable(["Provider", "Incident Count"], hrules=ALLBORDERS)
        t.align = 'l'
        storage = factory.get_incident_storage()
        providers = storage.get_distinct("provider_info.name")

        for provider in providers:
            t.add_row([
                provider,
                str(
                    storage.get_incidents_count(
                        {"provider_info.name": provider}))
            ])

        click.echo(t)
Example #10
    def get_statistics(self, storage=None):
        """ Collect providers, storage information and events into a JSON string """
        if storage is None:
            storage = factory.get_incident_storage()

        events = storage.get_events()
        providers = []
        for event in events:
            for incident in storage.get_incidents_from_event(event):
                if incident["provider_info"]["name"] not in providers:
                    providers.append(incident["provider_info"]["name"])

        return_dict = {}
        return_dict["providers"] = providers
        return_dict["storage_information"] = storage._mongodb_config

        return_dict["events"] = events

        return json.dumps(return_dict)
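Because get_statistics returns a JSON string rather than a dict, callers have to decode it before use. A small consumption sketch; stats_source stands in for whatever object exposes get_statistics and is not a name from the original code:

import json

# Illustrative only: `stats_source` is a placeholder for the object that
# provides get_statistics() above.
stats = json.loads(stats_source.get_statistics())
print("providers:", ", ".join(stats["providers"]))
print("events stored:", len(stats["events"]))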
Example #11
def show_incidents_per_id(incident_id=None, call=None, use="mongodb"):
    if use == "bos-auto":
        use = "mongodb"

    store = factory.get_incident_storage(use=use)
    try:
        event = store.get_event_by_id(incident_id, resolve=True)
    except EventNotFoundException:
        return jsonify("Event not found")

    # fetch them separately, we want raw data
    incidents = store.get_incidents_by_id(incident_id, call)

    return jsonify(
        {
            "status": event.get(call, {"status": "empty"})["status"],
            "incidents": list(incidents)
        }
    )
Example #12
def status(status_name):
    """ Show events that have a status 'status'
    """
    import builtins
    from bos_incidents import factory
    t = PrettyTable(["identifier", "Incidents", "Status"],
                    hrules=ALLBORDERS)
    t.align = 'l'
    storage = factory.get_incident_storage()
    for call in INCIDENT_CALLS:
        events = storage.get_events_by_call_status(call=call,
                                                   status_name=status_name)
        for event in events:
            full_event = storage.get_event_by_id(event["id_string"])
            t.add_row([
                full_event["id_string"],
                format_event_incidents(full_event),
                format_event_incident_statuses(full_event)
            ])
    click.echo(t)
Example #13
    def rm(unique_string, provider, filter, test):
        """ Remove an incident from the store
        """
        from bos_incidents import factory
        storage = factory.get_incident_storage()

        if unique_string and provider:
            incidents = [
                storage.get_incident_by_unique_string_and_provider(
                    unique_string, provider)
            ]
        elif filter:
            incidents = storage.get_incidents(
                dict(unique_string={"$regex": ".*" + filter + ".*"}))
        else:
            # nothing selected, nothing to delete
            incidents = []
        if test:
            print("To be deleted: ")
        for incident in incidents:
            if test:
                print(" > " + incident["unique_string"] + "-" +
                      incident["provider_info"]["name"])
            else:
                storage.delete_incident(incident)
Example #14
    def show(unique_string, provider, filter):
        """ Show the content of specific incidents
        """
        from bos_incidents import factory
        storage = factory.get_incident_storage()

        if provider is not None:
            incidents = [
                storage.get_incident_by_unique_string_and_provider(
                    unique_string, provider)
            ]
        else:
            if filter is not None:
                incidents = storage.get_incidents(
                    dict(unique_string={"$regex": ".*" + filter + ".*"}))
            elif unique_string is not None:
                incidents = storage.get_incidents(
                    dict(unique_string=unique_string))
            else:
                incidents = storage.get_incidents()
        for incident in incidents:
            pprint(incident)
Example #15
    def __init__(self, message, lookup_instance, config, **kwargs):
        self.message = message
        self.lookup = lookup_instance
        self.config = config

        # Obtain data for unique key
        # The "id" contains everything we need to identify an individual event
        # which itself contains at least the sport, and the teams

        # Get the id (internally used only)
        self.id = message.get("id")

        # Incident Storage
        if "storage" in kwargs and kwargs["storage"]:
            self.storage = kwargs["storage"]
        else:
            self.storage = factory.get_incident_storage(
                kwargs.get("mongodb", None), purge=kwargs.get("purge", False))

        # Normalize incident
        self.normalizer = IncidentsNormalizer(
            chain=lookup_instance._network_name)
        self.normalize(message)

        # Try to obtain the sport
        self.sport = LookupSport(self.id.get("sport"))

        # Given the sport, try to obtain the league (event group)
        self.eventgroup = LookupEventGroup(self.sport,
                                           self.id.get("event_group_name"))

        self.event = None  # Will be filled in after receiving a trigger

        # Get Teams from query
        self.teams = [self.id.get("home"), self.id.get("away")]

        # Get start time from query
        self.start_time = parse(self.id.get("start_time", ""))
Example #16
def show(identifier):
    """ Show event
    """
    from bos_incidents import factory
    t = PrettyTable(["identifier", "Incidents"], hrules=ALLBORDERS)
    t.align = 'l'

    storage = factory.get_incident_storage()
    event = storage.get_event_by_id(identifier)
    incidents = format_incidents(event)
    id = event["id"]
    id["start_time"] = parser.parse(id["start_time"]).replace(tzinfo=None)
    t.add_row([
        "\n".join([
            id["sport"],
            id["event_group_name"],
            id["start_time"].strftime("%Y/%m/%d"),
            "home: {}".format(id["home"]),
            "away: {}".format(id["away"]),
        ]),
        str(incidents)
    ])
    click.echo(t)
Example #17
    def list(begin, end, filter):
        """ List incidents from the bos-incidents store
        """
        from bos_incidents import factory
        t = PrettyTable(["identifier", "Incidents"], hrules=ALLBORDERS)
        t.align = 'l'

        storage = factory.get_incident_storage()

        for event in storage.get_events(resolve=True):

            # pprint(event)
            if not ("id" in event and event["id"]):
                continue
            id = event["id"]
            id["start_time"] = parser.parse(
                id["start_time"]).replace(tzinfo=None)

            # Limit time
            if begin and end and (id["start_time"] < begin
                                  or id["start_time"] > end):
                continue

            incidents = format_incidents(event)

            t.add_row([
                "\n".join([
                    id["sport"],
                    id["event_group_name"],
                    id["start_time"].strftime("%Y/%m/%d"),
                    "home: {}".format(id["home"]),
                    "away: {}".format(id["away"]),
                ]),
                str(incidents)
            ])

        click.echo(t)
Example #18
def replay(identifier, call, status_name, url):
    """ replay from event
    """
    from bos_incidents import factory
    storage = factory.get_incident_storage()
    event = storage.get_event_by_id(identifier, resolve=False)

    for incident_call, content in event.items():

        if not content or "incidents" not in content:
            continue

        if call and call != "*" and incident_call != call:
            continue

        if status_name and content["status"]["name"] != status_name:
            continue

        for _incident in content["incidents"]:
            incident = storage.resolve_to_incident(_incident)

            pprint(incident)
            incident.update(dict(skip_storage=True))
            resend_incidents(url, incident)
Example #19
def finalize_purge():
    factory.get_incident_storage(purge=True)
    return "success"
Example #20
def purge():
    """ Purge the entire store
    """
    from bos_incidents import factory
    factory.get_incident_storage(purge=True)
Example #21
def replay(filter=None, use="mongodb"):
    if use == "bos-auto":
        use = "mongodb"
    if not Config.get("advanced_features", False):
        abort(404)

    form = ReplayForm()
    form.chain.data = Config.get("connection", "use")
    formTitle = "Replay incidents"
    formMessages = []

    proxy_incidents = None

    if not form.back.data and form.validate_on_submit():
        if form.check.data:
            from .dataproxy_link import control
            # query dataproxy
            try:
                proxy_incidents = control.get_replayable_incidents(form.unique_string.data, form.dataproxy.data, form.chain.data, form.witness.data)

                form.replay.render_kw = {'disabled': False}

                del form.check

                formMessages.append(str(len(proxy_incidents)) + " incidents found on dataproxy")
                for _tmp in proxy_incidents:
                    if isinstance(_tmp, dict):
                        formMessages.append(" - " + _tmp["unique_string"] + " provider: " + _tmp["provider_info"]["name"])
                    else:
                        formMessages.append(" - " + _tmp)
            except Exception as e:
                del form.back
                flash(str(e), category="error")
        if form.replay.data:
            del form.back
            from .dataproxy_link import control
            try:
                proxy_incidents = control.replay_incidents(form.unique_string.data, form.dataproxy.data, form.chain.data, form.witness.data)

                formMessages = ["Replay has been triggered"]
                formMessages.append("")
                formMessages.append(str(len(proxy_incidents)) + " incidents found on dataproxy")
                for _tmp in proxy_incidents:
                    if isinstance(_tmp, dict):
                        formMessages.append(" - " + _tmp["unique_string"] + " provider: " + _tmp["provider_info"]["name"])
                    else:
                        formMessages.append(" - " + _tmp)
            except Exception as e:
                flash(str(e), category="error")
    else:
        if filter is not None:
            form.unique_string.data = filter
        form.witness.data = "All"
        del form.back

    if form.unique_string.data is not None and not form.unique_string.data.strip() == "":
        store = factory.get_incident_storage(use=use)
        incidents = list(store.get_incidents(filter_dict=dict(
            unique_string={"$regex": ".*" + form.unique_string.data + ".*"}
        )))
        if len(formMessages) > 0:
            formMessages.append("")
        formMessages.append(str(len(incidents)) + " incidents found locally for " + str(form.unique_string.data))
        for _tmp in incidents:
            formMessages.append(" - " + _tmp["unique_string"] + " provider: " + _tmp["provider_info"]["name"])

    return render_template_menuinfo('generic.html', **locals())
Example #22
def check_scheduled(storage=None,
                    func_callback=None,
                    proposer=None,
                    approver=None):  # pragma: no cover
    """
    """
    log.info("Scheduler checking incidents database ... "
             "(approver: {}/ proposer: {})".format(approver, proposer))

    # Incident Storage
    if not storage:
        storage = factory.get_incident_storage()

    if not proposer:
        proposer = config.get("BOOKIE_PROPOSER")
    if not approver:
        approver = config.get("BOOKIE_APPROVER")

    ids = list()
    push_to_queue = []

    for call in INCIDENT_CALLS:
        log.info("- querying call {}".format(call))

        events = []

        for status_name in ["postponed", "unhandled exception, retrying soon"]:
            for event in storage.get_events_by_call_status(
                    call=call,
                    status_name=status_name,
                    status_expired_before=datetime.utcnow(),
            ):
                events.append(event)

        for status_name in ["connection lost", "unknown"]:
            for event in storage.get_events_by_call_status(
                    call=call, status_name=status_name):
                events.append(event)

        log.info("Scheduler retriggering " + str(len(events)) +
                 " incidents ...")
        for event in events:
            for incidentid in event.get(call, {}).get("incidents", []):
                incident = storage.resolve_to_incident(incidentid)
                if func_callback:
                    push_to_queue.append(incident)
                    # it is enough to trigger one incident, worker will check the whole call
                    break

    # Flask queue
    q = Queue(connection=get_redis())

    # only push into the queue if it's somewhat empty (with 10% buffer), otherwise wait
    if q.count + 2 < len(push_to_queue):
        for incident in push_to_queue:
            job = q.enqueue(
                func_callback,
                args=(incident, ),
                kwargs=dict(proposer=proposer, approver=approver),
            )
            ids.append(job.id)

    return ids
Example #23
    def setUp(self):
        super(TestMongoOperationStorage, self).setUp()
        self.storage = factory.get_incident_storage("mongodbtest", purge=True)
Example #24
from bos_incidents import factory, exceptions
from bos_incidents.validator import IncidentValidator, InvalidIncidentFormatException
from bookiesports.normalize import IncidentsNormalizer, NotNormalizableException

from .log import log
from . import INCIDENT_CALLS

config = loadConfig()

# Flask app and parameters
app = Flask(__name__)
redis = get_redis()
use_connection(redis)

# Incident Storage
storage = factory.get_incident_storage()

validator = IncidentValidator()

# API whitelist
api_whitelist = resolve_hostnames(config.get("api_whitelist", ["0.0.0.0"]))

background_threads = []


@app.route("/")
def home():
    """ Let's not expose that this is a bos-auto endpoint
    """
    return "", 404
Example #25
    def __init__(self):
        self.storage = factory.get_incident_storage()
        self.providers = self.storage.get_distinct("provider_info.name")
Example #26
def show_incidents(from_date=None, to_date=None, matching=None, use="mongodb"):
    if request.args.get("matching_today", None) is not None:
        return redirect(url_for("show_incidents", matching=utils.date_to_string()[0:10]))

    if matching is not None:
        try:
            match_date = utils.string_to_date(matching[0:20])
            if from_date is None:
                from_date = match_date - timedelta(days=3)
            if to_date is None:
                to_date = match_date + timedelta(days=3)
        except InvalidRFC3339Error:
            pass

    if isinstance(matching, str):
        matching = matching.split(",")

    if from_date is None:
        from_date = request.args.get("from_date", None)
        if from_date is None:
            from_date = utils.date_to_string(-7)
        if isinstance(from_date, str):
            from_date = utils.string_to_date(from_date)
    if to_date is None:
        to_date = request.args.get("to_date", None)
        if to_date is None:
            to_date = utils.date_to_string(21)
        if isinstance(to_date, str):
            to_date = utils.string_to_date(to_date)

    store = None
    unresolved_events = None
    try:
        store = factory.get_incident_storage(use=use)
        unresolved_events = store.get_events(resolve=False)
    except IncidentStorageLostException:
        flash("BOS-mint could not find an incident store, or connection failed. Is a BOS-auto instance running alongside that grants access?")
        return redirect(url_for('overview'))

    events = []
    # regroup incidents per provider for the provider view
    for event in unresolved_events:
        try:
            event_scheduled = utils.string_to_date(event["id_string"][0:20])
        except InvalidRFC3339Error:
            event_scheduled = utils.string_to_date(event["id_string"][0:23])
        if event_scheduled <= to_date and event_scheduled >= from_date and\
                (matching is None or all([x.lower() in event["id_string"].lower() for x in matching])):
            store.resolve_event(event)
        else:
            continue
        for call in ["create", "in_progress", "finish", "result", "dynamic_bmgs", "canceled"]:
            try:
                incident_provider_dict = {}
                for incident in event[call]["incidents"]:
                    provider = incident["provider_info"]["name"]
                    try:
                        incident_dict = incident_provider_dict[provider]
                    except KeyError:
                        incident_provider_dict[provider] = {"incidents": [],
                                                            "replay_links": {}}
                        incident_dict = incident_provider_dict[provider]

                    incident_provider_dict[provider]["incidents"].append(incident)

                    try:
                        replay_url = Ping().get_replay_url(provider, incident, call)
                        if replay_url is not None:
                            incident_dict["replay_links"][incident["unique_string"]] = replay_url
                    except Exception:
                        pass

                event[call]["incidents_per_provider"] = incident_provider_dict
            except KeyError:
                pass
        event["id_string"] = get_id_as_string(event["id"])
        events.append(event)

    from_date = utils.date_to_string(from_date)
    to_date = utils.date_to_string(to_date)

    if use == "mongodb":
        use = "bos-auto"

    if use == "auto":
        use = "bos-auto"

    return render_template_menuinfo('showIncidents.html', **locals())
Example #27
def reset_storage():
    return factory.get_incident_storage("mongodbtest", purge=True)
Example #28
# Setup base lookup
lookup = Lookup(
    proposer="init0",
    blockchain_instance=peerplays,
    network="unittests",
    sports_folder=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "bookiesports"),
)
lookup.set_approving_account("init0")
lookup.set_proposing_account("init0")

# ensure lookup isn't broadcasting either
assert lookup.blockchain.nobroadcast

# Storage
storage = factory.get_incident_storage("mongodbtest", purge=True)

# Incident validator
validator = IncidentValidator()
normalizer = IncidentsNormalizer(chain="unittests")


def reset_storage():
    return factory.get_incident_storage("mongodbtest", purge=True)


def lookup_test_event(id):
    return LookupEvent(
        **{
            "id": "1.22.2242",
            "teams": ["Atlanta Hawks", "Boston Celtics"],
Example #29
def check_scheduled(storage=None,
                    func_callback=None,
                    proposer=None,
                    approver=None):  # pragma: no cover
    """
    """
    log.info("Scheduler checking incidents database ... "
             "(approver: {}/ proposer: {})".format(approver, proposer))

    # Incident Storage
    if not storage:
        storage = factory.get_incident_storage()

    if not proposer:
        proposer = config.get("BOOKIE_PROPOSER")
    if not approver:
        approver = config.get("BOOKIE_APPROVER")

    ids = list()
    push_to_queue = []

    for call in INCIDENT_CALLS:
        log.info("- querying call {}".format(call))

        events = []

        # "midway" is the status of incidents that were intentionally
        # interrupted because creating the bms took time
        for status_name in ["midway"]:
            for event in storage.get_events_by_call_status(
                    call=call, status_name=status_name):
                events.append(event)

        for status_name in ["postponed", "unhandled exception, retrying soon"]:
            for event in storage.get_events_by_call_status(
                    call=call,
                    status_name=status_name,
                    status_expired_before=datetime.utcnow(),
            ):
                events.append(event)

        for status_name in ["connection lost", "unknown"]:
            for event in storage.get_events_by_call_status(
                    call=call, status_name=status_name):
                events.append(event)

        log.info("Scheduler retriggering " + str(len(events)) +
                 " incidents ...")
        for event in events:
            for incidentid in event.get(call, {}).get("incidents", []):
                incident = storage.resolve_to_incident(incidentid)
                if func_callback:
                    push_to_queue.append(incident)
                    # it is enough to trigger one incident, worker will check the whole call
                    break

    # Flask queue
    q = Queue(connection=get_redis())

    # If push_to_queue has incidents, empty the queue and refill it from push_to_queue.
    # This ensures that events which are already on the chain, but for which not all
    # bms have been created yet, are processed first.
    if len(push_to_queue) > 0:
        q.empty()
        for incident in push_to_queue:
            job = q.enqueue(
                func_callback,
                args=(incident, ),
                kwargs=dict(proposer=proposer, approver=approver),
            )
            ids.append(job.id)

    return ids
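check_scheduled is normally driven by the scheduler, but it can also be called directly for manual testing. A hedged sketch; work.process is the callback already enqueued in Example #6, and the account names are placeholders:

# Sketch only, not from the original source.
from . import work  # same relative import style as in Example #6

job_ids = check_scheduled(
    func_callback=work.process,   # callback also enqueued in Example #6
    proposer="init0",             # placeholder proposer account
    approver="init0",             # placeholder approver account
)
print("enqueued {} jobs".format(len(job_ids)))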