Example #1
def replay_task(replay_job: Job, training_initial_job: Job) -> list:
    """ The function create a replay task to ask the server to demo the arriving of events

        :param replay_job: job dictionary
        :param training_initial_job: job dictionary
        :return: List of requests
    """
    logger.error("Start replay task ID {}".format(replay_job.id))
    requests = list()
    try:
        replay_job.status = JobStatuses.RUNNING.value
        replay_job.error = ''
        replay_job.save()
        requests = replay_core(replay_job, training_initial_job)
        replay_job.status = JobStatuses.COMPLETED.value
        for r in requests:
            if r.status_code != status.HTTP_201_CREATED:
                replay_job.error += str(r)  # error is a text field, so accumulate strings, not lists
    except Exception as e:
        logger.error(e)
        replay_job.status = JobStatuses.ERROR.value
        replay_job.error += repr(e)
        raise
    finally:
        # save and publish on both paths; a return here would swallow the re-raised exception
        replay_job.save()
        publish(replay_job)
    return requests
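All of the task functions on this page share one lifecycle: set the Job to RUNNING and save, do the work, record COMPLETED or ERROR, and always save and publish from the finally block so subscribers never observe a stale RUNNING status. Below is a minimal, self-contained sketch of that pattern, with stand-ins (FakeJob, publish) for the real Django model and notification hook:

from enum import Enum

class JobStatuses(Enum):
    RUNNING = 'running'
    COMPLETED = 'completed'
    ERROR = 'error'

class FakeJob:
    """Stand-in for the Django Job model used in the examples."""
    def __init__(self):
        self.status, self.error, self.result = None, '', None

    def save(self):
        pass  # the real model would persist to the database here

def publish(job):
    print('published:', job.status, job.error)  # stand-in for the real broker

def run_with_lifecycle(job, work):
    job.status = JobStatuses.RUNNING.value
    job.save()
    try:
        job.result = work(job)
        job.status = JobStatuses.COMPLETED.value
        job.error = ''
    except Exception as e:
        job.status = JobStatuses.ERROR.value
        job.error = repr(e)
        raise  # a bare raise keeps the original traceback
    finally:
        job.save()    # runs on success and failure alike,
        publish(job)  # so observers always see a terminal status

run_with_lifecycle(FakeJob(), lambda job: 42)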
Example #2
def prediction_task(job_id):
    print("Start prediction task ID {}".format(job_id))
    job = Job.objects.get(id=job_id)

    try:
        if job.status == JobStatuses.CREATED.value:
            job.status = JobStatuses.RUNNING.value
            job.save()
            start_time = time.time()
            if job.hyperparameter_optimizer is not None:
                result, model_split = hyperopt_task(job)
            else:
                result, model_split = calculate(job)
            elapsed_time = time.time() - start_time
            print('\tJob took: {} (HH:MM:SS)'.format(
                time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))
            if job.create_models:
                save_models(model_split, job)
            job.result = result
            job.status = JobStatuses.COMPLETED.value
    except Exception as e:
        job.status = JobStatuses.ERROR.value
        job.error = repr(e)
        raise
    finally:
        job.save()
        publish(job)
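The elapsed-time line above leans on a small trick: time.gmtime interprets a float as seconds since the epoch, so feeding it a duration yields a struct whose hour, minute, and second fields equal the duration, provided the job took less than 24 hours. A quick check:

import time

elapsed = 3723.5  # 1 hour, 2 minutes, 3.5 seconds
print(time.strftime("%H:%M:%S", time.gmtime(elapsed)))  # -> 01:02:03
# caveat: the format wraps at 24 hours, so a 25-hour job prints 01:00:00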
Example #3
def prediction_task(job_id):
    logger.info("Start prediction task ID {}".format(job_id))
    job = Job.objects.get(id=job_id)

    try:
        if job.status == JobStatuses.CREATED.value and \
           (job.type != JobTypes.UPDATE.value or
                job.incremental_train.status == JobStatuses.COMPLETED.value):

            job.status = JobStatuses.RUNNING.value
            job.save()
            start_time = time.time()
            if job.hyperparameter_optimizer is not None and \
                job.hyperparameter_optimizer.optimization_method != HyperparameterOptimizationMethods.NONE.value:
                result, model_split = hyperopt_task(job)
            else:
                result, model_split = calculate(job)
            elapsed_time = time.time() - start_time
            logger.info('\tJob took: {} (HH:MM:SS)'.format(
                time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))
            if job.create_models:
                save_models(model_split, job)
            job.result = result
            job.status = JobStatuses.COMPLETED.value
    except Exception as e:
        logger.error(e)
        job.status = JobStatuses.ERROR.value
        job.error = repr(e)
        raise
    finally:
        job.save()
        publish(job)
Example #4
def replay_prediction_task(replay_prediction_job, training_initial_job, log):
    logger.info("Start replay_prediction task ID {}".format(
        replay_prediction_job.id))
    try:
        replay_prediction_job.status = JobStatuses.RUNNING.value
        replay_prediction_job.save()
        max_len = max(len(trace) for trace in log)
        if replay_prediction_job.encoding.prefix_length != max_len:
            prediction_job = create_prediction_job(training_initial_job,
                                                   max_len)
            prediction_task(prediction_job.id)
            prediction_job.refresh_from_db()
            new_replay_prediction_job = duplicate_orm_row(prediction_job)
            new_replay_prediction_job.split = Split.objects.filter(
                pk=replay_prediction_job.split.id)[0]
            new_replay_prediction_job.type = JobTypes.REPLAY_PREDICT.value
            new_replay_prediction_job.status = JobStatuses.CREATED.value
            replay_prediction_task(new_replay_prediction_job, prediction_job,
                                   log)
            return
        result = replay_prediction_calculate(replay_prediction_job, log)
        replay_prediction_job.results = {'result': str(result)}
        replay_prediction_job.status = JobStatuses.COMPLETED.value
        replay_prediction_job.error = ''
    except Exception as e:
        logger.error(e)
        replay_prediction_job.status = JobStatuses.ERROR.value
        replay_prediction_job.error = repr(e)
        raise
    finally:
        replay_prediction_job.save()
        publish(replay_prediction_job)
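The prefix-length guard above is what makes this task recursive: when the replay job's encoding was trained on a different prefix length than the longest trace in the incoming log, a fresh prediction job is trained at the right length and the task calls itself once with the new model. A toy version of the guard, with plain lists standing in for the XES traces:

def longest_prefix(log):
    # each trace is a sequence of events; the encoding must cover the longest one
    return max(len(trace) for trace in log)

log = [['a', 'b'], ['a', 'b', 'c', 'd'], ['a']]
assert longest_prefix(log) == 4
# if encoding.prefix_length != 4, the task retrains at length 4 before replaying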
Example #5
def runtime_task(job):
    logger.info("Start runtime task ID {}".format(job.id))
    try:
        job.status = JobStatuses.RUNNING.value
        job.save()
        result = runtime_calculate(job)
        job.results = {'result': str(result)}
        job.status = JobStatuses.COMPLETED.value
        job.error = ''
    except Exception as e:
        logger.error(e)
        job.status = JobStatuses.ERROR.value
        job.error = repr(e)
        raise
    finally:
        job.save()
        publish(job)
Example #6
def runtime_task(job, model):
    print("Start runtime task ID {}".format(job.pk))
    try:
        job.status = JobStatuses.RUNNING.value
        job.save()
        log = Log.objects.get(pk=job.config['log_id'])
        run_log = get_log(log.path)
        result_data = runtime_calculate(run_log, model.to_dict())
        result = result_data['prediction']
        job.result = result
        job.status = JobStatuses.COMPLETED.value
        job.error = ''
    except Exception as e:
        print("error " + repr(e))
        job.status = JobStatuses.ERROR.value
        job.error = repr(e)
        raise
    finally:
        job.save()
        publish(job)
Example #7
def runtime_task(job: Job):
    """ The function create a runtime task to ask a single prediction to the server

        :param job: job dictionary
    """
    logger.info("Start runtime task ID {}".format(job.id))
    try:
        job.status = JobStatuses.RUNNING.value
        job.save()
        result = runtime_calculate(job)
        job.results = {'result': str(result)}
        job.status = JobStatuses.COMPLETED.value
        job.error = ''
    except Exception as e:
        logger.error(e)
        job.status = JobStatuses.ERROR.value
        job.error = repr(e)
        raise
    finally:
        job.save()
        publish(job)
Example #8
def replay_task(replay_job, training_initial_job):
    logger.error("Start replay task ID {}".format(replay_job.id))
    requests = list()
    try:
        replay_job.status = JobStatuses.RUNNING.value
        replay_job.error = ''
        replay_job.save()
        requests = replay_core(replay_job, training_initial_job)
        replay_job.status = JobStatuses.COMPLETED.value
        for r in requests:
            if r.status_code != status.HTTP_201_CREATED:
                replay_job.error += str(r)  # error is a text field, so accumulate strings, not lists
    except Exception as e:
        logger.error(e)
        replay_job.status = JobStatuses.ERROR.value
        replay_job.error += repr(e)
        raise
    finally:
        # save and publish on both paths; a return here would swallow the re-raised exception
        replay_job.save()
        publish(replay_job)
    return requests
Example #9
def replay_prediction_task(replay_prediction_job: Job, training_initial_job: Job, log: Log):
    """ The function create a replat prediction task to ask a single prediction to the server for a portion of a trace

        :param replay_prediction_job: job dictionary
        :param training_initial_job: job dictionary
        :param log: job dictionary
    """
    logger.info("Start replay_prediction task ID {}".format(replay_prediction_job.id))
    try:
        replay_prediction_job.status = JobStatuses.RUNNING.value
        replay_prediction_job.save()
        max_len = max(len(trace) for trace in log)
        if replay_prediction_job.encoding.prefix_length != max_len:
            prediction_job = create_prediction_job(training_initial_job, max_len)
            prediction_task(prediction_job.id)
            prediction_job.refresh_from_db()
            new_replay_prediction_job = duplicate_orm_row(prediction_job)
            new_replay_prediction_job.split = Split.objects.filter(pk=replay_prediction_job.split.id)[0]
            new_replay_prediction_job.type = JobTypes.REPLAY_PREDICT.value
            new_replay_prediction_job.parent_job = replay_prediction_job.parent_job
            new_replay_prediction_job.status = JobStatuses.CREATED.value
            replay_prediction_task(new_replay_prediction_job, prediction_job, log)
            return
        result_dict, events_for_trace = replay_prediction_calculate(replay_prediction_job, log)
        replay_prediction_job.results = dict(result_dict)
        replay_prediction_job.event_number = dict(events_for_trace)
        replay_prediction_job.status = JobStatuses.COMPLETED.value
        replay_prediction_job.error = ''
    except Exception as e:
        logger.error(e)
        replay_prediction_job.status = JobStatuses.ERROR.value
        replay_prediction_job.error = repr(e)
        raise
    finally:
        replay_prediction_job.save()
        publish(replay_prediction_job)
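Example #9 clones the finished prediction job with a duplicate_orm_row helper before repurposing it (Example #11 below replaces that call with an explicit Job.objects.create, per its todo comment). In plain Django, copying a saved row usually comes down to clearing the primary key and saving again; a rough sketch of what such a helper might look like, not necessarily the project's actual implementation:

def duplicate_orm_row(instance):
    """Clone a saved model instance: with pk/id cleared, save() INSERTs a new row."""
    instance.pk = None
    instance.id = None
    instance._state.adding = True  # tell Django this instance is not yet in the DB
    instance.save()
    return instance  # note: the original in-memory object now points at the copy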
Example #10
def prepare(ev, tr, lg, replayer_id, reg_id, class_id, real_log, nn=False, end=False):
    if int(reg_id) > 0:
        reg_model = PredModels.objects.get(pk=reg_id)
    else:
        reg_model = None
    if int(class_id) > 0:
        class_model = PredModels.objects.get(pk=class_id)
    else:
        class_model = None
    run = XFactory()
    serializer = XesXmlSerializer()
    temp_log = Et.Element("log")
    temp_trace = Et.Element("trace")
    temp_event = Et.Element("event")

    serializer.add_attributes(temp_log, lg.get_attributes().values())
    serializer.add_attributes(temp_trace, tr.get_attributes().values())
    serializer.add_attributes(temp_event, ev.get_attributes().values())

    # TODO: check if still needed
    # log_config = Et.tostring(logtmp)
    # trace_config = Et.tostring(trtmp)
    # event_config = Et.tostring(evtmp)
    event_xid = ev.get_id()

    log_map = json.dumps(xMap_to_dict(lg.get_attributes()))
    tmap = xMap_to_dict(tr.get_attributes())
    tname = str(tmap.get('concept:name'))
    trace_map = json.dumps(tmap)
    xmap = ev.get_attributes()
    event_map = json.dumps(xMap_to_dict(xmap))

    log, created = XLog.objects.get_or_create(config=log_map, real_log=real_log)
    try:
        trace = XTrace.objects.get(name=tname, config=trace_map, xlog=log)
        trace.reg_model = reg_model
        trace.class_model = class_model
    except XTrace.DoesNotExist:
        trace = XTrace.objects.create(name=tname, config=trace_map, xlog=log, reg_model=reg_model,
                                      class_model=class_model, real_log=real_log.id)

    if end:
        trace.completed = True
        trace.save()
        publish(trace)
        return
    elif trace.completed:
        trace.completed = False
        trace.save()

    try:
        event = XEvent.objects.get(config=event_map, trace=trace)
    except XEvent.DoesNotExist:
        event = XEvent.objects.create(config=event_map, trace=trace, xid=str(event_xid))

    events = XEvent.objects.filter(trace=trace, pk__lte=event.id)

    if nn:
        next_activities(events, trace)
    else:
        run_log = run.create_log(XAttributeMap(json.loads(log.config)))
        run_trace = run.create_trace(XAttributeMap(json.loads(trace.config)))
        c = 0

        for event in events:
            c = c + 1
            evt = run.create_event(XAttributeMap(json.loads(event.config)))
            run_trace.append(evt)
        run_log.append(run_trace)
        if c == 1:
            trace.first_event = str(xmap.get('time:timestamp'))
        trace.last_event = str(xmap.get('time:timestamp'))

        trace.duration = datetime.timedelta.total_seconds(
            dateparser(str(trace.last_event)) - dateparser(str(trace.first_event)))
        trace.n_events = c
        trace.save()
        classification_ok = True  # cleared if the classification step below raises

        try:
            if trace.reg_model is not None:
                if trace.reg_model.config['encoding']['padding'] != ZERO_PADDING and \
                        trace.reg_model.config['encoding']['generation_type'] != ALL_IN_ONE:
                    reg_config = trace.reg_model.config
                    reg_config['encoding']['prefix_length'] = c
                    right_reg_model = PredModels.objects.filter(config=reg_config)
                    trace.reg_model = right_reg_model[0]
                result_data = runtime_calculate(run_log, trace.reg_model.to_dict())
                trace.reg_results = result_data['prediction']
                trace.reg_actual = result_data['label']
                trace.save()
        except Exception as e:
            print("An exception has occurred in regression, error: " + repr(e))
            trace.error = repr(e)
            trace.save()
            raise
        try:
            if trace.class_model is not None:
                if trace.class_model.config['encoding']['padding'] != ZERO_PADDING and \
                    trace.class_model.config['encoding']['generation_type'] != ALL_IN_ONE:
                    class_config = trace.class_model.config
                    class_config['encoding']['prefix_length'] = c
                    right_class_model = PredModels.objects.filter(config=class_config)
                    trace.class_model = right_class_model[0]
                result_data = runtime_calculate(run_log, trace.class_model.to_dict())
                trace.class_results = result_data['prediction']
                trace.class_actual = result_data['label']
                trace.save()
        except Exception as e:
            trace.error = repr(e)
            classification_ok = False
            trace.save()
            print("An exception has occurred in classification, error: " + repr(e))
            raise
        finally:
            if classification_ok:
                trace.error = ""
            trace.save()
            publish(trace)
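prepare derives the trace duration by parsing the first and last event timestamps and subtracting; datetime.timedelta.total_seconds(delta) is just the unbound spelling of delta.total_seconds(). A small check, assuming dateparser is dateutil's parse (the import is not shown in the example):

import datetime
from dateutil.parser import parse as dateparser  # assumed binding for `dateparser`

first = dateparser('2021-01-01T10:00:00')
last = dateparser('2021-01-01T10:05:30')
assert (last - first).total_seconds() == 330.0
# equivalent to the unbound form used in the example:
assert datetime.timedelta.total_seconds(last - first) == 330.0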
Example #11
def replay_prediction_task(replay_prediction_job: Job,
                           training_initial_job: Job, log: Log):
    """ The function create a replat prediction task to ask a single prediction to the server for a portion of a trace

        :param replay_prediction_job: job dictionary
        :param training_initial_job: job dictionary
        :param log: job dictionary
    """
    logger.info("Start replay_prediction task ID {}".format(
        replay_prediction_job.id))
    try:
        replay_prediction_job.status = JobStatuses.RUNNING.value
        replay_prediction_job.save()
        max_len = max(len(trace) for trace in log)
        if replay_prediction_job.encoding.prefix_length != max_len:
            prediction_job = create_prediction_job(training_initial_job,
                                                   max_len)
            prediction_task(prediction_job.id)
            prediction_job.refresh_from_db()
            # new_replay_prediction_job = duplicate_orm_row(prediction_job)  #todo: replace with simple CREATE
            new_replay_prediction_job = Job.objects.create(
                created_date=prediction_job.created_date,
                modified_date=prediction_job.modified_date,
                error=prediction_job.error,
                status=prediction_job.status,
                type=prediction_job.type,
                create_models=prediction_job.create_models,
                case_id=prediction_job.case_id,
                event_number=prediction_job.event_number,
                gold_value=prediction_job.gold_value,
                results=prediction_job.results,
                parent_job=prediction_job.parent_job,
                split=prediction_job.split,
                encoding=prediction_job.encoding,
                labelling=prediction_job.labelling,
                clustering=prediction_job.clustering,
                predictive_model=prediction_job.predictive_model,
                evaluation=prediction_job.evaluation,
                hyperparameter_optimizer=prediction_job.hyperparameter_optimizer,
                incremental_train=prediction_job.incremental_train)
            new_replay_prediction_job.split = Split.objects.filter(
                pk=replay_prediction_job.split.id)[0]
            new_replay_prediction_job.type = JobTypes.REPLAY_PREDICT.value
            new_replay_prediction_job.parent_job = replay_prediction_job.parent_job
            new_replay_prediction_job.status = JobStatuses.CREATED.value
            replay_prediction_task(new_replay_prediction_job, prediction_job,
                                   log)
            return
        result_dict, events_for_trace = replay_prediction_calculate(
            replay_prediction_job, log)
        replay_prediction_job.results = dict(result_dict)
        replay_prediction_job.event_number = dict(events_for_trace)
        replay_prediction_job.status = JobStatuses.COMPLETED.value
        replay_prediction_job.error = ''
    except Exception as e:
        logger.error(e)
        replay_prediction_job.status = JobStatuses.ERROR.value
        replay_prediction_job.error = repr(e)
        raise
    finally:
        replay_prediction_job.save()
        publish(replay_prediction_job)