def create_multiple(request):
    """Create and enqueue one or more prediction/labelling jobs.

    NOTE(review): no request validation is performed — the raw JSON body
    is trusted as-is (keys 'split_id', 'type' and 'config' are assumed
    present; a malformed body raises before any response is built).

    Dispatch order:
      1. If the config names a base model to incrementally train, update it.
      2. Else, if the type is a known predictive model, train a new one.
      3. Else, if the type is labelling, generate labelling jobs.
      4. Anything else is rejected with 422.

    Returns a DRF Response: 404 if the split does not exist, 422 for an
    unsupported type, otherwise 201 with the serialized created jobs.
    """
    payload = json.loads(request.body.decode('utf-8'))
    try:
        split = Split.objects.get(pk=payload['split_id'])
    except Split.DoesNotExist:
        return Response({'error': 'not in database'},
                        status=status.HTTP_404_NOT_FOUND)

    # Detect whether a predictive_model to update has been specified,
    # otherwise train a new one.
    if 'incremental_train' in payload['config'] and \
            payload['config']['incremental_train']['base_model'] is not None:
        jobs = update(split, payload)
    elif payload['type'] in [e.value for e in PredictiveModels]:
        jobs = generate(split, payload)
    elif payload['type'] == JobTypes.LABELLING.value:
        jobs = generate_labelling(split, payload)
    else:
        # BUG FIX: the original message had no '{}' placeholder, so
        # .format(payload['type']) was a no-op and the offending job
        # type was never included in the error response.
        return Response(
            {'error': 'type {} not supported'.format(payload['type'])},
            status=status.HTTP_422_UNPROCESSABLE_ENTITY)

    # Queue each created job for asynchronous execution.
    for job in jobs:
        django_rq.enqueue(tasks.prediction_task, job.id)

    serializer = JobSerializer(jobs, many=True)
    # Use the named constant for consistency with the other responses above.
    return Response(serializer.data, status=status.HTTP_201_CREATED)
def test_update(self):
    """Exercise `update` against a job cloned from a freshly trained one."""
    base_job = create_test_job()
    prediction_task(base_job.id)

    # Clone the trained job, then turn the clone into an UPDATE job whose
    # incremental_train points back at the original.
    cloned_job = duplicate_orm_row(base_job)
    base_job.refresh_from_db()
    cloned_job.incremental_train = base_job
    cloned_job.type = JobTypes.UPDATE.value
    cloned_job.save()

    initial_job = cloned_job  # .to_dict()

    update_payload = {
        'type': 'classification',
        'split_id': 1,
        'config': {
            'clusterings': ['noCluster'],
            'encodings': ['simpleIndex'],
            'encoding': {
                'padding': False,
                'prefix_length': 1,
                'generation_type': 'only',
                'add_remaining_time': False,
                'add_elapsed_time': False,
                'add_executed_events': False,
                'add_resources_used': False,
                'add_new_traces': False,
                'features': [],
            },
            'create_models': False,
            'methods': ['randomForest'],
            'kmeans': {},
            'incremental_train': [base_job.id],
            'hyperparameter_optimizer': {
                'algorithm_type': 'tpe',
                'max_evaluations': 10,
                'performance_metric': 'rmse',
                'type': 'none',
            },
            'labelling': {
                'type': 'next_activity',
                'attribute_name': '',
                'threshold_type': 'threshold_mean',
                'threshold': 0,
            },
        },
    }
    generated_job = update(split=base_job.split, payload=update_payload)[0]  # .to_dict()
def test_update(self):
    """Exercise `update` against a job cloned field-by-field from a trained one."""
    base_job = create_test_job()
    prediction_task(base_job.id)

    # Clone the job with a plain CREATE instead of duplicate_orm_row:
    # copy every persisted field onto a brand-new row.
    _CLONE_FIELDS = (
        'created_date', 'modified_date', 'error', 'status', 'type',
        'create_models', 'case_id', 'event_number', 'gold_value',
        'results', 'parent_job', 'split', 'encoding', 'labelling',
        'clustering', 'predictive_model', 'evaluation',
        'hyperparameter_optimizer', 'incremental_train',
    )
    cloned_job = Job.objects.create(
        **{field: getattr(base_job, field) for field in _CLONE_FIELDS})

    # Turn the clone into an UPDATE job pointing back at the original.
    base_job.refresh_from_db()
    cloned_job.incremental_train = base_job
    cloned_job.type = JobTypes.UPDATE.value
    cloned_job.save()

    initial_job = cloned_job  # .to_dict()

    update_payload = {
        'type': 'classification',
        'split_id': 1,
        'config': {
            'clusterings': ['noCluster'],
            'encodings': ['simpleIndex'],
            'encoding': {
                'padding': False,
                'prefix_length': 1,
                'generation_type': 'only',
                'add_remaining_time': False,
                'add_elapsed_time': False,
                'add_executed_events': False,
                'add_resources_used': False,
                'add_new_traces': False,
                'features': [],
            },
            'create_models': False,
            'methods': ['randomForest'],
            'kmeans': {},
            'incremental_train': [base_job.id],
            'hyperparameter_optimizer': {
                'algorithm_type': 'tpe',
                'max_evaluations': 10,
                'performance_metric': 'rmse',
                'type': 'none',
            },
            'labelling': {
                'type': 'next_activity',
                'attribute_name': '',
                'threshold_type': 'threshold_mean',
                'threshold': 0,
            },
        },
    }
    generated_job = update(split=base_job.split, payload=update_payload)[0]  # .to_dict()