def get(self, dataset_id: int):
    """Returns feature metrics of one column of a dataset.

    Args:
        dataset_id: positive id of the dataset to inspect.

    Raises:
        NotFoundException: when dataset_id is not a positive integer.
        InvalidArgumentException: when the required `name` query
            parameter is missing.
    """
    if dataset_id <= 0:
        raise NotFoundException(f'Failed to find dataset: {dataset_id}')
    name = request.args.get('name', None)
    if not name:
        # was an f-string with no placeholder (F541); plain literal now,
        # rendered message is unchanged
        raise InvalidArgumentException('required params name')
    with db.session_scope() as session:
        data = DatasetService(session).feature_metrics(name, dataset_id)
        return {'data': data}
def _handle_bad_request(error):
    """Converts reqparse's bad-request errors into API responses.

    Errors that are already WebConsoleApiException instances pass
    through untouched; anything else is wrapped into an
    InvalidArgumentException response carrying reqparse's message
    details when available.
    """
    if isinstance(error, WebConsoleApiException):
        return error
    # reqparse stores its validation messages under error.data['message']
    details = error.data['message'] if error.data is not None else None
    return make_response(InvalidArgumentException(details))
def post(self):
    """Creates a workflow, optionally as a fork of an existing one."""
    parser = reqparse.RequestParser()
    parser.add_argument('name', required=True, help='name is empty')
    parser.add_argument('project_id', type=int, required=True,
                        help='project_id is empty')
    # TODO: should verify if the config is compatible with
    # workflow template
    parser.add_argument('config', type=dict, required=True,
                        help='config is empty')
    parser.add_argument('forkable', type=bool, required=True,
                        help='forkable is empty')
    parser.add_argument('forked_from', type=int, required=False,
                        help='fork from base workflow')
    parser.add_argument('reuse_job_names', type=list, required=False,
                        location='json', help='fork and inherit jobs')
    parser.add_argument('peer_reuse_job_names', type=list, required=False,
                        location='json', help='peer fork and inherit jobs')
    parser.add_argument('fork_proposal_config', type=dict, required=False,
                        help='fork and edit peer config')
    parser.add_argument('comment')
    data = parser.parse_args()
    name = data['name']
    # workflow names must be unique
    if Workflow.query.filter_by(name=name).first() is not None:
        raise ResourceConflictException(
            'Workflow {} already exists.'.format(name))
    # form to proto buffer
    template_proto = dict_to_workflow_definition(data['config'])
    # new workflows immediately target READY so the scheduler picks
    # them up for preparation
    workflow = Workflow(name=name,
                        comment=data['comment'],
                        project_id=data['project_id'],
                        forkable=data['forkable'],
                        forked_from=data['forked_from'],
                        state=WorkflowState.NEW,
                        target_state=WorkflowState.READY,
                        transaction_state=TransactionState.READY)
    if workflow.forked_from is not None:
        fork_config = dict_to_workflow_definition(
            data['fork_proposal_config'])
        # TODO: more validations
        # a fork must mirror the base workflow's job structure
        if len(fork_config.job_definitions) != \
                len(template_proto.job_definitions):
            raise InvalidArgumentException(
                'Forked workflow\'s template does not match base workflow')
        workflow.set_fork_proposal_config(fork_config)
        workflow.set_reuse_job_names(data['reuse_job_names'])
        workflow.set_peer_reuse_job_names(data['peer_reuse_job_names'])
    workflow.set_config(template_proto)
    db.session.add(workflow)
    db.session.commit()
    logging.info('Inserted a workflow to db')
    # wake the scheduler so the new workflow is processed promptly
    scheduler.wakeup(workflow.id)
    return {'data': workflow.to_dict()}, HTTPStatus.CREATED
def patch(self, workflow_id):
    """Partially updates a workflow: forkable flag, target state, config.

    Args:
        workflow_id: id of the workflow to update.

    Raises:
        InvalidArgumentException: on an invalid target_state or config.
        NoAccessException: when editing the config of a running
            workflow.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('target_state', type=str, required=False,
                        default=None, help='target_state is empty')
    parser.add_argument('forkable', type=bool)
    parser.add_argument('config', type=dict, required=False,
                        default=None, help='updated config')
    data = parser.parse_args()
    workflow = _get_workflow(workflow_id)
    forkable = data['forkable']
    if forkable is not None:
        workflow.forkable = forkable
        db.session.commit()
    target_state = data['target_state']
    if target_state:
        try:
            # refresh to avoid acting on a row made stale by the
            # commit above
            db.session.refresh(workflow)
            workflow.update_target_state(WorkflowState[target_state])
            db.session.commit()
            logging.info('updated workflow %d target_state to %s',
                         workflow.id, workflow.target_state)
            # wake the scheduler to drive the state transition
            scheduler.wakeup(workflow.id)
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    config = data['config']
    if config:
        try:
            db.session.refresh(workflow)
            # config may only change when the workflow is not running
            if workflow.target_state != WorkflowState.INVALID or \
                    workflow.state not in \
                    [WorkflowState.READY, WorkflowState.STOPPED]:
                raise NoAccessException('Cannot edit running workflow')
            config_proto = dict_to_workflow_definition(data['config'])
            workflow.set_config(config_proto)
            db.session.commit()
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    return {'data': workflow.to_dict()}, HTTPStatus.OK
def delete(self, user_id):
    """Soft-deletes a user by flipping its state to DELETED.

    Self-deletion is rejected so a user cannot remove their own
    account.
    """
    target = self._find_user(user_id)
    if get_current_user().id == user_id:
        raise InvalidArgumentException('cannot delete yourself')
    target.state = State.DELETED
    db.session.commit()
    return {'data': target.to_dict()}, HTTPStatus.OK
def get(self):
    """Lists workflow templates, optionally filtered by query args."""
    query = WorkflowTemplate.query
    args = request.args
    if 'group_alias' in args:
        query = query.filter_by(group_alias=args['group_alias'])
    if 'is_left' in args:
        # type=int turns an unparsable value into None
        is_left = args.get(key='is_left', type=int)
        if is_left is None:
            raise InvalidArgumentException('is_left must be 0 or 1')
        query = query.filter_by(is_left=is_left)
    return {'data': [t.to_dict() for t in query.all()]}, HTTPStatus.OK
def dict_to_workflow_definition(config):
    """Parses a plain dict into a WorkflowDefinition proto.

    Every workflow-level and job-level variable is run through
    _classify_variable. A malformed dict surfaces as an
    InvalidArgumentException whose details carry the parse error.
    """
    try:
        proto = ParseDict(config,
                          workflow_definition_pb2.WorkflowDefinition())
        for top_level_variable in proto.variables:
            _classify_variable(top_level_variable)
        for job_def in proto.job_definitions:
            for job_variable in job_def.variables:
                _classify_variable(job_variable)
    except ParseError as e:
        raise InvalidArgumentException(details={'config': str(e)})
    return proto
def post(self):
    """Submits a Spark application described by the request body.

    Raises:
        InvalidArgumentException: when the payload cannot be parsed
            into a SparkAppConfig or the embedded files are not valid
            base64.
    """
    service = SparkAppService()
    data = request.json
    try:
        config = SparkAppConfig.from_dict(data)
        if config.files:
            # files are transferred base64-encoded over JSON
            config.files = base64.b64decode(config.files)
    except ValueError as err:
        # stringify so the API payload stays serializable (the original
        # passed the exception object), and chain the cause
        raise InvalidArgumentException(details=str(err)) from err
    res = service.submit_sparkapp(config=config)
    return {'data': res.to_dict()}, HTTPStatus.CREATED
def start_or_stop_cronjob(batch_update_interval: int, workflow: Workflow):
    """Starts, patches, or stops the cronjob restarting a workflow.

    Args:
        batch_update_interval (int): restart interval in minutes.
            Positive values create or patch the cronjob; negative
            values stop it.

    Raises:
        InvalidArgumentException: if the workflow is not the left
            participant, if the existing cronjob item is OFF, or if
            patching the interval fails.
    """
    item_name = f'workflow_cron_job_{workflow.id}'
    # composer intervals are in seconds; the API takes minutes
    batch_update_interval = batch_update_interval * 60
    # guard first: only the left participant may manage the cronjob.
    # (previously a negative interval on a non-left workflow would
    # silently call composer.finish instead of raising)
    if not workflow.get_config().is_left:
        raise InvalidArgumentException('Only left can operate this')
    if batch_update_interval > 0:
        status = composer.get_item_status(name=item_name)
        # create a cronjob
        if not status:
            composer.collect(name=item_name,
                             items=[WorkflowCronJobItem(workflow.id)],
                             metadata={},
                             interval=batch_update_interval)
            return
        if status == ItemStatus.OFF:
            raise InvalidArgumentException(
                f'cannot set item [{item_name}], since item is off')
        # patch a cronjob
        try:
            composer.patch_item_attr(name=item_name,
                                     key='interval_time',
                                     value=batch_update_interval)
        except ValueError as err:
            raise InvalidArgumentException(details=repr(err)) from err
    elif batch_update_interval < 0:
        composer.finish(name=item_name)
    else:
        logging.info('skip cronjob since batch_update_interval is -1')
def post(self, dataset_id: int):
    """Adds a data batch (a set of files) to an existing dataset.

    Args:
        dataset_id: id of the dataset the batch belongs to.

    Raises:
        NotFoundException: when the dataset does not exist.
        InvalidArgumentException: when a STREAMING dataset is given no
            event_time.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('event_time', type=int)
    parser.add_argument('files', required=True, type=list,
                        location='json',
                        help=_FORMAT_ERROR_MESSAGE.format('files'))
    parser.add_argument('move', type=bool)
    parser.add_argument('comment', type=str)
    body = parser.parse_args()
    event_time = body.get('event_time')
    files = body.get('files')
    move = body.get('move', False)
    comment = body.get('comment')
    dataset = Dataset.query.filter_by(id=dataset_id).first()
    if dataset is None:
        raise NotFoundException()
    # STREAMING datasets are partitioned by event time, so it is
    # mandatory for them
    if event_time is None and dataset.type == DatasetType.STREAMING:
        raise InvalidArgumentException(
            details='data_batch.event_time is empty')
    # TODO: PSI dataset should not allow multi batches
    # Create batch
    batch = DataBatch(
        dataset_id=dataset.id,
        # Use current timestamp to fill when type is PSI
        event_time=datetime.datetime.fromtimestamp(
            event_time or datetime.datetime.now().timestamp()),
        comment=comment,
        state=BatchState.NEW,
        move=move,
    )
    batch_details = dataset_pb2.DataBatch()
    root_dir = current_app.config.get('STORAGE_ROOT')
    # destination folder is keyed by the batch's event time
    batch_folder_name = batch.event_time.strftime('%Y%m%d%H%M%S')
    for file_path in files:
        file = batch_details.files.add()
        file.source_path = file_path
        file_name = file_path.split('/')[-1]
        file.destination_path = f'{root_dir}/dataset/{dataset.id}' \
                                f'/batch/{batch_folder_name}/{file_name}'
    batch.set_details(batch_details)
    db.session.add(batch)
    db.session.commit()
    db.session.refresh(batch)
    # let the scheduler start importing the new batch
    scheduler.wakeup(data_batch_ids=[batch.id])
    return {'data': batch.to_dict()}
def get(self):
    """Lists templates; strips `config` from each to slim the payload."""
    query = WorkflowTemplate.query
    if 'group_alias' in request.args:
        query = query.filter_by(group_alias=request.args['group_alias'])
    if 'is_left' in request.args:
        flag = request.args.get(key='is_left', type=int)
        if flag is None:
            raise InvalidArgumentException('is_left must be 0 or 1')
        query = query.filter_by(is_left=flag)
    # remove config from dicts to reduce the size of the list
    payload = [
        _dic_without_key(t.to_dict(), 'config') for t in query.all()
    ]
    return {'data': payload}, HTTPStatus.OK
def _format_template_with_yaml_editor(template_proto, editor_info_proto):
    """Fills job yaml_templates from YAML-editor metadata, then validates.

    For each non-expert-mode job that has an entry in the editor info,
    the job's yaml_template is regenerated from its meta_yaml and
    slots. The resulting definition is validated as a whole.

    Raises:
        InvalidArgumentException: if the assembled workflow definition
            fails validation.
    """
    for job_def in template_proto.job_definitions:
        # if job is in editor_info, than use meta_yaml format with
        # slots instead of yaml_template
        yaml_editor_infos = editor_info_proto.yaml_editor_infos
        if not job_def.expert_mode and job_def.name in yaml_editor_infos:
            yaml_editor_info = yaml_editor_infos[job_def.name]
            job_def.yaml_template = generate_yaml_template(
                yaml_editor_info.meta_yaml, yaml_editor_info.slots)
    try:
        check_workflow_definition(template_proto)
    except ValueError as e:
        # chain the cause so the original validation error is kept,
        # consistent with the other handlers in this module
        raise InvalidArgumentException(
            details={'config.yaml_template': str(e)}) from e
    return template_proto
def post(self):
    """Creates a workflow template after validating config essentials."""
    parser = reqparse.RequestParser()
    parser.add_argument('name', required=True, help='name is empty')
    parser.add_argument('comment')
    parser.add_argument('config', type=dict, required=True,
                        help='config is empty')
    data = parser.parse_args()
    name = data['name']
    config = data['config']
    # TODO: format check
    # both keys are mandatory before the proto conversion
    for required_key in ('group_alias', 'is_left'):
        if required_key not in config:
            raise InvalidArgumentException(
                details={
                    f'config.{required_key}':
                        f'config.{required_key} is required'
                })
    if WorkflowTemplate.query.filter_by(name=name).first() is not None:
        raise ResourceConflictException(
            'Workflow template {} already exists'.format(name))
    # form to proto buffer
    template_proto = dict_to_workflow_definition(config)
    template = WorkflowTemplate(name=name,
                                comment=data['comment'],
                                group_alias=template_proto.group_alias,
                                is_left=template_proto.is_left)
    template.set_config(template_proto)
    db.session.add(template)
    db.session.commit()
    logging.info('Inserted a workflow_template to db')
    return {'data': template.to_dict()}, HTTPStatus.CREATED
def get_job_kibana(self, job_name, json_args):
    """Fetches Kibana info for a job from the peer over gRPC.

    Args:
        job_name: name of the job to query on the peer side.
        json_args: JSON-encoded query arguments forwarded verbatim.

    Returns:
        The raw GetJobKibana gRPC response.

    Raises:
        UnauthorizedException: peer rejected our auth info.
        InvalidArgumentException: peer rejected the arguments.
    """
    msg = service_pb2.GetJobKibanaRequest(auth_info=self._auth_info,
                                          job_name=job_name,
                                          json_args=json_args)
    response = self._client.GetJobKibana(request=msg,
                                         metadata=self._get_metadata(),
                                         timeout=Envs.GRPC_CLIENT_TIMEOUT)
    status = response.status
    if status.code != common_pb2.STATUS_SUCCESS:
        if status.code == common_pb2.STATUS_UNAUTHORIZED:
            raise UnauthorizedException(status.msg)
        if status.code == common_pb2.STATUS_INVALID_ARGUMENT:
            raise InvalidArgumentException(status.msg)
        # NOTE(review): other failure codes are only logged — at debug
        # level — and the response is still returned; confirm callers
        # inspect response.status themselves.
        logging.debug('get_job_kibana request error: %s',
                      response.status.msg)
    return response
def patch(self, project_id):
    """Partially updates a project and refreshes participant add-ons.

    Raises:
        NotFoundException: when the project does not exist.
        InvalidArgumentException: when committing the update fails.
    """
    project = Project.query.filter_by(id=project_id).first()
    if project is None:
        raise NotFoundException(f'Failed to find project: {project_id}')
    config = project.get_config()
    if request.json.get('token') is not None:
        # token lives both in the proto config and on the row itself
        new_token = request.json.get('token')
        config.token = new_token
        project.token = new_token
    if request.json.get('variables') is not None:
        # replace the variable list wholesale
        del config.variables[:]
        config.variables.extend([
            ParseDict(variable, Variable())
            for variable in request.json.get('variables')
        ])
    # exact configuration from variables
    grpc_ssl_server_host = None
    egress_host = None
    for variable in config.variables:
        if variable.name == 'GRPC_SSL_SERVER_HOST':
            grpc_ssl_server_host = variable.value
        if variable.name == 'EGRESS_HOST':
            egress_host = variable.value
    if request.json.get('participant_name'):
        config.participants[0].name = request.json.get('participant_name')
    if request.json.get('comment'):
        project.comment = request.json.get('comment')
    # re-create the add-on for every participant we hold a certificate
    # for, and point grpc at the egress host when one is configured
    for participant in config.participants:
        if participant.domain_name in\
                project.get_certificate().domain_name_to_cert.keys():
            _create_add_on(
                participant,
                project.get_certificate().domain_name_to_cert[
                    participant.domain_name], grpc_ssl_server_host)
        if egress_host:
            participant.grpc_spec.authority = egress_host
    project.set_config(config)
    try:
        db.session.commit()
    except Exception as e:
        raise InvalidArgumentException(details=e)
    return {'data': project.to_dict()}
def patch(self, workflow_id):
    """Updates a workflow's target_state and nudges the scheduler."""
    parser = reqparse.RequestParser()
    parser.add_argument('target_state', type=str, required=True,
                        help='target_state is empty')
    args = parser.parse_args()
    workflow = _get_workflow(workflow_id)
    try:
        workflow.update_target_state(WorkflowState[args['target_state']])
        db.session.commit()
        logging.info('updated workflow %d target_state to %s',
                     workflow.id, workflow.target_state)
        scheduler.wakeup(workflow.id)
    except ValueError as e:
        raise InvalidArgumentException(details=str(e)) from e
    return {'data': workflow.to_dict()}, HTTPStatus.OK
def patch(self, user_id):
    """Updates mutable attributes of a user.

    Admins may edit anyone; other roles may only edit themselves, and
    only the attributes whitelisted for their role.

    Raises:
        UnauthorizedException: non-admin editing another user.
        InvalidArgumentException: attribute not in the role's
            whitelist.
    """
    user = self._find_user(user_id)
    current_user = get_current_user()
    if current_user.role != Role.ADMIN and current_user.id != user_id:
        # fixed typo in the message: "infomation" -> "information"
        raise UnauthorizedException('user cannot modify others information')
    mutable_attrs = MUTABLE_ATTRS_MAPPER.get(current_user.role)
    data = request.get_json()
    for k, v in data.items():
        if k not in mutable_attrs:
            raise InvalidArgumentException(f'cannot edit {k} attribute!')
        if k == 'password':
            # never store the raw value; set_password hashes it
            user.set_password(v)
        else:
            setattr(user, k, v)
    db.session.commit()
    return {'data': user.to_dict()}, HTTPStatus.OK
def put(self, user_id):
    """Changes a user's password after verifying the old one.

    Any field other than new_password/old_password is rejected.
    """
    user = self._find_user(user_id)
    payload = request.get_json()
    new_password = payload.pop('new_password', None)
    # old_password is consumed only when a change is requested; if it
    # arrives alone it is flagged as an invalid field below
    old_password = payload.pop('old_password', None) \
        if new_password else None
    if payload:
        raise InvalidArgumentException(
            details={key: 'Invalid field' for key in payload})
    if new_password:
        if not user.verify_password(old_password):
            raise UnauthorizedException(message='Wrong old password')
        user.set_password(new_password)
        db.session.commit()
    return {'username': user.username}, HTTPStatus.OK
def patch(self, user_id):
    """Edits a user's mutable attributes.

    Passwords arrive base64-encoded, are format-checked, then hashed.
    """
    self._check_current_user(
        user_id, 'user cannot modify other user\'s information')
    user = self._find_user(user_id)
    mutable_attrs = MUTABLE_ATTRS_MAPPER.get(get_current_user().role)
    for k, value in request.get_json().items():
        if k not in mutable_attrs:
            raise InvalidArgumentException(f'cannot edit {k} attribute!')
        if k == 'password':
            # transport-encoded only; decode, validate, then hash
            plain = base64decode(value)
            check_password_format(plain)
            user.set_password(plain)
        else:
            setattr(user, k, value)
    db.session.commit()
    return {'data': user.to_dict()}, HTTPStatus.OK
def get(self):
    """Reads a tar archive of code and returns {path: file content}.

    Skips directories and macOS resource-fork entries ('._' in name).

    Raises:
        InvalidArgumentException: when the archive cannot be opened or
            read.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('code_path', type=str, location='args',
                        required=True, help='code_path is required')
    data = parser.parse_args()
    code_path = data['code_path']
    try:
        with tarfile.open(code_path) as tar:
            code_dict = {}
            for member in tar.getmembers():
                # extract once and reuse the file object; the original
                # called extractfile() twice per member
                fileobj = tar.extractfile(member)
                if fileobj is not None and member.isfile() \
                        and '._' not in member.name:
                    code_dict[member.name] = str(fileobj.read(),
                                                 encoding='utf-8')
            return {'data': code_dict}, HTTPStatus.OK
    except Exception as e:  # pylint: disable=broad-except
        logging.error(f'Get code, code_path: {code_path}, exception: {e}')
        raise InvalidArgumentException(details={'code_path': 'wrong path'})
def get(self):
    """Lists templates without heavy fields, with several filters."""
    preset_datajoin = request.args.get('from', '') == 'preset_datajoin'
    query = WorkflowTemplate.query
    if 'group_alias' in request.args:
        query = query.filter_by(group_alias=request.args['group_alias'])
    if 'is_left' in request.args:
        is_left = request.args.get(key='is_left', type=int)
        if is_left is None:
            raise InvalidArgumentException('is_left must be 0 or 1')
        query = query.filter_by(is_left=is_left)
    if preset_datajoin:
        query = query.filter_by(
            kind=WorkflowTemplateKind.PRESET_DATAJOIN.value)
    # remove config from dicts to reduce the size of the list
    slim = [
        _dic_without_key(t.to_dict(), ['config', 'editor_info'])
        for t in query.all()
    ]
    return {'data': slim}, HTTPStatus.OK
def post(self):
    """Creates a dataset row inside a scoped DB session."""
    parser = reqparse.RequestParser()
    parser.add_argument('name', required=True, type=str,
                        help=_FORMAT_ERROR_MESSAGE.format('name'))
    parser.add_argument('dataset_type', required=True, type=DatasetType,
                        help=_FORMAT_ERROR_MESSAGE.format('dataset_type'))
    parser.add_argument('comment', type=str)
    parser.add_argument('project_id', required=True, type=int,
                        help=_FORMAT_ERROR_MESSAGE.format('project_id'))
    body = parser.parse_args()
    dataset_name = body.get('name')
    with db.session_scope() as session:
        try:
            # Create dataset
            dataset = Dataset(
                name=dataset_name,
                dataset_type=body.get('dataset_type'),
                comment=body.get('comment'),
                path=_get_dataset_path(dataset_name),
                project_id=body.get('project_id'),
            )
            session.add(dataset)
            # TODO: scan cronjob
            session.commit()
            return {'data': dataset.to_dict()}
        except Exception as e:  # pylint: disable=broad-except
            session.rollback()
            raise InvalidArgumentException(details=str(e))
def patch(self, project_id):
    """Partially updates a project; re-creates add-ons with CUSTOM_HOST.

    Raises:
        NotFoundException: when the project does not exist.
        InvalidArgumentException: when committing the update fails.
    """
    project = Project.query.filter_by(id=project_id).first()
    if project is None:
        raise NotFoundException()
    config = project.get_config()
    if request.json.get('token') is not None:
        # token lives both in the proto config and on the row itself
        new_token = request.json.get('token')
        config.token = new_token
        project.token = new_token
    if request.json.get('variables') is not None:
        # replace the variable list wholesale
        del config.variables[:]
        config.variables.extend([
            ParseDict(variable, Variable())
            for variable in request.json.get('variables')
        ])
    # exact configuration from variables
    custom_host = None
    for variable in config.variables:
        if variable.name == 'CUSTOM_HOST':
            custom_host = variable.value
    project.set_config(config)
    if request.json.get('comment'):
        project.comment = request.json.get('comment')
    # refresh the add-on for each participant we hold a certificate for
    for participant in project.get_config().participants:
        if participant.domain_name in\
                project.get_certificate().domain_name_to_cert.keys():
            _create_add_on(
                participant,
                project.get_certificate().domain_name_to_cert[
                    participant.domain_name], custom_host)
    try:
        db.session.commit()
    except Exception as e:
        raise InvalidArgumentException(details=e)
    return {'data': project.to_dict()}
def patch(self, project_id):
    """Partially updates a project's token, variables, and comment."""
    project = Project.query.filter_by(id=project_id).first()
    if project is None:
        raise NotFoundException()
    config = project.get_config()
    body = request.json
    if body.get('token') is not None:
        # token lives both in the proto config and on the row itself
        refreshed_token = body.get('token')
        config.token = refreshed_token
        project.token = refreshed_token
    if body.get('variables') is not None:
        # replace the variable list wholesale
        del config.variables[:]
        config.variables.extend([
            ParseDict(variable, Variable())
            for variable in body.get('variables')
        ])
    project.set_config(config)
    if body.get('comment') is not None:
        project.comment = body.get('comment')
    try:
        db.session.commit()
    except Exception as e:  # pylint: disable=broad-except
        raise InvalidArgumentException(details=e)
    return {'data': project.to_dict()}
def post(self):
    """Creates a project together with its single participant.

    Validates the participant's fields and certificates, derives the
    gRPC authority from the participant's domain name, stores parsed
    certificates, and creates the add-on deployment.

    Raises:
        InvalidArgumentException: on name conflicts, malformed
            participants/certificates/config, or DB errors.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('name', required=True, type=str,
                        help=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                            'name', 'Empty'))
    parser.add_argument('config', required=True, type=dict,
                        help=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                            'config', 'Empty'))
    parser.add_argument('comment')
    data = parser.parse_args()
    name = data['name']
    config = data['config']
    comment = data['comment']
    if Project.query.filter_by(name=name).first() is not None:
        raise InvalidArgumentException(
            details=ErrorMessage.NAME_CONFLICT.value.format(name))
    if config.get('participants') is None:
        raise InvalidArgumentException(
            details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                'participants', 'Empty'))
    if len(config.get('participants')) != 1:
        # TODO: remove limit after operator supports multiple participants
        raise InvalidArgumentException(
            details='Currently not support multiple participants.')
    # exact configuration from variables
    # TODO: one custom host for one participant
    custom_host = None
    for variable in config.get('variables', []):
        if variable.get('name') == 'CUSTOM_HOST':
            custom_host = variable.get('value')
    # parse participant
    certificates = {}
    for participant in config.get('participants'):
        if 'name' not in participant.keys() or \
                'url' not in participant.keys() or \
                'domain_name' not in participant.keys():
            raise InvalidArgumentException(
                details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                    'participants', 'Participant must have name, '
                                    'domain_name and url.'))
        if re.match(_URL_REGEX, participant.get('url')) is None:
            raise InvalidArgumentException('URL pattern is wrong')
        domain_name = participant.get('domain_name')
        # Grpc spec
        # authority is derived by replacing the trailing 4 chars of the
        # domain name (its '.com' suffix) with the client-auth suffix
        participant['grpc_spec'] = {
            'authority': '{}-client-auth.com'.format(domain_name[:-4])
        }
        if participant.get('certificates') is not None:
            current_cert = parse_certificates(
                participant.get('certificates'))
            success, err = verify_certificates(current_cert)
            if not success:
                raise InvalidArgumentException(err)
            certificates[domain_name] = {'certs': current_cert}
        if 'certificates' in participant.keys():
            # raw certificates must not be persisted in the config
            participant.pop('certificates')
    new_project = Project()
    # generate token
    # If users send a token, then use it instead.
    # If `token` is None, generate a new one by uuid.
    config['name'] = name
    token = config.get('token', uuid4().hex)
    config['token'] = token
    # check format of config
    try:
        new_project.set_config(ParseDict(config, ProjectProto()))
    except Exception as e:
        raise InvalidArgumentException(
            details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                'config', e))
    new_project.set_certificate(
        ParseDict({'domain_name_to_cert': certificates},
                  CertificateStorage()))
    new_project.name = name
    new_project.token = token
    new_project.comment = comment
    # create add on
    for participant in new_project.get_config().participants:
        if participant.domain_name in\
                new_project.get_certificate().domain_name_to_cert.keys():
            _create_add_on(
                participant,
                new_project.get_certificate().domain_name_to_cert[
                    participant.domain_name], custom_host)
    try:
        new_project = db.session.merge(new_project)
        db.session.commit()
    except Exception as e:
        raise InvalidArgumentException(details=str(e))
    return {'data': new_project.to_dict()}
def _check_present(args, arg_name): if arg_name not in args or args[arg_name] is None: raise InvalidArgumentException( 'Missing required argument [{}].'.format(arg_name))
def patch(self, workflow_id):
    """Partially updates a workflow in one transaction.

    Supports updating forkable/metric_is_public flags, driving a
    target-state transition (with a yaml dry-run before RUNNING),
    invalidating the workflow, and replacing its config. Changes are
    flushed incrementally and committed once at the end.

    Raises:
        InvalidArgumentException: on invalid state/target_state/config.
        NoAccessException: when editing the config of a running
            workflow.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('target_state', type=str, required=False,
                        default=None, help='target_state is empty')
    parser.add_argument('state', type=str, required=False,
                        default=None, help='state is empty')
    parser.add_argument('forkable', type=bool)
    parser.add_argument('metric_is_public', type=bool)
    parser.add_argument('config', type=dict, required=False,
                        default=None, help='updated config')
    data = parser.parse_args()
    workflow = _get_workflow(workflow_id)
    forkable = data['forkable']
    if forkable is not None:
        workflow.forkable = forkable
        db.session.flush()
    metric_is_public = data['metric_is_public']
    if metric_is_public is not None:
        workflow.metric_is_public = metric_is_public
        db.session.flush()
    target_state = data['target_state']
    if target_state:
        try:
            if WorkflowState[target_state] == WorkflowState.RUNNING:
                # dry-run every owned job's yaml before starting
                for job in workflow.owned_jobs:
                    try:
                        generate_job_run_yaml(job)
                    # TODO: check if peer variables is valid
                    except RuntimeError as e:
                        raise ValueError(
                            f'Invalid Variable when try '
                            f'to format the job {job.name}:{str(e)}')
            workflow.update_target_state(WorkflowState[target_state])
            db.session.flush()
            logging.info('updated workflow %d target_state to %s',
                         workflow.id, workflow.target_state)
            scheduler.wakeup(workflow.id)
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    state = data['state']
    if state:
        try:
            # NOTE(review): a failing assert raises AssertionError,
            # which the `except ValueError` below does not catch —
            # confirm this is intended.
            assert state == 'INVALID', \
                'Can only set state to INVALID for invalidation'
            workflow.invalidate()
            db.session.flush()
            logging.info('invalidate workflow %d', workflow.id)
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    config = data['config']
    if config:
        try:
            # config may only change when the workflow is not running
            if workflow.target_state != WorkflowState.INVALID or \
                    workflow.state not in \
                    [WorkflowState.READY, WorkflowState.STOPPED]:
                raise NoAccessException('Cannot edit running workflow')
            config_proto = dict_to_workflow_definition(data['config'])
            workflow.set_config(config_proto)
            db.session.flush()
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    db.session.commit()
    return {'data': workflow.to_dict()}, HTTPStatus.OK
def post(self):
    """Creates a project with one participant and its k8s add-on.

    Raises:
        InvalidArgumentException: on name conflicts, malformed
            participants/certificates/config, add-on creation failure,
            or DB errors.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('name', required=True, type=str,
                        help=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                            'name', 'Empty'))
    parser.add_argument('config', required=True, type=dict,
                        help=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                            'config', 'Empty'))
    parser.add_argument('comment')
    data = parser.parse_args()
    name = data['name']
    config = data['config']
    comment = data['comment']
    if Project.query.filter_by(name=name).first() is not None:
        raise InvalidArgumentException(
            details=ErrorMessage.NAME_CONFLICT.value.format(name))
    if config.get('participants') is None:
        raise InvalidArgumentException(
            details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                'participants', 'Empty'))
    if len(config.get('participants')) != 1:
        # TODO: remove limit after operator supports multiple participants
        raise InvalidArgumentException(
            details='Currently not support multiple participants.')
    certificates = {}
    for participant in config.get('participants'):
        if 'name' not in participant.keys() or \
                'url' not in participant.keys() or \
                'domain_name' not in participant.keys():
            raise InvalidArgumentException(
                details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                    'participants', 'Participant must have name, '
                                    'domain_name and url.'))
        domain_name = participant.get('domain_name')
        if participant.get('certificates') is not None:
            current_cert = parse_certificates(
                participant.get('certificates'))
            # check validation
            for file_name in _CERTIFICATE_FILE_NAMES:
                if current_cert.get(file_name) is None:
                    raise InvalidArgumentException(
                        details=ErrorMessage.PARAM_FORMAT_ERROR.value.
                        format('certificates', '{} not existed'.format(
                            file_name)))
            certificates[domain_name] = {'certs': current_cert}
            # raw certificates must not be persisted in the config
            participant.pop('certificates')
    # create add on
    # NOTE(review): this block reuses `participant` and `current_cert`
    # left over from the loop above — safe only while exactly one
    # participant is allowed; revisit before lifting that limit.
    try:
        k8s_client = get_client()
        for domain_name, certificate in certificates.items():
            create_add_on(k8s_client, domain_name, participant.get('url'),
                          current_cert)
    except RuntimeError as e:
        raise InvalidArgumentException(details=str(e))
    new_project = Project()
    # generate token
    # If users send a token, then use it instead.
    # If `token` is None, generate a new one by uuid.
    config['name'] = name
    token = config.get('token', uuid4().hex)
    config['token'] = token
    # check format of config
    try:
        new_project.set_config(ParseDict(config, ProjectProto()))
    except Exception as e:
        raise InvalidArgumentException(
            details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                'config', e))
    new_project.set_certificate(
        ParseDict({'domain_name_to_cert': certificates},
                  CertificateStorage()))
    new_project.name = name
    new_project.token = token
    new_project.comment = comment
    try:
        new_project = db.session.merge(new_project)
        db.session.commit()
    except Exception as e:
        raise InvalidArgumentException(details=str(e))
    return {'data': new_project.to_dict()}
def post(self):
    """Creates a workflow (v2 API: uuid plus create_job_flags)."""
    parser = reqparse.RequestParser()
    parser.add_argument('name', required=True, help='name is empty')
    parser.add_argument('project_id', type=int, required=True,
                        help='project_id is empty')
    # TODO: should verify if the config is compatible with
    # workflow template
    parser.add_argument('config', type=dict, required=True,
                        help='config is empty')
    parser.add_argument('forkable', type=bool, required=True,
                        help='forkable is empty')
    parser.add_argument('forked_from', type=int, required=False,
                        help='fork from base workflow')
    parser.add_argument('create_job_flags', type=list, required=False,
                        location='json',
                        help='flags in common.CreateJobFlag')
    parser.add_argument('peer_create_job_flags', type=list,
                        required=False, location='json',
                        help='peer flags in common.CreateJobFlag')
    parser.add_argument('fork_proposal_config', type=dict,
                        required=False, help='fork and edit peer config')
    parser.add_argument('comment')
    data = parser.parse_args()
    name = data['name']
    # workflow names must be unique
    if Workflow.query.filter_by(name=name).first() is not None:
        raise ResourceConflictException(
            'Workflow {} already exists.'.format(name))
    # form to proto buffer
    template_proto = dict_to_workflow_definition(data['config'])
    workflow = Workflow(
        name=name,
        # 20 bytes
        # a DNS-1035 label must start with an
        # alphabetic character. substring uuid[:19] has
        # no collision in 10 million draws
        uuid=f'u{uuid4().hex[:19]}',
        comment=data['comment'],
        project_id=data['project_id'],
        forkable=data['forkable'],
        forked_from=data['forked_from'],
        state=WorkflowState.NEW,
        target_state=WorkflowState.READY,
        transaction_state=TransactionState.READY)
    workflow.set_create_job_flags(data['create_job_flags'])
    if workflow.forked_from is not None:
        fork_config = dict_to_workflow_definition(
            data['fork_proposal_config'])
        # TODO: more validations
        # a fork must mirror the base workflow's job structure
        if len(fork_config.job_definitions) != \
                len(template_proto.job_definitions):
            raise InvalidArgumentException(
                'Forked workflow\'s template does not match base workflow')
        workflow.set_fork_proposal_config(fork_config)
        # TODO: check that federated jobs have
        # same reuse policy on both sides
        workflow.set_peer_create_job_flags(data['peer_create_job_flags'])
    workflow.set_config(template_proto)
    db.session.add(workflow)
    db.session.commit()
    logging.info('Inserted a workflow to db')
    # wake the scheduler so the new workflow is processed promptly
    scheduler.wakeup(workflow.id)
    return {'data': workflow.to_dict()}, HTTPStatus.CREATED
def patch(self, workflow_id):
    """Partially updates a workflow (v2 API).

    Handles, in order: cronjob start/stop, forkable and
    metric_is_public flags, a target-state transition (with a yaml
    dry-run before RUNNING), invalidation, config replacement, and
    per-job create_job_flags. A single commit persists everything.

    Raises:
        InvalidArgumentException: on invalid inputs or state.
        NoAccessException: when editing the config of a running
            workflow.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('target_state', type=str, required=False,
                        default=None, help='target_state is empty')
    parser.add_argument('state', type=str, required=False,
                        help='state is empty')
    parser.add_argument('forkable', type=bool)
    parser.add_argument('metric_is_public', type=bool)
    parser.add_argument('config', type=dict, required=False,
                        help='updated config')
    parser.add_argument('create_job_flags', type=list, required=False,
                        location='json',
                        help='flags in common.CreateJobFlag')
    parser.add_argument('batch_update_interval', type=int,
                        required=False,
                        help='interval for restart workflow in minute')
    data = parser.parse_args()
    workflow = _get_workflow(workflow_id)
    # start workflow every interval time
    batch_update_interval = data['batch_update_interval']
    if batch_update_interval:
        start_or_stop_cronjob(batch_update_interval, workflow)
    forkable = data['forkable']
    if forkable is not None:
        workflow.forkable = forkable
        db.session.flush()
    metric_is_public = data['metric_is_public']
    if metric_is_public is not None:
        workflow.metric_is_public = metric_is_public
        db.session.flush()
    target_state = data['target_state']
    if target_state:
        try:
            if WorkflowState[target_state] == WorkflowState.RUNNING:
                # dry-run every owned job's yaml before starting
                for job in workflow.owned_jobs:
                    try:
                        generate_job_run_yaml(job)
                    # TODO: check if peer variables is valid
                    except Exception as e:  # pylint: disable=broad-except
                        raise ValueError(
                            f'Invalid Variable when try '
                            f'to format the job {job.name}:{str(e)}')
            workflow.update_target_state(WorkflowState[target_state])
            db.session.flush()
            logging.info('updated workflow %d target_state to %s',
                         workflow.id, workflow.target_state)
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    state = data['state']
    if state:
        try:
            # NOTE(review): a failing assert raises AssertionError,
            # which the `except ValueError` below does not catch —
            # confirm this is intended.
            assert state == 'INVALID', \
                'Can only set state to INVALID for invalidation'
            workflow.invalidate()
            db.session.flush()
            logging.info('invalidate workflow %d', workflow.id)
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    config = data['config']
    if config:
        try:
            # config may only change when the workflow is not running
            if workflow.target_state != WorkflowState.INVALID or \
                    workflow.state not in \
                    [WorkflowState.READY, WorkflowState.STOPPED]:
                raise NoAccessException('Cannot edit running workflow')
            config_proto = dict_to_workflow_definition(data['config'])
            workflow.set_config(config_proto)
            db.session.flush()
        except ValueError as e:
            raise InvalidArgumentException(details=str(e)) from e
    create_job_flags = data['create_job_flags']
    if create_job_flags:
        jobs = workflow.get_jobs()
        if len(create_job_flags) != len(jobs):
            raise InvalidArgumentException(
                details='Number of job defs does not match number '
                f'of create_job_flags {len(jobs)} '
                f'vs {len(create_job_flags)}')
        workflow.set_create_job_flags(create_job_flags)
        flags = workflow.get_create_job_flags()
        # only toggle jobs owned by this workflow
        for i, job in enumerate(jobs):
            if job.workflow_id == workflow.id:
                job.is_disabled = flags[i] == \
                    common_pb2.CreateJobFlag.DISABLED
    db.session.commit()
    scheduler.wakeup(workflow.id)
    return {'data': workflow.to_dict()}, HTTPStatus.OK