def test_cluster_updated(self, activitylogs_record, tracker_record):
    auditor.record(event_type=cluster_events.CLUSTER_UPDATED,
                   instance=self.cluster,
                   is_upgrade=True)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

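# A minimal sketch of the mock wiring these test methods rely on: each test
# receives `activitylogs_record` and `tracker_record` as mock.patch-injected
# mocks so it can assert how auditor.record fans an event out to the tracker
# and activity-log backends. The patch targets, base class, and event below
# are illustrative assumptions, not the project's actual paths.
from unittest.mock import patch


class AuditorEventsTest(BaseAuditorTest):  # hypothetical base class

    # mock.patch decorators inject mocks bottom-up: the bottom patch becomes
    # the first argument after `self`.
    @patch('tracker.service.TrackerService.record')
    @patch('activitylogs.service.ActivityLogService.record')
    def test_example_event(self, activitylogs_record, tracker_record):
        auditor.record(event_type=example_events.EXAMPLE_VIEWED,  # hypothetical event
                       instance=self.instance,
                       actor_id=1)
        assert tracker_record.call_count == 1
        assert activitylogs_record.call_count == 1
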
def test_notebook_stopped(self, activitylogs_record, tracker_record):
    auditor.record(event_type=notebook_events.NOTEBOOK_STOPPED,
                   instance=self.notebook,
                   target='project')
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def test_permission_cluster_denied(self, activitylogs_record, tracker_record):
    auditor.record(event_type=permission_events.PERMISSION_CLUSTER_DENIED,
                   actor_id=1,
                   event='some.event')
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_cluster_created(self, activitylogs_record, tracker_record):
    auditor.record(event_type=cluster_events.CLUSTER_CREATED,
                   instance=self.cluster,
                   namespace='test',
                   environment='test',
                   is_upgrade='test',
                   provisioner_enabled=False,
                   use_data_claim=False,
                   use_outputs_claim=False,
                   use_logs_claim=False,
                   use_repos_claim=False,
                   use_upload_claim=False,
                   node_selector_core_enabled=False,
                   node_selector_experiments_enabled=False,
                   cli_min_version='',
                   cli_latest_version='',
                   platform_min_version='',
                   platform_latest_version='',
                   chart_version='',
                   cpu=0,
                   memory=0,
                   gpu=0)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def test_user_gitlab(self, activitylogs_record, tracker_record):
    auditor.record(event_type=user_events.USER_GITLAB,
                   instance=self.user,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def create(self, request, *args, **kwargs):
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    instance = self.perform_create(serializer)
    auditor.record(event_type=EXPERIMENT_CREATED, instance=instance)
    headers = self.get_success_headers(serializer.data)
    return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

def finish_wizard(self):
    identity = self.provider.build_identity(self.state.data)

    defaults = {
        'valid': True,
        'scopes': identity.get('scopes', []),
        'data': identity.get('data', {}),
        'last_verified': timezone.now(),
    }
    user = self.get_or_create_user(identity=identity)
    _, created = SSOIdentity.objects.update_or_create(
        provider=self.provider.key,
        user=user,
        external_id=identity['id'],
        defaults=defaults,
    )
    if created:
        auditor.record(event_type=self.provider.event_type, instance=user)

    self.state.clear()
    response = HttpResponseRedirect('/')
    login_user(request=self.request, response=response, user=user, login=True)
    return response

def test_user_bitbucket(self, activitylogs_record, tracker_record):
    auditor.record(event_type=user_events.USER_BITBUCKET,
                   instance=self.user,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_experiment_group_finished(self, activitylogs_record, tracker_record):
    auditor.record(event_type=experiment_group_events.EXPERIMENT_GROUP_FINISHED,
                   instance=self.experiment_group,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def perform_create(self, serializer):
    project = serializer.validated_data['name']
    user = self.request.user
    if self.queryset.filter(user=user, name=project).count() > 0:
        raise ValidationError('A project with name `{}` already exists.'.format(project))
    instance = serializer.save(user=user)
    auditor.record(event_type=PROJECT_CREATED, instance=instance)

def test_experiment_group_resumed_triggered(self, activitylogs_record, tracker_record):
    auditor.record(event_type=experiment_group_events.EXPERIMENT_GROUP_RESUMED_TRIGGERED,
                   instance=self.experiment_group,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_tensorboard_failed(self, activitylogs_record, tracker_record):
    auditor.record(event_type=tensorboard_events.TENSORBOARD_FAILED,
                   instance=self.tensorboard,
                   target='project')
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

async def job_resources(request, ws, username, project_name, experiment_sequence, job_sequence):
    project = _get_project(username, project_name)
    if not has_project_permissions(request.app.user, project, 'GET'):
        raise exceptions.Forbidden("You don't have access to this project")
    experiment = _get_validated_experiment(project, experiment_sequence)
    job = _get_job(experiment, job_sequence)
    job_uuid = job.uuid.hex
    job_name = '{}.{}'.format(job.role, job.sequence)

    auditor.record(event_type=EXPERIMENT_JOB_RESOURCES_VIEWED,
                   instance=job,
                   actor_id=request.app.user.id)

    if not RedisToStream.is_monitored_job_resources(job_uuid=job_uuid):
        logger.info('Resources for job `%s` are now being monitored', job_name)
        RedisToStream.monitor_job_resources(job_uuid=job_uuid)

    if job_uuid in request.app.job_resources_ws_managers:
        ws_manager = request.app.job_resources_ws_managers[job_uuid]
    else:
        ws_manager = SocketManager()
        request.app.job_resources_ws_managers[job_uuid] = ws_manager

    def handle_job_disconnected_ws(ws):
        ws_manager.remove_sockets(ws)
        if not ws_manager.ws:
            logger.info('Stopping resources monitor for job %s', job_name)
            RedisToStream.remove_job_resources(job_uuid=job_uuid)
            request.app.job_resources_ws_managers.pop(job_uuid, None)
        logger.info('Quitting resources socket for job %s', job_name)

    ws_manager.add_socket(ws)
    should_check = 0
    while True:
        resources = RedisToStream.get_latest_job_resources(job=job_uuid, job_name=job_name)
        should_check += 1

        # After a couple of tries, check the status of the job
        if should_check > RESOURCES_CHECK:
            job.refresh_from_db()
            if job.is_done:
                logger.info('Removing all sockets because job `%s` is done', job_name)
                ws_manager.ws = set([])
                handle_job_disconnected_ws(ws)
                return
            else:
                should_check -= CHECK_DELAY

        if resources:
            try:
                await ws.send(resources)
            except ConnectionClosed:
                handle_job_disconnected_ws(ws)
                return

        # Just to check if the connection was closed
        if ws._connection_lost:  # pylint:disable=protected-access
            handle_job_disconnected_ws(ws)
            return

        await asyncio.sleep(SOCKET_SLEEP)

def test_notebook_new_status(self, activitylogs_record, tracker_record):
    auditor.record(event_type=notebook_events.NOTEBOOK_NEW_STATUS,
                   instance=self.notebook,
                   target='project')
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def test_experiment_triggered_copied(self, activitylogs_record, tracker_record):
    auditor.record(event_type=experiment_events.EXPERIMENT_COPIED_TRIGGERED,
                   instance=self.experiment,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_notebook_failed(self, activitylogs_record, tracker_record):
    auditor.record(event_type=notebook_events.NOTEBOOK_FAILED,
                   instance=self.notebook,
                   target='project')
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def test_experiment_viewed(self, activitylogs_record, tracker_record):
    auditor.record(event_type=experiment_events.EXPERIMENT_VIEWED,
                   instance=self.experiment,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_user_deleted(self, activitylogs_record, tracker_record):
    auditor.record(event_type=user_events.USER_DELETED,
                   instance=self.user,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_superuser_revoked(self, activitylogs_record, tracker_record):
    auditor.record(event_type=superuser_events.SUPERUSER_ROLE_REVOKED,
                   id=2,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def post(self, request, *args, **kwargs):
    obj = self.get_object()
    auditor.record(event_type=self.event_type,
                   instance=obj,
                   actor_id=self.request.user.id)

    description = None
    config = None
    declarations = None
    update_code_reference = False
    if 'config' in request.data:
        spec = validate_experiment_spec_config(
            [obj.specification.parsed_data, request.data['config']],
            raise_for_rest=True)
        config = spec.parsed_data
        declarations = spec.declarations
    if 'update_code' in request.data:
        try:
            update_code_reference = to_bool(request.data['update_code'])
        except TypeError:
            raise ValidationError('update_code should be a boolean')
    if 'description' in request.data:
        description = request.data['description']
    new_obj = self.clone(obj=obj,
                         config=config,
                         declarations=declarations,
                         update_code_reference=update_code_reference,
                         description=description)
    serializer = self.get_serializer(new_obj)
    return Response(status=status.HTTP_201_CREATED, data=serializer.data)

def test_project_experiment_groups_viewed(self, activitylogs_record, tracker_record):
    auditor.record(event_type=project_events.PROJECT_EXPERIMENT_GROUPS_VIEWED,
                   instance=self.project,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_notebook_succeeded(self, activitylogs_record, tracker_record):
    auditor.record(event_type=build_job_events.BUILD_JOB_SUCCEEDED,
                   instance=self.notebook,
                   target='project')
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def destroy(self, request, *args, **kwargs):
    instance = self.get_object()
    auditor.record(event_type=USER_DELETED,
                   instance=instance,
                   actor_id=self.request.user.id)
    self.perform_destroy(instance)
    return Response(status=status.HTTP_204_NO_CONTENT)

def test_project_viewed(self, activitylogs_record, tracker_record):
    auditor.record(event_type=project_events.PROJECT_VIEWED,
                   instance=self.project,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_project_deleted_triggered(self, activitylogs_record, tracker_record):
    auditor.record(event_type=project_events.PROJECT_DELETED_TRIGGERED,
                   instance=self.project,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_repo_downloaded(self, activitylogs_record, tracker_record):
    auditor.record(event_type=repo_events.REPO_DOWNLOADED,
                   instance=self.project,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_repo_new_commit(self, activitylogs_record, tracker_record):
    auditor.record(event_type=repo_events.REPO_NEW_COMMIT,
                   instance=self.project,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def test_project_set_private(self, activitylogs_record, tracker_record):
    auditor.record(event_type=project_events.PROJECT_SET_PRIVATE,
                   instance=self.project,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def test_notebook_started(self, activitylogs_record, tracker_record):
    auditor.record(event_type=build_job_events.BUILD_JOB_STARTED,
                   instance=self.notebook,
                   target='project')
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def test_repo_created(self, activitylogs_record, tracker_record):
    auditor.record(event_type=repo_events.REPO_CREATED,
                   instance=self.project,
                   actor_id=1)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 1

def audit(self, instance):
    auditor.record(event_type=NOTEBOOK_VIEWED,
                   instance=instance.notebook,
                   target='project',
                   actor_id=self.request.user.id,
                   actor_name=self.request.user.username)

def experiment_post_delete(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=EXPERIMENT_DELETED, instance=instance)

def start_dockerizer(build_job):
    # Update the job status to show that it's started
    build_job.set_status(JobLifeCycle.SCHEDULED)

    spawner_class = get_spawner_class(build_job.backend)
    local_build = build_job.backend in {BuildBackend.NATIVE, None}
    spawner = spawner_class(
        project_name=build_job.project.unique_name,
        project_uuid=build_job.project.uuid.hex,
        job_name=build_job.unique_name,
        job_uuid=build_job.uuid.hex,
        commit=build_job.commit,
        from_image=build_job.build_image,
        dockerfile_path=build_job.build_dockerfile,
        context_path=build_job.build_context,
        image_tag=build_job.uuid.hex,
        image_name=get_image_name(build_job, local=local_build),
        build_steps=build_job.build_steps,
        env_vars=build_job.build_env_vars,
        nocache=build_job.build_nocache,
        in_cluster_registry=conf.get('REGISTRY_IN_CLUSTER'),
        spec=build_job.specification,
        k8s_config=conf.get('K8S_CONFIG'),
        namespace=conf.get('K8S_NAMESPACE'),
        in_cluster=True,
        use_sidecar=True)

    error = {}
    try:
        results = spawner.start_dockerizer(resources=build_job.resources,
                                           node_selector=build_job.node_selector,
                                           affinity=build_job.affinity,
                                           tolerations=build_job.tolerations)
        auditor.record(event_type=BUILD_JOB_STARTED, instance=build_job)
        build_job.definition = get_job_definition(results)
        build_job.save(update_fields=['definition'])
        return True
    except ApiException:
        _logger.error('Could not start build job, please check your polyaxon spec',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered a Kubernetes ApiException.'
        }
    except VolumeNotFoundError as e:
        _logger.error('Could not start build job, please check your volume definitions.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered a volume definition problem. %s' % e
        }
    except Exception as e:
        _logger.error('Could not start build job, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered an {} exception.'.format(
                e.__class__.__name__)
        }
    finally:
        if error.get('raised'):
            build_job.set_status(JobLifeCycle.FAILED,
                                 message=error.get('message'),
                                 traceback=error.get('traceback'))

def filter_queryset(self, queryset):
    project = get_permissible_project(view=self)
    auditor.record(event_type=PROJECT_BUILDS_VIEWED,
                   instance=project,
                   actor_id=self.request.user.id)
    return queryset.filter(project=project)

async def experiment_job_resources(request, ws, username, project_name, experiment_id, job_id):
    job, _, message = validate_experiment_job(request=request,
                                              username=username,
                                              project_name=project_name,
                                              experiment_id=experiment_id,
                                              job_id=job_id)
    if job is None:
        await ws.send(get_error_message(message))
        return

    job_uuid = job.uuid.hex
    job_name = '{}.{}'.format(job.role, job.id)

    auditor.record(event_type=EXPERIMENT_JOB_RESOURCES_VIEWED,
                   instance=job,
                   actor_id=request.app.user.id,
                   actor_name=request.app.user.username)

    if not RedisToStream.is_monitored_job_resources(job_uuid=job_uuid):
        logger.info('Resources for job `%s` are now being monitored', job_name)
        RedisToStream.monitor_job_resources(job_uuid=job_uuid)

    if job_uuid in request.app.job_resources_ws_managers:
        ws_manager = request.app.job_resources_ws_managers[job_uuid]
    else:
        ws_manager = SocketManager()
        request.app.job_resources_ws_managers[job_uuid] = ws_manager

    def handle_job_disconnected_ws(ws):
        ws_manager.remove_sockets(ws)
        if not ws_manager.ws:
            logger.info('Stopping resources monitor for job %s', job_name)
            RedisToStream.remove_job_resources(job_uuid=job_uuid)
            request.app.job_resources_ws_managers.pop(job_uuid, None)
        logger.info('Quitting resources socket for job %s', job_name)

    ws_manager.add_socket(ws)
    should_check = 0
    while True:
        resources = RedisToStream.get_latest_job_resources(job=job_uuid, job_name=job_name)
        should_check += 1

        # After a couple of tries, check the status of the job
        if should_check > RESOURCES_CHECK:
            job.refresh_from_db()
            if job.is_done:
                logger.info('Removing all sockets because job `%s` is done', job_name)
                ws_manager.ws = set([])
                handle_job_disconnected_ws(ws)
                return
            else:
                should_check -= CHECK_DELAY

        if resources:
            try:
                await ws.send(resources)
            except ConnectionClosed:
                handle_job_disconnected_ws(ws)
                return

        # Just to check if the connection was closed
        if ws._connection_lost:  # pylint:disable=protected-access
            handle_job_disconnected_ws(ws)
            return

        await asyncio.sleep(SOCKET_SLEEP)

def test_experiment_group_bo(self, activitylogs_record, tracker_record):
    auditor.record(event_type=experiment_group_events.EXPERIMENT_GROUP_BO,
                   instance=self.experiment_group)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def test_build_job_stopped(self, activitylogs_record, tracker_record):
    auditor.record(event_type=build_job_events.BUILD_JOB_STOPPED,
                   instance=self.build_job)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def project_post_deleted(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=PROJECT_DELETED, instance=instance)

def node_gpu_created(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=CLUSTER_NODE_GPU, instance=instance)

def experiment_group_post_delete(sender, **kwargs):
    """Delete all group outputs."""
    instance = kwargs['instance']
    auditor.record(event_type=EXPERIMENT_GROUP_DELETED, instance=instance)
    remove_bookmarks(object_id=instance.id, content_type='experimentgroup')

def job_post_delete(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=JOB_DELETED, instance=instance)
    remove_bookmarks(object_id=instance.id, content_type='job')

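# A minimal sketch of how post-delete handlers like `job_post_delete` above
# are typically registered with Django's signal framework; `Job` stands in
# for the project's model class and the dispatch_uid is illustrative.
from django.db.models.signals import post_delete
from django.dispatch import receiver


@receiver(post_delete, sender=Job, dispatch_uid='job_post_delete')
def job_post_delete(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=JOB_DELETED, instance=instance)
    remove_bookmarks(object_id=instance.id, content_type='job')

# The same handler can instead be connected explicitly, e.g. from an
# AppConfig.ready(), which keeps registration order explicit:
# post_delete.connect(job_post_delete, sender=Job, dispatch_uid='job_post_delete')
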
def perform_destroy(self, instance):
    auditor.record(event_type=SEARCH_DELETED, instance=instance)
    super().perform_destroy(instance)

def perform_create(self, serializer):
    instance = serializer.save(user=self.request.user,
                               content_type=self.content_type,
                               project=self.project)
    auditor.record(event_type=SEARCH_CREATED, instance=instance)

async def build_logs(request,  # pylint:disable=too-many-branches
                     ws,
                     username,
                     project_name,
                     build_id):
    from streams.consumers.consumers import Consumer

    job, message = validate_build(request=request,
                                  username=username,
                                  project_name=project_name,
                                  build_id=build_id)
    if job is None:
        await ws.send(get_error_message(message))
        return

    job_uuid = job.uuid.hex

    auditor.record(event_type=BUILD_JOB_LOGS_VIEWED,
                   instance=job,
                   actor_id=request.app.user.id,
                   actor_name=request.app.user.username)

    if not RedisToStream.is_monitored_job_logs(job_uuid=job_uuid):
        logger.info('Logs for job uuid `%s` are now being monitored', job_uuid)
        RedisToStream.monitor_job_logs(job_uuid=job_uuid)

    # start consumer
    if job_uuid in request.app.job_logs_consumers:
        consumer = request.app.job_logs_consumers[job_uuid]
    else:
        logger.info('Add job log consumer for %s', job_uuid)
        consumer = Consumer(
            routing_key='{}.{}'.format(RoutingKeys.STREAM_LOGS_SIDECARS_BUILDS, job_uuid),
            queue='{}.{}'.format(CeleryQueues.STREAM_LOGS_SIDECARS, job_uuid))
        request.app.job_logs_consumers[job_uuid] = consumer
        consumer.run()

    def should_disconnect():
        if not consumer.ws:
            logger.info('Stopping logs monitor for job uuid %s', job_uuid)
            RedisToStream.remove_job_logs(job_uuid=job_uuid)
            # if job_uuid in request.app.job_logs_consumers:
            #     consumer = request.app.job_logs_consumers.pop(job_uuid, None)
            #     if consumer:
            #         consumer.stop()
            return True
        return False

    # add socket manager
    consumer.add_socket(ws)
    should_quit = False
    num_message_retries = 0

    # Stream phase changes
    status = None
    while status != JobLifeCycle.RUNNING and not JobLifeCycle.is_done(status):
        job.refresh_from_db()
        if status != job.last_status:
            status = job.last_status
            await notify(ws_manager=consumer, message=get_status_message(status))
            if should_disconnect():
                return
        await asyncio.sleep(SOCKET_SLEEP)

    if JobLifeCycle.is_done(status):
        await notify(ws_manager=consumer, message=get_status_message(status))
        RedisToStream.remove_job_logs(job_uuid=job_uuid)
        return

    while True:
        num_message_retries += 1
        for message in consumer.get_messages():
            num_message_retries = 0
            await notify(ws_manager=consumer, message=message)

        # After trying a couple of times, check the status of the job
        if num_message_retries > MAX_RETRIES:
            job.refresh_from_db()
            if job.is_done:
                logger.info('Removing all sockets because job `%s` is done', job_uuid)
                consumer.ws = set([])
            else:
                num_message_retries -= CHECK_DELAY

        # Just to check if the connection was closed
        if ws._connection_lost:  # pylint:disable=protected-access
            logger.info('Quitting logs socket for job uuid %s', job_uuid)
            consumer.remove_sockets({ws, })
            should_quit = True

        if should_disconnect():
            should_quit = True

        if should_quit:
            return

        await asyncio.sleep(SOCKET_SLEEP)

def test_build_job_new_status(self, activitylogs_record, tracker_record):
    auditor.record(event_type=build_job_events.BUILD_JOB_NEW_STATUS,
                   instance=self.build_job)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def perform_create(self, serializer):
    instance = serializer.save(user=self.request.user,
                               project=get_permissible_project(view=self))
    auditor.record(event_type=EXPERIMENT_CREATED, instance=instance)

def project_post_deleted(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=PROJECT_DELETED, instance=instance)
    remove_bookmarks(object_id=instance.id, content_type='project')

def get(self, request, *args, **kwargs):
    response = super().get(request, *args, **kwargs)
    auditor.record(event_type=EXPERIMENT_JOB_STATUSES_VIEWED,
                   instance=self.job,
                   actor_id=request.user.id)
    return response

def test_project_deleted(self, activitylogs_record, tracker_record):
    auditor.record(event_type=project_events.PROJECT_DELETED,
                   instance=self.project)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

def tensorboard_job_post_delete(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=TENSORBOARD_DELETED, instance=instance)
    remove_bookmarks(object_id=instance.id, content_type='tensorboardjob')

def filter_queryset(self, queryset):
    auditor.record(event_type=PROJECT_JOBS_VIEWED,
                   instance=self.project,
                   actor_id=self.request.user.id,
                   actor_name=self.request.user.username)
    return super().filter_queryset(queryset=queryset)

def get(self, request, *args, **kwargs):
    response = super(BuildStatusListView, self).get(request, *args, **kwargs)
    auditor.record(event_type=BUILD_JOB_STATUSES_VIEWED,
                   instance=self.job,
                   actor_id=request.user.id)
    return response

def notebook_job_pre_delete(sender, **kwargs):
    job = kwargs['instance']
    auditor.record(event_type=NOTEBOOK_CLEANED_TRIGGERED, instance=job)

def tensorboard_job_pre_delete(sender, **kwargs):
    job = kwargs['instance']
    auditor.record(event_type=TENSORBOARD_CLEANED_TRIGGERED, instance=job)

def perform_update(self, serializer):
    instance = serializer.save()
    auditor.record(event_type=self.delete_event,
                   instance=instance,
                   actor_id=self.request.user.id)

def experiment_post_delete(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=EXPERIMENT_DELETED, instance=instance)
    remove_bookmarks(object_id=instance.id, content_type='experiment')

def get_object(self):
    project = get_permissible_project(view=self)
    repo = get_object_or_404(Repo, project=project)
    if not is_internal_user(self.request.user):
        auditor.record(event_type=REPO_DOWNLOADED,
                       instance=repo,
                       actor_id=self.request.user.id)
    return repo

def start_dockerizer(build_job):
    # Update the job status to show that it's started
    build_job.set_status(JobLifeCycle.SCHEDULED)

    spawner_class = get_spawner_class(build_job.backend)

    try:
        registry_spec = get_registry_context(build_backend=build_job.backend)
    except ContainerRegistryError:
        build_job.set_status(
            JobLifeCycle.FAILED,
            message='Could not start the dockerizer job, '
                    'please check your registry configuration.')
        return

    spawner = spawner_class(
        project_name=build_job.project.unique_name,
        project_uuid=build_job.project.uuid.hex,
        job_name=build_job.unique_name,
        job_uuid=build_job.uuid.hex,
        commit=build_job.commit,
        from_image=build_job.build_image,
        dockerfile_path=build_job.build_dockerfile,
        context_path=build_job.build_context,
        image_tag=build_job.uuid.hex,
        image_name=get_image_name(build_job=build_job,
                                  registry_host=registry_spec.host),
        build_steps=build_job.build_steps,
        env_vars=build_job.build_env_vars,
        lang_env=build_job.build_lang_env,
        nocache=build_job.build_nocache,
        insecure=registry_spec.insecure,
        creds_secret_ref=registry_spec.secret,
        creds_secret_items=registry_spec.secret_items,
        k8s_config=conf.get(K8S_CONFIG),
        namespace=conf.get(K8S_NAMESPACE),
        in_cluster=True,
        use_sidecar=True,
        log_level=build_job.specification.log_level)

    error = {}
    try:
        results = spawner.start_dockerizer(
            secret_refs=build_job.secret_refs,
            config_map_refs=build_job.config_map_refs,
            resources=build_job.resources,
            labels=build_job.labels,
            annotations=build_job.annotations,
            node_selector=build_job.node_selector,
            affinity=build_job.affinity,
            tolerations=build_job.tolerations,
            max_restarts=get_max_restart(build_job.max_restarts,
                                         conf.get(MAX_RESTARTS_BUILD_JOBS)),
            reconcile_url=get_build_reconcile_url(build_job.unique_name))
        auditor.record(event_type=BUILD_JOB_STARTED, instance=build_job)
        build_job.definition = get_job_definition(results)
        build_job.save(update_fields=['definition'])
        return True
    except ApiException:
        _logger.error('Could not start build job, please check your polyaxon spec',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered a Kubernetes ApiException.'
        }
    except StoreNotFoundError as e:
        _logger.error('Could not start build job, please check your volume definitions.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered a volume definition problem. %s' % e
        }
    except Exception as e:
        _logger.error('Could not start build job, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered an {} exception.'.format(
                e.__class__.__name__)
        }
    finally:
        if error.get('raised'):
            build_job.set_status(JobLifeCycle.FAILED,
                                 message=error.get('message'),
                                 traceback=error.get('traceback'))

def job_post_delete(sender, **kwargs):
    instance = kwargs['instance']
    auditor.record(event_type=JOB_DELETED, instance=instance)

def test_experiment_copied(self, activitylogs_record, tracker_record):
    auditor.record(event_type=experiment_events.EXPERIMENT_COPIED,
                   instance=self.experiment)
    assert tracker_record.call_count == 1
    assert activitylogs_record.call_count == 0

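# A minimal, hypothetical sketch of the fan-out pattern all of these snippets
# exercise: auditor.record forwards one event to every backend subscribed to
# that event type, which is why some events above hit both the tracker and
# activitylogs (actor-driven events) while others hit only the tracker. This
# illustrates the idea; it is not the project's actual implementation.
_subscriptions = {}  # event_type -> list of backend record callables


def subscribe(event_type, backend_record):
    _subscriptions.setdefault(event_type, []).append(backend_record)


def record(event_type, instance=None, **event_data):
    # Forward the event to every backend subscribed to this event type.
    for backend_record in _subscriptions.get(event_type, []):
        backend_record(event_type=event_type, instance=instance, **event_data)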