def serialize_trainer_info_default(self, session, user_id):

    user_to_job_serialized = None
    user_to_job = User_To_Job.get_single_by_ids(
        session=session,
        user_id=user_id,
        job_id=self.id)
    if user_to_job:
        user_to_job_serialized = user_to_job.serialize_trainer_info_default()

    guide = None
    if self.guide_default_id:
        guide = self.guide_default.serialize_for_trainer()

    default_userscript = None
    if self.default_userscript:
        default_userscript = self.default_userscript.serialize()

    return {
        'id': self.id,
        'name': self.name,
        'type': self.type,
        'share_type': self.share_type,
        'user_to_job': user_to_job_serialized,
        'guide': guide,
        'default_userscript': default_userscript
    }
def attach_user_to_job(self, session, user, add_to_session: bool = False):

    user_to_job = User_To_Job(job=self, user=user)
    if add_to_session:
        session.add(user_to_job)

    return user_to_job
def check_existing_user_relationship(self, session, user_id: int):

    existing_user_to_job = User_To_Job.get_single_by_ids(
        session=session,
        user_id=user_id,
        job_id=self.id)

    if self.repeatable is not True:
        if existing_user_to_job:
            return True

    if self.repeatable is True:
        # Even if the job can be repeated (eg an exam),
        # we don't want two of the same jobs happening at once.
        # Guard against there being no existing relationship at all.
        if existing_user_to_job and existing_user_to_job.status in ['active', 'pending']:
            return True

    return False
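
# A minimal usage sketch (not from the original source) showing how the
# check above might gate attaching a user to a job. `job`, `user`, and `log`
# are assumed to exist in the calling scope; the log key is illustrative.
def add_user_if_allowed(session, job, user, log: dict):
    # Refuse when a conflicting relationship already exists,
    # per check_existing_user_relationship() above.
    if job.check_existing_user_relationship(session=session, user_id=user.id):
        log['error']['user_to_job'] = "User already has an active or pending relationship to this job."
        return None, log

    # Otherwise create the link; add_to_session=True persists it
    # with the surrounding session scope.
    user_to_job = job.attach_user_to_job(
        session=session,
        user=user,
        add_to_session=True)
    return user_to_job, log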
def job_cancel_core(session, user, log, mode, job_id):
    """
    QUESTION: add an option to "hide" a job as well?
    What about a super admin option to actually delete (ie for database clean up)?

    Arguments
        session, db session object
        user, class User object
        job, class Job object
        log, diffgram regular log dict

    Returns
        (result, log)
    """

    job = Job.get_by_id(session=session, job_id=job_id)

    if user is None or job is None:
        log['error']['user_job'] = "No user or job"
        return False, log

    # JOB LIMITS
    result, log = job_cancel_limits(session, log, user, job, mode)
    if result is False:
        return result, log

    # TASK specific limits
    # The difference is that a job may have tasks that
    # aren't cancelable.

    status_list = None

    if mode in ["cancel"]:
        status_list = ["created", "available", "active"]

    if mode in ["delete"]:
        # Don't allow even a super admin to delete completed
        # tasks from this method?
        # QUESTION
        # For that matter, should a "completed" job even be allowed to be deleted?
        status_list = ["draft", "created", "available", "active"]

    # TODO disallow deleting jobs that have
    # any completed tasks / transactions

    if status_list:
        # Open question: is there really any point in doing this
        # if the job was cancelled?
        # Maybe for deleting, but for a plain status change it's unclear.
        task_list = job.task_list(
            session=session,
            status_list=status_list)

        for task in task_list:
            if mode == "cancel":
                session.add(task)
                task.status = "cancelled"
            if mode == "delete":
                session.delete(task)

    if mode == "archive":
        # We may want to rename "hidden" to archived?
        session.add(job)
        job.status = 'archived'
        job.hidden = True
        job.member_updated = user.member

        # Assume we want to remove sync dirs on archive;
        # we might revisit this if that is not the case.
        job_dir_sync_manager = job_dir_sync_utils.JobDirectorySyncManager(
            job=job,
            session=session,
            log=log)
        job_dir_sync_manager.remove_job_from_all_dirs()

    if mode == "cancel":
        session.add(job)
        job.status = "cancelled"
        job.member_updated = user.member

    if mode == "delete":
        """
        Question: is there a better way to do this with CASCADE / SQL rules?
        (A hypothetical cascade sketch follows this function.)
        It feels a bit awkward to do it this way, BUT we also want to be careful
        since there is so much reuse!
        ie we wouldn't want to delete a guide that was attached to a job on cascade.
        """
        # What about a job's directory?
        # TODO what about deleting associated credential links / other tables?

        user_to_job = User_To_Job.get_single_by_ids(
            session=session,
            user_id=user.id,
            job_id=job.id)

        task_list = job.task_list(session)

        for task in task_list:

            if task.file.type == "video":
                # Is this the right way to delete stuff here?
                video_frame_query = WorkingDirFileLink.image_file_list_from_video(
                    session=session,
                    video_parent_file_id=task.file.id,
                    return_mode="query")
                # Not working yet!
                video_frame_query.delete()

            session.delete(task)
            session.delete(task.file)

        # TODO still getting an integrity error.
        # Must be some file that exists related to this job?
        # Or some other file that got updated incorrectly?

        job_dir_sync_manager = job_dir_sync_utils.JobDirectorySyncManager(
            job=job,
            session=session,
            log=log)
        job_dir_sync_manager.remove_job_from_all_dirs(soft_delete=False)

        session.delete(job)

        # Guard: the calling user may not have a user_to_job row.
        if user_to_job:
            session.delete(user_to_job)

    return True, log
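
# Responding to the CASCADE question in job_cancel_core(): a minimal,
# hypothetical sketch (not the project's actual models) of how ORM-level
# cascades could cover owned children (tasks) while deliberately leaving
# shared objects (eg guides) alone, so a shared guide survives a job delete.
# Assumes SQLAlchemy 1.4+ for the declarative_base import location.
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class ExampleJob(Base):
    __tablename__ = 'example_job'
    id = Column(Integer, primary_key=True)

    # "all, delete-orphan" deletes a job's tasks when the job is deleted ...
    tasks = relationship(
        "ExampleTask",
        cascade="all, delete-orphan",
        back_populates="job")

    # ... while a plain foreign key to a guide does not cascade,
    # so the guide itself is never removed with the job.
    guide_default_id = Column(Integer, ForeignKey('example_guide.id'), nullable=True)

class ExampleTask(Base):
    __tablename__ = 'example_task'
    id = Column(Integer, primary_key=True)
    job_id = Column(Integer, ForeignKey('example_job.id'))
    job = relationship("ExampleJob", back_populates="tasks")

class ExampleGuide(Base):
    __tablename__ = 'example_guide'
    id = Column(Integer, primary_key=True)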
def serialize_builder_info_default(self, session, user=None):
    # TODO share this with the trainer info function

    user_to_job_serialized = None
    if user:
        user_to_job = User_To_Job.get_single_by_ids(
            session=session,
            user_id=user.id,
            job_id=self.id)
        if user_to_job:
            user_to_job_serialized = user_to_job.serialize_trainer_info_default()

    percent_completed = 0
    tasks_remaining = 0
    if self.stat_count_tasks:
        percent_completed = (self.stat_count_complete / self.stat_count_tasks) * 100
        tasks_remaining = self.stat_count_tasks - self.stat_count_complete

    external_mappings = ExternalMap.get(
        session=session,
        job_id=self.id,
        diffgram_class_string='task_template',
        return_kind='all')

    member_list_ids = None
    if session:
        member_list_ids = self.get_with_cache(
            cache_key='member_list_ids',
            cache_miss_function=self.regenerate_member_list_ids,
            session=session,
            miss_function_args={'session': session})

    external_mappings_serialized = [x.serialize() for x in external_mappings]

    default_userscript = None
    if self.default_userscript:
        default_userscript = self.default_userscript.serialize()

    return {
        'id': self.id,
        'name': self.name,
        'type': self.type,
        'share_type': self.share_type,
        'member_list_ids': member_list_ids,
        'status': self.status,
        'time_created': self.time_created,
        'time_completed': self.time_completed,
        'user_to_job': user_to_job_serialized,
        'attached_directories_dict': self.get_with_cache(
            cache_key='attached_directories_dict',
            cache_miss_function=self.get_attached_dirs_serialized,
            session=session,
            miss_function_args={'session': session}),
        'external_mappings': external_mappings_serialized,
        'file_count_statistic': self.file_count_statistic,
        'stat_count_tasks': self.stat_count_tasks,
        'stat_count_complete': self.stat_count_complete,
        'percent_completed': percent_completed,
        'tasks_remaining': tasks_remaining,
        'is_live': self.is_live,
        'pending_initial_dir_sync': self.pending_initial_dir_sync,
        'interface_connection': self.interface_connection.serialize() if self.interface_connection else None,
        # For now the SDK uses the /info path,
        # so if we want to expose this stuff we need it there too.
        # Maybe something to review in the future.
        'file_count': self.file_count,
        'launch_datetime': self.launch_datetime,
        'launch_datetime_deferred': self.launch_datetime_deferred,
        'launch_attempt_log': self.launch_attempt_log,
        'waiting_to_be_launched': self.waiting_to_be_launched,
        'interface_connection_id': self.interface_connection_id,
        # We want the label dict here,
        # ie to show the label information (not just ids).
        'label_dict': self.label_dict,
        'completion_directory_id': self.completion_directory_id,
        'output_dir_action': self.output_dir_action,
        'pro_network': self.pro_network,
        'default_userscript': default_userscript
    }
def regenerate_member_list_ids(self, session):
    member_list_ids = User_To_Job.list(
        session=session,
        job=self,
        serialize=True)
    return member_list_ids
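
# The serializers above rely on get_with_cache() / set_cache_by_key(),
# which are defined elsewhere in the model. A minimal sketch of the assumed
# contract, illustrative only; the `_cache_dict` attribute is an assumption,
# not the actual implementation, and `session` is unused in this simplified form.
def get_with_cache(self, cache_key, cache_miss_function, session, miss_function_args: dict = None):
    if not hasattr(self, '_cache_dict'):
        self._cache_dict = {}
    # Serve from the per-instance cache when present ...
    if cache_key in self._cache_dict:
        return self._cache_dict[cache_key]
    # ... otherwise recompute via the miss function
    # (eg regenerate_member_list_ids above) and remember the result.
    value = cache_miss_function(**(miss_function_args or {}))
    self._cache_dict[cache_key] = value
    return value

def set_cache_by_key(self, cache_key, value):
    if not hasattr(self, '_cache_dict'):
        self._cache_dict = {}
    self._cache_dict[cache_key] = value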
def update_member_list(self,
                       member_list_ids: list,
                       session,
                       log: dict,
                       add_to_session: bool = True):
    # TODO this could be a more generic pattern: a main id is given a list of ids
    # and handles updating the attachments (a sketch of that pattern follows this function).
    # TODO abstract more functions here...
    # TODO optimize by caching / checking existing_member_list_ids

    # An empty list is OK because that indicates clearing all.
    if member_list_ids is None:
        log['info']['member_list_ids'] = 'Provide member list IDs'
        return log

    log['info']['update_user_list'] = {}
    log['info']['update_member_list'] = {}

    user_list = []
    user_added_id_list = []

    if 'all' in member_list_ids:
        user_list = self.project.users
    else:
        for member_id in member_list_ids:
            user = User.get_by_member_id(session=session, member_id=member_id)
            if not user:
                log['error']['update_member_list'] = {}
                log['error']['update_member_list'][member_id] = "Invalid member_id " + str(member_id)
                return log
            # Collect the validated user so it is attached below.
            user_list.append(user)

    for user in user_list:
        user_added_id_list.append(user.id)
        existing_user_to_job = User_To_Job.get_single_by_ids(
            session=session,
            user_id=user.id,
            job_id=self.id)

        if existing_user_to_job:
            # Add the user back into the job
            if existing_user_to_job.status == 'removed':
                existing_user_to_job.status = 'active'
                log['info']['update_member_list'][user.member_id] = "Added"
                if add_to_session is True:
                    session.add(existing_user_to_job)
            else:
                log['info']['update_member_list'][user.member_id] = "Unchanged."
            continue

        user_to_job = self.attach_user_to_job(
            session=session,
            user=user,
            add_to_session=add_to_session)
        log['info']['update_member_list'][user.member_id] = "Added"

    remaining_user_to_job_list = User_To_Job.list(
        session=session,
        job=self,  # scope to this job so other jobs' links are untouched
        user_id_ignore_list=user_added_id_list)  # careful, user_id not member_id

    for user_to_job in remaining_user_to_job_list:
        if user_to_job.status != 'removed':
            user_to_job.status = 'removed'
            if add_to_session is True:
                session.add(user_to_job)
            # TODO this should be uniform; it's not right now.
            # This is update_user_list but we need to add member_id to user_to_job.
            # It sounds like this needed to be member_list for the current tests, so leaving it for now.
            log['info']['update_member_list'][user_to_job.user_id] = "Removed"

    self.set_cache_by_key(cache_key='member_list_ids', value=member_list_ids)

    if add_to_session is True:
        session.add(self)

    return log
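
# The TODO at the top of update_member_list() suggests a more generic
# "sync a list of ids" pattern. A minimal, hypothetical sketch of that pattern
# (names are illustrative, not existing helpers): compute the add / remove /
# unchanged sets once, then let the caller persist them.
def diff_id_lists(desired_ids: list, existing_ids: list) -> dict:
    desired = set(desired_ids)
    existing = set(existing_ids)
    return {
        'to_add': sorted(desired - existing),      # attach these
        'to_remove': sorted(existing - desired),   # mark these removed
        'unchanged': sorted(desired & existing),   # leave these alone
    }

# Example: members 1 and 2 are attached and the caller asks for 2 and 3.
# diff_id_lists([2, 3], [1, 2])
# -> {'to_add': [3], 'to_remove': [1], 'unchanged': [2]}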
def inner(*args, **kwds):

    job_id = kwds.get('job_id', None)
    if job_id is None or job_id == "null" or job_id == "undefined":
        raise Forbidden("job_id is invalid")

    with sessionMaker.session_scope() as session:

        # Permissions cascade from the project
        project_string_id = get_project_string_from_job_id(session, job_id)

        # API
        if request.authorization is not None:
            result = API_Permissions.by_project(
                session=session,
                project_string_id=project_string_id,
                Roles=project_role_list)
            if result is True:
                return func(*args, **kwds)
            else:
                raise Forbidden("API access invalid")

        # TODO do we need to validate the user has an applicable mode?
        # ie they pass mode builder but are a trainer?
        # Basics should fail on the project level check anyway here...

        # User
        # TODO move login stuff into the general User_Permissions
        if LoggedIn() != True:
            raise Forbidden("Login again.")

        user = session.query(User).filter(User.id == getUserID()).first()
        if user is None:
            raise Forbidden("Login again.")

        # We want to use the builder API permissions instead of flags,
        # since a user may be testing this as a builder.
        # TODO deprecate the 'mode' flag or replace it with something else,
        # like "builder_only".
        # Jan 3, 2020
        # One downside of doing it this way is that we need to be
        # careful with the project_role_list list...
        if user.api_enabled_builder is True:
            result = Project_permissions.check_permissions(
                session=session,
                project_string_id=project_string_id,
                Roles=project_role_list,
                apis_project_list=apis_project_list,
                apis_user_list=apis_user_list)
            if result is True:
                return func(*args, **kwds)
            else:
                raise Forbidden("Project access invalid")

        if user.api_enabled_trainer is True:
            # TODO refactor into a function
            # TODO handle the "info" case of a trainer not yet on a job
            # seeing basic stuff on active jobs...
            # We allow trainers to see basic info before they apply,
            # as long as the job is active...
            # if job.status != "active":
            #     raise Forbidden("No access.")

            User_Permissions.general(
                user=user,
                apis_user_list=apis_user_list)

            user_to_job = User_To_Job.get_single_by_ids(
                session=session,
                user_id=user.id,
                job_id=job_id)

            # TODO other status checking on this...
            if user_to_job is None:
                raise Forbidden("No access to this job. Please apply first.")

            # Success case for trainer
            return func(*args, **kwds)

        raise Forbidden("No access.")
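
# A hedged sketch of how inner() is typically wired up: a decorator factory
# closes over the role / API lists that inner() references, and a Flask route
# passes job_id as a URL parameter so kwds.get('job_id') finds it. The factory
# name `by_job_id`, the role strings, and the route path are illustrative
# assumptions, not the project's actual registration code.
from functools import wraps

def by_job_id(project_role_list: list = None,
              apis_project_list: list = None,
              apis_user_list: list = None):
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwds):
            # ... permission checks as in the body above ...
            return func(*args, **kwds)
        return inner
    return decorator

# Hypothetical usage on a Flask view:
# @routes.route('/api/v1/job/<int:job_id>/info', methods=['GET'])
# @by_job_id(project_role_list=["admin", "Editor"],
#            apis_user_list=["api_enabled_builder"])
# def job_info_api(job_id):
#     ...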