def __init__(self, app):
    super(HistoryContentsController, self).__init__(app)
    self.hda_manager = hdas.HDAManager(app)
    self.history_manager = histories.HistoryManager(app)
    self.folder_manager = folders.FolderManager()
    self.hda_serializer = hdas.HDASerializer(app)
    self.hda_deserializer = hdas.HDADeserializer(app)

def __init__(self, app, **kwargs):
    super(HistorySerializer, self).__init__(app, **kwargs)
    self.history_manager = self.manager
    self.hda_manager = hdas.HDAManager(app)
    self.hda_serializer = hdas.HDASerializer(app)
    self.history_contents_serializer = history_contents.HistoryContentsSerializer(app)

    self.default_view = 'summary'
    self.add_view('summary', [
        'id',
        'model_class',
        'name',
        'deleted',
        'purged',
        # 'count'
        'url',
        # TODO: why these?
        'published',
        'annotation',
        'tags',
    ])
    self.add_view('detailed', [
        'contents_url',
        'empty',
        'size',
        'user_id',
        'create_time',
        'update_time',
        'importable',
        'slug',
        'username_and_slug',
        'genome_build',
        # TODO: remove the next three - instead getting the same info from the 'hdas' list
        'state',
        'state_details',
        'state_ids',
        # 'community_rating',
        # 'user_rating',
    ], include_keys_from='summary')
    # in the History's case, each of these views includes the keys from the previous
    #: .. note:: this is a custom view for newer (2016/3) UI and should be considered volatile
    self.add_view('dev-detailed', [
        'contents_url',
        'size',
        'user_id',
        'create_time',
        'update_time',
        'importable',
        'slug',
        'username_and_slug',
        'genome_build',
        # 'contents_states',
        'contents_active',
        'hid_counter',
    ], include_keys_from='summary')

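# For reference, a minimal sketch of how these layered views compose. The
# ViewRegistry class and the sample keys below are hypothetical illustrations
# of the add_view(..., include_keys_from=...) semantics visible above, not
# Galaxy's actual serializer implementation.
class ViewRegistry(object):
    def __init__(self):
        self.views = {}
        self.default_view = None

    def add_view(self, name, keys, include_keys_from=None):
        # A view extends the view it includes keys from, so
        # 'detailed' = all 'summary' keys plus its own.
        base = self.views.get(include_keys_from, [])
        self.views[name] = base + keys

views = ViewRegistry()
views.add_view('summary', ['id', 'name', 'deleted'])
views.add_view('detailed', ['create_time', 'update_time'], include_keys_from='summary')
print(views.views['detailed'])
# ['id', 'name', 'deleted', 'create_time', 'update_time']
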
def __init__(self, app):
    super(DCESerializer, self).__init__(app)
    self.hda_serializer = hdas.HDASerializer(app)
    self.dc_serializer = DCSerializer(app, dce_serializer=self)

    self.default_view = 'summary'
    self.add_view('summary', [
        'id',
        'model_class',
        'element_index',
        'element_identifier',
        'element_type',
        'object',
    ])

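# The constructor above wires the two serializers to each other: the element
# serializer hands nested collections back to DCSerializer, which in turn uses
# this DCESerializer for its elements. Below is a minimal, hypothetical sketch
# of that mutual-delegation pattern; the class bodies and serialize()
# signatures are illustrative stand-ins, not Galaxy's API.
class ElementSerializer(object):
    def __init__(self, collection_serializer=None):
        # The collection serializer handles any element whose payload is
        # itself a (sub)collection.
        self.collection_serializer = (collection_serializer or
                                      CollectionSerializer(element_serializer=self))

    def serialize(self, element):
        obj = element['object']
        if isinstance(obj, dict) and 'elements' in obj:
            obj = self.collection_serializer.serialize(obj)
        return {'identifier': element['identifier'], 'object': obj}

class CollectionSerializer(object):
    def __init__(self, element_serializer=None):
        self.element_serializer = (element_serializer or
                                   ElementSerializer(collection_serializer=self))

    def serialize(self, collection):
        return {'elements': [self.element_serializer.serialize(e)
                             for e in collection['elements']]}

# Nested collections recurse through the pair of serializers:
inner = {'elements': [{'identifier': 'leaf', 'object': 42}]}
outer = {'elements': [{'identifier': 'branch', 'object': inner}]}
print(CollectionSerializer().serialize(outer))
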
def set_up_managers(self):
    super(HDASerializerTestCase, self).set_up_managers()
    self.hda_serializer = hdas.HDASerializer(self.app)

def display_structured(self, trans, id=None):
    """
    Display a history as a nested structure showing the jobs and
    workflow invocations that created each dataset (if any).
    """
    # Get history
    if id is None:
        id = trans.history.id
    else:
        id = self.decode_id(id)
    # Expunge history from the session to allow us to force a reload
    # with a bunch of eager loaded joins
    trans.sa_session.expunge(trans.history)
    history = trans.sa_session.query(model.History).options(
        joinedload('active_datasets')
        .joinedload('creating_job_associations')
        .joinedload('job')
        .joinedload('workflow_invocation_step')
        .joinedload('workflow_invocation')
        .joinedload('workflow'),
    ).get(id)
    if not (history and ((history.user and trans.user and history.user.id == trans.user.id) or
                         (trans.history and history.id == trans.history.id) or
                         trans.user_is_admin)):
        return trans.show_error_message("Cannot display history structure.")
    # Resolve jobs and workflow invocations for the datasets in the history.
    # items is filled with items (hdas, jobs, or workflows) that go at the
    # top level
    items = []
    # First, go through and group hdas by job; if there is no job, they are
    # added directly to items
    jobs = {}
    for hda in history.active_datasets:
        if hda.visible is False:
            continue
        # Follow "copied from ..." association until we get to the original
        # instance of the dataset
        original_hda = hda
        # while original_hda.copied_from_history_dataset_association:
        #     original_hda = original_hda.copied_from_history_dataset_association
        # Check if the hda has a creating job; most should, but datasets from
        # before jobs were tracked, or from the upload tool before it created
        # a job, may not
        if not original_hda.creating_job_associations:
            items.append((hda, None))
        # Attach hda to correct job
        # -- there should only be one creating_job_association, so this
        #    loop body should only be hit once
        for assoc in original_hda.creating_job_associations:
            job = assoc.job
            if job in jobs:
                jobs[job].append((hda, None))
            else:
                jobs[job] = [(hda, None)]
    # Second, go through the jobs and connect to workflows
    wf_invocations = {}
    for job, hdas in jobs.items():
        # Job is attached to a workflow step; follow it to the
        # workflow_invocation and group
        if job.workflow_invocation_step:
            wf_invocation = job.workflow_invocation_step.workflow_invocation
            if wf_invocation in wf_invocations:
                wf_invocations[wf_invocation].append((job, hdas))
            else:
                wf_invocations[wf_invocation] = [(job, hdas)]
        # Not attached to a workflow, add to items
        else:
            items.append((job, hdas))
    # Finally, add workflow invocations to items, which should now
    # contain all hdas with some level of grouping
    items.extend(wf_invocations.items())
    # Sort items by age
    items.sort(key=(lambda x: x[0].create_time), reverse=True)
    # logic taken from mako files
    from galaxy.managers import hdas
    hda_serializer = hdas.HDASerializer(trans.app)
    hda_dicts = []
    id_hda_dict_map = {}
    for hda in history.active_datasets:
        hda_dict = hda_serializer.serialize_to_view(hda, user=trans.user, trans=trans, view='detailed')
        id_hda_dict_map[hda_dict['id']] = hda_dict
        hda_dicts.append(hda_dict)
    html_template = ''
    for entity, children in items:
        html_template += render_item(trans, entity, children)
    return {
        'name': history.name,
        'history_json': hda_dicts,
        'template': html_template,
    }

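# The grouping in display_structured is easier to follow in isolation. Below
# is a self-contained sketch of the same two-pass algorithm (group datasets by
# creating job, then group jobs by workflow invocation) over plain dicts; all
# names here are hypothetical stand-ins for the ORM objects used above.
def group_history_items(datasets):
    items = []
    # Pass 1: group datasets by the job that created them; datasets with no
    # creating job go straight to the top level.
    jobs = {}
    for ds in datasets:
        job = ds.get('creating_job')
        if job is None:
            items.append((ds, None))
        else:
            jobs.setdefault(job['id'], (job, []))[1].append((ds, None))
    # Pass 2: group jobs by workflow invocation; jobs outside any workflow go
    # to the top level.
    invocations = {}
    for job, ds_list in jobs.values():
        inv = job.get('workflow_invocation')
        if inv is None:
            items.append((job, ds_list))
        else:
            invocations.setdefault(inv['id'], (inv, []))[1].append((job, ds_list))
    items.extend(invocations.values())
    # Newest top-level entity first, mirroring the create_time sort above.
    items.sort(key=lambda x: x[0]['create_time'], reverse=True)
    return items

datasets = [
    {'id': 'd1', 'create_time': 3, 'creating_job': None},
    {'id': 'd2', 'create_time': 2, 'creating_job': {
        'id': 'j1', 'create_time': 1, 'workflow_invocation': None}},
]
# Top level: d1 (no job) and j1 (a job outside any workflow), newest first.
print(group_history_items(datasets))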