def encode_channel(obj, encoder):
    """Map a channel object onto a JSON-serializable dict."""
    layers = [encoder.default(layer) for layer in obj.layers]
    return {
        'id': encode_pk(obj.id),
        'name': obj.name,
        'bit_depth': obj.bit_depth,
        'layers': layers,
    }
def encode_mapobject_type(obj, encoder):
    """Map a mapobject type onto a JSON-serializable dict.

    Parameters
    ----------
    obj: mapobject type model instance
    encoder: JSON encoder whose ``default()`` serializes nested objects
    """
    return {
        'id': encode_pk(obj.id),
        'name': obj.name,
        # List comprehension instead of map(): on Python 3 map() returns a
        # lazy iterator that the JSON encoder cannot serialize; this also
        # matches the style of the 'layers' entry below.
        'features': [encoder.default(f) for f in obj.features],
        'layers': [encoder.default(layer) for layer in obj.layers]
    }
def encode_mapobject_type(obj, encoder):
    """Map a mapobject type onto a JSON-serializable dict.

    Parameters
    ----------
    obj: mapobject type model instance
    encoder: JSON encoder whose ``default()`` serializes nested objects
    """
    return {
        'id': encode_pk(obj.id),
        'name': obj.name,
        # List comprehension instead of map(): on Python 3 map() returns a
        # lazy iterator that the JSON encoder cannot serialize; this also
        # matches the style of the 'layers' entry below.
        'features': [encoder.default(f) for f in obj.features],
        'layers': [encoder.default(layer) for layer in obj.layers]
    }
def encode_experiment(obj, encoder):
    """Map an experiment object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    encoded['description'] = obj.description
    encoded['user'] = obj.user.name
    return encoded
def encode_channel(obj, encoder):
    """Map a channel object onto a JSON-serializable dict."""
    layers = [encoder.default(layer) for layer in obj.layers]
    return {
        'id': encode_pk(obj.id),
        'name': obj.name,
        'bit_depth': obj.bit_depth,
        'layers': layers,
    }
def encode_experiment(obj, encoder):
    """Map an experiment object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    encoded['description'] = obj.description
    encoded['user'] = obj.user.name
    return encoded
def encode_well(obj, encoder):
    """Map a well object onto a JSON-serializable dict."""
    encoded = {
        'id': encode_pk(obj.id),
        'name': obj.name,
        'description': obj.description,
    }
    encoded['plate_name'] = obj.plate.name
    encoded['dimensions'] = list(obj.dimensions)
    return encoded
def encode_well(obj, encoder):
    """Map a well object onto a JSON-serializable dict."""
    encoded = {
        'id': encode_pk(obj.id),
        'name': obj.name,
        'description': obj.description,
    }
    encoded['plate_name'] = obj.plate.name
    encoded['dimensions'] = list(obj.dimensions)
    return encoded
def encode_plate(obj, encoder):
    """Map a plate object onto a JSON-serializable dict.

    Parameters
    ----------
    obj: plate model instance
    encoder: JSON encoder whose ``default()`` serializes nested objects
    """
    return {
        'id': encode_pk(obj.id),
        'name': obj.name,
        'description': obj.description,
        # List comprehension instead of map(): on Python 3 map() returns a
        # lazy iterator that the JSON encoder cannot serialize; this also
        # matches the list-comprehension style of the sibling encoders.
        'acquisitions': [encoder.default(a) for a in obj.acquisitions],
        'status': obj.status,
    }
def encode_acquisition(obj, encoder):
    """Map an acquisition object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    encoded['description'] = obj.description
    encoded['status'] = obj.status
    encoded['plate_name'] = obj.plate.name
    return encoded
def encode_plate(obj, encoder):
    """Map a plate object onto a JSON-serializable dict.

    Parameters
    ----------
    obj: plate model instance
    encoder: JSON encoder whose ``default()`` serializes nested objects
    """
    return {
        'id': encode_pk(obj.id),
        'name': obj.name,
        'description': obj.description,
        # List comprehension instead of map(): on Python 3 map() returns a
        # lazy iterator that the JSON encoder cannot serialize; this also
        # matches the list-comprehension style of the sibling encoders.
        'acquisitions': [encoder.default(a) for a in obj.acquisitions],
        'status': obj.status,
    }
def encode_acquisition(obj, encoder):
    """Map an acquisition object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    encoded['description'] = obj.description
    encoded['status'] = obj.status
    encoded['plate_name'] = obj.plate.name
    return encoded
def encode_segmentation_layer(obj, encoder):
    """Map a segmentation layer onto a JSON-serializable dict."""
    # The pyramid dimensions live on the experiment that owns the
    # mapobject type this layer belongs to.
    experiment = obj.mapobject_type.experiment
    return {
        'id': encode_pk(obj.id),
        'tpoint': obj.tpoint,
        'zplane': obj.zplane,
        'image_size': {
            'width': experiment.pyramid_width,
            'height': experiment.pyramid_height
        }
    }
def encode_segmentation_layer(obj, encoder):
    """Map a segmentation layer onto a JSON-serializable dict."""
    # The pyramid dimensions live on the experiment that owns the
    # mapobject type this layer belongs to.
    experiment = obj.mapobject_type.experiment
    return {
        'id': encode_pk(obj.id),
        'tpoint': obj.tpoint,
        'zplane': obj.zplane,
        'image_size': {
            'width': experiment.pyramid_width,
            'height': experiment.pyramid_height
        }
    }
def encode_tool_result(obj, encoder):
    """Map a tool result onto a JSON-serializable dict.

    Parameters
    ----------
    obj: tool result model instance
    encoder: JSON encoder whose ``default()`` serializes nested objects
    """
    return {
        'id': encode_pk(obj.id),
        'name': obj.name,
        'submission_id': obj.submission_id,
        'tool_name': obj.tool_name,
        'type': obj.type,
        'attributes': obj.attributes,
        'layers': [
            encoder.default(layer) for layer in obj.mapobject_type.layers
        ],
        # List comprehension instead of map(): on Python 3 map() returns a
        # lazy iterator that the JSON encoder cannot serialize; this also
        # matches the style of the 'layers' entry above.
        'plots': [encoder.default(plot) for plot in obj.plots]
    }
def encode_site(obj, encoder):
    """Map a site object onto a JSON-serializable dict."""
    encoded = {
        'id': encode_pk(obj.id),
        'y': obj.y,
        'x': obj.x,
        'height': obj.height,
        'width': obj.width,
    }
    encoded['well_name'] = obj.well.name
    encoded['plate_name'] = obj.well.plate.name
    # TODO: shifts ?
    return encoded
def encode_site(obj, encoder):
    """Map a site object onto a JSON-serializable dict."""
    encoded = {
        'id': encode_pk(obj.id),
        'y': obj.y,
        'x': obj.x,
        'height': obj.height,
        'width': obj.width,
    }
    encoded['well_name'] = obj.well.name
    encoded['plate_name'] = obj.well.plate.name
    # TODO: shifts ?
    return encoded
def encode_channel_layer(obj, encoder):
    """Map a channel layer onto a JSON-serializable dict."""
    # The pyramid dimensions live on the experiment that owns the channel.
    experiment = obj.channel.experiment
    return {
        'id': encode_pk(obj.id),
        'max_zoom': obj.maxzoom_level_index,
        'tpoint': obj.tpoint,
        'zplane': obj.zplane,
        'max_intensity': obj.max_intensity,
        'min_intensity': obj.min_intensity,
        'image_size': {
            'width': experiment.pyramid_width,
            'height': experiment.pyramid_height
        }
    }
def encode_channel_layer(obj, encoder):
    """Map a channel layer onto a JSON-serializable dict."""
    # The pyramid dimensions live on the experiment that owns the channel.
    experiment = obj.channel.experiment
    return {
        'id': encode_pk(obj.id),
        'max_zoom': obj.maxzoom_level_index,
        'tpoint': obj.tpoint,
        'zplane': obj.zplane,
        'max_intensity': obj.max_intensity,
        'min_intensity': obj.min_intensity,
        'image_size': {
            'width': experiment.pyramid_width,
            'height': experiment.pyramid_height
        }
    }
def get_tool_jobs(experiment_id):
    """
    .. http:get:: /api/experiments/(string:experiment_id)/tools/jobs

        Get the status of each :class:`ToolJob <tmlib.models.jobs.ToolJob>`
        processing a tool request.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "data": [
                    {
                        "id": "dG1hcHM3NzYxOA==",
                        "name": "tool_Heatmap",
                        "submission_id": 4,
                        "submitted_at": "2017-04-01 10:42:10",
                        "state": "RUNNING",
                        "exitcode": null,
                        "memory": 1024,
                        "time": "1:21:33",
                        "cpu_time": "1:14:12"
                    },
                    ...
                ]
            }

        :query submission_id: numeric ID of the submission for which the job
            status should be retrieved (optional)
        :query tool_name: name of a tool for which job status should be
            retrieved (optional)
        :query state: state jobs should have, e.g. RUNNING (optional)

        :statuscode 400: malformed request
        :statuscode 200: no error
    """
    logger.info('get status of tool jobs for experiment %d', experiment_id)
    # Optional filters provided as query-string parameters.
    submission_id = request.args.get('submission_id', type=int)
    tool_name = request.args.get('tool_name')
    state = request.args.get('state')
    # TODO: batch_size, index - see workflow.get_jobs_status()
    with tm.utils.MainSession() as session:
        # Query individual columns (not full ORM objects) to keep the result
        # lightweight; restrict to tool submissions of the current user for
        # this experiment.
        tool_jobs = session.query(
                tm.Task.created_at, tm.Task.updated_at, tm.Task.id,
                tm.Task.name, tm.Task.type, tm.Task.time, tm.Task.cpu_time,
                tm.Task.memory, tm.Task.state, tm.Task.submission_id,
                tm.Task.exitcode
            ).\
            join(tm.Submission, tm.Task.submission_id == tm.Submission.id).\
            filter(
                tm.Submission.program == 'tool',
                tm.Submission.experiment_id == experiment_id,
                tm.Submission.user_id == current_identity.id
            )
        if state is not None:
            logger.info('filter tool jobs for state "%s"', state)
            tool_jobs = tool_jobs.filter(tm.Task.state == state)
        if tool_name is not None:
            logger.info('filter tool jobs for tool name "%s"', tool_name)
            # Tool job names follow the convention "tool_<tool_name>".
            tool_jobs = tool_jobs.filter(tm.Task.name == 'tool_%s' % tool_name)
        if submission_id is not None:
            logger.info('filter tool jobs for submission %d', submission_id)
            tool_jobs = tool_jobs.\
                filter(tm.Task.submission_id == submission_id)
        tool_jobs = tool_jobs.all()
        tool_job_status = list()
        for j in tool_jobs:
            # Normalize the raw task columns into the response format.
            status = format_task_data(
                j.name, j.type, j.created_at, j.updated_at, j.state,
                j.exitcode, j.memory, j.time, j.cpu_time
            )
            status['id'] = encode_pk(j.id)
            status['submission_id'] = j.submission_id
            status['submitted_at'] = str(j.created_at)
            tool_job_status.append(status)
    return jsonify(data=tool_job_status)
def get_workflow_jobs(experiment_id):
    """
    .. http:get:: /api/experiments/(string:experiment_id)/workflow/jobs

        Query the status of jobs for a given
        :class:`WorkflowStep <tmlib.workflow.workflow.WorkflowStep>`.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "data": [
                    {
                        "id": "dG1hcHM3NzYxOA==",
                        "name": "metaconfig_run_000001",
                        "state": "RUNNING",
                        "exitcode": 0,
                        "memory": 1024,
                        "time": "1:21:33",
                        "cpu_time": "1:14:12"
                    },
                    ...
                ]
            }

        :query step_name: name of the workflow step for which jobs should be
            queried (required)
        :query step_phase: name of the workflow step phase for which jobs
            should be queried (optional)
        :query name: name of the job (optional)
        :query index: the index of the first job queried (optional)
        :query batch_size: the amount of job stati to return starting from
            ``index`` (optional)

        :reqheader Authorization: JWT token issued by the server
        :statuscode 400: malformed request
        :statuscode 200: no error

    .. note:: Parameters ``index`` and ``batch_size`` can only be used
        together. Parameters ``name`` and ``step_phase`` are exclusive and
        cannot be combined with ``index`` and ``batch_size``.
    """
    step_name = request.args.get('step_name')
    logger.info('get status of jobs for workflow step "%s" of experiment %d',
                step_name, experiment_id)
    # NOTE(review): the docstring advertises "step_phase" but the code reads
    # the query parameter "phase" — confirm which name clients actually send.
    step_phase = request.args.get('phase')
    name = request.args.get('name')
    index = request.args.get('index', type=int)
    batch_size = request.args.get('batch_size', type=int)
    # Pagination parameters must be supplied together; "name" is exclusive
    # with all of them.
    if ((index is not None and batch_size is None) or
            (index is None and batch_size is not None)):
        raise MalformedRequestError(
            'Either both or none of the following parameters must be specified: '
            '"index", "batch_size"')
    if index is not None and name is not None:
        raise MalformedRequestError(
            'Only one of the following parameters can be specified: '
            '"name", "index"')
    if batch_size is not None and name is not None:
        raise MalformedRequestError(
            'Only one of the following parameters can be specified: '
            '"name", "batch_size"')
    if step_phase is not None and name is not None:
        raise MalformedRequestError(
            'Only one of the following parameters can be specified: '
            '"name", "step_phase"')
    # If the index is negative don't send `batch_size` jobs.
    # For example, if the index is -5 and the batch_size 50,
    # send the first 45 jobs back.
    if index is not None and batch_size is not None:
        if index < 0 and batch_size is not None:
            batch_size = batch_size + index
            index = 0
        if batch_size <= 0:
            return jsonify(data=[])
    submission_id = gc3pie.get_id_of_most_recent_submission(
        experiment_id, 'workflow')
    # TODO: Upon reload, the submission_id of tasks doesn't get updated.
    # While this makes sense to track tasks belonging to the same collection
    # it doesn't allow the differentiation of submissions (as the name implies).
    with tm.utils.MainSession() as session:
        # The step itself is modelled as a "collection" task; its children
        # are the phase tasks, whose children (possibly nested one level
        # deeper in another collection) are the actual jobs.
        step_task_id = session.query(tm.Task.id).\
            filter(
                tm.Task.submission_id == submission_id,
                tm.Task.name == step_name,
                tm.Task.is_collection
            ).\
            one_or_none()
        if step_task_id is None:
            status = []
        else:
            phase_tasks = session.query(tm.Task.id, tm.Task.name).\
                filter_by(parent_id=step_task_id).\
                all()
            if len(phase_tasks) == 0:
                status = []
            else:
                # Group job-task IDs by job name so that resubmitted jobs
                # collapse onto one entry.
                task_ids = collections.defaultdict(list)
                for phase_id, phase_name in phase_tasks:
                    if step_phase is not None:
                        # Phase task names end with the phase identifier.
                        if not phase_name.endswith(step_phase):
                            continue
                    subtasks = session.query(
                            tm.Task.id, tm.Task.is_collection, tm.Task.name
                        ).\
                        filter_by(parent_id=phase_id).\
                        order_by(tm.Task.id).\
                        all()
                    if len(subtasks) == 0:
                        continue
                    else:
                        for st in subtasks:
                            if st.is_collection:
                                # Expand nested collections one level deep.
                                subsubtasks = session.query(
                                        tm.Task.id, tm.Task.name
                                    ).\
                                    filter_by(parent_id=st.id).\
                                    order_by(tm.Task.id).\
                                    all()
                                for sst in subsubtasks:
                                    task_ids[sst.name].append(sst.id)
                            else:
                                task_ids[st.name].append(st.id)
                # Keep only the first (lowest-ID) task per job name.
                task_ids = [v[0] for v in task_ids.values()]
                if task_ids:
                    tasks = session.query(
                            tm.Task.id, tm.Task.name, tm.Task.type,
                            tm.Task.state, tm.Task.created_at,
                            tm.Task.updated_at, tm.Task.exitcode,
                            tm.Task.memory, tm.Task.time, tm.Task.cpu_time
                        ).\
                        filter(tm.Task.id.in_(task_ids)).\
                        order_by(tm.Task.name)
                    if index is not None and batch_size is not None:
                        logger.debug('query status of %d jobs starting at %d',
                                     batch_size, index)
                        tasks = tasks.limit(batch_size).offset(index)
                    if name is not None:
                        # NOTE(review): filter_by() after limit()/offset()
                        # would raise in SQLAlchemy, but "name" and
                        # "index"/"batch_size" are mutually exclusive above,
                        # so both branches never apply together — confirm.
                        tasks = tasks.filter_by(name=name)
                    tasks = tasks.all()
                    status = []
                    for t in tasks:
                        s = format_task_data(t.name, t.type, t.created_at,
                                             t.updated_at, t.state, t.exitcode,
                                             t.memory, t.time, t.cpu_time)
                        s['id'] = encode_pk(t.id)
                        status.append(s)
                else:
                    status = []
    return jsonify(data=status)
def create_experiment():
    """
    .. http:post:: /api/experiments

        Create a new
        :class:`Experiment <tmlib.models.experiment.Experiment>`.

        **Example request**:

        .. sourcecode:: http

            Content-Type: application/json

            {
                "name": "Experiment XY",
                "description": "Optional description",
                "workflow_type": "canonical",
                "plate_format": "0",
                "plate_acquisition_mode": "multiplexing",
                "microscope_type": "cellvoyager"
            }

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "data": {
                    "id": "MQ==",
                    "name": "Experiment XY",
                    "description": "Optional description",
                    "user": "******"
                }
            }

        :reqheader Authorization: JWT token issued by the server
        :statuscode 200: no error
    """
    data = request.get_json()
    name = data.get('name')
    workflow_type = data.get('workflow_type')
    microscope_type = data.get('microscope_type')
    # NOTE(review): int() raises if "plate_format" is missing or not numeric
    # — presumably validated client-side; confirm.
    plate_format = int(data.get('plate_format'))
    plate_acquisition_mode = data.get('plate_acquisition_mode')
    description = data.get('description', '')
    logger.info('create experiment "%s"', name)
    with tm.utils.MainSession() as session:
        # The reference record lives in the main database and determines the
        # experiment's storage location.
        experiment_ref = tm.ExperimentReference(
            name=name,
            description=description,
            user_id=current_identity.id,
            root_directory=lib_cfg.storage_home
        )
        session.add(experiment_ref)
        # Commit now so the auto-generated ID and derived location are
        # available for the experiment-specific session below.
        session.commit()
        experiment_id = experiment_ref.id
        experiment_location = experiment_ref.location
    with tm.utils.ExperimentSession(experiment_id) as session:
        experiment = tm.Experiment(
            id=experiment_id,
            location=experiment_location,
            workflow_type=workflow_type,
            microscope_type=microscope_type,
            plate_format=plate_format,
            plate_acquisition_mode=plate_acquisition_mode
        )
        session.add(experiment)
        # NOTE(review): no explicit commit here, unlike other variants of
        # this handler — presumably ExperimentSession commits on exit;
        # confirm.
    return jsonify({
        'data': {
            'id': encode_pk(experiment_id),
            'name': name,
            'description': description,
            'user': current_identity.name
        }
    })
def get_workflow_jobs(experiment_id):
    """
    .. http:get:: /api/experiments/(string:experiment_id)/workflow/jobs

        Query the status of jobs for a given
        :class:`WorkflowStep <tmlib.workflow.workflow.WorkflowStep>`.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "data": [
                    {
                        "id": "dG1hcHM3NzYxOA==",
                        "name": "metaconfig_run_000001",
                        "state": "RUNNING",
                        "exitcode": 0,
                        "memory": 1024,
                        "time": "1:21:33",
                        "cpu_time": "1:14:12"
                    },
                    ...
                ]
            }

        :query step_name: name of the workflow step for which jobs should be
            queried (required)
        :query step_phase: name of the workflow step phase for which jobs
            should be queried (optional)
        :query name: name of the job (optional)
        :query index: the index of the first job queried (optional)
        :query batch_size: the amount of job stati to return starting from
            ``index`` (optional)

        :reqheader Authorization: JWT token issued by the server
        :statuscode 400: malformed request
        :statuscode 200: no error

    .. note:: Parameters ``index`` and ``batch_size`` can only be used
        together. Parameters ``name`` and ``step_phase`` are exclusive and
        cannot be combined with ``index`` and ``batch_size``.
    """
    step_name = request.args.get('step_name')
    logger.info(
        'get status of jobs for workflow step "%s" of experiment %d',
        step_name, experiment_id
    )
    # NOTE(review): the docstring advertises "step_phase" but the code reads
    # the query parameter "phase" — confirm which name clients actually send.
    step_phase = request.args.get('phase')
    name = request.args.get('name')
    index = request.args.get('index', type=int)
    batch_size = request.args.get('batch_size', type=int)
    # Pagination parameters must be supplied together; "name" is exclusive
    # with all of them.
    if ((index is not None and batch_size is None) or
            (index is None and batch_size is not None)):
        raise MalformedRequestError(
            'Either both or none of the following parameters must be specified: '
            '"index", "batch_size"'
        )
    if index is not None and name is not None:
        raise MalformedRequestError(
            'Only one of the following parameters can be specified: '
            '"name", "index"'
        )
    if batch_size is not None and name is not None:
        raise MalformedRequestError(
            'Only one of the following parameters can be specified: '
            '"name", "batch_size"'
        )
    if step_phase is not None and name is not None:
        raise MalformedRequestError(
            'Only one of the following parameters can be specified: '
            '"name", "step_phase"'
        )
    # If the index is negative don't send `batch_size` jobs.
    # For example, if the index is -5 and the batch_size 50,
    # send the first 45 jobs back.
    if index is not None and batch_size is not None:
        if index < 0 and batch_size is not None:
            batch_size = batch_size + index
            index = 0
        if batch_size <= 0:
            return jsonify(data=[])
    submission_id = gc3pie.get_id_of_most_recent_submission(
        experiment_id, 'workflow'
    )
    # TODO: Upon reload, the submission_id of tasks doesn't get updated.
    # While this makes sense to track tasks belonging to the same collection
    # it doesn't allow the differentiation of submissions (as the name implies).
    with tm.utils.MainSession() as session:
        # The step itself is modelled as a "collection" task; its children
        # are the phase tasks, whose children (possibly nested one level
        # deeper in another collection) are the actual jobs.
        step_task_id = session.query(tm.Task.id).\
            filter(
                tm.Task.submission_id == submission_id,
                tm.Task.name == step_name,
                tm.Task.is_collection
            ).\
            one_or_none()
        if step_task_id is None:
            status = []
        else:
            phase_tasks = session.query(tm.Task.id, tm.Task.name).\
                filter_by(parent_id=step_task_id).\
                all()
            if len(phase_tasks) == 0:
                status = []
            else:
                # Group job-task IDs by job name so that resubmitted jobs
                # collapse onto one entry.
                task_ids = collections.defaultdict(list)
                for phase_id, phase_name in phase_tasks:
                    if step_phase is not None:
                        # Phase task names end with the phase identifier.
                        if not phase_name.endswith(step_phase):
                            continue
                    subtasks = session.query(
                            tm.Task.id, tm.Task.is_collection, tm.Task.name
                        ).\
                        filter_by(parent_id=phase_id).\
                        order_by(tm.Task.id).\
                        all()
                    if len(subtasks) == 0:
                        continue
                    else:
                        for st in subtasks:
                            if st.is_collection:
                                # Expand nested collections one level deep.
                                subsubtasks = session.query(
                                        tm.Task.id, tm.Task.name
                                    ).\
                                    filter_by(parent_id=st.id).\
                                    order_by(tm.Task.id).\
                                    all()
                                for sst in subsubtasks:
                                    task_ids[sst.name].append(sst.id)
                            else:
                                task_ids[st.name].append(st.id)
                # Keep only the first (lowest-ID) task per job name.
                task_ids = [v[0] for v in task_ids.values()]
                if task_ids:
                    tasks = session.query(
                            tm.Task.id, tm.Task.name, tm.Task.type,
                            tm.Task.state, tm.Task.created_at,
                            tm.Task.updated_at, tm.Task.exitcode,
                            tm.Task.memory, tm.Task.time, tm.Task.cpu_time
                        ).\
                        filter(tm.Task.id.in_(task_ids)).\
                        order_by(tm.Task.name)
                    if index is not None and batch_size is not None:
                        logger.debug(
                            'query status of %d jobs starting at %d',
                            batch_size, index
                        )
                        tasks = tasks.limit(batch_size).offset(index)
                    if name is not None:
                        # NOTE(review): filter_by() after limit()/offset()
                        # would raise in SQLAlchemy, but "name" and
                        # "index"/"batch_size" are mutually exclusive above,
                        # so both branches never apply together — confirm.
                        tasks = tasks.filter_by(name=name)
                    tasks = tasks.all()
                    status = []
                    for t in tasks:
                        s = format_task_data(
                            t.name, t.type, t.created_at, t.updated_at,
                            t.state, t.exitcode, t.memory, t.time, t.cpu_time
                        )
                        s['id'] = encode_pk(t.id)
                        status.append(s)
                else:
                    status = []
    return jsonify(data=status)
def create_experiment():
    """
    Create a new :class:`Experiment <tmlib.models.experiment.Experiment>`.

    .. note:: The ``description`` parameter in this request is *not* the
        "workflow description" YAML file: the latter is set to a default
        value (depending on the ``workflow_type`` key) and can be later
        changed with the ``update_workflow_description()``:func: API call;
        the former is only used to set the ``description`` column in table
        ``experiment_references`` which is used when listing existing
        experiments in the UI.

    .. http:post:: /api/experiments

        **Example request**:

        .. sourcecode:: http

            Content-Type: application/json

            {
                "name": "Experiment XY",
                "description": "Optional description",
                "workflow_type": "canonical",
                "plate_format": "0",
                "plate_acquisition_mode": "multiplexing",
                "microscope_type": "cellvoyager"
            }

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "data": {
                    "id": "MQ==",
                    "name": "Experiment XY",
                    "description": "Optional description",
                    "user": "******"
                }
            }

        :reqheader Authorization: JWT token issued by the server
        :statuscode 200: no error
    """
    data = request.get_json()
    name = data.get('name')
    workflow_type = data.get('workflow_type')
    microscope_type = data.get('microscope_type')
    # NOTE(review): int() raises if "plate_format" is missing or not numeric
    # — presumably validated client-side; confirm.
    plate_format = int(data.get('plate_format'))
    plate_acquisition_mode = data.get('plate_acquisition_mode')
    # WARNING: this description is just human-readable text,
    # has no connection to the "workflow description" YAML file
    description = data.get('description', '')
    logger.info('create experiment "%s"', name)
    with tm.utils.MainSession() as session:
        # The reference record lives in the main database and determines the
        # experiment's storage location.
        experiment_ref = tm.ExperimentReference(
            name=name, description=description,
            user_id=current_identity.id,
            root_directory=lib_cfg.storage_home)
        session.add(experiment_ref)
        # Commit now so the auto-generated ID and derived location are
        # available for the experiment-specific session below.
        session.commit()
        experiment_id = experiment_ref.id
        experiment_location = experiment_ref.location
    with tm.utils.ExperimentSession(experiment_id) as session:
        experiment = tm.Experiment(
            id=experiment_id, location=experiment_location,
            workflow_type=workflow_type, microscope_type=microscope_type,
            plate_format=plate_format,
            plate_acquisition_mode=plate_acquisition_mode)
        session.add(experiment)
        session.commit()
    return jsonify({
        'data': {
            'id': encode_pk(experiment_id),
            'name': name,
            'description': description,
            'user': current_identity.name
        }
    })
def create_experiment():
    """
    Create a new :class:`Experiment <tmlib.models.experiment.Experiment>`.

    .. note:: The ``description`` parameter in this request is *not* the
        "workflow description" YAML file: the latter is set to a default
        value (depending on the ``workflow_type`` key) and can be later
        changed with the ``update_workflow_description()``:func: API call;
        the former is only used to set the ``description`` column in table
        ``experiment_references`` which is used when listing existing
        experiments in the UI.

    .. http:post:: /api/experiments

        **Example request**:

        .. sourcecode:: http

            Content-Type: application/json

            {
                "name": "Experiment XY",
                "description": "Optional description",
                "workflow_type": "canonical",
                "plate_format": "0",
                "plate_acquisition_mode": "multiplexing",
                "microscope_type": "cellvoyager"
            }

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "data": {
                    "id": "MQ==",
                    "name": "Experiment XY",
                    "description": "Optional description",
                    "user": "******"
                }
            }

        :reqheader Authorization: JWT token issued by the server
        :statuscode 200: no error
    """
    data = request.get_json()
    name = data.get('name')
    workflow_type = data.get('workflow_type')
    microscope_type = data.get('microscope_type')
    # NOTE(review): int() raises if "plate_format" is missing or not numeric
    # — presumably validated client-side; confirm.
    plate_format = int(data.get('plate_format'))
    plate_acquisition_mode = data.get('plate_acquisition_mode')
    # WARNING: this description is just human-readable text,
    # has no connection to the "workflow description" YAML file
    description = data.get('description', '')
    logger.info('create experiment "%s"', name)
    with tm.utils.MainSession() as session:
        # The reference record lives in the main database and determines the
        # experiment's storage location.
        experiment_ref = tm.ExperimentReference(
            name=name, description=description,
            user_id=current_identity.id,
            root_directory=lib_cfg.storage_home
        )
        session.add(experiment_ref)
        # Commit now so the auto-generated ID and derived location are
        # available for the experiment-specific session below.
        session.commit()
        experiment_id = experiment_ref.id
        experiment_location = experiment_ref.location
    with tm.utils.ExperimentSession(experiment_id) as session:
        experiment = tm.Experiment(
            id=experiment_id, location=experiment_location,
            workflow_type=workflow_type, microscope_type=microscope_type,
            plate_format=plate_format,
            plate_acquisition_mode=plate_acquisition_mode
        )
        session.add(experiment)
        session.commit()
    return jsonify({
        'data': {
            'id': encode_pk(experiment_id),
            'name': name,
            'description': description,
            'user': current_identity.name
        }
    })
def get_tool_jobs(experiment_id):
    """
    .. http:get:: /api/experiments/(string:experiment_id)/tools/jobs

        Get the status of each :class:`ToolJob <tmlib.models.jobs.ToolJob>`
        processing a tool request.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "data": [
                    {
                        "id": "dG1hcHM3NzYxOA==",
                        "name": "tool_Heatmap",
                        "submission_id": 4,
                        "submitted_at": "2017-04-01 10:42:10",
                        "state": "RUNNING",
                        "exitcode": null,
                        "memory": 1024,
                        "time": "1:21:33",
                        "cpu_time": "1:14:12"
                    },
                    ...
                ]
            }

        :query submission_id: numeric ID of the submission for which the job
            status should be retrieved (optional)
        :query tool_name: name of a tool for which job status should be
            retrieved (optional)
        :query state: state jobs should have, e.g. RUNNING (optional)

        :statuscode 400: malformed request
        :statuscode 200: no error
    """
    logger.info('get status of tool jobs for experiment %d', experiment_id)
    # Optional filters provided as query-string parameters.
    submission_id = request.args.get('submission_id', type=int)
    tool_name = request.args.get('tool_name')
    state = request.args.get('state')
    # TODO: batch_size, index - see workflow.get_jobs_status()
    with tm.utils.MainSession() as session:
        # Query individual columns (not full ORM objects) to keep the result
        # lightweight; restrict to tool submissions of the current user for
        # this experiment.
        tool_jobs = session.query(
                tm.Task.created_at, tm.Task.updated_at, tm.Task.id,
                tm.Task.name, tm.Task.type, tm.Task.time, tm.Task.cpu_time,
                tm.Task.memory, tm.Task.state, tm.Task.submission_id,
                tm.Task.exitcode
            ).\
            join(tm.Submission, tm.Task.submission_id == tm.Submission.id).\
            filter(
                tm.Submission.program == 'tool',
                tm.Submission.experiment_id == experiment_id,
                tm.Submission.user_id == current_identity.id
            )
        if state is not None:
            logger.info('filter tool jobs for state "%s"', state)
            tool_jobs = tool_jobs.filter(tm.Task.state == state)
        if tool_name is not None:
            logger.info('filter tool jobs for tool name "%s"', tool_name)
            # Tool job names follow the convention "tool_<tool_name>".
            tool_jobs = tool_jobs.filter(tm.Task.name == 'tool_%s' % tool_name)
        if submission_id is not None:
            logger.info('filter tool jobs for submission %d', submission_id)
            tool_jobs = tool_jobs.\
                filter(tm.Task.submission_id == submission_id)
        tool_jobs = tool_jobs.all()
        tool_job_status = list()
        for j in tool_jobs:
            # Normalize the raw task columns into the response format.
            status = format_task_data(j.name, j.type, j.created_at,
                                      j.updated_at, j.state, j.exitcode,
                                      j.memory, j.time, j.cpu_time)
            status['id'] = encode_pk(j.id)
            status['submission_id'] = j.submission_id
            status['submitted_at'] = str(j.created_at)
            tool_job_status.append(status)
    return jsonify(data=tool_job_status)
def encode_cycle(obj, encoder):
    """Map a cycle object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['index'] = obj.index
    encoded['tpoint'] = obj.tpoint
    return encoded
def encode_plot(obj, encoder):
    """Map a plot object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['type'] = obj.type
    encoded['attributes'] = obj.attributes
    return encoded
def encode_cycle(obj, encoder):
    """Map a cycle object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['index'] = obj.index
    encoded['tpoint'] = obj.tpoint
    return encoded
def encode_microscope_metadata_file(obj, encoder):
    """Map a microscope metadata file onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    encoded['status'] = obj.status
    return encoded
def encode_plot(obj, encoder):
    """Map a plot object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['type'] = obj.type
    encoded['attributes'] = obj.attributes
    return encoded
def encode_feature(obj, encoder):
    """Map a feature object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    return encoded
def encode_feature(obj, encoder):
    """Map a feature object onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    return encoded
def encode_microscope_metadata_file(obj, encoder):
    """Map a microscope metadata file onto a JSON-serializable dict."""
    encoded = {'id': encode_pk(obj.id)}
    encoded['name'] = obj.name
    encoded['status'] = obj.status
    return encoded