def setUp(self):
    AbstractPlateAnalysisCtx.DEFAULT_ORIGINAL_FILE_PROVIDER = \
        FromFileOriginalFileProvider
    original_files = list()
    # Create our container images and an original file image map
    images = list()
    n_images = 0
    for row in range(16):
        for column in range(24):
            well = WellI(n_images, True)
            well.column = rint(column)
            well.row = rint(row)
            well_sample = WellSampleI(n_images, True)
            well_sample.well = well
            image = ImageI(n_images, True)
            image.addWellSample(well_sample)
            images.append(image)
    original_file_image_map = dict()
    # Our required original file format
    format = rstring('Companion/InCell')
    # Create original file representing the result file
    o = OriginalFileI(1, True)
    o.name = rstring(self.RESULT_FILE)
    o.path = rstring(os.path.join(self.ROOT, self.RESULT_FILE))
    o.mimetype = format
    original_files.append(o)
    original_file_image_map[1] = image
    sf = TestingServiceFactory()
    self.analysis_ctx = InCellPlateAnalysisCtx(
        images, original_files, original_file_image_map, 1, sf)

def test_file_annotation(self):
    """Tests AnnotationWrapper methods return strings"""
    file_name = u'₩€_file_$$'
    f = OriginalFileI()
    f.name = rstring(file_name)
    obj = FileAnnotationI()
    obj.file = f
    file_ann = MockConnection(obj).getObject("Annotation", 1)
    assert file_ann.getFileName() == file_name

def save_results(conn, files, plate):
    # Upload the results as OMERO.table
    print("saving results...")
    Nuclei = pandas.concat(files, ignore_index=True)
    summary = Nuclei.groupby('Image').mean()
    # Free memory
    del Nuclei
    cols = []
    for col in summary.columns:
        if col == 'Image':
            cols.append(ImageColumn(col, '', summary[col]))
        elif col == 'Well':
            cols.append(WellColumn(col, '', summary[col]))
        elif summary[col].dtype == 'int64':
            cols.append(LongColumn(col, '', summary[col]))
        elif summary[col].dtype == 'float64':
            cols.append(DoubleColumn(col, '', summary[col]))
    resources = conn.c.sf.sharedResources()
    repository_id = \
        resources.repositories().descriptions[0].getId().getValue()
    table_name = "idr0002_cellprofiler"
    table = resources.newTable(repository_id, table_name)
    table.initialize(cols)
    table.addData(cols)
    # Link the table to the plate
    orig_file = table.getOriginalFile()
    file_ann = FileAnnotationWrapper(conn)
    file_ann.setNs(NSBULKANNOTATIONS)
    file_ann._obj.file = OriginalFileI(orig_file.id.val, False)
    file_ann.save()
    plate.linkAnnotation(file_ann)
    table.close()

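A pattern worth noting in the snippet above, and in most of the examples that follow, is OriginalFileI(orig_file.id.val, False): passing False constructs an unloaded proxy that carries only the ID, so the server links the existing file without the client re-fetching or re-sending its state. A minimal sketch of the idiom, assuming file_id holds the ID of an OriginalFile already saved on the server:

from omero.model import FileAnnotationI, OriginalFileI

# file_id is assumed to reference an existing server-side OriginalFile
fa = FileAnnotationI()
# False -> unloaded proxy: only the ID travels to the server,
# which resolves it against the stored row
fa.setFile(OriginalFileI(file_id, False))
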
def create_figure_file(conn, figure_json, figure_name):
    """Create Figure FileAnnotation from json data."""
    if len(figure_json['panels']) == 0:
        raise Exception('No Panels')
    first_img_id = figure_json['panels'][0]['imageId']
    # we store json in description field...
    description = {}
    description['name'] = figure_name
    description['imageId'] = first_img_id
    # Try to set Group context to the same as first image
    conn.SERVICE_OPTS.setOmeroGroup('-1')
    i = conn.getObject("Image", first_img_id)
    gid = i.getDetails().getGroup().getId()
    conn.SERVICE_OPTS.setOmeroGroup(gid)
    json_bytes = json.dumps(figure_json).encode('utf-8')
    file_size = len(json_bytes)
    f = BytesIO()
    try:
        f.write(json_bytes)
        update = conn.getUpdateService()
        orig_file = conn.createOriginalFileFromFileObj(
            f, '', figure_name, file_size, mimetype="application/json")
    finally:
        f.close()
    fa = FileAnnotationI()
    fa.setFile(OriginalFileI(orig_file.getId(), False))
    fa.setNs(rstring(JSON_FILEANN_NS))
    desc = json.dumps(description)
    fa.setDescription(rstring(desc))
    fa = update.saveAndReturnObject(fa, conn.SERVICE_OPTS)
    return fa.getId().getValue()

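To verify what create_figure_file stored, the JSON can be read back through the annotation wrapper. A hedged sketch, assuming conn is a connected BlitzGateway and ann_id is the return value from above (getFileInChunks is the same call exercised by the OriginalFileWrapper test further down):

import json

# ann_id: ID returned by create_figure_file (assumption for this sketch)
ann = conn.getObject("Annotation", ann_id)
# Stream the file contents back in chunks and reassemble the JSON
data = b"".join(ann.getFileInChunks())
figure = json.loads(data.decode("utf-8"))
print(figure["panels"][0]["imageId"])
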
def upload(self, args):
    client = self.ctx.conn(args)
    obj_ids = []
    for local_file in args.file:
        if not path(local_file).exists():
            self.ctx.die(500, "File: %s does not exist" % local_file)
    for local_file in args.file:
        omero_format = UNKNOWN
        if args.mimetype:
            omero_format = args.mimetype
        elif mimetypes.guess_type(local_file) != (None, None):
            omero_format = mimetypes.guess_type(local_file)[0]
        if args.data_dir:
            obj = upload_ln_s(client, local_file, args.data_dir,
                              omero_format)
            obj_id = obj.id
        else:
            obj = client.upload(local_file, type=omero_format)
            obj_id = obj.id.val
        if args.wrap:
            fa = FileAnnotationI()
            fa.setFile(OriginalFileI(obj_id, False))
            if args.namespace:
                fa.setNs(rstring(args.namespace))
            fa = client.sf.getUpdateService().saveAndReturnObject(fa)
            obj_ids.append(fa.id.val)
        else:
            obj_ids.append(obj_id)
        self.ctx.set("last.upload.id", obj_id)
    obj_ids = self._order_and_range_ids(obj_ids)
    if args.wrap:
        self.ctx.out("FileAnnotation:%s" % obj_ids)
    else:
        self.ctx.out("OriginalFile:%s" % obj_ids)

def write_to_omero(self):
    sf = self.client.getSession()
    group = str(self.value_resolver.target_object.details.group.id.val)
    sr = sf.sharedResources()
    update_service = sf.getUpdateService()
    name = 'bulk_annotations'
    table = sr.newTable(1, name, {'omero.group': group})
    if table is None:
        raise MetadataError(
            "Unable to create table: %s" % name)
    original_file = table.getOriginalFile()
    log.info('Created new table OriginalFile:%d' % original_file.id.val)
    table.initialize(self.columns)
    log.info('Table initialized with %d columns.' % len(self.columns))
    table.addData(self.columns)
    log.info('Added column data.')
    table.close()
    file_annotation = FileAnnotationI()
    file_annotation.ns = \
        rstring('openmicroscopy.org/omero/bulk_annotations')
    file_annotation.description = rstring(name)
    file_annotation.file = OriginalFileI(original_file.id.val, False)
    link = self.create_annotation_link()
    link.parent = self.target_object
    link.child = file_annotation
    update_service.saveObject(link, {'omero.group': group})

def testPopulateRoisPlate(self):
    """
    Create a small csv file, use populate_roi.py to parse and
    attach to Plate. Then query to check table has expected content.
    """
    csvName = self.createCsv(
        colNames="Well,Field,X,Y,Type",
        rowData=("A1,0,15,15,Test",))
    rowCount = 1
    colCount = 1
    plate = self.createPlate(rowCount, colCount)
    # As opposed to the ParsingContext, here we are expected
    # to link the file ourselves
    ofile = self.client.upload(csvName).proxy()
    ann = FileAnnotationI()
    ann.file = ofile
    link = PlateAnnotationLinkI()
    link.parent = plate.proxy()
    link.child = ann
    link = self.client.sf.getUpdateService()\
        .saveAndReturnObject(link)
    # End linking
    factory = PlateAnalysisCtxFactory(self.client.sf)
    factory.implementations = (MockPlateAnalysisCtx,)
    ctx = factory.get_analysis_ctx(plate.id.val)
    assert 1 == ctx.get_measurement_count()
    meas = ctx.get_measurement_ctx(0)
    meas.parse_and_populate()
    # Get file annotations
    query = """select p from Plate p
        left outer join fetch p.annotationLinks links
        left outer join fetch links.child as ann
        left outer join fetch ann.file as file
        where p.id=%s""" % plate.id.val
    qs = self.client.sf.getQueryService()
    plate = qs.findByQuery(query, None)
    anns = plate.linkedAnnotationList()
    # Expect two annotations: the 'bulk annotation' table
    # and the original CSV
    assert len(anns) == 2
    files = dict(
        [(a.ns.val, a.file.id.val) for a in anns if a.ns])
    fileid = files[NSMEASUREMENT]
    # Open table to check contents
    r = self.client.sf.sharedResources()
    t = r.openTable(OriginalFileI(fileid), None)
    cols = t.getHeaders()
    rows = t.getNumberOfRows()
    assert rows == 1
    data = t.read(range(len(cols)), 0, 1)
    imag = data.columns[0].values[0]
    rois = self.client.sf.getRoiService()
    anns = rois.getRoiMeasurements(imag, RoiOptions())
    assert anns

def create_fileset():
    """Creates and returns a Fileset with associated Images."""
    fileset = FilesetI()
    fileset.templatePrefix = rstring("")
    for image_index in range(2):
        image = create_image(image_index)
        for fileset_index in range(2):
            fileset_entry = FilesetEntryI()
            fileset_entry.clientPath = rstring(
                "/client/path/filename_%d.ext" % fileset_index)
            original_file = OriginalFileI()
            original_file.name = rstring(
                "filename_%d.ext" % fileset_index)
            original_file.path = rstring("/server/path/")
            original_file.size = rlong(50)
            fileset_entry.originalFile = original_file
            fileset.addFilesetEntry(fileset_entry)
        fileset.addImage(image)
    return fileset

def link_table(conn, table, project): """Create FileAnnotation for OMERO.table and links to Project.""" orig_file = table.getOriginalFile() file_ann = FileAnnotationWrapper(conn) file_ann.setNs(NSBULKANNOTATIONS) file_ann._obj.file = OriginalFileI(orig_file.id.val, False) file_ann.save() project.linkAnnotation(file_ann)
def make_file_annotation(self, name=None, binary=None, format=None,
                         client=None, ns=None):
    """
    Creates a new FileAnnotationI instance and returns the persisted
    object. If no name has been provided, a UUID string shall be used.

    :param name: the name of the file
    :param client: The client to use to create the object
    :param ns: The namespace for the annotation
    """
    if client is None:
        client = self.client
    update = client.sf.getUpdateService()
    # file
    if format is None:
        format = "application/octet-stream"
    if binary is None:
        binary = "12345678910"
    if name is None:
        name = str(self.uuid())
    oFile = OriginalFileI()
    oFile.setName(rstring(name))
    oFile.setPath(rstring(str(self.uuid())))
    oFile.setSize(rlong(len(binary)))
    oFile.hasher = ChecksumAlgorithmI()
    oFile.hasher.value = rstring("SHA1-160")
    oFile.setMimetype(rstring(str(format)))
    oFile = update.saveAndReturnObject(oFile)
    # save binary
    store = client.sf.createRawFileStore()
    store.setFileId(oFile.id.val)
    store.write(binary, 0, 0)
    oFile = store.save()  # See ticket:1501
    store.close()
    fa = FileAnnotationI()
    fa.setFile(oFile)
    if ns is not None:
        fa.setNs(rstring(ns))
    return update.saveAndReturnObject(fa)

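A possible usage sketch for the helper above, inside the same test class: link the returned FileAnnotation to an existing Dataset with the same unloaded-proxy link pattern used elsewhere in these examples (dataset_id is a placeholder ID, not from the original source):

from omero.model import DatasetAnnotationLinkI, DatasetI

fa = self.make_file_annotation(name="example.txt",
                               binary=b"12345678910",
                               ns="example/namespace")
link = DatasetAnnotationLinkI()
link.parent = DatasetI(dataset_id, False)  # dataset_id: placeholder
link.child = fa.proxy()
self.client.sf.getUpdateService().saveAndReturnObject(link)
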
def images_with_original_files(request, gatewaywrapper):
    """Creates Images with associated OriginalFiles."""
    gatewaywrapper.loginAsAuthor()
    gw = gatewaywrapper.gateway
    update_service = gw.getUpdateService()
    original_files = list()
    for original_file_index in range(2):
        original_file = OriginalFileI()
        original_file.name = rstring(
            "filename_%d.ext" % original_file_index)
        original_file.path = rstring("/server/path/")
        original_file.size = rlong(50)
        original_files.append(original_file)
    images = list()
    for image_index in range(2):
        image = create_image(image_index)
        for original_file in original_files:
            image.getPrimaryPixels().linkOriginalFile(original_file)
        images.append(image)
    image_ids = update_service.saveAndReturnIds(images)
    return [gw.getObject("Image", image_id) for image_id in image_ids]

def get_table(conn, objtype, objid):
    data = _annotations(None, objtype, objid, conn=conn).get('data', [])
    if len(data) < 1:
        return None
    # Just use the first Table we find
    # TODO: handle multiple tables!?
    data = data[0]
    logger.debug('Data: %r' % data)
    shared_resources = conn.getSharedResources()
    return shared_resources.openTable(OriginalFileI(data['file']),
                                      conn.SERVICE_OPTS)

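Once get_table returns, the handle can be read with the same calls the tests in this listing use (getHeaders, getNumberOfRows, read). A minimal sketch, assuming table is the non-None return value and plate_id is a placeholder:

table = get_table(conn, "Plate", plate_id)  # plate_id: placeholder
try:
    headers = [col.name for col in table.getHeaders()]
    n_rows = table.getNumberOfRows()
    # read(col_numbers, start, stop) returns a Data object with .columns
    data = table.read(range(len(headers)), 0, n_rows)
    for col in data.columns:
        print(col.name, col.values)
finally:
    table.close()
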
def testPopulateMetadataPlate(self):
    """
    Create a small csv file, use populate_metadata.py to parse and
    attach to Plate. Then query to check table has expected content.
    """
    csvName = "testCreate.csv"
    self.createCsv(csvName)
    rowCount = 1
    colCount = 2
    plate = self.createPlate(rowCount, colCount)
    ctx = ParsingContext(self.client, plate, csvName)
    ctx.parse()
    ctx.write_to_omero()
    # Delete local temp file
    os.remove(csvName)
    # Get file annotations
    query = """select p from Plate p
        left outer join fetch p.annotationLinks links
        left outer join fetch links.child
        where p.id=%s""" % plate.id.val
    qs = self.client.sf.getQueryService()
    plate = qs.findByQuery(query, None)
    anns = plate.linkedAnnotationList()
    # Only expect a single annotation which is a 'bulk annotation'
    assert len(anns) == 1
    tableFileAnn = anns[0]
    assert unwrap(tableFileAnn.getNs()) == NSBULKANNOTATIONS
    fileid = tableFileAnn.file.id.val
    # Open table to check contents
    r = self.client.sf.sharedResources()
    t = r.openTable(OriginalFileI(fileid), None)
    cols = t.getHeaders()
    rows = t.getNumberOfRows()
    assert rows == rowCount * colCount
    for hit in range(rows):
        rowValues = [
            col.values[0]
            for col in t.read(range(len(cols)), hit, hit + 1).columns]
        assert len(rowValues) == 4
        if "a1" in rowValues:
            assert "Control" in rowValues
        elif "a2" in rowValues:
            assert "Treatment" in rowValues
        else:
            assert False, "Row does not contain 'a1' or 'a2'"

def get_data(request, data_name, conn):
    """Return table data for images in a Plate."""
    plate_id = request.GET.get('plate')
    field_id = request.GET.get('field')
    # dict of well_id: img_id
    img_ids = get_well_image_ids(conn, plate_id, field_id)
    print('img_ids', img_ids)
    query_service = conn.getQueryService()
    if data_name.startswith("Table_"):
        column_name = data_name.replace("Table_", "")
        # Load table and get data for named column
        params = ParametersI()
        params.addId(plate_id)
        query = """select oal from PlateAnnotationLink as oal
            left outer join fetch oal.child as ch
            left outer join oal.parent as pa
            where pa.id=:id and ch.ns='%s'""" % NSBULKANNOTATIONS
        links = query_service.findAllByQuery(query, params,
                                             conn.SERVICE_OPTS)
        shared_resources = conn.getSharedResources()
        # Just use the first Table we find
        # TODO: handle multiple tables!?
        file_id = links[0].child.file.id.val
        table = shared_resources.openTable(OriginalFileI(file_id),
                                           conn.SERVICE_OPTS)
        headers = table.getHeaders()
        column_names = [col.name for col in headers]
        col_index = column_names.index(column_name)
        rows = table.getNumberOfRows()
        # Load first column 'Well' & named column
        col_data = table.read([0, col_index], 0, rows).columns
        table_data = {}
        well_ids = col_data[0].values
        values = col_data[1].values
        for well_id, value in zip(well_ids, values):
            print('well_id', well_id, value)
            img_id = img_ids[well_id]
            table_data[img_id] = value
        return table_data

def test_original_file_wrapper(self):
    file_text = """String to return in chunks from a file-like object within the OriginalFileWrapper"""

    class MockFile(object):
        def __init__(self, text, buffer=2621440):
            self.text = text
            self.buffer = buffer

        def seek(self, n, mode):
            pass

        def tell(self):
            return 0

        def read(self, n=-1):
            return self.text

        def close(self):
            pass

        def __iter__(self):
            for c in self.text:
                yield c

        def __enter__(self):
            return self

        def __exit__(self, type, value, traceback):
            pass

    class MockOriginalFile(OriginalFileWrapper):
        def asFileObj(self, buf=2621440):
            return MockFile(file_text)

    orig_file = OriginalFileI()
    wrapper = MockOriginalFile(None, orig_file)
    text = "".join(wrapper.getFileInChunks())
    assert text == file_text

def save_microscope(request, conn=None, **kwargs):
    body_json = json.loads(request.body)
    microscope_json = body_json['microscope']
    file_name = microscope_json["Name"]
    project = conn.getObject("Project", attributes={'name': PROJECT_NAME})
    curr_gid = conn.SERVICE_OPTS.getOmeroGroup()
    if project is not None:
        gid = project.getDetails().getGroup().getId()
        conn.SERVICE_OPTS.setOmeroGroup(gid)
    else:
        # TODO: create Project
        conn.SERVICE_OPTS.setOmeroGroup(curr_gid)
    update = conn.getUpdateService()
    # Measure the size in bytes (not characters), since the file
    # stores the UTF-8 encoding
    file_data = json.dumps(microscope_json).encode("utf-8")
    file_size = len(file_data)
    f = BytesIO()
    f.write(file_data)
    orig_file = conn.createOriginalFileFromFileObj(
        f, '', file_name, file_size, mimetype="application/json")
    fa = FileAnnotationI()
    fa.setFile(OriginalFileI(orig_file.getId(), False))
    fa.setNs(wrap(JSON_FILEANN_NS))
    # fa.setDescription(wrap(desc))
    fa = update.saveAndReturnObject(fa, conn.SERVICE_OPTS)
    file_id = fa.getId().getValue()
    if project is not None:
        fa_wrapper = FileAnnotationWrapper(conn, fa)
        project.linkAnnotation(fa_wrapper)
    return JsonResponse({'file_id': file_id})

def copyFiles(self, orig_img, new_img, new_pix):
    # Then attach a copy of each of the used files in the fileset
    # to the synthetic image
    params = ParametersI()
    params.addId(orig_img.id.val)
    rows = unwrap(self.query.projection(
        ("select f.id, f.name from Image i "
         "join i.fileset fs join fs.usedFiles uf "
         "join uf.originalFile f where i.id = :id"), params))
    for row in rows:
        file_id = row[0]
        file_name = row[1]
        target = create_path()
        src = OriginalFileI(file_id, False)
        self.client.download(ofile=src, filename=str(target))
        copy = self.client.upload(filename=str(target), name=file_name)
        link = PixelsOriginalFileMapI()
        link.parent = copy.proxy()
        link.child = new_pix
        self.update.saveObject(link)

def plate_well_table(itest, well_grid_factory, update_service, conn):
    """
    Returns a new OMERO Plate, linked Wells, linked WellSamples, and
    linked Images populated by an L{omeroweb.testlib.IWebTest} instance.
    """
    plate = PlateI()
    plate.name = rstring(itest.uuid())
    # Well A1 has one WellSample
    plate.addWell(well_grid_factory({(0, 0): 1})[0])
    plate = update_service.saveAndReturnObject(plate)

    col1 = WellColumn('Well', '', [])
    col2 = StringColumn('TestColumn', '', 64, [])
    columns = [col1, col2]
    tablename = "plate_well_table_test:%s" % str(random())
    table = conn.c.sf.sharedResources().newTable(1, tablename)
    table.initialize(columns)
    wellIds = [w.id.val for w in plate.copyWells()]
    print("WellIds", wellIds)
    data1 = WellColumn('Well', '', wellIds)
    data2 = StringColumn('TestColumn', '', 64, ["foobar"])
    data = [data1, data2]
    table.addData(data)
    orig_file = table.getOriginalFile()
    table.close()

    fileAnn = FileAnnotationI()
    fileAnn.ns = rstring('openmicroscopy.org/omero/bulk_annotations')
    fileAnn.setFile(OriginalFileI(orig_file.id.val, False))
    fileAnn = conn.getUpdateService().saveAndReturnObject(fileAnn)
    link = PlateAnnotationLinkI()
    link.setParent(PlateI(plate.id.val, False))
    link.setChild(FileAnnotationI(fileAnn.id.val, False))
    update_service.saveAndReturnObject(link)
    return plate, wellIds

def update_table(self, columns):
    """Updates the OmeroTables instance backing our results."""
    # Create a new OMERO table to store our measurement results
    sr = self.service_factory.sharedResources()
    name = self.get_name()
    self.table = sr.newTable(1, '/%s.r5' % name)
    if self.table is None:
        raise MeasurementError(
            "Unable to create table: %s" % name)
    # Retrieve the original file corresponding to the table for the
    # measurement, link it to the file annotation representing the
    # umbrella measurement run, link the annotation to the plate to
    # which it belongs, and save the file annotation.
    table_original_file = self.table.getOriginalFile()
    table_original_file_id = table_original_file.id.val
    log.info("Created new table: %d" % table_original_file_id)
    unloaded_o_file = OriginalFileI(table_original_file_id, False)
    self.file_annotation.file = unloaded_o_file
    unloaded_plate = PlateI(self.analysis_ctx.plate_id, False)
    plate_annotation_link = PlateAnnotationLinkI()
    plate_annotation_link.parent = unloaded_plate
    plate_annotation_link.child = self.file_annotation
    plate_annotation_link = \
        self.update_service.saveAndReturnObject(plate_annotation_link)
    self.file_annotation = plate_annotation_link.child

    t0 = int(time.time() * 1000)
    self.table.initialize(columns)
    log.debug("Table init took %sms" % (int(time.time() * 1000) - t0))
    t0 = int(time.time() * 1000)
    column_report = dict()
    for column in columns:
        column_report[column.name] = len(column.values)
    log.debug("Column report: %r" % column_report)
    self.table.addData(columns)
    self.table.close()
    log.info("Table update took %sms" % (int(time.time() * 1000) - t0))

def get_dataproviders(request, conn):
    # Can provide data from any table column
    plate_id = request.GET.get('plate', None)
    if plate_id is None:
        return []
    query_service = conn.getQueryService()
    params = ParametersI()
    params.addId(plate_id)
    query = """select oal from PlateAnnotationLink as oal
        left outer join fetch oal.child as ch
        left outer join oal.parent as pa
        where pa.id=:id and ch.ns='%s'""" % NSBULKANNOTATIONS
    links = query_service.findAllByQuery(query, params, conn.SERVICE_OPTS)
    shared_resources = conn.getSharedResources()
    # Just use the first Table we find
    # TODO: handle multiple tables!?
    file_id = links[0].child.file.id.val
    table = shared_resources.openTable(OriginalFileI(file_id),
                                       conn.SERVICE_OPTS)
    column_names = [col.name for col in table.getHeaders()]
    return ["Table_%s" % c for c in column_names]

def _test_parsing_context(self):
    """
    Create a small csv file, use populate_metadata.py to parse and
    attach to Plate. Then query to check table has expected content.
    """
    ctx = ParsingContext(self.client, self.plate, file=self.csvName)
    ctx.parse()
    ctx.write_to_omero()
    # Get file annotations
    anns = self.get_plate_annotations()
    # Only expect a single annotation which is a 'bulk annotation'
    assert len(anns) == 1
    tableFileAnn = anns[0]
    assert unwrap(tableFileAnn.getNs()) == NSBULKANNOTATIONS
    fileid = tableFileAnn.file.id.val
    # Open table to check contents
    r = self.client.sf.sharedResources()
    t = r.openTable(OriginalFileI(fileid), None)
    cols = t.getHeaders()
    rows = t.getNumberOfRows()
    assert rows == self.rowCount * self.colCount
    for hit in range(rows):
        rowValues = [
            col.values[0]
            for col in t.read(range(len(cols)), hit, hit + 1).columns]
        assert len(rowValues) == 4
        if "a1" in rowValues:
            assert "Control" in rowValues
        elif "a2" in rowValues:
            assert "Treatment" in rowValues
        else:
            assert False, "Row does not contain 'a1' or 'a2'"

def save_web_figure(conn, json_data):
    """
    Saves 'figureJSON' in POST as an original file. If 'fileId' is
    specified in POST, then we update that file. Otherwise create a
    new one with name 'figureName' from POST.
    """
    image_ids = []
    first_img_id = None
    try:
        for panel in json_data['panels']:
            image_ids.append(panel['imageId'])
        if len(image_ids) > 0:
            first_img_id = int(image_ids[0])
        # remove duplicates
        image_ids = list(set(image_ids))
        # pretty-print json
        figure_json = json.dumps(json_data, sort_keys=True,
                                 indent=2, separators=(',', ': '))
    except Exception:
        pass

    # See https://github.com/will-moore/figure/issues/16
    figure_json = figure_json.encode('utf8')

    if 'figureName' in json_data and len(json_data['figureName']) > 0:
        figure_name = json_data['figureName']
    else:
        print("No figure name found")
        return

    # we store json in description field...
    description = {}
    if first_img_id is not None:
        # We duplicate the figure name here for quicker access when
        # listing files
        # (use this instead of file name because it supports unicode)
        description['name'] = figure_name
        description['imageId'] = first_img_id
        if 'baseUrl' in panel:
            description['baseUrl'] = panel['baseUrl']
    desc = json.dumps(description)

    # Create new file
    # Try to set Group context to the same as first image
    curr_gid = conn.SERVICE_OPTS.getOmeroGroup()
    i = None
    if first_img_id:
        i = conn.getObject("Image", first_img_id)
    if i is not None:
        gid = i.getDetails().getGroup().getId()
        conn.SERVICE_OPTS.setOmeroGroup(gid)
    else:
        # Don't leave as -1
        conn.SERVICE_OPTS.setOmeroGroup(curr_gid)
    file_size = len(figure_json)
    f = BytesIO()
    f.write(figure_json)
    orig_file = conn.createOriginalFileFromFileObj(
        f, '', figure_name, file_size, mimetype="application/json")
    fa = FileAnnotationI()
    fa.setFile(OriginalFileI(orig_file.getId(), False))
    fa.setNs(wrap(JSON_FILEANN_NS))
    fa.setDescription(wrap(desc))
    update = conn.getUpdateService()
    fa = update.saveAndReturnObject(fa, conn.SERVICE_OPTS)
    ann_id = fa.getId().getValue()
    return ann_id

import omero
import omero.callbacks
import omero.cmd
from omero.rtypes import rstring
from omero.model import DatasetI, FileAnnotationI, OriginalFileI
from omero.model import DatasetAnnotationLinkI

c = omero.client()
ice_config = c.getProperty("Ice.Config")
try:
    s = c.createSession()
    d = DatasetI()
    d.setName(rstring("FileAnnotationDelete"))
    d = s.getUpdateService().saveAndReturnObject(d)
    file = c.upload(ice_config)
    fa = FileAnnotationI()
    fa.setFile(OriginalFileI(file.id.val, False))
    link = DatasetAnnotationLinkI()
    link.parent = DatasetI(d.id.val, False)
    link.child = fa
    link = s.getUpdateService().saveAndReturnObject(link)
    fa = link.child

    to_delete = {"Annotation": [fa.id.val]}
    delCmd = omero.cmd.Delete2(targetObjects=to_delete)
    handle = s.submit(delCmd)

    callback = None
    try:
        callback = omero.callbacks.CmdCallbackI(c, handle)
        loops = 10
        # Poll up to `loops` times, 500ms apart, for the delete
        # to finish; raises LockTimeout if it does not
        callback.loop(loops, 500)
        rsp = callback.getResponse()
    finally:
        if callback:
            callback.close(True)  # also closes the handle
        else:
            handle.close()
finally:
    c.closeSession()

def upload_csv_to_omero(ctx, file, tablename, target_id,
                        target_type="Project"):
    """Upload the CSV file and attach it to the specified object"""
    print(file)
    print(file.name)
    svc = gateway.getFacility(DataManagerFacility)
    file_size = os.path.getsize(file.name)
    original_file = OriginalFileI()
    original_file.setName(rstring(tablename))
    original_file.setPath(rstring(file.name))
    original_file.setSize(rlong(file_size))
    checksum_algorithm = ChecksumAlgorithmI()
    checksum_algorithm.setValue(rstring(ChecksumAlgorithmSHA1160.value))
    original_file.setHasher(checksum_algorithm)
    original_file.setMimetype(rstring("text/csv"))
    original_file = svc.saveAndReturnObject(ctx, original_file)
    store = gateway.getRawFileService(ctx)
    print(original_file.getId().getValue())
    # Open file and write it to the server in blocks
    try:
        store.setFileId(original_file.getId().getValue())
        with open(file.name, 'rb') as stream:
            buf = 10000
            for pos in range(0, file_size, buf):
                block = None
                if file_size - pos < buf:
                    block_size = file_size - pos
                else:
                    block_size = buf
                stream.seek(pos)
                block = stream.read(block_size)
                store.write(block, pos, block_size)
        original_file = store.save()
    finally:
        store.close()
    # create the file annotation
    namespace = "training.demo"
    fa = FileAnnotationI()
    fa.setFile(original_file)
    fa.setNs(rstring(namespace))
    if target_type == "Project":
        target_obj = ProjectData(ProjectI(target_id, False))
    elif target_type == "Dataset":
        target_obj = DatasetData(DatasetI(target_id, False))
    elif target_type == "Image":
        target_obj = ImageData(ImageI(target_id, False))
    svc.attachAnnotation(ctx, FileAnnotationData(fa), target_obj)

def make_file_annotation(self, name=None, binary=None, format=None,
                         client=None):
    """
    Creates a new FileAnnotationI instance and returns the persisted
    object. If no name has been provided, a UUID string shall be used.

    :param name: the name of the file
    :param client: The client to use to create the object
    """
    if client is None:
        client = self.client
    update = client.sf.getUpdateService()
    # file
    if format is None:
        format = "application/octet-stream"
    if binary is None:
        binary = "12345678910"
    oFile = OriginalFileI()
    oFile.setName(rstring(str(self.uuid())))
    oFile.setPath(rstring(str(self.uuid())))
    oFile.setSize(rlong(len(binary)))
    oFile.hasher = ChecksumAlgorithmI()
    oFile.hasher.value = rstring("SHA1-160")
    oFile.setMimetype(rstring(str(format)))
    oFile = update.saveAndReturnObject(oFile)
    # save binary
    store = client.sf.createRawFileStore()
    store.setFileId(oFile.id.val)
    store.write(binary, 0, 0)
    oFile = store.save()  # See ticket:1501
    store.close()
    fa = FileAnnotationI()
    fa.setFile(oFile)
    return update.saveAndReturnObject(fa)

def get_script(request, script_name, conn):
    """Return a JS function to filter images by various params."""
    plate_id = request.GET.get('plate')
    query_service = conn.getQueryService()
    if plate_id is None:
        return JsonResponse({'Error': 'Plate ID not specified'})
    if script_name == "Table":
        params = ParametersI()
        params.addId(plate_id)
        query = """select oal from PlateAnnotationLink as oal
            left outer join fetch oal.child as ch
            left outer join oal.parent as pa
            where pa.id=:id and ch.ns='%s'""" % NSBULKANNOTATIONS
        links = query_service.findAllByQuery(query, params,
                                             conn.SERVICE_OPTS)
        shared_resources = conn.getSharedResources()
        # Just use the first Table we find
        # TODO: handle multiple tables!?
        file_id = links[0].child.file.id.val
        table = shared_resources.openTable(OriginalFileI(file_id),
                                           conn.SERVICE_OPTS)
        if not table:
            return JsonResponse({'ERROR': 'Failed to open table'})
        headers = table.getHeaders()
        rows = table.getNumberOfRows()
        column_names = [col.name for col in headers]
        col_data = table.read(range(len(headers)), 0, rows).columns
        table_data = {}
        for name, col in zip(column_names, col_data):
            # key is column Name, values are list of col_data
            table_data[name] = col.values
        # Return a JS function that will be passed an object
        # e.g. {'type': 'Image', 'id': 1}
        # and should return true or false
        f = """(function filter(data, params) {
            if (isNaN(params.count) || params.count == '') return true;
            var table_data = %s;
            var rowIndex = table_data.Well.indexOf(data.wellId);
            var value = table_data[params.column_name][rowIndex];
            if (params.operator === '=') return value == params.count;
            if (params.operator === '<') return value < params.count;
            if (params.operator === '>') return value > params.count;
        })
        """ % json.dumps(table_data)
        filter_params = [
            {'name': 'column_name',
             'type': 'text',
             'values': column_names[1:],  # 1st column is Well
             'default': column_names[1]},
            {'name': 'operator',
             'type': 'text',
             'values': ['>', '=', '<'],
             'default': '>'},
            {'name': 'count',
             'type': 'number',
             'default': ''}]
        return JsonResponse({'f': f, 'params': filter_params})