def populate_metadata(client, conn, script_params):
    """Populate an OMERO bulk-annotation table from an attached CSV file.

    Supports both the pre-0.3.0 omero-metadata API (``parse_from_handle`` /
    ``write_to_omero``) and the newer API (``preprocess_from_handle`` /
    ``parse_from_handle_stream``).

    :param client: omero.client used by the ParsingContext
    :param conn: BlitzGateway connection
    :param script_params: dict with "IDs", "Data_Type" and optionally
        "File_Annotation" (id of the FileAnnotation holding the CSV)
    :return: status message naming the annotated object
    """
    object_ids = script_params["IDs"]
    object_id = object_ids[0]
    data_type = script_params["Data_Type"]

    file_ann_id = None
    if "File_Annotation" in script_params:
        file_ann_id = int(script_params["File_Annotation"])
        link_file_ann(conn, data_type, object_id, file_ann_id)

    original_file = get_original_file(conn, data_type, object_id, file_ann_id)
    provider = DownloadingOriginalFileProvider(conn)

    objecti = getattr(omero.model, data_type + 'I')
    omero_object = objecti(int(object_id), False)
    ctx = ParsingContext(client, omero_object, "")

    # Dispatch on the API version with hasattr() instead of try/except
    # AttributeError: the except would also (wrongly) swallow any
    # AttributeError raised *inside* parse_from_handle/write_to_omero.
    if hasattr(ctx, "parse_from_handle"):
        # Old omero-metadata (< 0.3.0): a single handle is enough.
        data = provider.get_original_file_data(original_file)
        ctx.parse_from_handle(data)
        ctx.write_to_omero()
    else:
        # omero-metadata >= 0.3.0: preprocessing consumes its handle, so a
        # fresh download is needed for the parse step.
        data_for_preprocessing = provider.get_original_file_data(original_file)
        ctx.preprocess_from_handle(data_for_preprocessing)
        data = provider.get_original_file_data(original_file)
        ctx.parse_from_handle_stream(data)
    return "Table data populated for %s: %s" % (data_type, object_id)
def populate_metadata(client, conn, script_params):
    """Parse a CSV original file and populate a bulk-annotation table.

    :param client: omero.client used by the ParsingContext
    :param conn: BlitzGateway connection
    :param script_params: dict with "IDs", "File_ID" and "Data_Type"
        ("Plate" or anything else, treated as Screen)
    """
    # `long` does not exist on Python 3; `int` is unbounded and covers it.
    object_id = int(script_params["IDs"])
    file_id = int(script_params["File_ID"])
    original_file = get_original_file(
        conn, script_params["Data_Type"], object_id, file_id)
    provider = DownloadingOriginalFileProvider(conn)
    file_handle = provider.get_original_file_data(original_file)
    if script_params["Data_Type"] == "Plate":
        omero_object = PlateI(int(object_id), False)
    else:
        omero_object = ScreenI(int(object_id), False)
    ctx = ParsingContext(client, omero_object, "")
    ctx.parse_from_handle(file_handle)
    ctx.write_to_omero()
def populate_metadata(client, conn, script_params):
    """Download the referenced original file and write its table to OMERO.

    :param client: omero.client used by the ParsingContext
    :param conn: BlitzGateway connection
    :param script_params: dict with "IDs", "File_ID" and "Data_Type"
        ("Plate" or anything else, treated as Screen)
    """
    data_type = script_params["Data_Type"]
    object_id = int(script_params["IDs"])
    file_id = int(script_params["File_ID"])

    original_file = get_original_file(conn, data_type, object_id, file_id)
    file_handle = DownloadingOriginalFileProvider(conn).get_original_file_data(
        original_file)

    # Unloaded proxy object of the right model type for the parser.
    model_class = PlateI if data_type == "Plate" else ScreenI
    omero_object = model_class(int(object_id), False)

    ctx = ParsingContext(client, omero_object, "")
    ctx.parse_from_handle(file_handle)
    ctx.write_to_omero()
def populate_metadata(client, conn, script_params):
    """Populate a bulk-annotation table on the first target object.

    :param client: omero.client used by the ParsingContext
    :param conn: BlitzGateway connection
    :param script_params: dict with "IDs", "Data_Type" ("Plate"/"Screen")
        and optionally "File_Annotation"
    :return: status message naming the annotated object
    """
    object_ids = script_params["IDs"]
    object_id = object_ids[0]
    file_ann_id = None
    if "File_Annotation" in script_params:
        # `long` does not exist on Python 3; `int` is unbounded and covers it.
        file_ann_id = int(script_params["File_Annotation"])
    data_type = script_params["Data_Type"]
    original_file = get_original_file(conn, data_type, object_id, file_ann_id)
    provider = DownloadingOriginalFileProvider(conn)
    file_handle = provider.get_original_file_data(original_file)
    if data_type == "Plate":
        omero_object = PlateI(int(object_id), False)
    else:
        omero_object = ScreenI(int(object_id), False)
    ctx = ParsingContext(client, omero_object, "")
    ctx.parse_from_handle(file_handle)
    ctx.write_to_omero()
    return "Table data populated for %s: %s" % (data_type, object_id)
def populate_metadata(client, conn, script_params):
    """Download the attached CSV and populate a bulk-annotation table.

    Handles both the pre-0.3.0 omero-metadata API (``parse_from_handle``)
    and the newer preprocess/stream API, reading the downloaded temp file
    by name with a BOM-tolerant encoding.

    :param client: omero.client used by the ParsingContext
    :param conn: BlitzGateway connection
    :param script_params: dict with "IDs", "Data_Type" and optionally
        "File_Annotation"
    :return: status message, or an upgrade hint on incompatible versions
    """
    object_id = script_params["IDs"][0]
    data_type = script_params["Data_Type"]

    if data_type == "Image":
        # Image targets need a recent omero-metadata that ships ImageWrapper.
        try:
            from omero_metadata.populate import ImageWrapper  # noqa: F401
        except ImportError:
            return "Please update omero-metadata to support Image type"

    file_ann_id = None
    if "File_Annotation" in script_params:
        file_ann_id = int(script_params["File_Annotation"])
        link_file_ann(conn, data_type, object_id, file_ann_id)

    original_file = get_original_file(
        conn, data_type, object_id, file_ann_id)
    downloaded = DownloadingOriginalFileProvider(conn).get_original_file_data(
        original_file)
    csv_path = downloaded.name
    # 5.9.1 returns NamedTempFile where name is a string.
    if isinstance(csv_path, int):
        print("omero-py 5.9.1 DownloadingOriginalFileProvider returns "
              "NamedTempFile. Please Upgrade to omero-py 5.9.1 or later")
        return "Please upgrade omero-py to 5.9.1 or later"

    model_class = getattr(omero.model, data_type + 'I')
    ctx = ParsingContext(client, model_class(int(object_id), False), "")
    try:
        if not hasattr(ctx, "parse_from_handle"):
            # omero-metadata >= 0.3.0: preprocess and parse each need a
            # fresh handle on the temp file.
            with open(csv_path, 'rt', encoding='utf-8-sig') as stream:
                ctx.preprocess_from_handle(stream)
            with open(csv_path, 'rt', encoding='utf-8-sig') as stream:
                ctx.parse_from_handle_stream(stream)
        else:
            with open(csv_path, 'rt', encoding='utf-8-sig') as stream:
                ctx.parse_from_handle(stream)
            ctx.write_to_omero()
    finally:
        downloaded.close()
    return "Table data populated for %s: %s" % (data_type, object_id)
def populate_metadata(client, conn, script_params):
    """Attach key-value map annotations to dataset images from a CSV file.

    The CSV's first column holds the image file name; remaining columns are
    keys (header row) with ';'-separated values. Existing map annotations
    are merged; when anything changed, the old annotations are removed and
    one new client map annotation is linked.

    :param client: omero.client (unused here, kept for script signature)
    :param conn: BlitzGateway connection
    :param script_params: dict with "Data_Type", "IDs" and optionally
        "File_Annotation"
    :return: summary of how many key-value pairs / files were updated
    """
    data_type = script_params["Data_Type"]
    ids = script_params["IDs"]
    datasets = list(conn.getObjects(data_type, ids))
    for ds in datasets:
        ds_id = ds.getId()
        file_ann_id = None
        if "File_Annotation" in script_params:
            # Bug fix: `long` is Python 2 only; int() works on Python 3.
            file_ann_id = int(script_params["File_Annotation"])
            print("set ann id")
        original_file = get_original_file(conn, data_type, ds_id, file_ann_id)
        provider = DownloadingOriginalFileProvider(conn)
        file_handle = provider.get_original_file_data(original_file)

        # create a dictionary for image_name:id; names must be unique
        dict_name_id = {}
        for img in ds.listChildren():
            img_name = img.getName()
            if img_name in dict_name_id:
                # Bug fix: was `imageaname` (NameError on duplicates).
                sys.stderr.write(
                    "File names not unique: {}".format(img_name))
                sys.exit(1)
            dict_name_id[img_name] = int(img.getId())

        # step through the csv file
        data = list(csv.reader(file_handle, delimiter=','))
        # keys are in the header row; first column is the filename
        header = data[0]
        rows = data[1:]
        nimg_updated = 0
        for row in rows:
            print(row)
            img_name = row[0]
            if img_name not in dict_name_id:
                print("Can't find filename : {}".format(img_name))
                continue
            img_id = dict_name_id[img_name]  # look up the ID
            img = conn.getObject('Image', img_id)  # get the img
            existing_kv = get_existing_MapAnnotions(img)
            updated_kv = copy.deepcopy(existing_kv)
            print("Existing kv ")
            # Bug fix: dict.iteritems() is Python 2 only; use items().
            for k, v in existing_kv.items():
                print(k, v)
            for i in range(1, len(row)):  # first entry is the filename
                key = header[i].strip()
                vals = row[i].strip().split(';')
                for val in vals:
                    if len(val) > 0:
                        updated_kv[key] = val
            if existing_kv != updated_kv:
                nimg_updated = nimg_updated + 1
                print("The key-values pairs are different")
                remove_MapAnnotations(conn, 'Image', img.getId())
                map_ann = omero.gateway.MapAnnotationWrapper(conn)
                # Bug fix: a typo'd `namesoace = ...NSBULKANNOTATIONS` was a
                # dead assignment; the effective (and kept) namespace is the
                # client map-annotation namespace.
                namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION
                map_ann.setNs(namespace)
                # convert the ordered dict to a list of lists
                map_ann.setValue([[k, v] for k, v in updated_kv.items()])
                map_ann.save()
                img.linkAnnotation(map_ann)
            else:
                print("No change change in kv's")
    return "Added {} kv pairs to {}/{} files ".format(
        len(header) - 1, nimg_updated, len(dict_name_id))
def keyval_from_csv(conn, script_params):
    """Annotate Images/Wells/Plates with key-value pairs read from a CSV.

    Each CSV row is matched to an object by its 'image' column, falling
    back to 'well' and then 'plate'; matched columns are excluded from the
    key-value pairs passed to annotate_object().

    :param conn: BlitzGateway connection
    :param script_params: dict with "Data_Type", "IDs" and optionally
        "File_Annotation"
    :return: summary message including any unmatched names
    """
    data_type = script_params["Data_Type"]
    ids = script_params["IDs"]
    for target_object in conn.getObjects(data_type, ids):
        # file_ann_id is Optional. If not supplied, use first .csv attached
        file_ann_id = None
        if "File_Annotation" in script_params:
            file_ann_id = int(script_params["File_Annotation"])
            link_file_ann(conn, data_type, target_object.id, file_ann_id)
        print("set ann id", file_ann_id)
        original_file = get_original_file(target_object, file_ann_id)
        print("Original File", original_file.id.val, original_file.name.val)
        provider = DownloadingOriginalFileProvider(conn)

        # read the csv
        file_handle = provider.get_original_file_data(original_file)
        try:
            delimiter = csv.Sniffer().sniff(file_handle.read(1024)).delimiter
            print("Using delimiter: ", delimiter)
        except Exception:
            print("Failed to sniff delimiter, using ','")
            delimiter = ","
        # reset to start and read whole file...
        file_handle.seek(0)
        data = list(csv.reader(file_handle, delimiter=delimiter))
        file_handle.close()

        # keys are in the header row
        header = data[0]
        print("header", header)

        # create dictionaries for well/image name:object
        images_by_name, wells_by_name = get_children_by_name(target_object)
        # Bug fix: list.index() raises ValueError for a missing column (it
        # never returns -1), which crashed on CSVs without an 'image'/'well'/
        # 'plate' header. Guard with membership tests; 'image' falls back to
        # the first column.
        image_index = header.index("image") if "image" in header else 0
        well_index = header.index("well") if "well" in header else -1
        plate_index = header.index("plate") if "plate" in header else -1
        print("image_index:", image_index, "well_index:", well_index,
              "plate_index:", plate_index)
        rows = data[1:]

        nimg_updated = 0
        missing_names = 0
        # loop over csv rows...
        for row in rows:
            # try to find 'image', then 'well', then 'plate'
            image_name = row[image_index]
            well_name = None
            plate_name = None
            obj = None
            if len(image_name) > 0:
                if image_name in images_by_name:
                    obj = images_by_name[image_name]
                    print("Annotating Image:", obj.id, image_name)
                else:
                    missing_names += 1
                    print("Image not found:", image_name)
            if obj is None and well_index > -1 and len(row[well_index]) > 0:
                well_name = row[well_index]
                if well_name in wells_by_name:
                    obj = wells_by_name[well_name]
                    print("Annotating Well:", obj.id, well_name)
                else:
                    missing_names += 1
                    print("Well not found:", well_name)
            if obj is None and plate_index > -1:
                plate_name = row[plate_index]
                if plate_name == target_object.name:
                    obj = target_object
                    print("Annotating Plate:", obj.id, plate_name)
            if obj is None:
                # (was an f-string with no placeholders; same text)
                msg = "Can't find object by image, well or plate name"
                print(msg)
                continue
            cols_to_ignore = [image_index, well_index, plate_index]
            updated = annotate_object(conn, obj, header, row, cols_to_ignore)
            if updated:
                nimg_updated += 1
        message = "Added {} kv pairs to {}/{} files".format(
            len(header) - 1, nimg_updated, len(images_by_name))
        if missing_names > 0:
            message += f". {missing_names} image names not found."
    return message