def populate(self, args):
    "Add metadata (bulk-annotations) to an object"
    target = self._load(args)
    client, conn = self._clientconn(args)

    # Derive log level: each -v lowers it by 10, each -q raises it by 10,
    # and --report lowers it one further step.
    level = logging.INFO + 10 * args.quiet - 10 * args.verbose
    if args.report:
        level -= 10
    omero_metadata.populate.log.setLevel(level)

    context_class = dict(self.POPULATE_CONTEXTS)[args.context]

    local_options = {}
    if args.localcfg:
        local_options = pydict_text_io.load(
            args.localcfg, session=client.getSession())

    fileid, cfgid = args.fileid, args.cfgid
    if args.attach and not args.dry_run:
        # Upload the data file and/or the config file and link each to
        # the target object as a file annotation.
        if args.file:
            ann = conn.createFileAnnfromLocalFile(
                args.file, mimetype=guess_mimetype(args.file),
                ns=NSBULKANNOTATIONSRAW)
            fileid = ann.getFile().getId()
            target.linkAnnotation(ann)
        if args.cfg:
            ann = conn.createFileAnnfromLocalFile(
                args.cfg, mimetype=guess_mimetype(args.cfg),
                ns=NSBULKANNOTATIONSCONFIG)
            cfgid = ann.getFile().getId()
            target.linkAnnotation(ann)

    # Poll every 5s for up to args.wait seconds; 0/0 disables waiting.
    if args.wait:
        ms = 5000
        loops = int(args.wait * 1000 / ms) + 1
    else:
        ms = 0
        loops = 0

    # Note some contexts only support a subset of these args
    ctx = context_class(
        client, args.obj, file=args.file, fileid=fileid,
        cfg=args.cfg, cfgid=cfgid, attach=args.attach,
        options=local_options, batch_size=args.batch,
        loops=loops, ms=ms, dry_run=args.dry_run,
        allow_nan=args.allow_nan)
    ctx.parse()
def test_dump(data, tmpdir, format):
    """Round-trip a dict through dump() and load() via a temporary file."""
    expected = {'a': 2}
    serialized = pydict_text_io.dump(expected, format)
    target = tmpdir.join('test-dump.%s' % format)
    target.write(serialized)
    assert pydict_text_io.load(str(target), format) == expected
def populate(self, args):
    "Add metadata (bulk-annotations) to an object"
    # Deprecated entry point: warn before doing any work.
    self.ctx.err(DEPRECATION_MESSAGE, DeprecationWarning)
    md = self._load(args)
    client, conn = self._clientconn(args)
    # TODO: Configure logging properly
    if args.report:
        populate_metadata.log.setLevel(logging.DEBUG)
    else:
        populate_metadata.log.setLevel(logging.INFO)
    # Resolve the populate context class selected on the command line.
    context_class = dict(self.POPULATE_CONTEXTS)[args.context]
    if args.localcfg:
        localcfg = pydict_text_io.load(
            args.localcfg, session=client.getSession())
    else:
        localcfg = {}
    fileid = args.fileid
    cfgid = args.cfgid
    if args.attach and not args.dry_run:
        # Upload the data/config files and link them to the target
        # object as file annotations, recording the new file ids.
        if args.file:
            fileann = conn.createFileAnnfromLocalFile(
                args.file, mimetype=guess_mimetype(args.file),
                ns=NSBULKANNOTATIONSRAW)
            fileid = fileann.getFile().getId()
            md.linkAnnotation(fileann)
        if args.cfg:
            cfgann = conn.createFileAnnfromLocalFile(
                args.cfg, mimetype=guess_mimetype(args.cfg),
                ns=NSBULKANNOTATIONSCONFIG)
            cfgid = cfgann.getFile().getId()
            md.linkAnnotation(cfgann)
    # Note some contexts only support a subset of these args
    ctx = context_class(client, args.obj, file=args.file, fileid=fileid,
                        cfg=args.cfg, cfgid=cfgid, attach=args.attach,
                        options=localcfg)
    ctx.parse()
    if not args.dry_run:
        # Translate --wait seconds into a 5000ms polling interval plus
        # a loop count (old_div keeps py2/py3 integer division behavior).
        wait = args.wait
        if not wait:
            loops = 0
            ms = 0
        else:
            ms = 5000
            loops = int(old_div((wait * 1000), ms)) + 1
        ctx.write_to_omero(batch_size=args.batch, loops=loops, ms=ms)
def update_from_multilevel_dictfile(omerodir: str, dictfile: str) -> None:
    """
    Apply a multi-level dictionary file to OMERO's config.xml.

    Intended to support YAML files containing Ansible style configuration
    variables. A filename ending in ``.j2`` is first rendered with Jinja2;
    no variables are passed to the template, so this mostly expands
    filters such as ``|default(...)``.

    Every top-level key must map to a dictionary of key-value OMERO
    properties. Top-level keys are handled in alphanumeric order: keys
    ending in '_set' have their properties set, keys ending in '_append'
    have their properties appended to, and any other key is currently
    ignored with a warning (this may change in future).

    :param omerodir str: OMERODIR
    :param dictfile str: Path to a file parseable as a multi-level
        dictionary, or a Jinja2 template that renders to one.
    """
    try:
        if not dictfile.endswith(".j2"):
            parsed = pydict_text_io.load(dictfile)
        else:
            with TemporaryDirectory() as workdir:
                rendered = _parse_jinja2(dictfile, workdir)
                parsed = pydict_text_io.load(rendered)
    except Exception as e:
        # Wrap any parse/render failure in the package's exception type.
        raise ExternalConfigException("Failed to parse {}: {}".format(
            dictfile, e)) from e

    for key, section in sorted(parsed.items()):
        if key.endswith("_append"):
            add_from_dict(omerodir, section)
        elif key.endswith("_set"):
            update_from_dict(omerodir, section)
        else:
            log.warning("Ignoring top-level key {}".format(key))
def test_load(self, tmpdir, remote, format):
    """load() should parse both local files and OriginalFile references."""
    content = self.getTestJson() if format == 'json' else self.getTestYaml()
    if remote:
        # Store the content server-side as a file annotation and refer
        # to it by its OriginalFile id.
        fa = self.make_file_annotation(
            name='test.%s' % format, binary=content, mimetype=format)
        source = 'OriginalFile:%d' % unwrap(fa.file.id)
    else:
        local = tmpdir.join('test.%s' % format)
        local.write(content)
        source = str(local)
    loaded = pydict_text_io.load(source, session=self.client.getSession())
    assert loaded == {'a': 2}
def test_load_invalidtype(self):
    """A non-string input must be rejected with an 'Invalid type' error."""
    with pytest.raises(Exception) as excinfo:
        pydict_text_io.load(123)
    message = str(excinfo.value)
    assert message.startswith('Invalid type: ')
def test_load_fromstring(self):
    """load() should accept raw content passed directly as a string."""
    assert pydict_text_io.load(self.getTestJson()) == {'a': 2}
def edit(self, args):
    # Apply channel rendering settings (from a dict file) to one or more
    # images, optionally copying from the first image and regenerating
    # thumbnails. NOTE: Python 2 code (print statement, iteritems).
    client = self.ctx.conn(args)
    gateway = BlitzGateway(client_obj=client)
    newchannels = {}
    # args.channels may be a local path, remote OriginalFile ref, or
    # inline content; pydict_text_io.load handles all of these.
    data = pydict_text_io.load(
        args.channels, session=client.getSession())
    if 'channels' not in data:
        self.ctx.die(104, "ERROR: No channels found in %s" % args.channels)
    for chindex, chdict in data['channels'].iteritems():
        # Channel keys must be integer indices.
        try:
            cindex = int(chindex)
        except Exception as e:
            self.ctx.err('ERROR: %s' % e)
            self.ctx.die(
                105, "Invalid channel index: %s" % chindex)
        try:
            cobj = ChannelObject(chdict)
            # min and max must be given together or not at all.
            if (cobj.min is None) != (cobj.max is None):
                raise Exception('Both or neither of min and max required')
            newchannels[cindex] = cobj
            print '%d:%s' % (cindex, cobj)
        except Exception as e:
            self.ctx.err('ERROR: %s' % e)
            self.ctx.die(
                105, "Invalid channel description: %s" % chdict)
    # Optional top-level greyscale flag; None means leave model unchanged.
    try:
        greyscale = data['greyscale']
    except KeyError:
        greyscale = None
    # Split the parsed channels into parallel lists for setActiveChannels:
    # indices, [min, max] windows, and colours; labels are collected
    # separately for a bulk rename at the end.
    namedict = {}
    cindices = []
    rangelist = []
    colourlist = []
    for (i, c) in newchannels.iteritems():
        if c.label:
            namedict[i] = c.label
        if not c.active:
            # Inactive channels are renamed but not included in the
            # rendering-settings update.
            continue
        cindices.append(i)
        rangelist.append([c.min, c.max])
        colourlist.append(c.color)
    iids = []
    for img in self.render_images(gateway, args.object, batch=1):
        iids.append(img.id)
        try:
            img.setActiveChannels(
                cindices, windows=rangelist, colors=colourlist,
                noRE=True)
            if greyscale is not None:
                if greyscale:
                    img.setGreyscaleRenderingModel()
                else:
                    img.setColorRenderingModel()
            img.saveDefaults()
            self.ctx.dbg(
                "Updated rendering settings for Image:%s" % img.id)
            if not args.skipthumbs:
                self._generate_thumbs([img])
            if args.copy:
                # Edit first image only, copy to rest
                # Don't close source image until outer
                # loop is done.
                self._copy_single(gateway, img, args.object, args.skipthumbs)
                break
        finally:
            # Always release the rendering engine, even on failure.
            img._closeRE()
    if namedict:
        self._update_channel_names(gateway, iids, namedict)
    # Sanity check that no gateway services were leaked.
    gateway._assert_unregistered("edit")